[Yt-svn] commit/yt: 2 new changesets

Bitbucket commits-noreply at bitbucket.org
Tue Jun 7 19:19:38 PDT 2011


2 new changesets in yt:

http://bitbucket.org/yt_analysis/yt/changeset/5c8d2edb845b/
changeset:   5c8d2edb845b
branch:      yt
user:        Cameron Hummels
date:        2011-06-08 04:18:54
summary:     Added pastebin and pastebin_grab options to command-line yt.  Now you can yt pastebin <filename> or yt pastebin_grab <pastebin id#> from the command line to interact with the anonymous pastebin.
affected #:  2 files (11.8 KB)

--- a/yt/utilities/command_line.py	Tue Jun 07 16:51:32 2011 -0400
+++ b/yt/utilities/command_line.py	Tue Jun 07 22:18:54 2011 -0400
@@ -635,10 +635,55 @@
         pp = PostInventory()
         pp.add_post(arg, desc=opts.desc)
 
+    @cmdln.option("-l", "--language", action="store",
+                  default = None, dest="language",
+                  help="Use syntax highlighter for the file in language")
+    @cmdln.option("-L", "--languages", action="store_true",
+                  default = False, dest="languages",
+                  help="Retrieve a list of supported languages")
+    @cmdln.option("-e", "--encoding", action="store",
+                  default = 'utf-8', dest="encoding",
+                  help="Specify the encoding of a file (default is "
+                        "utf-8 or guessing if available)")
+    @cmdln.option("-b", "--open-browser", action="store_true",
+                  default = False, dest="open_browser",
+                  help="Open the paste in a web browser")
+    @cmdln.option("-p", "--private", action="store_true",
+                  default = False, dest="private",
+                  help="Paste as private")
+    @cmdln.option("-c", "--clipboard", action="store_true",
+                  default = False, dest="clipboard",
+                  help="Copy the paste URL into the clipboard")
+    def do_pastebin(self, subcmd, opts, arg):
+        """
+        Post a script to an anonymous pastebin.
+
+        Usage: yt pastebin [options] <script>
+
+        ${cmd_option_list}
+        """
+        import yt.utilities.lodgeit as lo
+        lo.main( arg, languages=opts.languages, language=opts.language,
+                 encoding=opts.encoding, open_browser=opts.open_browser,
+                 private=opts.private, clipboard=opts.clipboard)
+
+    def do_pastebin_grab(self, subcmd, opts, arg):
+        """
+        Print an online pastebin to STDOUT for local use. Paste ID is 
+        the number at the end of the url.  So to locally access pastebin:
+        http://paste.enzotools.org/show/1688/
+
+        Usage: yt pastebin_grab <Paste ID> 
+        Ex: yt pastebin_grab 1688 > script.py
+
+        """
+        import yt.utilities.lodgeit as lo
+        lo.main( None, download=arg )
+
     @cmdln.option("-o", "--output", action="store",
                   default = None, dest="output_fn",
                   help="File to output to; else, print.")
-    def do_pastegrab(self, subcmd, opts, username, paste_id):
+    def do_pasteboard_grab(self, subcmd, opts, username, paste_id):
         """
         Download from your or another user's pasteboard.
         """


--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/yt/utilities/lodgeit.py	Tue Jun 07 22:18:54 2011 -0400
@@ -0,0 +1,317 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+    LodgeIt!
+    ~~~~~~~~
+
+    A script that pastes stuff into the enzotools pastebin on
+    paste.enzotools.org.
+
+    Modified (very, very slightly) from the original script by the authors
+    below.
+
+    .lodgeitrc / _lodgeitrc
+    -----------------------
+
+    Under UNIX create a file called ``~/.lodgeitrc``, under Windows
+    create a file ``%APPDATA%/_lodgeitrc`` to override defaults::
+
+        language=default_language
+        clipboard=true/false
+        open_browser=true/false
+        encoding=fallback_charset
+
+    :authors: 2007-2008 Georg Brandl <georg at python.org>,
+              2006 Armin Ronacher <armin.ronacher at active-4.com>,
+              2006 Matt Good <matt at matt-good.net>,
+              2005 Raphael Slinckx <raphael at slinckx.net>
+"""
+import os
+import sys
+from optparse import OptionParser
+
+
+SCRIPT_NAME = os.path.basename(sys.argv[0])
+VERSION = '0.3'
+SERVICE_URL = 'http://paste.enzotools.org/'
+SETTING_KEYS = ['author', 'title', 'language', 'private', 'clipboard',
+                'open_browser']
+
+# global server proxy
+_xmlrpc_service = None
+
+
+def fail(msg, code):
+    """Bail out with an error message."""
+    print >> sys.stderr, 'ERROR: %s' % msg
+    sys.exit(code)
+
+
+def load_default_settings():
+    """Load the defaults from the lodgeitrc file."""
+    settings = {
+        'language':     None,
+        'clipboard':    True,
+        'open_browser': False,
+        'encoding':     'iso-8859-15'
+    }
+    rcfile = None
+    if os.name == 'posix':
+        rcfile = os.path.expanduser('~/.lodgeitrc')
+    elif os.name == 'nt' and 'APPDATA' in os.environ:
+        rcfile = os.path.expandvars(r'$APPDATA\_lodgeitrc')
+    if rcfile:
+        try:
+            f = open(rcfile)
+            for line in f:
+                if line.strip()[:1] in '#;':
+                    continue
+                p = line.split('=', 1)
+                if len(p) == 2:
+                    key = p[0].strip().lower()
+                    if key in settings:
+                        if key in ('clipboard', 'open_browser'):
+                            settings[key] = p[1].strip().lower() in \
+                                            ('true', '1', 'on', 'yes')
+                        else:
+                            settings[key] = p[1].strip()
+            f.close()
+        except IOError:
+            pass
+    settings['tags'] = []
+    settings['title'] = None
+    return settings
+
+
+def make_utf8(text, encoding):
+    """Convert a text to UTF-8, brute-force."""
+    try:
+        u = unicode(text, 'utf-8')
+        uenc = 'utf-8'
+    except UnicodeError:
+        try:
+            u = unicode(text, encoding)
+            uenc = 'utf-8'
+        except UnicodeError:
+            u = unicode(text, 'iso-8859-15', 'ignore')
+            uenc = 'iso-8859-15'
+    try:
+        import chardet
+    except ImportError:
+        return u.encode('utf-8')
+    d = chardet.detect(text)
+    if d['encoding'] == uenc:
+        return u.encode('utf-8')
+    return unicode(text, d['encoding'], 'ignore').encode('utf-8')
+
+
+def get_xmlrpc_service():
+    """Create the XMLRPC server proxy and cache it."""
+    global _xmlrpc_service
+    import xmlrpclib
+    if _xmlrpc_service is None:
+        try:
+            _xmlrpc_service = xmlrpclib.ServerProxy(SERVICE_URL + 'xmlrpc/',
+                                                    allow_none=True)
+        except Exception, err:
+            fail('Could not connect to Pastebin: %s' % err, -1)
+    return _xmlrpc_service
+
+
+def copy_url(url):
+    """Copy the url into the clipboard."""
+    # try windows first
+    try:
+        import win32clipboard
+    except ImportError:
+        # then give pbcopy a try.  do that before gtk because
+        # gtk might be installed on os x but nobody is interested
+        # in the X11 clipboard there.
+        from subprocess import Popen, PIPE
+        try:
+            client = Popen(['pbcopy'], stdin=PIPE)
+        except OSError:
+            try:
+                import pygtk
+                pygtk.require('2.0')
+                import gtk
+                import gobject
+            except ImportError:
+                return
+            gtk.clipboard_get(gtk.gdk.SELECTION_CLIPBOARD).set_text(url)
+            gobject.idle_add(gtk.main_quit)
+            gtk.main()
+        else:
+            client.stdin.write(url)
+            client.stdin.close()
+            client.wait()
+    else:
+        win32clipboard.OpenClipboard()
+        win32clipboard.EmptyClipboard()
+        win32clipboard.SetClipboardText(url)
+        win32clipboard.CloseClipboard()
+
+
+def open_webbrowser(url):
+    """Open a new browser window."""
+    import webbrowser
+    webbrowser.open(url)
+
+
+def language_exists(language):
+    """Check if a language alias exists."""
+    xmlrpc = get_xmlrpc_service()
+    langs = xmlrpc.pastes.getLanguages()
+    return language in langs
+
+
+def get_mimetype(data, filename):
+    """Try to get MIME type from data."""
+    try:
+        import gnomevfs
+    except ImportError:
+        from mimetypes import guess_type
+        if filename:
+            return guess_type(filename)[0]
+    else:
+        if filename:
+            return gnomevfs.get_mime_type(os.path.abspath(filename))
+        return gnomevfs.get_mime_type_for_data(data)
+
+
+def print_languages():
+    """Print a list of all supported languages, with description."""
+    xmlrpc = get_xmlrpc_service()
+    languages = xmlrpc.pastes.getLanguages().items()
+    languages.sort(lambda a, b: cmp(a[1].lower(), b[1].lower()))
+    print 'Supported Languages:'
+    for alias, name in languages:
+        print '    %-30s%s' % (alias, name)
+
+
+def download_paste(uid):
+    """Download a paste given by ID."""
+    xmlrpc = get_xmlrpc_service()
+    paste = xmlrpc.pastes.getPaste(uid)
+    if not paste:
+        fail('Paste "%s" does not exist.' % uid, 5)
+    print paste['code'].encode('utf-8')
+
+
+def create_paste(code, language, filename, mimetype, private):
+    """Create a new paste."""
+    xmlrpc = get_xmlrpc_service()
+    rv = xmlrpc.pastes.newPaste(language, code, None, filename, mimetype,
+                                private)
+    if not rv:
+        fail('Could not create paste. Something went wrong '
+             'on the server side.', 4)
+    return rv
+
+
+def compile_paste(filenames, langopt):
+    """Create a single paste out of zero, one or multiple files."""
+    def read_file(f):
+        try:
+            return f.read()
+        finally:
+            f.close()
+    mime = ''
+    lang = langopt or ''
+    if not filenames:
+        data = read_file(sys.stdin)
+        if not langopt:
+            mime = get_mimetype(data, '') or ''
+        fname = ""
+    elif len(filenames) == 1:
+        fname = filenames[0]
+        data = read_file(open(filenames[0], 'rb'))
+        if not langopt:
+            mime = get_mimetype(data, filenames[0]) or ''
+    else:
+        result = []
+        for fname in filenames:
+            data = read_file(open(fname, 'rb'))
+            if langopt:
+                result.append('### %s [%s]\n\n' % (fname, langopt))
+            else:
+                result.append('### %s\n\n' % fname)
+            result.append(data)
+            result.append('\n\n')
+        data = ''.join(result)
+        lang = 'multi'
+    return data, lang, fname, mime
+
+
+def main( filename, languages=False, language=None, encoding='utf-8', 
+          open_browser=False, private=False, clipboard=False, 
+          download=None ):
+    """Paste a given script into a pastebin using the Lodgeit tool."""
+
+#    usage = ('Usage: %%prog [options] [FILE ...]\n\n'
+#             'Read the files and paste their contents to %s.\n'
+#             'If no file is given, read from standard input.\n'
+#             'If multiple files are given, they are put into a single paste.'
+#             % SERVICE_URL)
+#    parser = OptionParser(usage=usage)
+#
+#    settings = load_default_settings()
+#
+#    parser.add_option('-v', '--version', action='store_true',
+#                      help='Print script version')
+#    parser.add_option('-L', '--languages', action='store_true', default=False,
+#                      help='Retrieve a list of supported languages')
+#    parser.add_option('-l', '--language', default=settings['language'],
+#                      help='Used syntax highlighter for the file')
+#    parser.add_option('-e', '--encoding', default=settings['encoding'],
+#                      help='Specify the encoding of a file (default is '
+#                           'utf-8 or guessing if available)')
+#    parser.add_option('-b', '--open-browser', dest='open_browser',
+#                      action='store_true',
+#                      default=settings['open_browser'],
+#                      help='Open the paste in a web browser')
+#    parser.add_option('-p', '--private', action='store_true', default=False,
+#                      help='Paste as private')
+#    parser.add_option('--no-clipboard', dest='clipboard',
+#                      action='store_false',
+#                      default=settings['clipboard'],
+#                      help="Don't copy the url into the clipboard")
+#    parser.add_option('--download', metavar='UID',
+#                      help='Download a given paste')
+#
+#    opts, args = parser.parse_args()
+#
+    if languages:
+        print_languages()
+        return
+    elif download:
+        download_paste(download)
+        return
+
+    # check language if given
+    if language and not language_exists(language):
+        print 'Language %s is not supported.' % language
+        return
+
+    # load file(s)
+    args = [ filename ]
+    try:
+        data, language, filename, mimetype = compile_paste(args, language)
+    except Exception, err:
+        fail('Error while reading the file(s): %s' % err, 2)
+    if not data:
+        fail('Aborted, no content to paste.', 4)
+
+    # create paste
+    code = make_utf8(data, encoding)
+    pid = create_paste(code, language, filename, mimetype, private)
+    url = '%sshow/%s/' % (SERVICE_URL, pid)
+    print url
+    if open_browser:
+        open_webbrowser(url)
+    if clipboard:
+        copy_url(url)
+
+
+if __name__ == '__main__':
+    sys.exit(main())


http://bitbucket.org/yt_analysis/yt/changeset/fdeadc2de38a/
changeset:   fdeadc2de38a
branch:      yt
user:        Cameron Hummels
date:        2011-06-08 04:19:34
summary:     Merging.
affected #:  5 files (2.8 KB)

--- a/yt/frontends/castro/data_structures.py	Tue Jun 07 22:18:54 2011 -0400
+++ b/yt/frontends/castro/data_structures.py	Tue Jun 07 22:19:34 2011 -0400
@@ -46,6 +46,8 @@
            StaticOutput
 from yt.utilities.definitions import \
     mpc_conversion
+from yt.utilities.amr_utils import \
+    get_box_grids_level
 
 from .definitions import \
     castro2enzoDict, \
@@ -380,8 +382,13 @@
             grid._setup_dx()
 
     def __setup_grid_tree(self):
+        mask = na.empty(self.grids.size, dtype='int32')
         for i, grid in enumerate(self.grids):
-            children = self._get_grid_children(grid)
+            get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            children = self.grids[mask.astype("bool")]
+            #assert(len(children) == len(self._get_grid_children(grid)))
             for child in children:
                 self.gridReverseTree[child.id].append(i)
                 self.gridTree[i].append(weakref.proxy(child))


--- a/yt/frontends/ramses/_ramses_reader.pyx	Tue Jun 07 22:18:54 2011 -0400
+++ b/yt/frontends/ramses/_ramses_reader.pyx	Tue Jun 07 22:19:34 2011 -0400
@@ -397,10 +397,10 @@
         # We now have to get our field names to fill our array
         self.trees = <RAMSES_tree**>\
             malloc(sizeof(RAMSES_tree*) * self.rsnap.m_header.ncpu)
+        for ii in range(self.ndomains): self.trees[ii] = NULL
         self.hydro_datas = <RAMSES_hydro_data ***>\
                        malloc(sizeof(RAMSES_hydro_data**) * self.rsnap.m_header.ncpu)
         self.ndomains = self.rsnap.m_header.ncpu
-        #for ii in range(self.ndomains): self.trees[ii] = NULL
         # Note we don't do ncpu + 1
         for idomain in range(self.rsnap.m_header.ncpu):
             # we don't delete local_tree
@@ -415,8 +415,8 @@
                     new RAMSES_hydro_data(deref(local_tree))
             self.trees[idomain] = local_tree
             # We do not delete the final snapshot, which we'll use later
-            if idomain + 1 < self.rsnap.m_header.ncpu:
-                del local_hydro_data
+            #if idomain + 1 < self.rsnap.m_header.ncpu:
+            #    del local_hydro_data
         # Only once, we read all the field names
         self.nfields = local_hydro_data.m_nvars
         cdef string *field_name
@@ -434,7 +434,6 @@
             self.field_names.append(field_name.c_str())
             self.field_ind[self.field_names[-1]] = ifield
         # This all needs to be cleaned up in the deallocator
-        del local_hydro_data
 
     def __dealloc__(self):
         import traceback; traceback.print_stack()
@@ -473,21 +472,20 @@
         cdef np.ndarray[np.int64_t, ndim=1] cell_count
         cell_count = np.zeros(self.rsnap.m_header.levelmax + 1, 'int64')
         cdef int local_count = 0
+        cdef int tree_count
         for idomain in range(1, self.rsnap.m_header.ncpu + 1):
-            local_tree = new RAMSES_tree(deref(self.rsnap), idomain,
-                                         self.rsnap.m_header.levelmax, 0)
-            local_tree.read()
-            local_hydro_data = new RAMSES_hydro_data(deref(local_tree))
+            local_tree = self.trees[idomain - 1]
             for ilevel in range(local_tree.m_maxlevel + 1):
                 local_count = 0
+                tree_count = 0
                 local_level = &local_tree.m_AMR_levels[ilevel]
                 grid_it = local_tree.begin(ilevel)
                 grid_end = local_tree.end(ilevel)
                 while grid_it != grid_end:
                     local_count += (grid_it.get_domain() == idomain)
+                    tree_count += 1
                     grid_it.next()
                 cell_count[ilevel] += local_count
-            del local_tree, local_hydro_data
 
         return cell_count
 
@@ -582,10 +580,7 @@
         cdef np.ndarray[np.int64_t, ndim=1] level_cell_counts
         level_cell_counts = np.zeros(self.rsnap.m_header.levelmax + 1, 'int64')
         for idomain in range(1, self.rsnap.m_header.ncpu + 1):
-            local_tree = new RAMSES_tree(deref(self.rsnap), idomain,
-                                         self.rsnap.m_header.levelmax, 0)
-            local_tree.read()
-            local_hydro_data = new RAMSES_hydro_data(deref(local_tree))
+            local_tree = self.trees[idomain - 1]
             for ilevel in range(local_tree.m_maxlevel + 1):
                 # this gets overwritten for every domain, which is okay
                 level_cell_counts[ilevel] = grid_ind 
@@ -626,7 +621,6 @@
                     grid_ind += 1
                     grid_aind += 1
                     grid_it.next()
-            del local_tree, local_hydro_data
 
     def read_oct_grid(self, char *field, int level, int domain, int grid_id):
 
@@ -715,13 +709,29 @@
                         to_fill += 1
         return to_fill
 
+#def recursive_patch_splitting(ProtoSubgrid psg,
+#        np.ndarray[np.int64_t, ndim=1] dims,
+#        np.ndarray[np.int64_t, ndim=1] inds,
+#        np.ndarray[np.int64_t, ndim=2] left_index,
+#        np.ndarray[np.int64_t, ndim=2] right_index,
+#        np.ndarray[np.int64_t, ndim=2] gdims,
+#        np.ndarray[np.int64_t, ndim=2] fl,
+#        int num_deep = 0):
+#    cdef float min_eff = 0.1
+#    if num_deep > 40:
+#        psg.efficiency = min_eff
+#        return [psg]
+#    if psg.efficiency > min_eff or psg.efficiency < 0.0:
+#        return [psg]
+#    cdef 
+#
 cdef class ProtoSubgrid:
     cdef np.int64_t *signature[3]
     cdef np.int64_t left_edge[3]
     cdef np.int64_t right_edge[3]
     cdef np.int64_t dimensions[3]
     cdef public np.float64_t efficiency
-    cdef public object sigs
+    cdef np.int64_t *sigs[3]
     cdef public object grid_file_locations
     cdef public object dd
         
@@ -731,8 +741,6 @@
                    np.ndarray[np.int64_t, ndim=1] left_index,
                    np.ndarray[np.int64_t, ndim=1] dimensions, 
                    np.ndarray[np.int64_t, ndim=2] left_edges,
-                   np.ndarray[np.int64_t, ndim=2] right_edges,
-                   np.ndarray[np.int64_t, ndim=2] grid_dimensions,
                    np.ndarray[np.int64_t, ndim=2] grid_file_locations):
         # This also includes the shrinking step.
         cdef int i, ci, ng = left_edges.shape[0]
@@ -740,23 +748,22 @@
         cdef int l0, r0, l1, r1, l2, r2, i0, i1, i2
         cdef np.int64_t temp_l[3], temp_r[3], ncells
         cdef np.float64_t efficiency
-        self.sigs = []
         for i in range(3):
             temp_l[i] = left_index[i] + dimensions[i]
             temp_r[i] = left_index[i]
             self.signature[i] = NULL
         for gi in range(ng):
             if left_edges[gi,0] > left_index[0]+dimensions[0] or \
-               right_edges[gi,0] < left_index[0] or \
+               left_edges[gi,0] + 2 < left_index[0] or \
                left_edges[gi,1] > left_index[1]+dimensions[1] or \
-               right_edges[gi,1] < left_index[1] or \
+               left_edges[gi,1] + 2 < left_index[1] or \
                left_edges[gi,2] > left_index[2]+dimensions[2] or \
-               right_edges[gi,2] < left_index[2]:
+               left_edges[gi,2] + 2 < left_index[2]:
                #print "Skipping grid", gi, "which lies outside out box"
                continue
             for i in range(3):
                 temp_l[i] = i64min(left_edges[gi,i], temp_l[i])
-                temp_r[i] = i64max(right_edges[gi,i], temp_r[i])
+                temp_r[i] = i64max(left_edges[gi,i] + 2, temp_r[i])
         for i in range(3):
             self.left_edge[i] = i64max(temp_l[i], left_index[i])
             self.right_edge[i] = i64min(temp_r[i], left_index[i] + dimensions[i])
@@ -764,13 +771,14 @@
             if self.dimensions[i] <= 0:
                 self.efficiency = -1.0
                 return
-            self.sigs.append(np.zeros(self.dimensions[i], 'int64'))
-        #print self.sigs[0].size, self.sigs[1].size, self.sigs[2].size
+            self.sigs[i] = <np.int64_t *> malloc(
+                                sizeof(np.int64_t) * self.dimensions[i])
+            for gi in range(self.dimensions[i]): self.sigs[i][gi] = 0
         
         # My guess is that this whole loop could be done more efficiently.
         # However, this is clear and straightforward, so it is a good first
         # pass.
-        cdef np.ndarray[np.int64_t, ndim=1] sig0, sig1, sig2
+        cdef np.int64_t *sig0, *sig1, *sig2
         sig0 = self.sigs[0]
         sig1 = self.sigs[1]
         sig2 = self.sigs[2]
@@ -780,15 +788,15 @@
         for gi in range(ng):
             used = 0
             nnn = 0
-            for l0 in range(grid_dimensions[gi, 0]):
+            for l0 in range(2):
                 i0 = left_edges[gi, 0] + l0
                 if i0 < self.left_edge[0]: continue
                 if i0 >= self.right_edge[0]: break
-                for l1 in range(grid_dimensions[gi, 1]):
+                for l1 in range(2):
                     i1 = left_edges[gi, 1] + l1
                     if i1 < self.left_edge[1]: continue
                     if i1 >= self.right_edge[1]: break
-                    for l2 in range(grid_dimensions[gi, 2]):
+                    for l2 in range(2):
                         i2 = left_edges[gi, 2] + l2
                         if i2 < self.left_edge[2]: continue
                         if i2 >= self.right_edge[2]: break
@@ -813,6 +821,11 @@
         #print "Efficiency is %0.3e" % (efficiency)
         self.efficiency = efficiency
 
+    def __dealloc__(self):
+        free(self.sigs[0])
+        free(self.sigs[1])
+        free(self.sigs[2])
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def find_split(self):
@@ -821,7 +834,7 @@
         cdef np.ndarray[ndim=1, dtype=np.int64_t] axes
         cdef np.int64_t strength, zcstrength, zcp
         axes = np.argsort(self.dd)[::-1]
-        cdef np.ndarray[np.int64_t] sig
+        cdef np.int64_t *sig
         for axi in range(3):
             ax = axes[axi]
             center = self.dimensions[ax] / 2
@@ -954,3 +967,36 @@
         hilbert_indices[o] = h
     return hilbert_indices
 
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+def get_array_indices_lists(np.ndarray[np.int64_t, ndim=1] ind,
+                            np.ndarray[np.int64_t, ndim=1] uind):
+    cdef np.ndarray[np.int64_t, ndim=1] count = np.zeros(uind.shape[0], 'int64')
+    cdef int n, i
+    cdef np.int64_t mi, mui
+    for i in range(ind.shape[0]):
+        mi = ind[i]
+        for n in range(uind.shape[0]):
+            if uind[n] == mi:
+                count[n] += 1
+                break
+    cdef np.int64_t **inds
+    inds = <np.int64_t **> malloc(sizeof(np.int64_t *) * uind.shape[0])
+    cdef int *li = <int *> malloc(sizeof(int) * uind.shape[0])
+    cdef np.ndarray[np.int64_t, ndim=1] indices
+    all_indices = []
+    for n in range(uind.shape[0]):
+        indices = np.zeros(count[n], 'int64')
+        all_indices.append(indices)
+        inds[n] = <np.int64_t *> indices.data
+        li[n] = 0
+    for i in range(ind.shape[0]):
+        mi = ind[i]
+        for n in range(uind.shape[0]):
+            if uind[n] == mi:
+                inds[n][li[n]] = i
+                li[n] += 1
+                break
+    free(inds) # not inds[...]
+    free(li)
+    return all_indices


--- a/yt/frontends/ramses/data_structures.py	Tue Jun 07 22:18:54 2011 -0400
+++ b/yt/frontends/ramses/data_structures.py	Tue Jun 07 22:19:34 2011 -0400
@@ -38,6 +38,8 @@
 from .fields import RAMSESFieldContainer
 from yt.utilities.definitions import \
     mpc_conversion
+from yt.utilities.amr_utils import \
+    get_box_grids_level
 from yt.utilities.io_handler import \
     io_registry
 
@@ -166,7 +168,6 @@
             # left_index is integers of the index, with respect to this level
             left_index = na.rint((ogrid_left_edge[ggi,:]) * nd / DW ).astype('int64')
             # we've got octs, so it's +2
-            right_index = left_index + 2
             pbar = get_pbar("Re-gridding ", left_index.shape[0])
             dlp = [None, None, None]
             i = 0
@@ -180,39 +181,42 @@
             # Strictly speaking, we don't care about the index of any
             # individual oct at this point.  So we can then split them up.
             unique_indices = na.unique(hilbert_indices)
-            for curve_index in unique_indices:
+            print "Level % 2i has % 10i unique indices for %0.3e octs" % (
+                        level, unique_indices.size, hilbert_indices.size)
+            all_indices = _ramses_reader.get_array_indices_lists(
+                        hilbert_indices, unique_indices)
+            for curve_index, my_octs in zip(unique_indices, all_indices):
                 #print "Handling", curve_index
-                my_octs = (hilbert_indices == curve_index)
+                #my_octs = (hilbert_indices == curve_index)
                 dleft_index = left_index[my_octs,:]
-                dright_index = left_index[my_octs,:] + 2
-                ddims = (dright_index * 0) + 2
                 dfl = fl[my_octs,:]
                 initial_left = na.min(dleft_index, axis=0)
-                idims = (na.max(dright_index, axis=0) - initial_left).ravel()
-                #if level > 6: insert_ipython()
+                idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                #if level > 10: insert_ipython()
                 #print initial_left, idims
                 psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
-                                dleft_index, dright_index, ddims, dfl)
+                                dleft_index, dfl)
                 if psg.efficiency <= 0: continue
                 self.num_deep = 0
                 psgs.extend(self._recursive_patch_splitting(
                     psg, idims, initial_left, 
-                    dleft_index, dright_index, ddims, dfl))
+                    dleft_index, dfl))
+            print "Done with level % 2i" % (level)
             pbar.finish()
             self.proto_grids.append(psgs)
             sums = na.zeros(3, dtype='int64')
             mylog.info("Final grid count: %s", len(self.proto_grids[level]))
             if len(self.proto_grids[level]) == 1: continue
-            for g in self.proto_grids[level]:
-                sums += [s.sum() for s in g.sigs]
-            assert(na.all(sums == dims.prod(axis=1).sum()))
+            #for g in self.proto_grids[level]:
+            #    sums += [s.sum() for s in g.sigs]
+            #assert(na.all(sums == dims.prod(axis=1).sum()))
         self.num_grids = sum(len(l) for l in self.proto_grids)
 
     num_deep = 0
 
     @num_deep_inc
     def _recursive_patch_splitting(self, psg, dims, ind,
-            left_index, right_index, gdims, fl):
+            left_index, fl):
         min_eff = 0.1 # This isn't always respected.
         if self.num_deep > 40:
             # If we've recursed more than 100 times, we give up.
@@ -232,13 +236,13 @@
         li_l = ind.copy()
         if na.any(dims_l <= 0): return [psg]
         L = _ramses_reader.ProtoSubgrid(
-                li_l, dims_l, left_index, right_index, gdims, fl)
+                li_l, dims_l, left_index, fl)
         #print " " * self.num_deep + "L", tt, L.efficiency
         if L.efficiency > 1.0: raise RuntimeError
         if L.efficiency <= 0.0: L = []
         elif L.efficiency < min_eff:
             L = self._recursive_patch_splitting(L, dims_l, li_l,
-                    left_index, right_index, gdims, fl)
+                    left_index, fl)
         else:
             L = [L]
         dims_r = dims.copy()
@@ -247,13 +251,13 @@
         li_r[ax] += fp
         if na.any(dims_r <= 0): return [psg]
         R = _ramses_reader.ProtoSubgrid(
-                li_r, dims_r, left_index, right_index, gdims, fl)
+                li_r, dims_r, left_index, fl)
         #print " " * self.num_deep + "R", tt, R.efficiency
         if R.efficiency > 1.0: raise RuntimeError
         if R.efficiency <= 0.0: R = []
         elif R.efficiency < min_eff:
             R = self._recursive_patch_splitting(R, dims_r, li_r,
-                    left_index, right_index, gdims, fl)
+                    left_index, fl)
         else:
             R = [R]
         return L + R
@@ -276,18 +280,16 @@
                 gi += 1
         self.grids = na.array(grids, dtype='object')
 
-    def _get_grid_parents(self, grid, LE, RE):
-        mask = na.zeros(self.num_grids, dtype='bool')
-        grids, grid_ind = self.get_box_grids(LE, RE)
-        mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level-1)).flat)
-        return self.grids[mask]
-
     def _populate_grid_objects(self):
+        mask = na.empty(self.grids.size, dtype='int32')
+        print self.grid_levels.dtype
         for gi,g in enumerate(self.grids):
-            parents = self._get_grid_parents(g,
-                            self.grid_left_edge[gi,:],
-                            self.grid_right_edge[gi,:])
+            get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level - 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            parents = self.grids[mask.astype("bool")]
             if len(parents) > 0:
                 g.Parent.extend(parents.tolist())
                 for p in parents: p.Children.append(g)


--- a/yt/utilities/_amr_utils/QuadTree.pyx	Tue Jun 07 22:18:54 2011 -0400
+++ b/yt/utilities/_amr_utils/QuadTree.pyx	Tue Jun 07 22:19:34 2011 -0400
@@ -41,18 +41,17 @@
     np.float64_t *val
     np.float64_t weight_val
     np.int64_t pos[2]
-    int level
-    int nvals
     QuadTreeNode *children[2][2]
 
 cdef void QTN_add_value(QuadTreeNode *self,
-        np.float64_t *val, np.float64_t weight_val):
+        np.float64_t *val, np.float64_t weight_val,
+        int nvals):
     cdef int i
-    for i in range(self.nvals):
+    for i in range(nvals):
         self.val[i] += val[i]
     self.weight_val += weight_val
 
-cdef void QTN_refine(QuadTreeNode *self):
+cdef void QTN_refine(QuadTreeNode *self, int nvals):
     cdef int i, j, i1, j1
     cdef np.int64_t npos[2]
     cdef QuadTreeNode *node
@@ -62,27 +61,22 @@
             npos[1] = self.pos[1] * 2 + j
             # We have to be careful with allocation...
             self.children[i][j] = QTN_initialize(
-                        npos,
-                        self.nvals, self.val, self.weight_val,
-                        self.level + 1)
-    for i in range(self.nvals): self.val[i] = 0.0
+                        npos, nvals, self.val, self.weight_val)
+    for i in range(nvals): self.val[i] = 0.0
     self.weight_val = 0.0
 
 cdef QuadTreeNode *QTN_initialize(np.int64_t pos[2], int nvals,
-                        np.float64_t *val, np.float64_t weight_val,
-                        int level):
+                        np.float64_t *val, np.float64_t weight_val):
     cdef QuadTreeNode *node
     cdef int i, j
     node = <QuadTreeNode *> malloc(sizeof(QuadTreeNode))
     node.pos[0] = pos[0]
     node.pos[1] = pos[1]
-    node.nvals = nvals
     node.val = <np.float64_t *> malloc(
                 nvals * sizeof(np.float64_t))
     for i in range(2):
         for j in range(2):
             node.children[i][j] = NULL
-    node.level = level
     if val != NULL:
         for i in range(nvals):
             node.val[i] = val[i]
@@ -106,6 +100,7 @@
     cdef QuadTreeNode ***root_nodes
     cdef np.int64_t top_grid_dims[2]
     cdef int merged
+    cdef int num_cells
 
     def __cinit__(self, np.ndarray[np.int64_t, ndim=1] top_grid_dims,
                   int nvals):
@@ -136,7 +131,8 @@
             for j in range(top_grid_dims[1]):
                 pos[1] = j
                 self.root_nodes[i][j] = QTN_initialize(
-                    pos, nvals, vals, weight_val, 0)
+                    pos, nvals, vals, weight_val)
+        self.num_cells = self.top_grid_dims[0] * self.top_grid_dims[1]
 
     cdef int count_total_cells(self, QuadTreeNode *root):
         cdef int total = 0
@@ -184,7 +180,7 @@
             for j in range(2):
                 pos[0] = root.pos[0]*2 + i
                 pos[1] = root.pos[1]*2 + j
-                child = QTN_initialize(pos, self.nvals, NULL, 0.0, root.level+1)
+                child = QTN_initialize(pos, self.nvals, NULL, 0.0)
                 root.children[i][j] = child
                 curpos = self.unfill_buffer(child, curpos, refined, values, wval)
         return curpos
@@ -198,6 +194,7 @@
         self.merged = 1 # Just on the safe side
         cdef int curpos = 0
         cdef QuadTreeNode *root
+        self.num_cells = wval.shape[0]
         for i in range(self.top_grid_dims[0]):
             for j in range(self.top_grid_dims[1]):
                 curpos = self.unfill_buffer(self.root_nodes[i][j], curpos,
@@ -206,10 +203,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def tobuffer(self):
-        cdef int total = 0
-        for i in range(self.top_grid_dims[0]):
-            for j in range(self.top_grid_dims[1]):
-                total += self.count_total_cells(self.root_nodes[i][j])
+        cdef int total = self.num_cells
         # We now have four buffers:
         # Refined or not (total,) int32
         # Values in each node (total, nvals) float64
@@ -240,13 +234,14 @@
         cdef np.int64_t fac
         for L in range(level):
             if node.children[0][0] == NULL:
-                QTN_refine(node)
+                QTN_refine(node, self.nvals)
+                self.num_cells += 4
             # Maybe we should use bitwise operators?
             fac = self.po2[level - L - 1]
             i = (pos[0] >= fac*(2*node.pos[0]+1))
             j = (pos[1] >= fac*(2*node.pos[1]+1))
             node = node.children[i][j]
-        QTN_add_value(node, val, weight_val)
+        QTN_add_value(node, val, weight_val, self.nvals)
             
     @cython.cdivision(True)
     cdef QuadTreeNode *find_on_root_level(self, np.int64_t pos[2], int level):
@@ -292,7 +287,7 @@
         vals = []
         for i in range(self.top_grid_dims[0]):
             for j in range(self.top_grid_dims[1]):
-                total += self.count_at_level(self.root_nodes[i][j], level)
+                total += self.count_at_level(self.root_nodes[i][j], level, 0)
         if count_only: return total
         # Allocate our array
         cdef np.ndarray[np.int64_t, ndim=2] npos
@@ -313,14 +308,14 @@
                 for vi in range(self.nvals): vtoadd[vi] = 0.0
                 wtoadd = 0.0
                 curpos += self.fill_from_level(self.root_nodes[i][j],
-                    level, curpos, pdata, vdata, wdata, vtoadd, wtoadd)
+                    level, curpos, pdata, vdata, wdata, vtoadd, wtoadd, 0)
         return npos, nvals, nwvals
 
-    cdef int count_at_level(self, QuadTreeNode *node, int level):
+    cdef int count_at_level(self, QuadTreeNode *node, int level, int cur_level):
         cdef int i, j
         # We only really return a non-zero, calculated value if we are at the
         # level in question.
-        if node.level == level:
+        if cur_level == level:
             # We return 1 if there are no finer points at this level and zero
             # if there are
             return (node.children[0][0] == NULL)
@@ -328,7 +323,8 @@
         cdef int count = 0
         for i in range(2):
             for j in range(2):
-                count += self.count_at_level(node.children[i][j], level)
+                count += self.count_at_level(node.children[i][j], level,
+                                             cur_level + 1)
         return count
 
     cdef int fill_from_level(self, QuadTreeNode *node, int level,
@@ -337,9 +333,10 @@
                               np.float64_t *vdata,
                               np.float64_t *wdata,
                               np.float64_t *vtoadd,
-                              np.float64_t wtoadd):
+                              np.float64_t wtoadd,
+                              int cur_level):
         cdef int i, j
-        if node.level == level:
+        if cur_level == level:
             if node.children[0][0] != NULL: return 0
             for i in range(self.nvals):
                 vdata[self.nvals * curpos + i] = node.val[i] + vtoadd[i]
@@ -357,7 +354,7 @@
             for j in range(2):
                 added += self.fill_from_level(node.children[i][j],
                         level, curpos + added, pdata, vdata, wdata,
-                        vtoadd, wtoadd)
+                        vtoadd, wtoadd, cur_level + 1)
         if self.merged == 1:
             for i in range(self.nvals):
                 vtoadd[i] -= node.val[i]
@@ -372,7 +369,7 @@
             free(self.root_nodes[i])
         free(self.root_nodes)
 
-cdef void QTN_merge_nodes(QuadTreeNode *n1, QuadTreeNode *n2):
+cdef void QTN_merge_nodes(QuadTreeNode *n1, QuadTreeNode *n2, int nvals):
     # We have four choices when merging nodes.
     # 1. If both nodes have no refinement, then we add values of n2 to n1.
     # 2. If both have refinement, we call QTN_merge_nodes on all four children.
@@ -381,13 +378,13 @@
     # 4. If n1 has refinement and n2 does not, we add the value of n2 to n1.
     cdef int i, j
 
-    QTN_add_value(n1, n2.val, n2.weight_val)
+    QTN_add_value(n1, n2.val, n2.weight_val, nvals)
     if n1.children[0][0] == n2.children[0][0] == NULL:
         pass
     elif n1.children[0][0] != NULL and n2.children[0][0] != NULL:
         for i in range(2):
             for j in range(2):
-                QTN_merge_nodes(n1.children[i][j], n2.children[i][j])
+                QTN_merge_nodes(n1.children[i][j], n2.children[i][j], nvals)
     elif n1.children[0][0] == NULL and n2.children[0][0] != NULL:
         for i in range(2):
             for j in range(2):
@@ -400,8 +397,12 @@
 
 def merge_quadtrees(QuadTree qt1, QuadTree qt2):
     cdef int i, j
+    qt1.num_cells = 0
     for i in range(qt1.top_grid_dims[0]):
         for j in range(qt1.top_grid_dims[1]):
             QTN_merge_nodes(qt1.root_nodes[i][j],
-                            qt2.root_nodes[i][j])
+                            qt2.root_nodes[i][j],
+                            qt1.nvals)
+            qt1.num_cells += qt1.count_total_cells(
+                                qt1.root_nodes[i][j])
     qt1.merged = 1


--- a/yt/utilities/_amr_utils/misc_utilities.pyx	Tue Jun 07 22:18:54 2011 -0400
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx	Tue Jun 07 22:19:34 2011 -0400
@@ -50,3 +50,26 @@
             if v < mi: mi = v
             if v > ma: ma = v
     return (mi, ma)
+
+def get_box_grids_level(np.ndarray[np.float64_t, ndim=1] left_edge,
+                        np.ndarray[np.float64_t, ndim=1] right_edge,
+                        int level,
+                        np.ndarray[np.float64_t, ndim=2] left_edges,
+                        np.ndarray[np.float64_t, ndim=2] right_edges,
+                        np.ndarray[np.int32_t, ndim=2] levels,
+                        np.ndarray[np.int32_t, ndim=1] mask):
+    cdef int i, n
+    cdef int nx = left_edges.shape[0]
+    cdef int inside 
+    for i in range(nx):
+        if levels[i,0] != level:
+            mask[i] = 0
+            continue
+        inside = 1
+        for n in range(3):
+            if left_edge[n] > right_edges[i,n] or \
+               right_edge[n] < left_edges[i,n]:
+                inside = 0
+                break
+        if inside == 1: mask[i] = 1
+        else: mask[i] = 0

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list