diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 00000000..d43e6d09
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,49 @@
+cmake_minimum_required (VERSION 2.6)
+project (UQTk)
+
+# set( CMAKE_VERBOSE_MAKEFILE on ) # see all output
+include( CTest )
+
+# # set cpack for packaging
+# SET(CPACK_GENERATOR "STGZ;TGZ;TZ")
+# SET(CPACK_PACKAGE_NAME "UQTk")
+# SET(CPACK_PACKAGE_VERSION "3.0")
+# SET(CPACK_DEBIAN_PACKAGE_MAINTAINER "Bert Debusschere") #required
+# INCLUDE(CPack)
+
+
+IF(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
+ SET(CMAKE_INSTALL_PREFIX
+ "${PROJECT_BINARY_DIR}" CACHE PATH "FOO install prefix" FORCE
+ )
+ENDIF(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
+
+set(INSTALL_LIB_DIR lib )
+set(INSTALL_BIN_DIR bin )
+set(INSTALL_INCLUDE_DIR include)
+
+# Make relative paths absolute (needed later on)
+foreach(p LIB BIN INCLUDE)
+ set(var INSTALL_${p}_DIR)
+ if(NOT IS_ABSOLUTE "${${var}}")
+ set(${var} "${CMAKE_INSTALL_PREFIX}/${${var}}")
+ endif()
+endforeach()
+
+option(PyUQTk "PyUQTk" OFF)
+option(DFI "DFI" OFF)
+
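+# Both options are OFF by default; enable them on the cmake command line, e.g.:
+#   cmake -DPyUQTk=ON -DDFI=ON <path-to-uqtk-source>
+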
+# CXX flags
+#set(CMAKE_CXX_FLAGS "-O2")
+set(CMAKE_CXX_FLAGS "-O2 -std=c++11")
+#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_HDF5 -I/opt/local/hdf5/include -L/opt/local/hdf5/lib -lhdf5_hl -lhdf5_cpp -lhdf5 -lhdf5_fortran -lhdf5_hl_cpp")
+
+add_definitions(-D__wsu)
+add_definitions(-fPIC)
+add_definitions(-w)
+
+add_subdirectory (dep )
+add_subdirectory (cpp )
+add_subdirectory (examples)
+add_subdirectory (PyUQTk )
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..0a041280
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,165 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/PyUQTk/CMakeLists.txt b/PyUQTk/CMakeLists.txt
new file mode 100644
index 00000000..96176065
--- /dev/null
+++ b/PyUQTk/CMakeLists.txt
@@ -0,0 +1,30 @@
+project (UQTk)
+
+add_subdirectory (inference)
+add_subdirectory (plotting)
+add_subdirectory (sens)
+add_subdirectory (multirun)
+add_subdirectory (utils)
+
+SET(copy_FILES
+ __init__.py
+ )
+INSTALL(FILES ${copy_FILES}
+ PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
+ DESTINATION PyUQTk/
+)
+
+if ("${PyUQTk}" STREQUAL "ON")
+ include(numpy.cmake)
+
+ add_subdirectory (uqtkarray)
+ add_subdirectory (quad)
+ add_subdirectory (tools)
+ #add_subdirectory (kle)
+ add_subdirectory (pce)
+ add_subdirectory (bcs)
+ # add_subdirectory (mcmc)
+ #add_subdirectory (dfi)
+
+ add_subdirectory(pytests)
+endif()
diff --git a/PyUQTk/__init__.py b/PyUQTk/__init__.py
new file mode 100644
index 00000000..90cbb2c5
--- /dev/null
+++ b/PyUQTk/__init__.py
@@ -0,0 +1,77 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+# SWIG interface modules (only compiled if PyUQTk=ON)
+try:
+ import uqtkarray
+except ImportError:
+ print "PyUQTk SWIG array interface not created."
+
+try:
+ import quad
+except ImportError:
+ print "PyUQTk SWIG quad interface not created."
+
+try:
+ import tools
+except ImportError:
+ print "PyUQTk SWIG tools interface not created."
+
+try:
+ import kle
+except ImportError:
+ print "PyUQTk SWIG kle interface not created."
+
+try:
+ import pce
+except ImportError:
+ print "PyUQTk SWIG pce interface not created."
+
+try:
+ import bcs
+except ImportError:
+ print "PyUQTk SWIG bcs interface not created."
+
+try:
+ import mcmc
+except ImportError:
+ print "PyUQTk SWIG mcmc interface not created."
+
+try:
+ import dfi
+except ImportError:
+ print "PyUQTk SWIG dfi interface not created."
+
+# pure python tools (always included)
+try:
+ import inference
+ import plotting
+ import sens
+except ImportError:
+ print "Scipy and/or matplotlib may need to be installed"
+
+import utils
+import multirun
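+
+# A minimal usage sketch (assumes the install directory that contains PyUQTk/
+# is on sys.path and that the SWIG modules were built with -DPyUQTk=ON):
+#
+#   import PyUQTk
+#   a = PyUQTk.uqtkarray.dblArray1D(10, 0.0)  # Array1D<double> of ten zeros
+#   print a.XSize()                           # -> 10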
diff --git a/PyUQTk/bcs/CMakeLists.txt b/PyUQTk/bcs/CMakeLists.txt
new file mode 100644
index 00000000..4319d381
--- /dev/null
+++ b/PyUQTk/bcs/CMakeLists.txt
@@ -0,0 +1,65 @@
+FIND_PACKAGE(SWIG REQUIRED)
+INCLUDE(${SWIG_USE_FILE})
+
+FIND_PACKAGE(PythonLibs)
+INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_PATH})
+INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_PATH}/../../Extras/lib/python/numpy/core/include)
+
+#include source files
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/) # array classes, array input output, and array tools
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/include/) # utilities like error handlers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/) # tools like multindex, etc.
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/quad/) # quad class
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/kle/) # kle class
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/pce/) # PCSet and PCBasis classes
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/bcs/) # bcs
+
+# include dependencies
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/blas/) # blas library headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/lapack/) # lapack library headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/dsfmt/) # dsfmt
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/figtree/) # figtree
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/slatec/) # slatec headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/cvode-2.7.0/include) # cvode
+INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/dep/cvode-2.7.0/include)
+INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/dep/cvode-2.7.0/include/nvector)
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../numpy/) # numpy headers
+
+SET(CMAKE_SWIG_FLAGS "")
+SET_SOURCE_FILES_PROPERTIES(bcs.i PROPERTIES CPLUSPLUS ON)
+
+# compile swig with cpp extensions
+SWIG_ADD_MODULE(
+ bcs python bcs.i
+ # array tools needed to compile misc tools source files
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/arrayio.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/arraytools.cpp
+
+ # source code for quad and kle class
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/quad/quad.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/kle/kle.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/bcs/bcs.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/pce/PCSet.cpp
+
+ # source code for tools
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/combin.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/gq.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/minmax.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/multiindex.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/pcmaps.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/probability.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/rosenblatt.cpp
+)
+
+# link python and 3rd party libraries, e.g., gfortran and blas
+if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+ # using GCC
+ SWIG_LINK_LIBRARIES(bcs uqtkpce uqtktools uqtkquad uqtkarray depnvec deplapack depblas depslatec depdsfmt depann depfigtree depcvode gfortran ${PYTHON_LIBRARIES})
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
+ # using Intel
+ SWIG_LINK_LIBRARIES(bcs uqtkpce uqtktools uqtkquad uqtkarray depnvec deplapack depblas depslatec depdsfmt depann depfigtree depcvode ifcore ${PYTHON_LIBRARIES})
+endif()
+
+INSTALL(TARGETS _bcs DESTINATION PyUQTk/)
+INSTALL(FILES ${CMAKE_BINARY_DIR}/${outdir}PyUQTk/bcs/bcs.py DESTINATION PyUQTk)
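+
+# With the install rules above, the SWIG target _bcs and the generated bcs.py
+# land under <prefix>/PyUQTk/; a typical out-of-source build (illustrative) is:
+#   cmake -DPyUQTk=ON .. && make && make install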
diff --git a/PyUQTk/bcs/bcs.i b/PyUQTk/bcs/bcs.i
new file mode 100644
index 00000000..2279bb0b
--- /dev/null
+++ b/PyUQTk/bcs/bcs.i
@@ -0,0 +1,139 @@
+%module(directors="1") bcs
+//=====================================================================================
+// The UQ Toolkit (UQTk) version 3.0.4
+// Copyright (2017) Sandia Corporation
+// http://www.sandia.gov/UQToolkit/
+//
+// Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+// with Sandia Corporation, the U.S. Government retains certain rights in this software.
+//
+// This file is part of The UQ Toolkit (UQTk)
+//
+// UQTk is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// UQTk is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+//
+// Questions? Contact Bert Debusschere
+// Sandia National Laboratories, Livermore, CA, USA
+//=====================================================================================
+
+%{
+#define SWIG_FILE_WITH_INIT
+#include <iostream>
+#include <string>
+#include <vector>
+#include <math.h>
+#include <stdio.h>
+// #include "../../cpp/lib/array/Array1D.h"
+// #include "../../cpp/lib/array/Array2D.h"
+// #include "../../cpp/lib/array/arrayio.h"
+// #include "../../cpp/lib/array/arraytools.h"
+// #include "../../cpp/lib/tools/combin.h"
+// #include "../../cpp/lib/tools/gq.h"
+// #include "../../cpp/lib/tools/minmax.h"
+// #include "../../cpp/lib/tools/multiindex.h"
+// #include "../../cpp/lib/tools/pcmaps.h"
+// #include "../../cpp/lib/tools/probability.h"
+// #include "../../cpp/lib/tools/rosenblatt.h"
+
+// #include "../../cpp/lib/quad/quad.h"
+// #include "../../cpp/lib/kle/kle.h"
+// #include "../../cpp/lib/pce/PCBasis.h"
+// #include "../../cpp/lib/pce/PCSet.h"
+#include "../../cpp/lib/bcs/bcs.h"
+
+%}
+
+/*************************************************************
+// Standard SWIG Templates
+*************************************************************/
+
+// Include standard SWIG templates
+// Numpy array templates and wrapping
+%include "pyabc.i"
+%include "../numpy/numpy.i"
+%include "std_vector.i"
+%include "std_string.i"
+%include "cpointer.i"
+
+%init %{
+ import_array();
+%}
+
+%pointer_functions(double, doublep);
+
+/*************************************************************
+// Numpy SWIG Interface files
+*************************************************************/
+
+// // Basic typemap for an Arrays and its length.
+// // Must come before %include statement below
+
+// // For Array1D setnumpyarray4py function
+// %apply (long* IN_ARRAY1, int DIM1) {(long* inarray, int n)}
+// %apply (double* IN_ARRAY1, int DIM1) {(double* inarray, int n)}
+// // get numpy int and double array
+// %apply (long* INPLACE_ARRAY1, int DIM1) {(long* outarray, int n)}
+// %apply (double* INPLACE_ARRAY1, int DIM1) {(double* outarray, int n)}
+
+// // For Array2D numpysetarray4py function
+// %apply (double* IN_FARRAY2, int DIM1, int DIM2) {(double* inarray, int n1, int n2)}
+// // get numpy array (must be FARRAY)
+// %apply (double* INPLACE_FARRAY2, int DIM1, int DIM2) {(double* outarray, int n1, int n2)}
+// // For Array2D numpysetarray4py function
+// %apply (long* IN_FARRAY2, int DIM1, int DIM2) {(long* inarray, int n1, int n2)}
+// // get numpy array (must be FARRAY)
+// %apply (long* INPLACE_FARRAY2, int DIM1, int DIM2) {(long* outarray, int n1, int n2)}
+
+
+// // For mcmc test to get log probabilities
+// %apply (double* INPLACE_ARRAY1, int DIM1) {(double* l, int n)}
+
+/*************************************************************
+// Include header files
+*************************************************************/
+
+// // The above typemap is applied to header files below
+// %include "../../cpp/lib/array/Array1D.h"
+// %include "../../cpp/lib/array/Array2D.h"
+// %include "../../cpp/lib/array/arrayio.h"
+// %include "../../cpp/lib/array/arraytools.h"
+// %include "../../cpp/lib/tools/combin.h"
+// %include "../../cpp/lib/tools/gq.h"
+// %include "../../cpp/lib/tools/minmax.h"
+// %include "../../cpp/lib/tools/multiindex.h"
+// %include "../../cpp/lib/tools/pcmaps.h"
+// %include "../../cpp/lib/tools/probability.h"
+// %include "../../cpp/lib/tools/rosenblatt.h"
+
+// %include "../../cpp/lib/quad/quad.h"
+// %include "../../cpp/lib/kle/kle.h"
+// %include "../../cpp/lib/pce/PCBasis.h"
+// %include "../../cpp/lib/pce/PCSet.h"
+%include "../../cpp/lib/bcs/bcs.h"
+
+// // Typemaps for standard vector
+// // Needed to prevent to memory leak due to lack of destructor
+// // must use namespace std
+// namespace std{
+// %template(dblVector) vector<double>;
+// %template(intVector) vector<int>;
+// %template(strVector) vector<string>;
+
+// }
+
+
+%include "bcs_ext.py"
+
+
+
+
diff --git a/PyUQTk/bcs/bcs_ext.py b/PyUQTk/bcs/bcs_ext.py
new file mode 100644
index 00000000..182342e2
--- /dev/null
+++ b/PyUQTk/bcs/bcs_ext.py
@@ -0,0 +1,171 @@
+%pythoncode %{
+
+import numpy as np
+import matplotlib.pyplot as mpl
+import uqtkarray as uqtkarray
+import pce as uqtkpce
+import tools as uqtktools
+from uqtkarray import uqtk2numpy, numpy2uqtk
+# BCS itself is already added to the path during compilation and install
+
+
+class bcsreg:
+ '''
+ Class to compute the bcs regression coefficients for a scalar function of ndim dimensions.
+ '''
+ def __init__(self,ndim,pcorder,pctype):
+ '''
+ Construction has the following inputs:
+ ndim : (int) number of input dimensions (features)
+ pcorder : (int) the initial order of the polynomial (changes in the algorithm)
+ pctype : ('LU','HG') type of polynomial basis functions, e.g., Legendre, Hermite
+
+ '''
+ self.ndim = ndim # int
+ self.pcorder = pcorder # int
+ self.pctype = pctype # 'LU', 'HG'
+
+ # generate multi index
+ self.__mindex_uqtk = uqtkarray.intArray2D()
+ uqtktools.computeMultiIndex(self.ndim,self.pcorder,self.__mindex_uqtk);
+ self.mindex = uqtk2numpy(self.__mindex_uqtk)
+ self.__mindex0_uqtk = self.__mindex_uqtk # keep original
+
+ # get projection/ Vandermonde matrix
+ self.__Phi_uqtk = uqtkarray.dblArray2D()
+
+ # check if compiled
+ self.__compiled = False
+ self.compile()
+
+ def compile(self,l_init=0.0,adaptive=0,optimal=1,scale=.1,verbose=0):
+ '''
+ Setting up variables for the BCS algorithm. Most of the variables do not need to be set. Default settings are sufficient for most cases. See the C++ code for more information about variables.
+ '''
+ # now we begin BCS routine
+ # set work variables
+ self.__newmindex_uqtk = uqtkarray.intArray2D() # for uporder iteration
+ self.__sigma2_p = uqtktools.new_doublep() # initial noise variance
+ self.__lambda_init = uqtkarray.dblArray1D() # hierarchical prior parameter
+ self.__adaptive, self.__optimal, self.__scale, self.__verbose = adaptive,optimal,scale,verbose
+ self.__weights_uqtk = uqtkarray.dblArray1D() # weights/ coefficients for basis
+ self.__used_uqtk = uqtkarray.intArray1D() # index of weights retained (nonzero)
+ self.__errbars_uqtk = uqtkarray.dblArray1D() # error bars for each weight
+ self.__nextbasis_uqtk = uqtkarray.dblArray1D() # if adaptive
+ self.__alpha_uqtk = uqtkarray.dblArray1D() # prior hyperparameter (1/gamma)
+ self.__lambda_p = uqtktools.new_doublep()
+
+ uqtktools.doublep_assign(self.__lambda_p,l_init)
+
+ self.__compiled = True
+
+ def leastsq(self,X,y):
+ '''
+ perform simple least squares based on the original
+ pc order.
+ '''
+ # convert input to uqtk arrays
+ self.__X_uqtk = numpy2uqtk(X)
+ self.__y_uqtk = numpy2uqtk(y)
+
+ # get vandermonde matrix w.r.t. original pc basis
+ self.__V_uqtk = uqtkarray.dblArray2D()
+ self.__pcmodel0 = uqtkpce.PCSet("NISPnoq",self.__mindex0_uqtk,self.pctype,0.0,1.0) # initiate
+ self.__pcmodel0.EvalBasisAtCustPts(self.__X_uqtk,self.__V_uqtk)
+ self.Vandermonde = uqtk2numpy(self.__V_uqtk)
+ self.__sol = np.linalg.lstsq(self.Vandermonde,y)
+ return self.__sol[0], self.__sol[1]
+
+ def fit(self,X,y,tol=1e-8,sigsq=None,upit=0):
+ '''
+ Train bcs model coefficients with X and y data
+ X : 2d numpy of inputs/ feature data
+ y : 1d numpy array of labels/ outputs
+ tol : tolerance (smaller means we keep more coefficients)
+ sigsq : initial noise variance (if None, set automatically based on the y data)
+ upit : (int) number of iterations to add higher order terms
+
+ returns the polynomial coefficients (weights) and the multiindex. One can also retrieve the sensitivity indices by calling self.getsens()
+ '''
+ if self.__compiled == False:
+ print "Need to compile first!"
+
+ # convert numpy test data into uqtk data types
+ self.__X_uqtk = numpy2uqtk(X)
+ self.__y_uqtk = numpy2uqtk(y)
+ self.Xtrain = X
+ self.ytrain = y
+
+ if sigsq is None:
+ self.__sigma2 = np.var(y)/1e2
+ else: self.__sigma2 = sigsq
+ uqtktools.doublep_assign(self.__sigma2_p,self.__sigma2)
+
+ self.__tol = tol
+ self.__upit = upit
+
+ # begin uporder iterations
+ for iter in range(self.__upit+1):
+
+ # get projection/ Vandermonde matrix
+ self.__pcmodel = uqtkpce.PCSet("NISPnoq",self.__mindex_uqtk,self.pctype,0.0,1.0) # initiate with new mindex
+ self.__pcmodel.EvalBasisAtCustPts(self.__X_uqtk,self.__Phi_uqtk)
+ self.__Phi = uqtk2numpy(self.__Phi_uqtk)
+
+ # reset sigma parameter (if not, may get seg fault)
+ uqtktools.doublep_assign(self.__sigma2_p,self.__sigma2)
+
+ # change to uqtkbcs.BCS if testing outside source
+ BCS(self.__Phi_uqtk,self.__y_uqtk,self.__sigma2_p,self.__tol,self.__lambda_init,self.__adaptive,self.__optimal,self.__scale,self.__verbose,self.__weights_uqtk,self.__used_uqtk,self.__errbars_uqtk,self.__nextbasis_uqtk,self.__alpha_uqtk,self.__lambda_p)
+
+ # add new multiindex to newmindex
+ uqtkarray.subMatrix_row_int(self.__mindex_uqtk,self.__used_uqtk,self.__newmindex_uqtk)
+
+ if iter < self.__upit :
+ # redefine mindex = newmindex if still iterating
+ self.__newmindex_added_uqtk = uqtkarray.intArray2D()
+ uqtktools.upOrder(self.__newmindex_uqtk,self.__newmindex_added_uqtk)
+ self.__mindex_uqtk = self.__newmindex_added_uqtk
+ print "New mindex basis: ", uqtk2numpy(self.__mindex_uqtk)[len(self.__newmindex_uqtk):]
+
+ # return new multiindex to create new pce model
+ self.__pcmodel_new = uqtkpce.PCSet("NISPnoq",self.__newmindex_uqtk,self.pctype,0.0,1.0)
+ self.mindex = uqtk2numpy(self.__newmindex_uqtk)
+ eff_dim = self.ndim - sum(sum(self.mindex,0) == 0)
+
+ self.weights = uqtk2numpy(self.__weights_uqtk)
+ self.weight_index = uqtk2numpy(self.__used_uqtk)
+ self.error_bars = uqtk2numpy(self.__errbars_uqtk)
+
+ # get main effect sensitivity indices
+ self.__main_eff_uqtk = uqtkarray.dblArray1D()
+ self.__tot_eff_uqtk = uqtkarray.dblArray1D()
+ self.__joint_eff_uqtk = uqtkarray.dblArray2D()
+ self.__pcmodel_new.ComputeMainSens(self.__weights_uqtk,self.__main_eff_uqtk)
+ self.__pcmodel_new.ComputeTotSens(self.__weights_uqtk,self.__tot_eff_uqtk)
+ self.__pcmodel_new.ComputeJointSens(self.__weights_uqtk,self.__joint_eff_uqtk)
+ self.main_eff = uqtk2numpy(self.__main_eff_uqtk)
+ self.tot_eff = uqtk2numpy(self.__tot_eff_uqtk)
+ self.joint_eff = uqtk2numpy(self.__joint_eff_uqtk)
+ self.senssum = {"main effect": self.main_eff, "total effect": self.tot_eff, "joint effect": self.joint_eff}
+ return self.weights, self.mindex
+
+ def predict(self,Xtest):
+ '''
+ Predict values after training the data
+ Xtest : 2d numpy array
+
+ returns 1d numpy scalar array of predictions
+ '''
+ self.__Xtest_uqtk = numpy2uqtk(Xtest)
+ self.__ytest_uqtk = uqtkarray.dblArray1D()
+ self.__pcmodel_new.EvalPCAtCustPoints(self.__ytest_uqtk,self.__Xtest_uqtk,self.__weights_uqtk)
+ self.__ytest = uqtk2numpy(self.__ytest_uqtk)
+ return self.__ytest
+ def getsens(self):
+ '''
+ return sensitivities as dictionary
+ '''
+ return self.senssum
+
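+# A minimal usage sketch for bcsreg (hypothetical data; assumes the uqtkarray,
+# pce and tools SWIG modules were built and are importable):
+#
+#   X = np.random.uniform(-1, 1, (50, 2))   # 50 samples, 2 input dimensions
+#   y = X[:,0]**2 + 0.5*X[:,1]              # scalar outputs
+#   reg = bcsreg(ndim=2, pcorder=3, pctype='LU')
+#   weights, mindex = reg.fit(X, y, tol=1e-8)
+#   yhat = reg.predict(X)                   # PCE predictions at X
+#   sens = reg.getsens()                    # main/total/joint sensitivities
+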
+%}
diff --git a/PyUQTk/inference/CMakeLists.txt b/PyUQTk/inference/CMakeLists.txt
new file mode 100644
index 00000000..e972b60d
--- /dev/null
+++ b/PyUQTk/inference/CMakeLists.txt
@@ -0,0 +1,12 @@
+project (UQTk)
+
+SET(copy_FILES
+ __init__.py
+ mcmc.py
+ postproc.py
+ )
+
+INSTALL(FILES ${copy_FILES}
+ PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
+ DESTINATION PyUQTk/inference
+)
diff --git a/PyUQTk/inference/__init__.py b/PyUQTk/inference/__init__.py
new file mode 100755
index 00000000..160bea8d
--- /dev/null
+++ b/PyUQTk/inference/__init__.py
@@ -0,0 +1,28 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+import mcmc
+import postproc
diff --git a/PyUQTk/inference/arch/mcmc.py b/PyUQTk/inference/arch/mcmc.py
new file mode 100755
index 00000000..74e91b29
--- /dev/null
+++ b/PyUQTk/inference/arch/mcmc.py
@@ -0,0 +1,411 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+import numpy as npy
+import scipy.stats
+import scipy.linalg
+import math
+import matplotlib.pyplot as plt
+
+global Rmat,invRmat
+
+
+#---------------------------------------------------------------------------------------
+# Simple Hamiltonian MCMC routine
+# Uses Leapfrog for the time stepping
+#---------------------------------------------------------------------------------------
+def HMCMC(U,grad_U,dt,nT,q):
+ '''
+ Hamiltonian MCMC routine
+
+ Input:
+ -----
+
+ U - potential energy function, -log(posterior)
+ grad_U - gradient of potential energy function
+ dt - time step, dt, for leapfrog method
+ nT - number of time steps in leapfrog method
+ q - initial state of chain (position vector)
+
+ Output:
+ ------
+ The next state (position vector) of the chain
+
+ Example:
+ -------
+ q_next = HMCMC(U,grad_U,1e-2,25,q_current)
+
+ '''
+ current_q = npy.copy(q) # save current
+
+ # generate current p
+ # propcov = 4*array([[ 0.01175383, 0.02065261],[ 0.02065261, 0.04296117]])
+ p = npy.random.randn(len(current_q))
+ # p = random.multivariate_normal([0,0],propcov)
+ current_p = npy.copy(p) # save current p
+
+ # make half step for momentum used for leap frog step
+ p = p - dt * grad_U(q)/2.0
+
+ for i in range(nT):
+ # p = p - dt * grad_U(q)/2.0
+ q = q + dt*p
+ # p = p - dt * grad_U(q)/2.0
+ if (i != nT-1): p = p - dt*grad_U(q)
+
+ # make a half step for momentum at the end
+ p = p - dt * grad_U(q)/2.0
+
+ # negate the momentum to make a symmetric proposal
+ p = -p
+
+ # Evaluate potential and kinetic energy
+ current_U = U(current_q)[0]
+ current_K = npy.sum(current_p**2)/2.0
+ proposed_U = U(q)[0]
+ proposed_K = npy.sum(p**2)/2.0
+
+ # Accept or reject the state at end of trajectory, returning either
+ # the position at the end of the trajectory or the initial position
+
+ if (npy.log(npy.random.rand()) < current_U-proposed_U+current_K-proposed_K):
+ return q
+ else:
+ return current_q
+
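+# A minimal usage sketch for HMCMC (illustrative standard-normal target, not
+# part of UQTk; note that U must return an indexable whose first entry is the
+# potential energy):
+#
+#   U = lambda q: ( 0.5*npy.sum(q**2), )  # -log density of N(0,I)
+#   grad_U = lambda q: q                  # its gradient
+#   q = npy.zeros(2)
+#   for i in range(1000):
+#       q = HMCMC(U, grad_U, 1e-2, 25, q)
+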
+
+#---------------------------------------------------------------------------------------
+# Example:
+# 1. Banana-shaped posterior density
+#---------------------------------------------------------------------------------------
+def norm_pdf_multivariate(x, mu, sigma):
+ """
+ Multi-variate normal pdf
+ x : list or numpy array
+ mu : 1D numpy array
+ sigma: 2D numpy array"""
+ size = len(x)
+ if size == len(mu) and (size, size) == sigma.shape:
+ det = npy.linalg.det(sigma)
+ if det == 0:
+ raise NameError("The covariance matrix can't be singular")
+ norm_const = 1.0/ ( math.pow((2*npy.pi),float(size)/2) * math.pow(det,1.0/2) )
+ x_mu = npy.matrix(x - mu)
+ inv = npy.linalg.inv(sigma)
+ result = math.pow(math.e, -0.5 * (x_mu * inv * x_mu.T))
+ return norm_const * result
+ else:
+ raise NameError("The dimensions of the input don't match")
+
+def tranB(x1,x2,a):
+ """
+ Coordinate transform for banana-shaped pdf
+ x1,x2: 2D numpy arrays
+ a: list containing the transform factors
+ """
+ a1 = a[0]; a2 = a[1];
+ y1 = a1*x1;
+ y2 = x2/a1 - a2*(y1**2 + a1**2);
+ return y1,y2
+
+def invTranB(x1,x2,a):
+ """ Inverse coordinate transform for banana-shaped pdf
+ x1,x2: 2D numpy arrays
+ a: list containing the transform factors
+ """
+ a1 = a[0]; a2 = a[1];
+ y1 = x1/a1;
+ y2 = x2*a1 + a1*a2*(x1**2 + a1**2);
+ return y1,y2
+
+def plotBanana():
+ """
+ Plot banana-shaped function; parameters are hard-wired
+ """
+ xb,yb = npy.mgrid[-3:3:.05, -11:1:.05]
+ x, y = invTranB(xb,yb,[1,1])
+ pos = npy.empty(x.shape + (2,))
+ pos[:, :, 0] = x; pos[:, :, 1] = y
+ mu = npy.array([0.0,0.0])
+ cov = npy.array([[1.0, 0.9], [0.9, 1.0]])
+ z = x.copy()
+ for i in range(x.shape[0]):
+ for j in range(x.shape[1]):
+ z[i,j] = norm_pdf_multivariate([x[i,j],y[i,j]], mu, cov)
+ plt.contour(xb,yb,z,50)
+ plt.show()
+ return
+
+def postBanana(spl,postinfo):
+ """
+ Posterior density for banana-shaped pdf
+ postinfo : setup for the posterior density
+
+ """
+ afac = postinfo['afac']
+ mu = postinfo['mu' ]
+ cov = postinfo['cov' ]
+ xb,yb = spl ;
+ x, y = invTranB(xb,yb,afac) ;
+ return npy.log(norm_pdf_multivariate([x,y], mu, cov))
+
+#---------------------------------------------------------------------------------------
+# DRAM
+#---------------------------------------------------------------------------------------
+def logPropRatio(iq,spls):
+ """
+ Gaussian n:th stage log proposal ratio
+ log of q_i(y_n,..,y_n-j) / q_i(x,y_1,...,y_j)
+ """
+ global invRmat
+ stage = len(spls)-1;
+ if stage == iq:
+ return (0.0); # symmetric
+ else:
+ iRmat = invRmat[iq-1]; # proposal^(-1/2)
+ y1 = spls[0] ; # y1
+ y2 = spls[iq] ; # y_i
+ y3 = spls[stage ] ; # y_n
+ y4 = spls[stage-iq] ; # y_(n-i)
+ return (-0.5*(npy.linalg.norm(npy.dot(y4-y3,iRmat))**2-npy.linalg.norm(npy.dot(y2-y1,iRmat))**2));
+
+def logPostRatio(p1,p2):
+ return (p2-p1);
+
+def getAlpha(spls,post):
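+    # Recursive DRAM acceptance probability for an n-stage try sequence:
+    # spls = [x, y1, ..., yn] holds the states tried so far and post their
+    # log-posteriors; a1/a2 accumulate the forward/reverse rejection terms
+    # entering the delayed-rejection acceptance ratio.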
+ stage = len(spls) - 1;
+ a1 = 1.0; a2 = 1.0;
+ for k in range(1,stage):
+ a1 = a1*(1-getAlpha(spls[:k+1],post[:k+1]));
+ a2 = a2*(1-getAlpha(spls[-1:-(k+1):-1],post[-1:-(k+1):-1]));
+ if a2 == 0.0:
+ return (0.0);
+ y = logPostRatio(post[0],post[-1]);
+ for k in range(1,stage+1):
+ y = y + logPropRatio(k,spls);
+ return min(1.0, npy.exp(y)*a2/a1);
+
+def ucov(spl,splmean,cov,lastup):
+ #
+ # update covariance
+ #
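+    # Streaming (recursive) update of the sample mean and covariance:
+    # lastup counts the samples folded in so far; each new sample spl[i]
+    # updates splmean and cov with the standard rank-one recursion.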
+ if len(spl.shape) == 1:
+ nspl = 1;
+ ndim = spl.shape[0];
+ else:
+ (nspl,ndim)=spl.shape;
+ if nspl>0:
+ for i in range(nspl):
+ iglb = lastup+i;
+ splmean = (iglb*splmean+spl[i])/(iglb+1);
+ rt = (iglb-1.0)/iglb;
+ st = (iglb+1.0)/iglb**2;
+ cov = rt*cov+st*npy.dot(npy.reshape(spl[i]-splmean,(ndim,1)),npy.reshape(spl[i]-splmean,(1,ndim)))
+ return lastup+nspl,splmean,cov
+
+def dram_ex(method,nsteps):
+ # define MCMC parameters
+ cini = npy.array([-1.0,-4.0])
+ spllo = npy.array([-4.0,-12.0])
+ splhi = npy.array([ 4.0, 2.0])
+ cvini = npy.array([[0.1,0.0],[0.0,0.1]])
+ opts={'method':method,'nsteps':nsteps,'nburn':1000,'nadapt':100,'nfinal':10000000,
+ 'inicov':cvini,'coveps':1.e-10,'burnsc':5,'ndr':2,'drscale':[5,4,3],
+ 'spllo':spllo,'splhi':splhi}
+ lpinfo={'afac':[1.0,1.0],'cov': npy.array([[1,0.9],[0.9,1]]),'mu':npy.array([0.0,0.0])}
+ sol=dram(opts,cini,postBanana,lpinfo)
+ return sol
+
+def dram(opts,cini,likTpr,lpinfo):
+ """
+ #
+ # DRAM
+ #
+ Delayed Rejection Adaptive MCMC
+ opts - dictionary of parameters for DRAM
+ method : either 'am' (adaptive metropolis) or 'dram' (am+delayed rejection)
+ nsteps : no. of mcmc steps
+ nburn : no. of mcmc steps for burn-in (proposal fixed to initial covariance)
+ nadapt : adapt every nadapt steps after nburn
+ nfinal : stop adapting after nfinal steps
+ inicov : initial covariance
+ coveps : small additive factor to ensure covariance matrix is positive definite
+ burnsc : factor to scale up/down proposal if acceptance rate is too high/low
+ ndr : no. of delayed rejection steps (if dram is requested)
+ drscale: scale factors for delayed rejection
+ cini - starting mcmc state
+ likTpr - log-posterior function
+ lpinfo - dictionary with settings that will be passed to the log-posterior function
+
+ """
+    global Rmat,invRmat # proposal Cholesky factors shared with logPropRatio
+    # -------------------------------------------------------------------------------
+ # Parse options
+ # -------------------------------------------------------------------------------
+ if 'method' in opts:
+ method = opts['method']
+ else:
+ print 'Error in dram: method unspecified !'; quit()
+ nsteps = opts['nsteps']
+ nburn = opts['nburn' ]
+ nadapt = opts['nadapt']
+ nfinal = opts['nfinal']
+ inicov = opts['inicov']
+ coveps = opts['coveps']
+ burnsc = opts['burnsc']
+ spllo = opts['spllo' ]
+ splhi = opts['splhi' ]
+ if method=='dram':
+ ndr = opts['ndr']
+ drscale = opts['drscale']
+ rej = 0;
+ rejlim = 0;
+ rejsc = 0;
+ # -------------------------------------------------------------------------------
+ # Pre-processing
+ # -------------------------------------------------------------------------------
+ cdim = cini.shape[0]; # chain dimensionality
+ cov = npy.zeros((cdim,cdim)); # covariance matrix
+ spls = npy.zeros((nsteps,cdim)); # MCMC samples
+ na = 0; # counter for accepted jumps
+ sigcv = 2.4/npy.sqrt(cdim); # covariance factor
+ spls[0] = cini; # initial sample set
+ p1 = likTpr(spls[0],lpinfo); # and its log-posterior
+ pmode = p1; # store chain MAP
+ cmode = spls[0];
+ nref = 0;
+ for k in range(nsteps-1):
+ #
+ # Deal with covariance matrix
+ #
+ covMatUpd = False
+ if k == 0:
+ splmean = spls[0];
+ propcov = inicov ;
+ Rchol = scipy.linalg.cholesky(propcov) ;
+ lastup = 1; # last covariance update
+ covMatUpd = True ;
+ else:
+ if (nadapt>0) and ((k+1)%nadapt)==0:
+      if k < nburn:
+        if float(rejsc)/nref > 0.95:
+          Rchol = Rchol/burnsc # scale down proposal
+          covMatUpd = True ;
+          print "Scaling down the proposal at step",k
+        elif float(rejsc)/nref < 0.05:
+          Rchol = Rchol*burnsc # scale up proposal
+          covMatUpd = True ;
+          print "Scaling up the proposal at step",k
+        nref  = 0 ;
+        rejsc = 0 ;
+ else:
+ lastup,splmean,cov=ucov(spls[lastup:lastup+nadapt,:],splmean,cov,lastup)
+ try:
+ Rchol = scipy.linalg.cholesky(cov)
+ except scipy.linalg.LinAlgError:
+ try:
+ # add to diagonal to make the matrix positive definite
+ Rchol = scipy.linalg.cholesky(cov+coveps*npy.identity(cdim))
+ except scipy.linalg.LinAlgError:
+ print "Covariance matrix is singular even after the correction"
+ Rchol = Rchol*sigcv
+ covMatUpd = True ;
+ if (method == 'dram') and covMatUpd:
+ Rmat = [Rchol]; invRmat = [scipy.linalg.inv(Rchol)]
+ for i in range(1,ndr):
+ Rmat.append(Rmat[i-1]/drscale[i-1])
+ invRmat.append(invRmat[i-1]*drscale[i-1])
+ #-Done with covariance matrix
+ nref = nref + 1 ;
+ #
+ # generate proposal and check bounds
+ #
+ u = spls[k]+npy.dot(npy.random.randn(1,cdim),Rchol)[0];
+ if npy.any(npy.less(u,spllo)) or npy.any(npy.greater(u,splhi)):
+ outofbound = True
+ accept = False
+ p2 = -1.e6
+ else:
+ outofbound = False
+ if not outofbound:
+ p2 = likTpr(u,lpinfo);
+ pr = npy.exp(p2-p1);
+ if (pr>=1.0) or (npy.random.random_sample()<=pr):
+ spls[k+1] = u.copy();
+ p1 = p2;
+ if p1 > pmode:
+ pmode = p1 ;
+ cmode = spls[k+1] ;
+ accept = True
+ else:
+ accept = False
+ #
+ # See if we can do anything about a rejected proposal
+ #
+ if not accept:
+ if (method == 'am'):
+ # if 'am' then reject
+ spls[k+1]=spls[k];
+ rej = rej + 1;
+ rejsc = rejsc + 1;
+ if outofbound:
+ rejlim = rejlim + 1;
+ elif (method == 'dram'):
+ # try delayed rejection
+ tryspls = [spls[k].copy(),u.copy()]
+ trypost = [p1,p2]
+ jdr = 1
+        while (not accept) and (jdr < ndr):
+          # propose from the next, scaled-down proposal in the DR cascade
+          u = spls[k]+npy.dot(npy.random.randn(1,cdim),Rmat[jdr])[0];
+          if npy.any(npy.less(u,spllo)) or npy.any(npy.greater(u,splhi)):
+            outofbound = True
+            p2 = -1.e6
+          else:
+            outofbound = False
+            p2 = likTpr(u,lpinfo);
+          tryspls.append(u.copy())
+          trypost.append(p2)
+          alpha = getAlpha(tryspls,trypost)
+          jdr = jdr + 1
+          if (alpha >= 1.0) or (npy.random.random_sample() < alpha):
+ accept = True;
+ spls[k+1] = u.copy();
+ p1 = p2;
+ if p1 > pmode:
+ pmode = p1 ;
+ cmode = spls[k+1] ;
+ if not accept:
+ spls[k+1]=spls[k] ;
+ rej = rej + 1;
+ rejsc = rejsc + 1;
+ if outofbound:
+ rejlim = rejlim + 1;
+ else:
+ print "Unknown MCMC method ",method," -> Quit\n"; quit()
+ # Done with if over methods
+ # Done with if over original accept
+ # Done loop over all steps
+ return (spls,[cmode,pmode],[1.0-float(rej)/nsteps,1.0-float(rejlim)/nsteps],[rej,rejlim])
diff --git a/PyUQTk/inference/arch/postproc.py b/PyUQTk/inference/arch/postproc.py
new file mode 100755
index 00000000..a248bb87
--- /dev/null
+++ b/PyUQTk/inference/arch/postproc.py
@@ -0,0 +1,781 @@
+#!/usr/bin/env python
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+#
+# Does statistical analysis on samples from an MCMC chain.
+
+
+import os
+import sys
+import string
+import numpy as np
+import getopt
+import math
+import matplotlib.pyplot as plt
+from scipy import stats
+from numpy import mgrid, c_, reshape, random, rot90
+from matplotlib.ticker import MultipleLocator, FormatStrFormatter
+
+#import pyUQTk.utils as ut
+
+##################################################################################
+# Compute the autocorrelation of a one-dimensional set of samples
+# Main function is acor(X), where X is a numpy array of samples
+##################################################################################
+from numpy import *
+from matplotlib.pyplot import *
+def acor_in(X, MAXLAG, WINMULT):
+
+ # compute mean of X
+ L = len(X)
+ mu = mean(X)
+ Xhat = X - mu
+ std = sqrt(var(X))
+
+ iMax = L - MAXLAG
+
+ # compute autocorrelation
+ # sind = arange(MAXLAG+1)
+ iind = arange(iMax)
+ C = zeros(MAXLAG + 1)
+ for s in range(MAXLAG+1):
+ C[s] += sum(Xhat[iind]*Xhat[iind+s])
+ C *= 1./iMax
+
+ D = C[0] # diffusion coeff
+ D += 2*sum(C[1:])
+
+ sigma = sqrt(abs(D/L))
+ tau = D/C[0]
+
+ # print D, L, sigma, tau, tau*WINMULT, MAXLAG
+ return C[0], D, L, sigma, tau, tau*WINMULT, Xhat
+# take in 1d numpy array of samples X
+def acor(X,MAXLAG = 10, WINMULT = 5):
+ C0, D, L, sigma, tau, tauWINMULT, X = acor_in(X, MAXLAG, WINMULT)
+ # print tau, sigma
+ Ls = []
+ S = []
+ while tau*WINMULT >= MAXLAG:
+ Lh = L/2
+ Ls.append(Lh)
+ j1,j2 = 0,1
+ for i in range(Lh):
+ X[i] = X[j1] + X[j2]
+ j1 += 2
+ j2 += 2
+ _, D, L, sigma, tau, tauWINMULT, X = acor_in(X[:Lh], MAXLAG, WINMULT)
+ S.append(sigma)
+ if len(S) == 0:
+ S.append(sigma)
+ Ls.append(L)
+
+ sigma = S[-1]
+ Ls = 2*array(Ls[::-1])
+ for i in range(len(S)):
+ D = .25 * sigma**2 * Ls[i]
+ tau = D/C0
+ sigma = sqrt(D/Ls[i])
+
+ return tau
+
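+# A minimal usage sketch (illustrative AR(1) series, not part of UQTk):
+#
+#   x = zeros(10000)
+#   for i in range(1,10000):
+#       x[i] = 0.9*x[i-1] + random.randn()
+#   print acor(x)   # integrated autocorrelation time, roughly (1+0.9)/(1-0.9)
+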
+###################################################################################################
+def read_remaining_lines(samples_file,n_burnin,stride):
+ """Read all remaining lines in MCMC sample filename, leaving out the first n_burnin,
+ and only one in every stride lines after that
+ """
+ samples_list = []
+ line_no = 0
+ done = 0
+ while not done:
+ line = samples_file.readline()
+ if (line == ""):
+ done = 1
+ else:
+ line_no += 1
+ if (line_no > n_burnin and (line_no - n_burnin) % stride == 0):
+ records = line.split()
+ num_records = [float(s) for s in records]
+ samples_list.append(num_records)
+
+ return samples_list
+
+###################################################################################################
+def remove_MAP_line(samples_list,debug):
+ """Remove the last line if is has a value < 0 (i.e. -1) in the acceptance_prob column (next to last)"""
+ if(samples_list[-1][-2] < 0):
+ # Remove the last line
+ del samples_list[-1]
+ if (debug > 0):
+ print "The last sample line has been deleted as it contained the MAP values"
+
+###################################################################################################
+def extract_vars(samples_file_name,n_burnin,v_names,debug,stride=1):
+ """From a file with samples in ascii format, with
+ the first line containing the label for each column, extract
+ the columns with the labels in v_names and return them
+ in a numpy array. Remove n_burnin samples from the top.
+ Only read one in every stride number of lines after that.
+ Assumes that the first column is the MCMC step number, the next to last column is the acceptance
+ probability for each step, and the last column is the posterior probability for each step. The
+ last line is removed if it contains -1 for the acceptance probability (which means this
+ line contains the MAP values)"""
+
+ # Open text file with all samples,
+ samples_file = open(samples_file_name,"r")
+
+ # Extract first line with the column labels and find the column
+ # numbers corresponding to the variables of interest.
+ labels_line = samples_file.readline().rstrip('\n')
+ col_labels = [lbl for lbl in labels_line.split()]
+
+ v_indices = []
+ for s_v in v_names:
+ try:
+ i_v = col_labels.index(s_v)
+ v_indices.append(i_v)
+ except ValueError:
+ print "Variable", s_v, "is not found in the list of labels", col_labels
+ sys.exit(1)
+
+ if (debug > 0):
+ print "Column labels in file",samples_file_name,"are:",col_labels
+ for i_v in range(len(v_names)):
+ print "The column number of",v_names[i_v],"is:",v_indices[i_v]
+
+ # Read subsequent lines
+ samples_list = read_remaining_lines(samples_file,n_burnin,stride)
+
+ # Close the file
+ samples_file.close()
+
+ # Remove MAP values, if present
+ remove_MAP_line(samples_list,debug)
+
+ # Convert list to array
+ steady_samples = np.array(samples_list)
+
+
+ # Extract all columns of interest
+ samples_cols = []
+ for i_v in v_indices:
+ samples_cols.append(steady_samples[:,i_v])
+
+ samples = np.array(samples_cols).T
+ if (debug > 0):
+ print "Shape of samples array:",samples.shape
+
+ n_samples = len(samples[:,0])
+ n_vars = len(samples[0,:])
+
+ if (debug > 0):
+ print "Read in", n_samples, "regular samples of", n_vars, "variables from file", samples_file_name
+
+ return samples
+
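+# A minimal usage sketch (hypothetical chain file and column labels):
+#
+#   samples = extract_vars("chain.dat", n_burnin=1000,
+#                          v_names=["a","b"], debug=1, stride=2)
+#   print samples.shape   # (number of retained samples, 2)
+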
+###################################################################################################
+def extract_all_vars(samples_file_name,n_burnin,debug,stride=1,labels=True):
+ """Extract samples and labels from an MCMC chain file.
+ Returns a numpy array with the samples, and a list of column labels.
+ Assumes the following:
+ * The file is in ASCII format
+ * The first column contains the MCMC step number
+ * The next to last column contains the acceptance probability for the jump proposed in this step
+ * The last column contains the posterior probability of the state in this step
+ * The columns in between contain the sampled states
+ * Unless the argument labels == False, the first line contains labels for each column
+ * If the last line has -1 in the acceptance probability column, then this line contains
+ the MAP values. This line is removed before returning the samples to the calling program.
+ Arguments:
+ * samples_file_name: name of file to parse
+ * n_burnin: number of lines to skip from the top
+ * debug: higher values are more verbose in output
+ * stride: stride to take in parsing sample lines. [default = 1]
+ * labels: True if the file contains column labels in first line. False if not. [default = True]
+ If no column labels are present, they are manufactured as aa, ab, ac, ..., az, ba, bb, ...
+ """
+
+ # Open text file with all samples
+ samples_file = open(samples_file_name,"r")
+
+
+ if (labels): # Column labels are present in first line
+ # Extract first line with the column labels
+ labels_line = samples_file.readline().rstrip('\n')
+ col_labels = [lbl for lbl in labels_line.split()]
+
+ # Identify the MCMC vars, knowing that the first column is the step
+ # number and the last two columns are acceptance and posterior prob
+ n_cols = len(col_labels)
+ n_vars = n_cols - 3
+
+ v_names = col_labels[1:1+n_vars]
+
+ if (debug > 0):
+ print "Column labels in file", samples_file_name, "are:", col_labels
+ print "MCMC chain variables are", v_names
+ else:
+ # Extract first line to see how many columns we have
+ first_line = samples_file.readline().rstrip('\n')
+ first_line_items = [item for item in first_line.split()]
+
+ # Identify the MCMC vars, knowing that the first column is the step
+ # number and the last two columns are acceptance and posterior prob
+ n_cols = len(first_line_items)
+ n_vars = n_cols - 3
+
+ # Generate variable names as aa, ab, ..., az, ba, bb, ...
+ if (n_vars > 52*26): # only 52 entries in string.letters. If need be, could go higher by allowing aA, aB, ... , Aa, ...
+ print "In routine extract_all_vars: too many columns for automatic name generation"
+ sys.exit(1)
+
+ v_names = []
+ for i_v in range(n_vars):
+ name = ""
+ name += string.letters[i_v/26]
+ name += string.letters[i_v%26]
+ v_names.append(name)
+
+ if (debug > 0):
+ print "There are",n_cols," columns in file", samples_file_name
+ print "MCMC chain variables have been labeled", v_names
+
+ # Rewind file so the first line will be read just like the other sample lines
+ samples_file.seek(0)
+
+ # Read subsequent lines
+ samples_list = read_remaining_lines(samples_file,n_burnin,stride)
+
+ # Close the file
+ samples_file.close()
+
+ # Remove MAP values, if present
+ remove_MAP_line(samples_list,debug)
+
+ # Convert list to array
+ samples = np.array(samples_list)
+
+ n_samples = samples.shape[0]
+
+ if (debug > 0):
+ print "Read in", n_samples, "regular samples of", n_vars, "variables from file", samples_file_name
+
+ return samples, v_names
+
+###################################################################################################
+def effective_sample_sizes(var_samples,par_mean,par_cov):
+ """Computes the effective sample size for each column
+ by dividing the number of samples by the integral
+ of the autocorrelation between the samples. (i.e. the more
+ correlated successive samples are, the less independent samples there are
+ in the chain.)
+ The algorithm is based on:
+ Markov Chain Monte Carlo in Practice: A Roundtable Discussion
+ Robert E. Kass, Bradley P. Carlin, Andrew Gelman and Radford M. Neal
+ The American Statistician, Vol. 52, No. 2 (May, 1998), pp. 93-100
+ Published by: American Statistical Association
+ Article DOI: 10.2307/2685466
+ Article Stable URL: http://www.jstor.org/stable/2685466
+ """
+
+ # Number of variable samples in set
+ n_sam = var_samples.shape[0]
+ # Number of variables in this sample set
+ n_vars = var_samples.shape[1]
+
+ # Array to store effective sample sizes in
+ ess = []
+
+ # Cut-off point for autocorrelation
+ # Ideally, n_a should be chosen such that the autocorrelation goes to 0 at this lag.
+ # Choosing n_a too low will give inaccurate results (overpredicting ESS), but going
+ # to a much higher lag will create a lot of noise in the ESS estimate.
+ n_a = min(100,n_sam)
+ for i_v in range(n_vars):
+ # Subtract mean from current variable samples
+ v_nm = var_samples[:,i_v] - par_mean[i_v]
+ # Compute autocorrelation for this variable. np.correlate returns a vector with
+ # lag from -n_sam to n_sam, with the 0 shift in the middle. Only retain from lag 0 to n_a.
+ r_v = np.correlate(v_nm, v_nm, mode = 'full')[-n_sam:-n_sam+n_a]
+ # Divide by the number of samples in each sum, and normalize by the variance
+ # (note: 0 lag has n_sam samples in the sum, lag i has (n_sam - i) samples in the sum)
+ r_a = r_v / (par_cov[i_v,i_v]*(np.arange(n_sam, n_sam-n_a, -1)))
+ # Plot autocorrelation to see if n_a is large enough
+ #pl1,=plt.plot(r_a)
+ #plt.show()
+ # Effective Sample Size (number of samples divided by the integral of the autocorrelation)
+ # Integral relies on symmetry and the fact that r_a is 1 at zero lag
+ ess.append(n_sam / (1.0+2.0*np.sum(r_a[1:])))
+
+ return ess
+
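+# A minimal usage sketch (continues the extract_vars example above; the mean
+# and covariance are computed here only for illustration):
+#
+#   par_mean = np.mean(samples, axis=0)
+#   par_cov  = np.cov(samples.T)
+#   print effective_sample_sizes(samples, par_mean, par_cov)
+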
+###################################################################################################
+def plot_all_posteriors(d0,vnames,np_kde,out_file_base,debug,dense=False):
+ """
+ Given a set of samples of random variables, this script plots a lower triangular
+ matrix of marginalized densities. The diagonal contains the density of individual
+ random variables, marginalized over all other variables. Plots below the diagonal
+ contain the 2D density of the associated pair of random variables, marginalized over
+ all other variables.
+ For chains with many variables, the "dense" option can be selected, which plots the
+ triangular set of densities for the full chain with minimum spacing and labels, so that
+ it is less cluttered. In this mode, this function also writes out a set of plots
+ with the same posterior information, but just for two variables at a time,
+ which is easier to read.
+
+ Arguments:
+ d0 : Set of samples, one column per variable
+ vnames : Variable names
+ np_kde : Number of points to use to compute posterior densities with KDE
+ out_file_base: Base name for output files with plots
+ debug : >0 writes more output to screen (and even more if >1)
+ dense : Set to True if dense output desired [Defaults to False]. The "dense" output
+ format puts all plots in the triangular format up against each other, without
+ any axis labels or space in between them. It is useful when plotting the
+ posteriors of a chain with many variables.
+ """
+ # Some settings to connect with code Cosmin gave me
+ nthin = 1 # take only every nthin state (for faster kde)
+ nskip = 0 # entries to skip
+ istart = 0 # number of column with first MCMC variable
+ cend = 0 # extra columns at end to be removed
+
+ nvars=d0.shape[1]-istart-cend # number of variables we will actually process
+ print 'Number of sample lines in file',d0.shape[0]
+ print 'Number of vars we will process in file',nvars
+
+ # Section 2
+ # set up 2D kde objects
+ print "Setting up 2D KDE objects"
+ kern_i_j=[]
+ for j in range(istart+1,istart+nvars):
+ for i in range(istart,j):
+ if (debug > 2):
+ print i,j
+ kern_i_j.append(stats.kde.gaussian_kde(c_[d0[nskip::nthin,i],d0[nskip::nthin,j]].T))
+
+ # Section 3
+ # set up 2D meshes and evaluate kde objects on those meshes
+ # no. of grid points is controlled with kde_idx, defaults to 100
+ print "Evaluating 2D KDE objects on meshes. This may take a while ..."
+ kde_idx = np_kde*1j # complex number to include end points
+ xmesh=[]; ymesh=[]; zmesh=[];
+ icount=0
+    cov_idx = np.zeros((nvars,nvars),dtype=np.int) # 2D array to keep track of which index in xmesh etc.
+                                                   # the plots corresponding to vars i,j belong to
+ for j in range(istart+1,istart+nvars):
+ for i in range(istart,j):
+ if (debug > 0):
+ print "Computing 2D marginal distribution between variables:",i,",",j,":",vnames[i]," & ",vnames[j]
+ x,y = mgrid[d0[nskip:,i].min():d0[nskip:,i].max():kde_idx, d0[nskip:,j].min():d0[nskip:,j].max():kde_idx]
+ z = reshape(kern_i_j[icount](c_[x.ravel(), y.ravel()].T).T, x.T.shape)
+ xmesh.append(x);
+ ymesh.append(y);
+ zmesh.append(z);
+ cov_idx[i,j] = icount
+ icount = icount+1
+
+ # Section 4
+ # evaluate 1D pdfs
+ print "Evaluating 1D marginal pdfs with KDE"
+ xlin=[]; pdflin=[];
+ for i in range(istart,istart+nvars):
+ xlin.append(np.linspace(d0[nskip:,i].min(),d0[nskip:,i].max(),np_kde)) ;
+ kernlin=stats.kde.gaussian_kde(d0[nskip::nthin,i]);
+ pdflin.append(kernlin(xlin[i-istart]));
+
+
+ if (not dense):
+ # Section 5
+        print "Assembling lower-triangular plots in non-dense format"
+
+ # ds is the distance between subplots
+ # xs,ys are the coordinates (normalized) of the subplot in the lower left corner
+        # xe,ye are the distances left in the upper right corner
+ # fsizex, fsizey are figure sizes
+ # ncont are no of contours for 2D pdfs
+ xs=0.12; ys=0.1; ds=0.04
+ xe=0.08; ye=0.05
+ fsizex=12; fsizey=12;
+ ncont=20;
+ sx=(1-(nvars-1)*ds-xs-xe)/nvars;
+ sy=(1-(nvars-1)*ds-ys-ye)/nvars;
+ fs1=20
+ majorFormatter = FormatStrFormatter('%6.0e')
+
+ figname=out_file_base+".tridiag.pdf" # figure name
+
+ fig = plt.figure(figsize=(fsizex,fsizey))
+
+ # Section 5.1
+ subs=[]
+ # add diagonal plots
+ for i in range(nvars):
+ subs.append(fig.add_axes([xs+i*(sx+ds),ys+(nvars-1-i)*(sy+ds),sx,sy]))
+
+ # add lower triangular plots
+ for i in range(nvars-1):
+ for j in range(i+1):
+ if (debug > 2):
+ print j,(nvars-2-i)
+ subs.append(fig.add_axes([xs+j*(sx+ds),ys+(nvars-2-i)*(sy+ds),sx,sy]))
+
+ subsnp=np.array(subs)
+
+ # Plot 1D pdfs
+ for i in range(nvars):
+ subsnp[i].plot(xlin[i],pdflin[i])
+
+ # Plot 2D pdfs
+ for i in range(nvars*(nvars-1)/2):
+ subsnp[nvars+i].contour(xmesh[i],ymesh[i],zmesh[i],ncont)
+
+ # Section 5.2
+ # just a few ticks and ticklabels
+ for subpl in subsnp:
+ # subpl.set_xticks([])
+ # subpl.set_yticks([])
+ subpl.locator_params(tight=True, nbins=5)
+
+        # for diagonal plots, put no ticks and labels on y-axis
+ # and no grid on the plots
+ for i in range(istart,istart+nvars):
+ # subsnp[i-istart].set_xticks([d0[nskip:,i].min(),d0[nskip:,i].max()]);
+ subsnp[i-istart].set_yticks([])
+ subsnp[i-istart].grid(False)
+
+ # Set y labels on the right for diagonal plots
+ for i in range(nvars):
+ subsnp[i].yaxis.tick_right()
+ subsnp[i].yaxis.set_label_position("right")
+ subsnp[i].set_ylabel(vnames[i], fontsize=fs1)
+ #subsnp[i].set_ylabel(r'$'+vnames[i]+'$', fontsize=fs1)
+
+ plt.savefig(figname)
+
+ else:
+ # Section 5
+        # Dense plot format: plot the full lower-triangular matrix but w/o any white space, tick marks or labels.
+        print "Assembling lower-triangular plots in dense format"
+
+ # ds is the distance between subplots
+ # xs,ys are the coordinates (normalized) of the subplot in the lower left corner
+        # xe,ye are the distances left in the upper right corner
+ # fsizex, fsizey are figure sizes
+ # ncont are no of contours for 2D pdfs
+ xs=0.12; ys=0.1; ds=0.0
+ xe=0.08; ye=0.05
+ fsizex=12; fsizey=12;
+ ncont=10;
+ sx=(1-(nvars-1)*ds-xs-xe)/nvars;
+ sy=(1-(nvars-1)*ds-ys-ye)/nvars;
+ fs1=20
+ majorFormatter = FormatStrFormatter('%6.0e')
+
+ figname=out_file_base+".tridiag-dense.pdf" # figure name
+
+ fig_d = plt.figure(figsize=(fsizex,fsizey))
+
+ # Section 5.1
+ subs=[]
+ # add diagonal plots
+ for i in range(nvars):
+ subs.append(fig_d.add_axes([xs+i*(sx+ds),ys+(nvars-1-i)*(sy+ds),sx,sy]))
+
+ # add lower triangular plots
+ for i in range(nvars-1):
+ for j in range(i+1):
+ if (debug > 2):
+ print j,(nvars-2-i)
+ subs.append(fig_d.add_axes([xs+j*(sx+ds),ys+(nvars-2-i)*(sy+ds),sx,sy]))
+
+ subsnp=np.array(subs)
+
+ # Plot 1D pdfs along diagonals
+ for i in range(nvars):
+ subsnp[i].plot(xlin[i],pdflin[i])
+
+ # Plot 2D pdfs
+ for i in range(nvars*(nvars-1)/2):
+ subsnp[nvars+i].contour(xmesh[i],ymesh[i],zmesh[i],ncont)
+
+ # Section 5.2
+ # no ticks and ticklabels
+ for subpl in subsnp:
+ subpl.set_xticks([]);
+ subpl.set_yticks([]);
+
+ # Set variable names
+ # for i in range(nvars):
+ # subsnp[i].yaxis.set_label_position("right")
+ # subsnp[i].set_ylabel(vnames[i], fontsize=fs1)
+ # #subsnp[i].set_ylabel(r'$'+vnames[i]+'$', fontsize=fs1)
+
+
+ plt.savefig(figname)
+
+ print "Assembling marginal density plots for all pairs of MCMC variables"
+
+ # ds is the distance between subplots
+ # xs,ys are the coordinates (normalized) of the subplot in the lower left corner
+    # xe,ye are the distances left in the upper right corner
+ # fsizex, fsizey are figure sizes
+ # ncont are no of contours for 2D pdfs
+ xs=0.12; ys=0.1; ds=0.04
+ xe=0.08; ye=0.05
+ fsizex=12; fsizey=12;
+ ncont=20;
+ nvars_sm=2
+ sx=(1-(nvars_sm-1)*ds-xs-xe)/nvars_sm;
+ sy=(1-(nvars_sm-1)*ds-ys-ye)/nvars_sm;
+ fs1=20
+ majorFormatter = FormatStrFormatter('%6.0e')
+
+
+ # loop over all pairs of MCMC variables.
+ for j in range(istart+1,istart+nvars):
+ for i in range(istart,j):
+
+ print "Plotting densities for variables",vnames[i],"and",vnames[j]
+ figname=out_file_base + "." + vnames[i] + "-" + vnames[j] + ".pdf"
+
+ fig_sm = plt.figure(figsize=(fsizex,fsizey))
+
+ subs=[]
+ # add diagonal plots
+ subs.append(fig_sm.add_axes([xs ,ys+(sy+ds),sx,sy])) # marginal for var i
+ subs.append(fig_sm.add_axes([xs+(sx+ds),ys ,sx,sy])) # marginal for var j
+
+ # add lower triangular plot
+ subs.append(fig_sm.add_axes([xs ,ys ,sx,sy])) # marginal for vars i,j
+
+ subsnp=np.array(subs)
+
+ # Plot 1D pdfs
+ subsnp[0].plot(xlin[i],pdflin[i])
+ subsnp[1].plot(xlin[j],pdflin[j])
+
+ # Plot 2D pdfs
+ i_2D = cov_idx[i,j]
+ subsnp[2].contour(xmesh[i_2D],ymesh[i_2D],zmesh[i_2D],ncont)
+
+ # set just a few ticks and ticklabels
+ for subpl in subsnp:
+ subpl.locator_params(tight=True, nbins=5)
+
+ # no ticks and ticklabels on y axes on diagonals (first two plots in subsnp array)
+ # no grid on diagonal plots
+ for subpl in subsnp[0:2]:
+ subpl.set_yticks([])
+ subpl.grid(False)
+
+ # for diagonal plots only put xmin and xmax
+ #subsnp[0].set_xticks([d0[nskip:,i].min(),d0[nskip:,i].max()]);
+ #subsnp[1].set_xticks([d0[nskip:,j].min(),d0[nskip:,j].max()]);
+
+
+ # Set y labels on the right for diagonal plots
+ #subsnp[0].yaxis.tick_right()
+ subsnp[0].yaxis.set_label_position("right")
+ subsnp[0].set_ylabel(vnames[i], fontsize=fs1)
+
+ #subsnp[1].yaxis.tick_right()
+ subsnp[1].yaxis.set_label_position("right")
+ subsnp[1].set_ylabel(vnames[j], fontsize=fs1)
+
+ # Write out figure
+ plt.savefig(figname)
+
+###################################################################################################
+def get_mcmc_stats(all_samples,v_names,out_file_base,debug):
+ """
+ Generate statistics of the passed in MCMC samples.
+ Assumes that the first column of all_samples contains the step number, and the last two
+ columns contain the acceptance probability and the posterior probability for each sampled state.
+ """
+
+ # Number of variables, columns, samples in the file
+ n_vars = len(v_names)
+ n_cols = all_samples.shape[1]
+ n_sam = all_samples.shape[0]
+
+ # Extract all MCMC chain variables in separate array
+ var_samples = all_samples[:,1:1+n_vars]
+ if (debug > 0):
+ print var_samples.shape
+
+ # Compute mean parameter values
+ par_mean = np.mean(var_samples,axis=0,dtype=np.float64)
+
+ #print "\nParameter mean values:\n"
+ #for i_v in range(n_vars):
+ # print " ", v_names[i_v], ":", par_mean[i_v]
+
+ # Compute the covariance
+ par_cov = np.cov(var_samples,rowvar=0)
+
+ print "\nParameter covariances:\n"
+ print par_cov
+
+ # write out covariance matrix to file
+ cov_file_name = out_file_base + ".covariance.dat"
+ np.savetxt(cov_file_name,par_cov)
+
+ # print the square root of the diagonal entries of the covariance
+ #print "\nParameter standard deviations (proposal width estimates):\n"
+ #for i_v in range(n_vars):
+ # print " ", v_names[i_v], ":", math.sqrt(par_cov[i_v,i_v])
+
+ #
+ # Compute the MAP values
+ # (could also get this from the last line of the MCMC output file
+ # but this line is not always there; and it is more fun
+ # to do it with Python)
+ #
+
+ # Sample index with max posterior prop (last column in MCMC file):
+ i_map = all_samples[:,-1].argmax()
+
+ print "\n",
+ print '%27s' % "Parameter :", '%15s' % "Mean Value", '%15s' % "MAP values", '%15s' % "Std. Dev."
+ for i_v in range(n_vars):
+ print '%25s' % v_names[i_v], ":", '%15.8e' % par_mean[i_v], '%15.8e' % var_samples[i_map,i_v],
+ print '%15.8e' % math.sqrt(par_cov[i_v,i_v])
+
+ # Write mean and MAP to file
+ mean_file_name = out_file_base + ".mean.dat"
+ np.savetxt(mean_file_name,par_mean)
+
+ map_file_name = out_file_base + ".map.dat"
+ np.savetxt(map_file_name,var_samples[i_map,:])
+
+ # Compute mean and standard deviation of acceptance probability
+ print "\nAcceptance Probability:\n"
+
+ # In some cases, the next to last column contains the ratio of posterior
+ # values rather than the acceptance probability. First convert this number
+ # to acceptance probabilities: acc_prob = min(alpha,1)
+ # (This does no harm if the next to last column already contains the actual acceptance probability)
+ acc_prob = np.minimum(all_samples[:,-2],np.ones_like(all_samples[:,-2]))
+ print "Mean :",acc_prob.mean(),
+ print "Std. Dev.:",acc_prob.std()
+
+ #
+ # Compute effective sample size (ESS)
+ #
+ print "\nEffective Sample Sizes:\n"
+
+ ess = effective_sample_sizes(var_samples,par_mean,par_cov)
+
+ for i_v in range(n_vars):
+ print " ",v_names[i_v],":",int(ess[i_v]),"out of",n_sam
+
+###################################################################################################
+
+help_string = """
+Usage:
+  mcmc_stats.py [-h] -i <mcmc data file> [--nb <number of burn-in samples>] [-s <stride>] [--nolabels]
+what
+ Compute elementary statistics of MCMC chain
+where
+ -h = print help info
+ -i = name of file containing MCMC data
+ -s = stride with which to read the file [defaults to 1]
+ --nb = number of burn-in samples to be removed from the chain [defaults to 0]
+ --nolabels Indicates that the MCMC data file does not contain column labels (in which case they are generated)
+"""
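+
+# For instance, a hypothetical invocation that drops 1000 burn-in samples and then
+# reads every 2nd line of a chain file named chain.dat (file name is illustrative):
+#   ./mcmc_stats.py -i chain.dat --nb 1000 -s 2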
+
+if __name__ == "__main__":
+ #
+ # Process inputs
+ #
+ try:
+ opts,v_names = getopt.getopt(sys.argv[1:],"hi:s:",["nb=","nolabels"])
+ except getopt.GetoptError, err:
+ print str(err)
+ print help_string
+ sys.exit(1)
+
+ # Default values
+ samples_file_name=""
+ n_burnin = 0
+ stride = 1
+ labels_present = True
+
+ for o,a in opts:
+ if o == "-h":
+ print help_string
+ sys.exit(0)
+ elif o == "-i":
+ samples_file_name = a
+ elif o == "-s":
+ stride = int(a)
+ elif o == "--nb":
+ n_burnin = int(a)
+ elif o == "--nolabels":
+ labels_present = False
+ else:
+ assert False, "Unhandled command line parsing option. Use -h flag to get usage info."
+
+ # error checking
+ if(samples_file_name==""):
+ print "Sample file name must be specified"
+ print help_string
+ sys.exit(1)
+
+ if (n_burnin < 0):
+ print "The number of burn-in samples needs to be >= 0"
+ print help_string
+ sys.exit(1)
+
+  if (stride < 1):
+    print "The file read stride needs to be >= 1"
+ print help_string
+ sys.exit(1)
+
+ # Base name of file for outputting results
+ out_file_base = samples_file_name + ".nb" + str(n_burnin) + ".s" + str(stride)
+
+ # Set to 1 to get more output to screen
+ # Set to > 1 to get a lot of output to screen
+ debug = 1
+
+ # Set to 1 for showing plots interactively
+ interact = 0
+
+ #
+ # Import variables of interest from the MCMC data file
+ #
+ all_samples, v_names = extract_all_vars(samples_file_name,n_burnin,debug,stride,labels=labels_present)
+
+ # Get statistics
+ get_mcmc_stats(all_samples,v_names,out_file_base,debug)
+
+
+
+
+
+
diff --git a/PyUQTk/inference/mcmc.py b/PyUQTk/inference/mcmc.py
new file mode 100755
index 00000000..5bd2fd7d
--- /dev/null
+++ b/PyUQTk/inference/mcmc.py
@@ -0,0 +1,484 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+import numpy as npy
+import scipy.stats
+import scipy.linalg
+import math
+import uuid
+import matplotlib.pyplot as plt
+
+global Rmat,invRmat
+
+
+#---------------------------------------------------------------------------------------
+# Simple Hamiltonian MCMC routine
+# Uses Leapfrog for the time stepping
+#---------------------------------------------------------------------------------------
+def HMCMC(U,grad_U,dt,nT,q):
+ '''
+ Hamiltonian MCMC routine
+
+ Input:
+ -----
+
+ U - potential energy function, -log(posterior)
+ grad_U - gradient of potential energy function
+ dt - time step, dt, for leapfrog method
+ nT - number of time steps in leapfrog method
+ q - initial state of chain (position vector)
+
+ Output:
+ ------
+ Next vector in the chain state
+
+ Example:
+ -------
+ q_next = HMCMC(U,grad_U,1e-2,25,q_current)
+
+ '''
+ current_q = npy.copy(q) # save current
+
+ # generate current p
+ # propcov = 4*array([[ 0.01175383, 0.02065261],[ 0.02065261, 0.04296117]])
+ p = npy.random.randn(len(current_q))
+ # p = random.multivariate_normal([0,0],propcov)
+ current_p = npy.copy(p) # save current p
+
+ # make half step for momentum used for leap frog step
+ p = p - dt * grad_U(q)/2.0
+
+ for i in range(nT):
+ # p = p - dt * grad_U(q)/2.0
+ q = q + dt*p
+ # p = p - dt * grad_U(q)/2.0
+ if (i != nT-1): p = p - dt*grad_U(q)
+
+ # make a half step for momentum at the end
+ p = p - dt * grad_U(q)/2.0
+
+ # negate the momentum to make a symmetric proposal
+ p = -p
+
+ # Evaluate potential and kinetic energy
+ current_U = U(current_q)[0]
+ current_K = npy.sum(current_p**2)/2.0
+ proposed_U = U(q)[0]
+ proposed_K = npy.sum(p**2)/2.0
+
+ # Accept or reject the state at end of trajectory, returning either
+ # the position at the end of the trajectory or the initial position
+
+ if (npy.log(npy.random.rand()) < current_U-proposed_U+current_K-proposed_K):
+ return q
+ else:
+ return current_q
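+
+# A minimal target sketch for HMCMC (illustration only, not part of UQTk): a standard
+# Gaussian posterior has potential U(q) = 0.5*q.q with gradient q. Note that HMCMC
+# evaluates U(q)[0], so U should return an indexable object:
+#
+#   U      = lambda q: [0.5*npy.dot(q,q)]
+#   grad_U = lambda q: q
+#   q_next = HMCMC(U, grad_U, 1e-2, 25, npy.zeros(2))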
+
+
+#---------------------------------------------------------------------------------------
+# Example:
+# 1. Banana-shaped posterior density
+#---------------------------------------------------------------------------------------
+def norm_pdf_multivariate(x, mu, sigma):
+ """
+ Multi-variate normal pdf
+ x : list or numpy array
+ mu : 1D numpy array
+ sigma: 2D numpy array"""
+ size = len(x)
+ if size == len(mu) and (size, size) == sigma.shape:
+ det = npy.linalg.det(sigma)
+ if det == 0:
+ raise NameError("The covariance matrix can't be singular")
+ norm_const = 1.0/ ( math.pow((2*npy.pi),float(size)/2) * math.pow(det,1.0/2) )
+ x_mu = npy.matrix(x - mu)
+ inv = npy.linalg.inv(sigma)
+ result = math.pow(math.e, -0.5 * (x_mu * inv * x_mu.T))
+ return norm_const * result
+ else:
+ raise NameError("The dimensions of the input don't match")
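+
+# Note: in SciPy >= 0.14 the same density is available as
+# scipy.stats.multivariate_normal(mean=mu, cov=sigma).pdf(x); the explicit form
+# above keeps this module free of that version requirement.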
+
+def tranB(x1,x2,a):
+ """
+ Coordinate transform for banana-shaped pdf
+ x1,x2: 2D numpy arrays
+ a: list containing the transform factors
+ """
+ a1 = a[0]; a2 = a[1];
+ y1 = a1*x1;
+ y2 = x2/a1 - a2*(y1**2 + a1**2);
+ return y1,y2
+
+def invTranB(x1,x2,a):
+ """ Inverse coordinate transform for banana-shaped pdf
+ x1,x2: 2D numpy arrays
+ a: list containing the transform factors
+ """
+ a1 = a[0]; a2 = a[1];
+ y1 = x1/a1;
+ y2 = x2*a1 + a1*a2*(x1**2 + a1**2);
+ return y1,y2
+
+def plotBanana():
+ """
+ Plot banana-shaped function; parameters are hard-wired
+ """
+ xb,yb = npy.mgrid[-3:3:.05, -11:1:.05]
+ x, y = invTranB(xb,yb,[1,1])
+ pos = npy.empty(x.shape + (2,))
+ pos[:, :, 0] = x; pos[:, :, 1] = y
+ mu = npy.array([0.0,0.0])
+ cov = npy.array([[1.0, 0.9], [0.9, 1.0]])
+ z = x.copy()
+ for i in range(x.shape[0]):
+ for j in range(x.shape[1]):
+ z[i,j] = norm_pdf_multivariate([x[i,j],y[i,j]], mu, cov)
+ plt.contour(xb,yb,z,50)
+ plt.show()
+ return
+
+def postBanana(spl,postinfo):
+ """
+ Computes the Log of the posterior density for banana-shaped pdf
+
+ Input:
+ spl: Current parameter set sample
+ postinfo : Contains parameters for the posterior density
+ Output:
+ The log of the posterior density
+ """
+
+ afac = postinfo['afac']
+ mu = postinfo['mu' ]
+ cov = postinfo['cov' ]
+ xb,yb = spl ;
+ x, y = invTranB(xb,yb,afac) ;
+ return npy.log(norm_pdf_multivariate([x,y], mu, cov))
+
+def dram_ex(method,nsteps):
+ """
+ Example using the DRAM sampler to explore the posterior of the banana-shaped
+ posterior density.
+
+ Input:
+ method: either 'am' or 'dram' (see below under the dram function)
+ nsteps: number of steps to take (samples to take)
+ Output:
+ A tuple with samples and other information. See the dram function for more info.
+ """
+ # define MCMC parameters (see below under the dram function for more info about these options)
+ cini = npy.array([-1.0,-4.0]) # Initial guesses
+ spllo = npy.array([-4.0,-12.0]) # Lower bounds on samples
+  splhi = npy.array([ 4.0, 2.0]) # Upper bounds on samples
+ cvini = npy.array([[0.1,0.0],[0.0,0.1]]) # Initial covariance matrix of proposal distribution
+ opts={'method':method,'nsteps':nsteps,'nburn':1000,'nadapt':100,'nfinal':10000000,
+ 'inicov':cvini,'coveps':1.e-10,'burnsc':5,'gamma':1.0,'ndr':2,'drscale':[5,4,3],
+ 'spllo':spllo,'splhi':splhi}
+ lpinfo={'afac':[1.0,1.0],'cov': npy.array([[1,0.9],[0.9,1]]),'mu':npy.array([0.0,0.0])}
+ sol=dram(opts,cini,postBanana,lpinfo)
+ return sol
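+
+# For instance, a hypothetical run of this example:
+#   spls = dram_ex('dram',10000)[0]  # chain samples, shape (10000, 2)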
+#---------------------------------------------------------------------------------------
+# DRAM
+#---------------------------------------------------------------------------------------
+def logPropRatio(iq,spls):
+ """
+  Gaussian n-th stage log proposal ratio
+ log of q_i(y_n,..,y_n-j) / q_i(x,y_1,...,y_j)
+ """
+ global invRmat
+ stage = len(spls)-1;
+ if stage == iq:
+ return (0.0); # symmetric
+ else:
+ iRmat = invRmat[iq-1]; # proposal^(-1/2)
+ y1 = spls[0] ; # y1
+ y2 = spls[iq] ; # y_i
+ y3 = spls[stage ] ; # y_n
+ y4 = spls[stage-iq] ; # y_(n-i)
+ return (-0.5*(npy.linalg.norm(npy.dot(y4-y3,iRmat))**2-npy.linalg.norm(npy.dot(y2-y1,iRmat))**2));
+
+def logPostRatio(p1,p2):
+ return (p2-p1);
+
+def getAlpha(spls,post):
+ stage = len(spls) - 1;
+ a1 = 1.0; a2 = 1.0;
+ for k in range(1,stage):
+ a1 = a1*(1-getAlpha(spls[:k+1],post[:k+1]));
+ a2 = a2*(1-getAlpha(spls[-1:-(k+1):-1],post[-1:-(k+1):-1]));
+ if a2 == 0.0:
+ return (0.0);
+ y = logPostRatio(post[0],post[-1]);
+ for k in range(1,stage+1):
+ y = y + logPropRatio(k,spls);
+ return min(1.0, npy.exp(y)*a2/a1);
+
+def ucov(spl,splmean,cov,lastup):
+ #
+  # Recursively update the running sample mean (splmean) and covariance (cov) with
+  # the new samples in spl; lastup is the number of samples already incorporated
+ #
+ if len(spl.shape) == 1:
+ nspl = 1;
+ ndim = spl.shape[0];
+ else:
+ (nspl,ndim)=spl.shape;
+ if nspl>0:
+ for i in range(nspl):
+ iglb = lastup+i;
+ splmean = (iglb*splmean+spl[i])/(iglb+1);
+ rt = (iglb-1.0)/iglb;
+ st = (iglb+1.0)/iglb**2;
+ cov = rt*cov+st*npy.dot(npy.reshape(spl[i]-splmean,(ndim,1)),npy.reshape(spl[i]-splmean,(1,ndim)))
+ return lastup+nspl,splmean,cov
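+
+# The loop above is the standard recursive mean/covariance update: with m_k and C_k
+# the mean and covariance of the first k samples (k = iglb+1 as sample x_k is added),
+#   m_k = ((k-1)*m_{k-1} + x_k) / k
+#   C_k = ((k-2)/(k-1))*C_{k-1} + (k/(k-1)**2)*(x_k - m_k)*(x_k - m_k)^T
+# which matches rt = (iglb-1)/iglb and st = (iglb+1)/iglb**2 in the code.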
+
+def dram(opts,cini,likTpr,lpinfo):
+ """
+ #
+ # DRAM
+ #
+ Delayed Rejection Adaptive MCMC
+ opts - dictionary of parameters for DRAM
+ method : either 'am' (adaptive metropolis) or 'dram' (am+delayed rejection)
+ nsteps : no. of mcmc steps
+ nburn : no. of mcmc steps for burn-in (proposal fixed to initial covariance)
+ nadapt : adapt every nadapt steps after nburn
+ nfinal : stop adapting after nfinal steps
+ inicov : initial covariance
+ coveps : small additive factor to ensure covariance matrix is positive definite
+ (only added to diagonal if covariance matrix is singular without it)
+ burnsc : factor to scale up/down proposal if acceptance rate is too high/low
+ gamma : factor to multiply proposed jump size with in the chain past the burn-in phase
+ (Reduce this factor to get a higher acceptance rate.)
+ (Defaults to 1.0)
+ ndr : no. of delayed rejection steps (if dram is requested)
+ drscale: scale factors for delayed rejection
+ spllo : lower bounds for chain samples
+ splhi : upper bounds for chain samples
+ cini - starting mcmc state
+ likTpr - log-posterior function
+ lpinfo - dictionary with settings that will be passed to the log-posterior function
+
+ Output:
+ spls: chain samples (dimension nsteps x chain dimension)
+ [cmode,pmode]: MAP estimate (cmode) and posterior at MAP estimate (pmode)
+ [1.0-float(rej)/nsteps,
+ 1.0-float(rejlim)/nsteps]: acceptance ratio and fraction of samples inside the bounds
+ [rej,rejlim]: total number of rejected samples and total number
+ of samples outside the bounds
+ meta_info: acceptance probability and posterior probability for each sample (dimension nsteps x 2)
+
+ To Do:
+ Provide option to dump MCMC chain as the computations proceed, to avoid having such large
+ files to hold all states, and so that partial output is available during the MCMC run for
+ preliminary analysis.
+ """
+ # -------------------------------------------------------------------------------
+ # Parse options
+ # -------------------------------------------------------------------------------
+ if 'method' in opts:
+ method = opts['method']
+ else:
+ print 'Error in dram: method unspecified !'; quit()
+
+ nsteps = opts['nsteps']
+ nburn = opts['nburn' ]
+ nadapt = opts['nadapt']
+ nfinal = opts['nfinal']
+ inicov = opts['inicov']
+ coveps = opts['coveps']
+ burnsc = opts['burnsc']
+ spllo = opts['spllo' ]
+ splhi = opts['splhi' ]
+
+ if 'gamma' not in opts:
+ gamma = 1.0 # Default for backwards compatibility
+ else:
+ gamma = opts['gamma' ]
+
+ if method=='dram':
+ ndr = opts['ndr']
+ drscale = opts['drscale']
+
+ if 'ofreq' not in opts:
+ ofreq = 10000 # Default for backwards compatibility
+ else:
+ ofreq = opts['ofreq' ]
+
+ rej = 0; # Counts number of samples rejected
+ rejlim = 0; # Counts number of samples rejected as out of prior bounds
+ rejsc = 0; # Counts number of rejected samples since last rescaling
+ # -------------------------------------------------------------------------------
+ # Pre-processing
+ # -------------------------------------------------------------------------------
+ cdim = cini.shape[0]; # chain dimensionality
+ cov = npy.zeros((cdim,cdim)); # covariance matrix
+ spls = npy.zeros((nsteps,cdim)); # MCMC samples
+ meta_info = npy.zeros((nsteps,2)) # Column for acceptance probability and posterior prob. of current sample
+ na = 0; # counter for accepted jumps
+ sigcv = 2.4*gamma/npy.sqrt(cdim); # covariance factor
+ spls[0] = cini; # initial sample set
+ p1 = likTpr(spls[0],lpinfo); # and posterior probability of initial sample set
+ meta_info[0] = [0.e0,p1] # Arbitrary initial acceptance and posterior probability of initial guess
+ pmode = p1; # store current chain MAP probability value
+ cmode = spls[0]; # current MAP parameter Set
+ nref = 0; # Samples since last proposal rescaling
+ # -------------------------------------------------------------------------------
+ # Prepare temporary file
+ # -------------------------------------------------------------------------------
+ tmp_file = str(uuid.uuid4())+'.dat'
+ print 'Saving intermediate chains to ',tmp_file
+ # -------------------------------------------------------------------------------
+ # Main loop
+ # -------------------------------------------------------------------------------
+ for k in range(nsteps-1):
+ #
+ # Deal with covariance matrix
+ #
+ covMatUpd = False
+ if k == 0:
+ splmean = spls[0];
+ propcov = inicov ;
+ Rchol = scipy.linalg.cholesky(propcov) ;
+ lastup = 1; # last covariance update
+ covMatUpd = True ;
+ else:
+ if (nadapt>0) and ((k+1)%nadapt)==0:
+        if k<nburn:
+          if float(rejsc)/nref>0.95:
+ Rchol = Rchol/burnsc # scale down proposal
+ covMatUpd = True ;
+ print "Scaling down the proposal at step",k
+ elif float(rejsc)/nref<0.05:
+ Rchol = Rchol*burnsc # scale up proposal
+ covMatUpd = True ;
+ print "Scaling up the proposal at step",k
+ nref = 0 ;
+ rejsc = 0 ;
+ else:
+ lastup,splmean,cov=ucov(spls[lastup:lastup+nadapt,:],splmean,cov,lastup)
+ try:
+ Rchol = scipy.linalg.cholesky(cov)
+ except scipy.linalg.LinAlgError:
+ try:
+ # add to diagonal to make the matrix positive definite
+ Rchol = scipy.linalg.cholesky(cov+coveps*npy.identity(cdim))
+ except scipy.linalg.LinAlgError:
+ print "Covariance matrix is singular even after the correction"
+ Rchol = Rchol*sigcv
+ covMatUpd = True ;
+ if (method == 'dram') and covMatUpd:
+ Rmat = [Rchol]; invRmat = [scipy.linalg.inv(Rchol)]
+ for i in range(1,ndr):
+ Rmat.append(Rmat[i-1]/drscale[i-1])
+ invRmat.append(invRmat[i-1]*drscale[i-1])
+ #-Done with covariance matrix
+ nref = nref + 1 ;
+ #
+ # generate proposal and check bounds
+ #
+ u = spls[k]+npy.dot(npy.random.randn(1,cdim),Rchol)[0];
+ if npy.any(npy.less(u,spllo)) or npy.any(npy.greater(u,splhi)):
+ outofbound = True
+ accept = False
+ p2 = -1.e100 # Arbitrarily low posterior likelihood
+ pr = -1.e100 # Arbitrarily low acceptance probability
+ else:
+ outofbound = False
+ if not outofbound:
+ p2 = likTpr(u,lpinfo);
+ pr = npy.exp(p2-p1);
+ if (pr>=1.0) or (npy.random.random_sample()<=pr):
+ spls[k+1] = u.copy(); # Store accepted sample
+ meta_info[k+1] = [pr,p2] # and its meta information
+ p1 = p2;
+ if p1 > pmode:
+ pmode = p1 ;
+ cmode = spls[k+1] ;
+ accept = True
+ else:
+ accept = False
+ #
+ # See if we can do anything about a rejected proposal
+ #
+ if not accept:
+ if (method == 'am'):
+ # if 'am' then reject
+ spls[k+1]=spls[k];
+ meta_info[k+1,0] = pr # acceptance probability of failed sample
+ meta_info[k+1,1] = meta_info[k,1] # Posterior probability of sample k that has been retained
+ rej = rej + 1;
+ rejsc = rejsc + 1;
+ if outofbound:
+ rejlim = rejlim + 1;
+ elif (method == 'dram'):
+ # try delayed rejection
+ tryspls = [spls[k].copy(),u.copy()]
+ trypost = [p1,p2]
+ jdr = 1
+        while (not accept) and (jdr < ndr):
+          # Propose from the jdr-th delayed-rejection stage (proposal scaled down by drscale)
+          u = spls[k]+npy.dot(npy.random.randn(1,cdim),Rmat[jdr])[0];
+          if npy.any(npy.less(u,spllo)) or npy.any(npy.greater(u,splhi)):
+            outofbound = True
+            p2 = -1.e100 # Arbitrarily low posterior likelihood
+          else:
+            outofbound = False
+            p2 = likTpr(u,lpinfo);
+          tryspls.append(u.copy())
+          trypost.append(p2)
+          alpha = getAlpha(tryspls,trypost)
+          jdr = jdr + 1
+          if (alpha >= 1.0) or (npy.random.random_sample() < alpha):
+            accept = True;
+            spls[k+1] = u.copy(); # Store accepted sample
+            meta_info[k+1] = [alpha,p2] # and its meta information
+            p1 = p2;
+            if p1 > pmode:
+              pmode = p1 ;
+              cmode = spls[k+1] ;
+ if not accept:
+ spls[k+1]=spls[k] ;
+ meta_info[k+1,0] = alpha # acceptance probability of failed sample
+ meta_info[k+1,1] = meta_info[k,1] # Posterior probability of sample k that has been retained
+ rej = rej + 1;
+ rejsc = rejsc + 1;
+ if outofbound:
+ rejlim = rejlim + 1;
+ else:
+ print "Unknown MCMC method ",method," -> Quit\n"; quit()
+ # Done with if over methods
+ # Done with if over original accept
+ if ((k+1)%ofreq==0):
+ print 'No. steps:',k+1,', No. of rej:',rej
+ fout = open(tmp_file, 'a+')
+ npy.savetxt(fout, spls[k-ofreq+1:k+1,:], fmt='%.8e',delimiter=' ', newline='\n')
+ fout.close()
+ # Done loop over all steps
+
+ # return output: samples, MAP sample and its posterior probability, overall acceptance probability
+ # and probability of having sample inside prior bounds, overall number of samples rejected, and rejected
+ # due to being out of bounds.
+ return (spls,[cmode,pmode],[1.0-float(rej)/nsteps,1.0-float(rejlim)/nsteps],[rej,rejlim],meta_info)
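+
+# Unpacking sketch for the return tuple (variable names are illustrative):
+#   spls,[cmode,pmode],[acc_frac,inb_frac],[rej,rejlim],meta_info = dram(opts,cini,postBanana,lpinfo)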
diff --git a/PyUQTk/inference/postproc.py b/PyUQTk/inference/postproc.py
new file mode 100755
index 00000000..27e7941b
--- /dev/null
+++ b/PyUQTk/inference/postproc.py
@@ -0,0 +1,999 @@
+#!/usr/bin/env python
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+#
+# Does statistical analysis on samples from an MCMC chain.
+
+
+import os
+import sys
+import string
+import numpy as np
+import getopt
+import math
+import matplotlib.pyplot as plt
+from scipy import stats, mgrid, c_, reshape, random, rot90
+from matplotlib.ticker import MultipleLocator, FormatStrFormatter
+
+try:
+ import pymc
+ have_pymc = True
+except ImportError:
+ print "PyMC is required for some of the MCMC postprocessing codes."
+ print "Will proceed without, but some convergence tests will not be available."
+ have_pymc = False
+
+##################################################################################
+# Compute autocorrelation of a one-dimensional set of samples
+# Main function is acor(X), where X is a numpy array of samples
+##################################################################################
+from numpy import *
+from matplotlib.pyplot import *
+def acor_in(X, MAXLAG, WINMULT):
+
+ # compute mean of X
+ L = len(X)
+ mu = mean(X)
+ Xhat = X - mu
+ std = sqrt(var(X))
+
+ iMax = L - MAXLAG
+
+ # compute autocorrelation
+ # sind = arange(MAXLAG+1)
+ iind = arange(iMax)
+ C = zeros(MAXLAG + 1)
+ for s in range(MAXLAG+1):
+ C[s] += sum(Xhat[iind]*Xhat[iind+s])
+ C *= 1./iMax
+
+ D = C[0] # diffusion coeff
+ D += 2*sum(C[1:])
+
+ sigma = sqrt(abs(D/L))
+ tau = D/C[0]
+
+ # print D, L, sigma, tau, tau*WINMULT, MAXLAG
+ return C[0], D, L, sigma, tau, tau*WINMULT, Xhat
+# take in 1d numpy array of samples X
+def acor(X,MAXLAG = 10, WINMULT = 5):
+ C0, D, L, sigma, tau, tauWINMULT, X = acor_in(X, MAXLAG, WINMULT)
+ # print tau, sigma
+ Ls = []
+ S = []
+ while tau*WINMULT >= MAXLAG:
+ Lh = L/2
+ Ls.append(Lh)
+ j1,j2 = 0,1
+ for i in range(Lh):
+ X[i] = X[j1] + X[j2]
+ j1 += 2
+ j2 += 2
+ _, D, L, sigma, tau, tauWINMULT, X = acor_in(X[:Lh], MAXLAG, WINMULT)
+ S.append(sigma)
+ if len(S) == 0:
+ S.append(sigma)
+ Ls.append(L)
+
+ sigma = S[-1]
+ Ls = 2*array(Ls[::-1])
+ for i in range(len(S)):
+ D = .25 * sigma**2 * Ls[i]
+ tau = D/C0
+ sigma = sqrt(D/Ls[i])
+
+ return tau
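+
+# Interpretation sketch: for a 1D numpy array of samples X, tau = acor(X) estimates the
+# integrated autocorrelation time, so len(X)/tau gives a rough effective sample size
+# (that last step is an interpretation, not computed by the routine itself).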
+###################################################################################################
+def compute_group_auto_corr(v,maxlag):
+ """Compute autocorrelation of v, an array where each column is a set of samples,
+  for a lag ranging from 0 to maxlag-1. Outputs numpy array with autocorrelation."""
+
+ # Get dimensions of input array with samples
+ n_pts = np.shape(v)[0]
+ n_var = np.shape(v)[1]
+
+ # Initialize array
+ auto_corr = np.zeros((maxlag,n_var))
+
+ # Get mean and variance of v for each variable over the samples provided
+ v_m = v.mean(0)
+ v_var = v.var(0)
+
+
+ # Subtract the mean of v
+ v_nm = v - v_m
+
+ # Compute autocovariance of v over all variables
+ for lag in range(maxlag):
+ n_sum = n_pts - lag # total number of terms in sum
+ for i in range(n_sum):
+ auto_corr[lag,:] += v_nm[i,:]*v_nm[i+lag,:]
+ auto_corr[lag,:] /= float(n_sum)
+
+ # Normalize by variance
+ auto_corr /= v_var
+
+ return auto_corr
+###################################################################################################
+def compute_auto_corr(v,maxlag):
+ """Compute autocorrelation of v (1D vector of samples) for a lag ranging from 0 to maxlag-1.
+  Outputs numpy array with autocorrelation."""
+
+ # Initialize array
+ auto_corr = np.zeros(maxlag)
+
+ # Get mean and variance of v
+ v_m = v.mean()
+ v_var = v.var()
+ n_pts = len(v)
+
+ # Subtract the mean of v
+ v_nm = v - v_m
+
+ # Compute autocovariance of v
+ for lag in range(maxlag):
+ n_sum = n_pts - lag # total number of terms in sum
+ for i in range(n_sum):
+ auto_corr[lag] += v_nm[i]*v_nm[i+lag]
+ auto_corr[lag] /= float(n_sum)
+
+ # Normalize by variance
+ auto_corr /= v_var
+
+ return auto_corr
+###################################################################################################
+def plot_auto_corr(v,vname):
+ """Plot autocorrelation (in v), for variable with name vname"""
+
+ # Set up the figure
+ fig = plt.figure(figsize=(8,6))
+ ax = fig.add_axes([0.10, 0.10, 0.85, 0.85])
+ l1 = plt.plot(v,label=vname)
+
+ ax.set_xlabel("lag")
+ ax.set_ylabel("autocorrelation")
+ plt.ylim([-0.1,1])
+
+ plt.legend()
+ plt.savefig("corr_"+vname+".pdf")
+ plt.close()
+
+ return
+###################################################################################################
+def compute_effective_sample_size(n_sam,auto_corr):
+ """Computes the effective sample size for a vector of samples
+ by dividing the number of samples (n_sam) by the integral
+ of the autocorrelation (auto_corr) between the samples. (i.e. the more
+ correlated successive samples are, the less independent samples there are
+ in the chain.)
+ The algorithm is based on:
+ Markov Chain Monte Carlo in Practice: A Roundtable Discussion
+ Robert E. Kass, Bradley P. Carlin, Andrew Gelman and Radford M. Neal
+ The American Statistician, Vol. 52, No. 2 (May, 1998), pp. 93-100
+ Published by: American Statistical Association
+ Article DOI: 10.2307/2685466
+ Article Stable URL: http://www.jstor.org/stable/2685466
+ """
+
+ # Length of autocorrelation array
+ n_ac = len(auto_corr)
+
+ # Find the lag where the autocorrelation goes to zero (or below)
+ i_zero = 1 # start at lag 1 since the autocorrelation has value 1.0 at lag 0 by definition
+ done = False
+ while (i_zero < n_ac and not done):
+ if auto_corr[i_zero] > 0.0:
+ i_zero += 1
+ else:
+ done = True
+
+ if i_zero == n_ac:
+ print "WARNING: Autocorrelation did not go to zero within range provided"
+
+ # Integral relies on symmetry and the fact that autocorrelation is 1 at zero lag
+ ESS = int(n_sam / (1.0+2.0*np.sum(auto_corr[1:i_zero])))
+
+ return ESS
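+
+# Example sketch combining the two routines above (hypothetical 1D chain `v`):
+#   ac  = compute_auto_corr(v, min(1000,len(v)))
+#   ESS = compute_effective_sample_size(len(v), ac)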
+###################################################################################################
+###################################################################################################
+def read_remaining_lines(samples_file,n_burnin,stride):
+ """Read all remaining lines in MCMC sample filename, leaving out the first n_burnin,
+  and keeping only one of every stride lines after that
+ """
+ samples_list = []
+ line_no = 0
+ done = 0
+ while not done:
+ line = samples_file.readline()
+ if (line == ""):
+ done = 1
+ else:
+ line_no += 1
+ if (line_no > n_burnin and (line_no - n_burnin) % stride == 0):
+ records = line.split()
+ num_records = [float(s) for s in records]
+ samples_list.append(num_records)
+
+ return samples_list
+
+###################################################################################################
+def remove_MAP_line(samples_list,debug):
+  """Remove the last line if it has a value < 0 (i.e. -1) in the acceptance_prob column (next to last)"""
+ if(samples_list[-1][-2] < 0):
+ # Remove the last line
+ del samples_list[-1]
+ if (debug > 0):
+ print "The last sample line has been deleted as it contained the MAP values"
+
+###################################################################################################
+def extract_vars(samples_file_name,n_burnin,v_names,debug,stride=1):
+ """From a file with samples in ascii format, with
+ the first line containing the label for each column, extract
+ the columns with the labels in v_names and return them
+ in a numpy array. Remove n_burnin samples from the top.
+ Only read one in every stride number of lines after that.
+ Assumes that the first column is the MCMC step number, the next to last column is the acceptance
+ probability for each step, and the last column is the posterior probability for each step. The
+ last line is removed if it contains -1 for the acceptance probability (which means this
+ line contains the MAP values)"""
+
+ # Open text file with all samples,
+ samples_file = open(samples_file_name,"r")
+
+ # Extract first line with the column labels and find the column
+ # numbers corresponding to the variables of interest.
+ labels_line = samples_file.readline().rstrip('\n')
+ col_labels = [lbl for lbl in labels_line.split()]
+
+ v_indices = []
+ for s_v in v_names:
+ try:
+ i_v = col_labels.index(s_v)
+ v_indices.append(i_v)
+ except ValueError:
+ print "Variable", s_v, "is not found in the list of labels", col_labels
+ sys.exit(1)
+
+ if (debug > 0):
+ print "Column labels in file",samples_file_name,"are:",col_labels
+ for i_v in range(len(v_names)):
+ print "The column number of",v_names[i_v],"is:",v_indices[i_v]
+
+ # Read subsequent lines
+ samples_list = read_remaining_lines(samples_file,n_burnin,stride)
+
+ # Close the file
+ samples_file.close()
+
+ # Remove MAP values, if present
+ remove_MAP_line(samples_list,debug)
+
+ # Convert list to array
+ steady_samples = np.array(samples_list)
+
+
+ # Extract all columns of interest
+ samples_cols = []
+ for i_v in v_indices:
+ samples_cols.append(steady_samples[:,i_v])
+
+ samples = np.array(samples_cols).T
+ if (debug > 0):
+ print "Shape of samples array:",samples.shape
+
+ n_samples = len(samples[:,0])
+ n_vars = len(samples[0,:])
+
+ if (debug > 0):
+ print "Read in", n_samples, "regular samples of", n_vars, "variables from file", samples_file_name
+
+ return samples
+
+###################################################################################################
+def extract_all_vars(samples_file_name,n_burnin,debug,stride=1,labels=True):
+ """Extract samples and labels from an MCMC chain file.
+ Assumes the following:
+ * The file is in ASCII format
+ * The first column contains the MCMC step number
+ * The next to last column contains the acceptance probability for the jump proposed in this step
+ * The last column contains the posterior probability of the state in this step
+ * The columns in between contain the sampled states
+ * Unless the argument labels == False, the first line contains labels for each column
+ * If the last line has -1 in the acceptance probability column, then this line contains
+ the MAP values. This line is removed before returning the samples to the calling program.
+ Arguments:
+ * samples_file_name: name of file to parse
+ * n_burnin: number of lines to skip from the top
+ * debug: higher values are more verbose in output
+ * stride: stride to take in parsing sample lines. [default = 1]
+ * labels: True if the file contains column labels in first line. False if not. [default = True]
+      If no column labels are present, they are manufactured as aa, ab, ac, ..., az, ba, bb, ...
+ Returns:
+ * A numpy array with samples (one sample of all parameters per row)
+ * A list of variable names
+ """
+
+ # Open text file with all samples
+ samples_file = open(samples_file_name,"r")
+
+
+ if (labels): # Column labels are present in first line
+ # Extract first line with the column labels
+ labels_line = samples_file.readline().rstrip('\n')
+ col_labels = [lbl for lbl in labels_line.split()]
+
+ # Identify the MCMC vars, knowing that the first column is the step
+ # number and the last two columns are acceptance and posterior prob
+ n_cols = len(col_labels)
+ n_vars = n_cols - 3
+
+ v_names = col_labels[1:1+n_vars]
+
+ if (debug > 0):
+ print "Column labels in file", samples_file_name, "are:", col_labels
+ print "MCMC chain variables are", v_names
+ else:
+ # Extract first line to see how many columns we have
+ first_line = samples_file.readline().rstrip('\n')
+ first_line_items = [item for item in first_line.split()]
+
+ # Identify the MCMC vars, knowing that the first column is the step
+ # number and the last two columns are acceptance and posterior prob
+ n_cols = len(first_line_items)
+ n_vars = n_cols - 3
+
+ # Generate variable names as aa, ab, ..., az, ba, bb, ...
+ if (n_vars > 52*26): # only 52 entries in string.letters. If need be, could go higher by allowing aA, aB, ... , Aa, ...
+ print "In routine extract_all_vars: too many columns for automatic name generation"
+ sys.exit(1)
+
+ v_names = []
+ for i_v in range(n_vars):
+ name = ""
+ name += string.letters[i_v/26]
+ name += string.letters[i_v%26]
+ v_names.append(name)
+
+ if (debug > 0):
+ print "There are",n_cols," columns in file", samples_file_name
+ print "MCMC chain variables have been labeled", v_names
+
+ # Rewind file so the first line will be read just like the other sample lines
+ samples_file.seek(0)
+
+ # Read subsequent lines
+ samples_list = read_remaining_lines(samples_file,n_burnin,stride)
+
+ # Close the file
+ samples_file.close()
+
+ # Remove MAP values, if present
+ remove_MAP_line(samples_list,debug)
+
+ # Convert list to array
+ samples = np.array(samples_list)
+
+ n_samples = samples.shape[0]
+
+ if (debug > 0):
+ print "Read in", n_samples, "regular samples of", n_vars, "variables from file", samples_file_name
+
+ return samples, v_names
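+
+# Typical use (hypothetical file name), matching the mcmc_stats.py driver:
+#   all_samples, v_names = extract_all_vars("chain.dat", 1000, 0, stride=2)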
+
+###################################################################################################
+# def effective_sample_sizes(var_samples,par_mean,par_cov):
+# """Computes the effective sample size for each column
+# by dividing the number of samples by the integral
+# of the autocorrelation between the samples. (i.e. the more
+# correlated successive samples are, the less independent samples there are
+# in the chain.)
+# The algorithm is based on:
+# Markov Chain Monte Carlo in Practice: A Roundtable Discussion
+# Robert E. Kass, Bradley P. Carlin, Andrew Gelman and Radford M. Neal
+# The American Statistician, Vol. 52, No. 2 (May, 1998), pp. 93-100
+# Published by: American Statistical Association
+# Article DOI: 10.2307/2685466
+# Article Stable URL: http://www.jstor.org/stable/2685466
+# """
+#
+# # Number of variable samples in set
+# n_sam = var_samples.shape[0]
+# # Number of variables in this sample set
+# n_vars = var_samples.shape[1]
+#
+# # Array to store effective sample sizes in
+# ess = []
+#
+# # Cut-off point for autocorrelation
+# # Ideally, n_a should be chosen such that the autocorrelation goes to 0 at this lag.
+# # Choosing n_a too low will give inaccurate results (overpredicting the ESS), but going
+# # to a much higher lag will create a lot of noise in the ESS estimate.
+# n_a = min(100,n_sam)
+# for i_v in range(n_vars):
+# # Subtract mean from current variable samples
+# v_nm = var_samples[:,i_v] - par_mean[i_v]
+# # Compute autocorrelation for this variable. np.correlate with mode 'full' returns a
+# # vector with lags from -(n_sam-1) to (n_sam-1), with the 0 lag in the middle. Only retain lags 0 to n_a-1.
+# r_v = np.correlate(v_nm, v_nm, mode = 'full')[-n_sam:-n_sam+n_a]
+# # Divide by the number of samples in each sum, and normalize by the variance
+# # (note: lag 0 has n_sam samples in its sum, lag i has (n_sam - i) samples in its sum)
+# r_a = r_v / (par_cov[i_v,i_v]*(np.arange(n_sam, n_sam-n_a, -1)))
+# # Plot autocorrelation to see if n_a is large enough
+# #pl1,=plt.plot(r_a)
+# #plt.show()
+# # Effective Sample Size (number of samples divided by the integral of the autocorrelation)
+# # Integral relies on symmetry and the fact that r_a is 1 at zero lag
+# ess.append(n_sam / (1.0+2.0*np.sum(r_a[1:])))
+#
+# return ess
+
+###################################################################################################
+def plot_all_posteriors(d0,vnames,np_kde,out_file_base,debug,dense=False):
+ """
+  Given a set of samples of random variables, this function plots a lower triangular
+ matrix of marginalized densities. The diagonal contains the density of individual
+ random variables, marginalized over all other variables. Plots below the diagonal
+ contain the 2D density of the associated pair of random variables, marginalized over
+ all other variables.
+ For chains with many variables, the "dense" option can be selected, which plots the
+ triangular set of densities for the full chain with minimum spacing and labels, so that
+ it is less cluttered. In this mode, this function also writes out a set of plots
+  with the same posterior information, but just for two variables at a time,
+ which is easier to read.
+
+ Arguments:
+ d0 : Set of samples, one column per variable (no extra columns)
+ vnames : Variable names
+ np_kde : Number of points to use to compute posterior densities with KDE
+ out_file_base: Base name for output files with plots
+ debug : >0 writes more output to screen (and even more if >1)
+ dense : Set to True if dense output desired [Defaults to False]. The "dense" output
+ format puts all plots in the triangular format up against each other, without
+ any axis labels or space in between them. It is useful when plotting the
+ posteriors of a chain with many variables.
+ """
+ # Some settings to connect with code Cosmin gave me
+ nthin = 1 # take only every nthin state (for faster kde)
+ nskip = 0 # entries to skip
+ istart = 0 # number of column with first MCMC variable
+ cend = 0 # extra columns at end to be removed
+
+ nvars=d0.shape[1]-istart-cend # number of variables we will actually process
+ print 'Number of sample lines in file',d0.shape[0]
+ print 'Number of vars we will process in file',nvars
+
+ # Section 2
+ # set up 2D kde objects
+ print "Setting up 2D KDE objects"
+ kern_i_j=[]
+ for j in range(istart+1,istart+nvars):
+ for i in range(istart,j):
+ if (debug > 2):
+ print i,j
+ kern_i_j.append(stats.kde.gaussian_kde(c_[d0[nskip::nthin,i],d0[nskip::nthin,j]].T))
+
+ # Section 3
+ # set up 2D meshes and evaluate kde objects on those meshes
+ # no. of grid points is controlled with kde_idx, defaults to 100
+ print "Evaluating 2D KDE objects on meshes. This may take a while ..."
+ kde_idx = np_kde*1j # complex number to include end points
+ xmesh=[]; ymesh=[]; zmesh=[];
+ icount=0
+  cov_idx = np.zeros((nvars,nvars),dtype=np.int) # 2D array to keep track of which index in xmesh etc.
+                                                 # the plots corresponding to vars i,j belong to
+ for j in range(istart+1,istart+nvars):
+ for i in range(istart,j):
+ if (debug > 0):
+ print "Computing 2D marginal distribution between variables:",i,",",j,":",vnames[i]," & ",vnames[j]
+ x,y = mgrid[d0[nskip:,i].min():d0[nskip:,i].max():kde_idx, d0[nskip:,j].min():d0[nskip:,j].max():kde_idx]
+ z = reshape(kern_i_j[icount](c_[x.ravel(), y.ravel()].T).T, x.T.shape)
+ xmesh.append(x);
+ ymesh.append(y);
+ zmesh.append(z);
+ cov_idx[i,j] = icount
+ icount = icount+1
+
+ # Section 4
+ # evaluate 1D pdfs
+ print "Evaluating 1D marginal pdfs with KDE"
+ xlin=[]; pdflin=[];
+ for i in range(istart,istart+nvars):
+ xlin.append(np.linspace(d0[nskip:,i].min(),d0[nskip:,i].max(),np_kde)) ;
+ kernlin=stats.kde.gaussian_kde(d0[nskip::nthin,i]);
+ pdflin.append(kernlin(xlin[i-istart]));
+
+ # Formatting for plots
+ fs1=20 # Font size
+ lw1=2 # Line width
+
+ if (not dense):
+ # Section 5
+ print "Assembling lower-triangular plots in non-dense format"
+
+ # ds is the distance between subplots
+ # xs,ys are the coordinates (normalized) of the subplot in the lower left corner
+    # xe,ye are the distances left in the upper right corner
+ # fsizex, fsizey are figure sizes
+ # ncont are no of contours for 2D pdfs
+ xs=0.12; ys=0.1; ds=0.04
+ xe=0.08; ye=0.05
+ fsizex=12; fsizey=12;
+ ncont=20;
+ sx=(1-(nvars-1)*ds-xs-xe)/nvars;
+ sy=(1-(nvars-1)*ds-ys-ye)/nvars;
+ majorFormatter = FormatStrFormatter('%6.0e')
+
+ figname=out_file_base+".lowertriangle.pdf" # figure name
+
+ fig = plt.figure(figsize=(fsizex,fsizey))
+
+ # Section 5.1
+ subs=[]
+ # add diagonal plots
+ for i in range(nvars):
+ subs.append(fig.add_axes([xs+i*(sx+ds),ys+(nvars-1-i)*(sy+ds),sx,sy]))
+
+ # add lower triangular plots
+ for i in range(nvars-1):
+ for j in range(i+1):
+ if (debug > 2):
+ print j,(nvars-2-i)
+ subs.append(fig.add_axes([xs+j*(sx+ds),ys+(nvars-2-i)*(sy+ds),sx,sy]))
+
+ subsnp=np.array(subs)
+
+ # Plot 1D pdfs
+ for i in range(nvars):
+ subsnp[i].plot(xlin[i],pdflin[i])
+
+ # Plot 2D pdfs
+ for i in range(nvars*(nvars-1)/2):
+ subsnp[nvars+i].contour(xmesh[i],ymesh[i],zmesh[i],ncont)
+
+ # Section 5.2
+ # just a few ticks and ticklabels
+ for subpl in subsnp:
+ # subpl.set_xticks([])
+ # subpl.set_yticks([])
+ subpl.locator_params(tight=True, nbins=5)
+
+    # for diagonal plots, put no ticks and labels on y-axis
+ # and no grid on the plots
+ for i in range(istart,istart+nvars):
+ # subsnp[i-istart].set_xticks([d0[nskip:,i].min(),d0[nskip:,i].max()]);
+ subsnp[i-istart].set_yticks([])
+ subsnp[i-istart].grid(False)
+
+ # Set y labels on the right for diagonal plots
+ for i in range(nvars):
+ subsnp[i].yaxis.tick_right()
+ subsnp[i].yaxis.set_label_position("right")
+ subsnp[i].set_ylabel(vnames[i], fontsize=fs1)
+ #subsnp[i].set_ylabel(r'$'+vnames[i]+'$', fontsize=fs1)
+
+ plt.savefig(figname)
+ plt.close()
+
+ else:
+ # Section 5
+ # Dense plot format: print full lower-triangular matrix but w/o any white space, tick marks or labels.
+ print "Assembling lower-triangular plots in dense format"
+
+ # ds is the distance between subplots
+ # xs,ys are the coordinates (normalized) of the subplot in the lower left corner
+    # xe,ye are the distances left in the upper right corner
+ # fsizex, fsizey are figure sizes
+ # ncont are no of contours for 2D pdfs
+ xs=0.12; ys=0.1; ds=0.0
+ xe=0.08; ye=0.05
+ fsizex=12; fsizey=12;
+ ncont=10;
+ sx=(1-(nvars-1)*ds-xs-xe)/nvars;
+ sy=(1-(nvars-1)*ds-ys-ye)/nvars;
+ majorFormatter = FormatStrFormatter('%6.0e')
+
+ figname=out_file_base+".lowertriangle-dense.pdf" # figure name
+
+ fig_d = plt.figure(figsize=(fsizex,fsizey))
+
+ # Section 5.1
+ subs=[]
+ # add diagonal plots
+ for i in range(nvars):
+ subs.append(fig_d.add_axes([xs+i*(sx+ds),ys+(nvars-1-i)*(sy+ds),sx,sy]))
+
+ # add lower triangular plots
+ for i in range(nvars-1):
+ for j in range(i+1):
+ if (debug > 2):
+ print j,(nvars-2-i)
+ subs.append(fig_d.add_axes([xs+j*(sx+ds),ys+(nvars-2-i)*(sy+ds),sx,sy]))
+
+ subsnp=np.array(subs)
+
+ # Plot 1D pdfs along diagonals
+ for i in range(nvars):
+ subsnp[i].plot(xlin[i],pdflin[i])
+
+ # Plot 2D pdfs
+ for i in range(nvars*(nvars-1)/2):
+ subsnp[nvars+i].contour(xmesh[i],ymesh[i],zmesh[i],ncont)
+
+ # Section 5.2
+ # no ticks and ticklabels on most plots
+ for subpl in subsnp:
+ subpl.set_xticks([]);
+ subpl.set_yticks([]);
+
+ # Set variable names as title for diagonal marginal plots
+ for i in range(nvars):
+ subsnp[i].set_title(vnames[i], fontsize=fs1)
+ # subsnp[i].yaxis.set_label_position("right")
+ # # Plot variable names along diagonal on the right. Plot them at angle
+ # # to make them easier to read. Hack to add some white space in front of label
+ # # so that it does not overlap with plot frame.
+ # subsnp[i].set_ylabel(" "+vnames[i], fontsize=fs1, rotation=45)
+ # #subsnp[i].set_ylabel(r'$'+vnames[i]+'$', fontsize=fs1)
+
+
+ plt.savefig(figname)
+ plt.close()
+
+ print "Assembling marginal density plots for all pairs of MCMC variables"
+
+ # ds is the distance between subplots
+ # xs,ys are the coordinates (normalized) of the subplot in the lower left corner
+  # xe,ye are the distances left in the upper right corner
+ # fsizex, fsizey are figure sizes
+ # ncont are no of contours for 2D pdfs
+ xs=0.12; ys=0.1; ds=0.04
+ xe=0.08; ye=0.05
+ fsizex=12; fsizey=12;
+ ncont=20;
+ nvars_sm=2
+ sx=(1-(nvars_sm-1)*ds-xs-xe)/nvars_sm;
+ sy=(1-(nvars_sm-1)*ds-ys-ye)/nvars_sm;
+ fs1=20
+ majorFormatter = FormatStrFormatter('%6.0e')
+
+
+ # loop over all pairs of MCMC variables.
+ for j in range(istart+1,istart+nvars):
+ for i in range(istart,j):
+
+ print "Plotting densities for variables",vnames[i],"and",vnames[j]
+ figname=out_file_base + "." + vnames[i] + "-" + vnames[j] + ".pdf"
+
+ fig_sm = plt.figure(figsize=(fsizex,fsizey))
+
+ subs=[]
+ # add diagonal plots
+ subs.append(fig_sm.add_axes([xs ,ys+(sy+ds),sx,sy])) # marginal for var i
+ subs.append(fig_sm.add_axes([xs+(sx+ds),ys ,sx,sy])) # marginal for var j
+
+ # add lower triangular plot
+ subs.append(fig_sm.add_axes([xs ,ys ,sx,sy])) # marginal for vars i,j
+
+ subsnp=np.array(subs)
+
+ # Plot 1D pdfs
+ subsnp[0].plot(xlin[i],pdflin[i])
+ subsnp[1].plot(xlin[j],pdflin[j])
+
+ # Plot 2D pdfs
+ i_2D = cov_idx[i,j]
+ subsnp[2].contour(xmesh[i_2D],ymesh[i_2D],zmesh[i_2D],ncont)
+
+ # set just a few ticks and ticklabels
+ for subpl in subsnp:
+ subpl.locator_params(tight=True, nbins=5)
+
+ # no ticks and ticklabels on y axes on diagonals (first two plots in subsnp array)
+ # no grid on diagonal plots
+ for subpl in subsnp[0:2]:
+ subpl.set_yticks([])
+ subpl.grid(False)
+
+ # for diagonal plots only put xmin and xmax
+ #subsnp[0].set_xticks([d0[nskip:,i].min(),d0[nskip:,i].max()]);
+ #subsnp[1].set_xticks([d0[nskip:,j].min(),d0[nskip:,j].max()]);
+
+
+ # Set y labels on the right for diagonal plots
+ #subsnp[0].yaxis.tick_right()
+ subsnp[0].yaxis.set_label_position("right")
+ subsnp[0].set_ylabel(vnames[i], fontsize=fs1)
+
+ #subsnp[1].yaxis.tick_right()
+ subsnp[1].yaxis.set_label_position("right")
+ subsnp[1].set_ylabel(vnames[j], fontsize=fs1)
+
+ # Write out figure
+ plt.savefig(figname)
+ plt.close()
+
+###################################################################################################
+def get_mcmc_stats(all_samples,v_names,out_file_base,debug):
+ """
+ Generate statistics of the passed in MCMC samples.
+ Assumes that the first column of all_samples contains the step number, and the last two
+ columns contain the acceptance probability and the posterior probability for each sampled state.
+
+ Inputs:
+ all_samples : Array with all samples (one sample set per row). Has step number in first
+ column and acceptance probability and posterior in last two columns
+ v_names : Actual variable names
+ out_file_base : Base for output file names
+ debug : Writes out more info if number is larger
+
+ Outputs:
+ Various statistics written to the screen
+ Correlation functions written to pdf files
+ Returns array of map values
+ """
+
+ # Number of variables, columns, samples in the file
+ n_vars = len(v_names)
+ n_cols = all_samples.shape[1]
+ n_sam = all_samples.shape[0]
+
+ # Extract all MCMC chain variables in separate array
+ var_samples = all_samples[:,1:1+n_vars]
+ if (debug > 0):
+ print var_samples.shape
+
+ # Compute mean parameter values
+ par_mean = np.mean(var_samples,axis=0,dtype=np.float64)
+
+ #print "\nParameter mean values:\n"
+ #for i_v in range(n_vars):
+ # print " ", v_names[i_v], ":", par_mean[i_v]
+
+ # Compute the covariance
+ par_cov = np.cov(var_samples,rowvar=0)
+
+ print "\nParameter covariances:\n"
+ print par_cov
+
+ # write out covariance matrix to file
+ cov_file_name = out_file_base + ".covariance.dat"
+ np.savetxt(cov_file_name,par_cov)
+
+ # print the square root of the diagonal entries of the covariance
+ #print "\nParameter standard deviations (proposal width estimates):\n"
+ #for i_v in range(n_vars):
+ # print " ", v_names[i_v], ":", math.sqrt(par_cov[i_v,i_v])
+
+ #
+ # Compute the MAP values
+ # (could also get this from the last line of the MCMC output file
+ # but this line is not always there; and it is more fun
+ # to do it with Python)
+ #
+
+ # Sample index with max posterior prop (last column in MCMC file):
+ i_map = all_samples[:,-1].argmax()
+
+ print "\n",
+ print '%27s' % "Parameter :", '%15s' % "Mean Value", '%15s' % "MAP values", '%15s' % "Std. Dev."
+ for i_v in range(n_vars):
+ print '%25s' % v_names[i_v], ":", '%15.8e' % par_mean[i_v], '%15.8e' % var_samples[i_map,i_v],
+ print '%15.8e' % math.sqrt(par_cov[i_v,i_v])
+
+ # Write mean and MAP to file
+ mean_file_name = out_file_base + ".mean.dat"
+ np.savetxt(mean_file_name,par_mean)
+
+ map_file_name = out_file_base + ".map.dat"
+ np.savetxt(map_file_name,var_samples[i_map,:])
+
+ # Compute mean and standard deviation of acceptance probability
+ print "\nAcceptance Probability:\n"
+
+ # In some cases, the next to last column contains the ratio of posterior
+ # values rather than the acceptance probability. First convert this number
+ # to acceptance probabilities: acc_prob = min(alpha,1)
+ # (This does no harm if the next to last column already contains the actual acceptance probability)
+ acc_prob = np.minimum(all_samples[:,-2],np.ones_like(all_samples[:,-2]))
+ # In some cases, a very large negative number is shown in the column for acceptance
+ # probability to indicate a proposed value was out of bounds. In that case, replace
+ # the value with 0. Again, this does no harm if the next to last column already contains
+ # the actual acceptance probability.
+ acc_prob = np.maximum(acc_prob,np.zeros_like(acc_prob))
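+  # The two steps above amount to np.clip(all_samples[:,-2], 0.0, 1.0)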
+ print "Mean :",acc_prob.mean(),
+ print "Std. Dev.:",acc_prob.std()
+
+ # #
+ # # Compute effective sample size (ESS)
+ # #
+ # print "\nEffective Sample Sizes:\n"
+ #
+ # ess = effective_sample_sizes(var_samples,par_mean,par_cov)
+ #
+ # for i_v in range(n_vars):
+ # print " ",v_names[i_v],":",int(ess[i_v]),"out of",n_sam
+
+ #
+ # Compute autocorrelations and effective sample size (ESS)
+ #
+ print "\nAutocorrelations and Effective Sample Sizes:\n"
+
+ # Number of variable samples in this file
+ n_sam = var_samples.shape[0]
+
+ # Cut-off point for autocorrelation
+ # Ideally, n_a should be chosen such that the autocorrelation goes to 0 at this lag.
+  # Choosing n_a too low will give inaccurate results (overpredicting ESS), but going
+  # to a much higher lag will create a lot of noise in the ESS estimate.
+ n_a = min(1000,n_sam)
+
+ # Autocorrelation computation
+ auto_corr_vars = compute_group_auto_corr(var_samples,n_a)
+
+ # Plotting and computation of effective sample size
+ for i_v in range(n_vars):
+ # Plot autocorrelation to see if n_a is large enough
+ plot_auto_corr(auto_corr_vars[:,i_v],v_names[i_v])
+ # Effective Sample Size (Number of samples divided by integral of autocorrelation)
+ ESS = compute_effective_sample_size(n_sam,auto_corr_vars[:,i_v])
+ print " ",v_names[i_v],":",ESS,"out of",n_sam," ; skip factor:",n_sam/ESS
+
+ print "\n See plots corr-*.pdf for autocorrelations of chain samples for all variables."
+
+ # The following operations rely on PyMC
+ if have_pymc:
+ #
+ # Compute Raftery-Lewis convergence test
+ #
+ print "\nComputing Raftery-Lewis criteria for all variables\n"
+ quant = 0.025 # Quantile level to be estimated
+ relacc = 0.01 # Error in quantile level (relative to the mean of the parameter)
+    conf   = 0.95  # Confidence in the achieved accuracy
+ print " Computing # of samples needed to compute quantile",quant
+ print " for an accuracy",relacc*100,"% relative to parameter mean, with confidence",conf*100,"%:\n"
+ print " Variable name: # initial samples to skip, # additional samples to take, thinning factor"
+ for i_v in range(n_vars):
+ output = pymc.raftery_lewis(var_samples[:,i_v], q=quant, r=relacc*par_mean[i_v], s=conf, verbose=0)
+ print " ",'%25s' % v_names[i_v], ":", '%8d' % output[2],",",'%8d' % output[3],",",'%8d' % output[4]
+
+ print "\n"
+
+ quant = 0.5 # Quantile level to be estimated
+ print " Computing # of samples needed to compute quantile",quant
+ print " for an accuracy",relacc*100,"% relative to parameter mean, with confidence",conf*100,"%:\n"
+ print " Variable name: # initial samples to skip, # additional samples to take, thinning factor"
+ for i_v in range(n_vars):
+ output = pymc.raftery_lewis(var_samples[:,i_v], q=quant, r=relacc*par_mean[i_v], s=conf, verbose=0)
+ print " ",'%25s' % v_names[i_v], ":", '%8d' % output[2],",",'%8d' % output[3],",",'%8d' % output[4]
+
+ #
+ # Geweke test
+ #
+ print "\nComputing Geweke test for all variables\n"
+ print "Geweke Test temporarily disabled. Needs to be debugged."
+ # for i_v in range(n_vars):
+ # var_scores = pymc.geweke(var_samples[:,i_v], intervals=20)
+ # pymc.Matplot.geweke_plot(var_scores, v_names[i_v])
+ # print " See plots *-diagnostic.png"
+
+ #
+ # Autocorrelations (done above already)
+ #
+ # print "\nComputing autocorrelations for all variables\n"
+ # for i_v in range(n_vars):
+ # pymc.Matplot.autocorrelation(var_samples[:,i_v], v_names[i_v])
+ # print " See plots *-acf.png"
+
+ return var_samples[i_map,:]
+
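+###################################################################################################
+# For reference, a minimal stand-alone sketch of the effective-sample-size estimate used above.
+# It assumes an autocorrelation sequence rho with rho[0] == 1, such as one column of the array
+# returned by compute_group_auto_corr. The routine compute_effective_sample_size called in
+# get_mcmc_stats is the authoritative implementation; this helper only illustrates the formula
+# ESS = N / (1 + 2*sum_k rho_k), i.e. the chain length divided by the integrated autocorrelation.
+def ess_sketch(n_sam, rho):
+    # Integrated autocorrelation time: 1 plus twice the sum of autocorrelations at lags >= 1
+    tau = 1.0 + 2.0*np.sum(rho[1:])
+    # Guard against pathological (e.g. negative) estimates of tau
+    return n_sam/max(tau, 1.0)
+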
+###################################################################################################
+
+help_string = """
+Usage:
+  postproc.py [-h] -i <mcmc data file> [--nb <number of burn-in samples>] [-s <stride>] [--nolabels]
+What:
+ Compute elementary statistics of MCMC chain
+Where:
+ -h = print help info
+ -i = name of file containing MCMC data
+ -s = stride with which to read the file [defaults to 1]
+ --nb = number of burn-in samples to be removed from the chain [defaults to 0]
+ --nolabels Indicates that the MCMC data file does not contain column labels (in which case they are generated)
+Assumes the following:
+ * The file is in ASCII format
+ * The first column contains the MCMC step number
+ * The next to last column contains the acceptance probability for the jump proposed in this step
+ * The last column contains the posterior probability of the state in this step
+ * The columns in between contain the sampled states
+  * Unless --nolabels is given, the first line contains labels for each column
+"""
+
+if __name__ == "__main__":
+ #
+ # Process inputs
+ #
+ try:
+        opts,extra_args = getopt.getopt(sys.argv[1:],"hi:s:",["nb=","nolabels"])
+ except getopt.GetoptError, err:
+ print str(err)
+ print help_string
+ sys.exit(1)
+
+ # Default values
+ samples_file_name=""
+ n_burnin = 0
+ stride = 1
+ labels_present = True
+
+ for o,a in opts:
+ if o == "-h":
+ print help_string
+ sys.exit(0)
+ elif o == "-i":
+ samples_file_name = a
+ elif o == "-s":
+ stride = int(a)
+ elif o == "--nb":
+ n_burnin = int(a)
+ elif o == "--nolabels":
+ labels_present = False
+ else:
+ assert False, "Unhandled command line parsing option. Use -h flag to get usage info."
+
+ # error checking
+ if(samples_file_name==""):
+ print "Sample file name must be specified"
+ print help_string
+ sys.exit(1)
+
+ if (n_burnin < 0):
+ print "The number of burn-in samples needs to be >= 0"
+ print help_string
+ sys.exit(1)
+
+    if (stride < 1):
+        print "The file read stride needs to be >= 1"
+ print help_string
+ sys.exit(1)
+
+ # Base name of file for outputting results
+ out_file_base = samples_file_name + ".nb" + str(n_burnin) + ".s" + str(stride)
+
+ # Set to 1 to get more output to screen
+ # Set to > 1 to get a lot of output to screen
+ debug = 1
+
+ # Set to 1 for showing plots interactively
+ interact = 0
+
+ #
+ # Import variables of interest from the MCMC data file
+ #
+ all_samples, v_names = extract_all_vars(samples_file_name,n_burnin,debug,stride,labels=labels_present)
+
+ # Get statistics
+ get_mcmc_stats(all_samples,v_names,out_file_base,debug)
diff --git a/PyUQTk/mcmc/CMakeLists.txt b/PyUQTk/mcmc/CMakeLists.txt
new file mode 100644
index 00000000..a4c7ce72
--- /dev/null
+++ b/PyUQTk/mcmc/CMakeLists.txt
@@ -0,0 +1,67 @@
+FIND_PACKAGE(SWIG REQUIRED)
+INCLUDE(${SWIG_USE_FILE})
+
+FIND_PACKAGE(PythonLibs)
+INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_PATH})
+INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_PATH}/../../Extras/lib/python/numpy/core/include)
+
+#include source files
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/) # array classes, array input output, and array tools
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/include/) # utilities like error handlers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/) # tools like multindex, etc.
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/quad/) # quad class
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/kle/) # kle class
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/pce/) # PCSet and PCBasis classes
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/bcs/) # bcs
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/mcmc/) # mcmc
+
+# include dependencies
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/blas/) # blas library headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/lapack/) # lapack library headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/lbfgs/) # lbfgs library headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/dsfmt/) # dsfmt
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/figtree/) # figtree
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/slatec/) # slatec headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/cvode-2.7.0/include) # cvode
+INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/dep/cvode-2.7.0/include)
+INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/dep/cvode-2.7.0/include/nvector)
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../numpy/) # numpy headers
+
+SET(CMAKE_SWIG_FLAGS "")
+SET_SOURCE_FILES_PROPERTIES(mcmc.i PROPERTIES CPLUSPLUS ON)
+
+# compile swig with cpp extensions
+SWIG_ADD_MODULE(
+ mcmc python mcmc.i
+ # array tools needed to compile misc tools source files
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/arrayio.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/arraytools.cpp
+
+ # source code for quad and kle class
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/quad/quad.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/kle/kle.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/mcmc/mcmc.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/pce/PCSet.cpp
+
+ # source code for tools
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/combin.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/gq.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/minmax.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/multiindex.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/pcmaps.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/probability.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/rosenblatt.cpp
+)
+
+# link python and 3rd party libraries, e.g., gfortran and blas
+if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+ # using GCC
+ SWIG_LINK_LIBRARIES(mcmc deplbfgs uqtkbcs uqtkpce uqtktools uqtkquad uqtkarray depnvec deplapack depblas depslatec depdsfmt depann depfigtree depcvode gfortran ${PYTHON_LIBRARIES})
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
+ # using Intel
+ SWIG_LINK_LIBRARIES(mcmc deplbfgs uqtkbcs uqtkpce uqtktools uqtkquad uqtkarray depnvec deplapack depblas depslatec depdsfmt depann depfigtree depcvode ifcore ifport ${PYTHON_LIBRARIES})
+endif()
+
+INSTALL(TARGETS _mcmc DESTINATION PyUQTk/)
+INSTALL(FILES ${CMAKE_BINARY_DIR}/${outdir}PyUQTk/mcmc/mcmc.py DESTINATION PyUQTk)
diff --git a/PyUQTk/mcmc/mcmc.i b/PyUQTk/mcmc/mcmc.i
new file mode 100644
index 00000000..4a886c94
--- /dev/null
+++ b/PyUQTk/mcmc/mcmc.i
@@ -0,0 +1,143 @@
+%module(directors="1") mcmc
+//=====================================================================================
+// The UQ Toolkit (UQTk) version 3.0.4
+// Copyright (2017) Sandia Corporation
+// http://www.sandia.gov/UQToolkit/
+//
+// Copyright (2013) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+// with Sandia Corporation, the U.S. Government retains certain rights in this software.
+//
+// This file is part of The UQ Toolkit (UQTk)
+//
+// UQTk is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// UQTk is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+//
+// Questions? Contact Bert Debusschere
+// Sandia National Laboratories, Livermore, CA, USA
+//=====================================================================================
+
+%feature("autodoc", "3");
+%rename(Assign) *::operator=;
+%ignore *::operator[];
+
+%{
+#define SWIG_FILE_WITH_INIT
+#include <iostream>
+#include <string>
+#include <sstream>
+#include <math.h>
+#include <stdio.h>
+#include "../../cpp/lib/array/Array1D.h"
+#include "../../cpp/lib/array/Array2D.h"
+// #include "../../cpp/lib/array/arrayio.h"
+// #include "../../cpp/lib/array/arraytools.h"
+// #include "../../cpp/lib/tools/combin.h"
+// #include "../../cpp/lib/tools/gq.h"
+// #include "../../cpp/lib/tools/minmax.h"
+// #include "../../cpp/lib/tools/multiindex.h"
+// #include "../../cpp/lib/tools/pcmaps.h"
+// #include "../../cpp/lib/tools/probability.h"
+// #include "../../cpp/lib/tools/rosenblatt.h"
+
+// #include "../../cpp/lib/quad/quad.h"
+// #include "../../cpp/lib/kle/kle.h"
+// #include "../../cpp/lib/pce/PCBasis.h"
+// #include "../../cpp/lib/pce/PCSet.h"
+#include "../../cpp/lib/mcmc/mcmc.h"
+
+%}
+
+%feature("director") LikelihoodBase;
+/*************************************************************
+// Standard SWIG Templates
+*************************************************************/
+
+// Include standard SWIG templates
+// Numpy array templates and wrapping
+%include "pyabc.i"
+%include "../numpy/numpy.i"
+%include "std_vector.i"
+%include "std_string.i"
+%include "cpointer.i"
+
+%init %{
+ import_array();
+%}
+
+%pointer_functions(double, doublep);
+
+/*************************************************************
+// Numpy SWIG Interface files
+*************************************************************/
+
+// // Basic typemap for an Arrays and its length.
+// // Must come before %include statement below
+
+// // For Array1D setnumpyarray4py function
+// %apply (long* IN_ARRAY1, int DIM1) {(long* inarray, int n)}
+// %apply (double* IN_ARRAY1, int DIM1) {(double* inarray, int n)}
+// // get numpy int and double array
+// %apply (long* INPLACE_ARRAY1, int DIM1) {(long* outarray, int n)}
+// %apply (double* INPLACE_ARRAY1, int DIM1) {(double* outarray, int n)}
+
+// // For Array2D numpysetarray4py function
+// %apply (double* IN_FARRAY2, int DIM1, int DIM2) {(double* inarray, int n1, int n2)}
+// // get numpy array (must be FARRAY)
+// %apply (double* INPLACE_FARRAY2, int DIM1, int DIM2) {(double* outarray, int n1, int n2)}
+// // For Array2D numpysetarray4py function
+// %apply (long* IN_FARRAY2, int DIM1, int DIM2) {(long* inarray, int n1, int n2)}
+// // get numpy array (must be FARRAY)
+// %apply (long* INPLACE_FARRAY2, int DIM1, int DIM2) {(long* outarray, int n1, int n2)}
+
+
+// // For mcmc test to get log probabilities
+// %apply (double* INPLACE_ARRAY1, int DIM1) {(double* l, int n)}
+
+/*************************************************************
+// Include header files
+*************************************************************/
+
+// // The above typemap is applied to header files below
+%include "../../cpp/lib/array/Array1D.h"
+%include "../../cpp/lib/array/Array2D.h"
+// %include "../../cpp/lib/array/arrayio.h"
+// %include "../../cpp/lib/array/arraytools.h"
+// %include "../../cpp/lib/tools/combin.h"
+// %include "../../cpp/lib/tools/gq.h"
+// %include "../../cpp/lib/tools/minmax.h"
+// %include "../../cpp/lib/tools/multiindex.h"
+// %include "../../cpp/lib/tools/pcmaps.h"
+// %include "../../cpp/lib/tools/probability.h"
+// %include "../../cpp/lib/tools/rosenblatt.h"
+
+// %include "../../cpp/lib/quad/quad.h"
+// %include "../../cpp/lib/kle/kle.h"
+// %include "../../cpp/lib/pce/PCBasis.h"
+// %include "../../cpp/lib/pce/PCSet.h"
+%include "../../cpp/lib/mcmc/mcmc.h"
+
+// // Typemaps for standard vector
+// // Needed to prevent to memory leak due to lack of destructor
+// // must use namespace std
+// namespace std{
+// %template(dblVector) vector;
+// %template(intVector) vector;
+// %template(strVector) vector;
+
+// }
+
+
+// %include "swigi/arrayext.i"
+
+
+
diff --git a/PyUQTk/multirun/CMakeLists.txt b/PyUQTk/multirun/CMakeLists.txt
new file mode 100644
index 00000000..717a4fa8
--- /dev/null
+++ b/PyUQTk/multirun/CMakeLists.txt
@@ -0,0 +1,12 @@
+project (UQTk)
+
+SET(copy_FILES
+ __init__.py
+ multirun.py
+ srun.x
+ )
+
+INSTALL(FILES ${copy_FILES}
+ PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
+ DESTINATION PyUQTk/multirun
+)
diff --git a/PyUQTk/multirun/__init__.py b/PyUQTk/multirun/__init__.py
new file mode 100755
index 00000000..924173a4
--- /dev/null
+++ b/PyUQTk/multirun/__init__.py
@@ -0,0 +1,27 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+import multirun
diff --git a/PyUQTk/multirun/multirun.py b/PyUQTk/multirun/multirun.py
new file mode 100755
index 00000000..1713b43c
--- /dev/null
+++ b/PyUQTk/multirun/multirun.py
@@ -0,0 +1,221 @@
+#!/usr/bin/env python
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+
+# A quick hack to submit jobs in a multi-node SMP runtime environment.
+# Written in March 2006 by Helgi Adalsteinsson and modified for our
+# purpose in April 2006 by Bert Debusschere.
+# Modified further by Khachik Sargsyan 2008-15
+
+import os
+import thread
+import time
+import shutil
+import sys
+import string
+import getopt
+
+
+
+
+# Get an array containing names for all of the CPUs we plan on using. We assume
+# that a given number of cpus is available to us on the SMP machine. We
+# give them fictitious names cpu0, cpu1, ..., cpun. The number of cpus that is
+# specified determines the number of parallel threads we can have going.
+def avail_cpus(ncpus = 3):
+ """The optional argument specified how many cpus we plan on using."""
+ cpus = []
+ for icpu in range(ncpus):
+ cpus.append('cpu' + str(icpu))
+ return cpus
+
+# Use fork/exec to run the given command with arguments,
+# returns control when the shell command completes
+def run_command(cmd, *args):
+ """Given a command and its argument list, use fork/execvp to run the
+ command. Note that this is a direct wrapper for the C library function,
+ so the first argument in args should (by convention) be the command name"""
+ if len(args) == 0:
+ # execvp does not permit an empty tuple as an argument list
+ args = ("",)
+ # Fork off
+ pid = os.fork()
+ if pid < 0:
+ raise RuntimeError("run_command: Failed to fork")
+ elif pid == 0:
+ # I am the child
+ os.execvp(cmd, args)
+ else:
+ # I am the parent
+ (retid, status) = os.wait()
+ return status
+
+# Use fork/exec to run the given command with arguments in specified directory
+# returns control when the shell command completes
+def run_command_in_dir(dir, cmd, *args):
+ """Given a command and its argument list, use fork/execvp to run the
+ command. Note that this is a direct wrapper for the C library function,
+ so the first argument in args should (by convention) be the command name"""
+ if len(args) == 0:
+ # execvp does not permit an empty tuple as an argument list
+ args = ("",)
+ # Fork off
+ pid = os.fork()
+ if pid < 0:
+ raise RuntimeError("run_command: Failed to fork")
+ elif pid == 0:
+ # I am the child
+ os.chdir(dir)
+ os.execvp(cmd, args)
+ else:
+ # I am the parent
+ (retid, status) = os.wait()
+ return status
+
+# This is what each thread does.
+def thread_command(running, cpu, lock, tasks):
+ """A task entity for each of the running threads. All arguments are
+ passed by reference (in Python, objects are passed by reference while
+ literals are passed by value)."""
+ while True:
+ lock.acquire_lock()
+ if len(tasks) > 0:
+ my_task = tasks.pop()
+ lock.release_lock()
+ #print "Running",my_task,"on cpu",cpu
+ else:
+ lock.release_lock()
+ #print "Task queue on cpu",cpu,"is done"
+ break
+ # This does not work...
+ # Need to first change to the directory where the task script is
+ # and then run the script without the path in it...
+ #run_command(my_task,my_task)
+ dir = my_task[0]
+ script = my_task[1]
+ #print cpu, dir, script
+ starttime = time.time()
+ run_command_in_dir(dir,script,script)
+ stoptime = time.time()
+ print "CPU",cpu,"finished task in",dir,"in",(stoptime-starttime),"seconds"
+ # print "=============================================================================="
+ lock.acquire_lock()
+ running[0] -= 1
+ lock.release_lock()
+
+
+
+
+#######################################################################
+def get_tasks(list_args):
+
+ param_file='args.in'
+ script='./srun.x'
+ tasks = []
+
+ #os.system('rm -rf task_*')
+
+ #f = open(param_file,'r')
+
+ for it in range(len(list_args)):
+
+ dir = 'task_' + str(it+1) #str(list_args[it])
+ if not os.path.exists(dir):
+ print "Creating directory ", dir
+ os.mkdir(dir,0755)
+
+
+
+ fo = open(dir+os.sep+param_file,'w')
+ #print >>fo," ".join(f.readline().split(' '))
+ print >>fo,list_args[it]
+ fo.close()
+
+
+ shutil.copy(os.path.dirname(os.path.realpath(__file__))+os.sep+script,dir)
+
+
+ os.popen("chmod +x " + dir + os.sep + script)
+ tasks.append( [dir,script] )
+
+#f.close()
+
+ tasks.reverse() # to convert 'pop()' into 'shift()'
+
+    return tasks
+
+
+
+# Main routine.
+def main(args):
+
+ # First argument is a file of tasks (each row is a command line task)
+ tasks_file=args[0]
+ # Second argument is number of CPUs requested
+ ncpus=int(args[1])
+
+ # Informational print
+ print "Running tasks in file", tasks_file,"on", ncpus,"CPUs"
+
+ # Turn the rows in the file into a list
+ list_args=open(tasks_file).read().splitlines()
+
+ # Get the tasks
+ tasks = get_tasks(list_args)
+
+ # Serial mode
+ if (ncpus==1):
+ for it in range(len(list_args)):
+ dir = 'task_' + str(it+1)
+ os.chdir(dir)
+ os.system('./srun.x')
+ os.chdir('../')
+ # Parallel mode
+ else:
+        running = [0] # a one-element list, so the worker threads can update this counter in place
+ cpus = avail_cpus(ncpus) # can give optional argument with number of cpus available in the system
+ lock = thread.allocate_lock()
+ # Make the same number of threads as there are cpus.
+ for cpu in cpus:
+ lock.acquire_lock()
+ running[0] += 1
+ lock.release_lock()
+ thread.start_new_thread(thread_command, (running, cpu, lock, tasks))
+
+ # Wait for threads to finish (I don't think there is a wait_threads)
+ while running[0] > 0:
+            time.sleep(2) # sleep for 2 seconds before checking if the threads have finished,
+                          # to avoid spending too much cpu time waiting
+
+ # All done.
+ print "All threads have exited"
+
+
+# Safeguard against import
+if __name__ == "__main__":
+ main(sys.argv[1:])
+
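+# Example usage (hypothetical commands and file names). Given a task file
+#
+#   $ cat tasks.txt
+#   ./model.x run1.log 0.1
+#   ./model.x run2.log 0.2
+#
+# "python multirun.py tasks.txt 2" creates task_1 and task_2, copies srun.x and the
+# corresponding row of tasks.txt (as args.in) into each, and srun.x then runs
+# "./model.x 0.1 > run1.log" etc., two tasks at a time.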
diff --git a/PyUQTk/multirun/srun.x b/PyUQTk/multirun/srun.x
new file mode 100755
index 00000000..6f657f8e
--- /dev/null
+++ b/PyUQTk/multirun/srun.x
@@ -0,0 +1,25 @@
+#!/bin/bash
+#=====================================================================================
+
+# This script is run automatically via multirun.py
+
+
+# Get the script name (first entry of args.in)
+SCRIPT=`cut -f 1 -d" " args.in`
+# Get the output file name to dump the screen-output (second entry of args.in)
+OUT=`cut -f 2 -d" " args.in`
+# The rest of entries in args.in are parameters of the script
+ARGUM=`cut -f 3- -d" " args.in`
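+
+# Hypothetical example: if args.in contains "./model.x run.log 0.1 0.5", this script
+# runs "./model.x 0.1 0.5" and redirects its screen output to run.log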
+
+# Informational print
+THIS=`basename $PWD`
+echo "Running $SCRIPT $ARGUM > $OUT in $THIS"
+
+##echo $(< args.in)
+
+# Running the script
+cd ..
+SCRIPT_ABS=`echo "$(cd "$(dirname "$SCRIPT")"; pwd)/$(basename "$SCRIPT")"`
+cd -
+ln -sf $SCRIPT_ABS linkToScript
+./linkToScript $ARGUM > $OUT
diff --git a/PyUQTk/numpy.cmake b/PyUQTk/numpy.cmake
new file mode 100644
index 00000000..46f34a65
--- /dev/null
+++ b/PyUQTk/numpy.cmake
@@ -0,0 +1,27 @@
+IF (NUMPY_INCLUDE_DIR)
+ SET(NUMPY_FIND_QUIETLY TRUE)
+ENDIF (NUMPY_INCLUDE_DIR)
+
+# To set the variables PYTHON_EXECUTABLE
+FIND_PACKAGE(PythonInterp QUIET REQUIRED)
+FIND_PACKAGE(PythonLibs QUIET REQUIRED)
+
+# Look for the include path
+# WARNING: The variable PYTHON_EXECUTABLE is defined by the script FindPythonInterp.cmake
+EXECUTE_PROCESS(COMMAND "${PYTHON_EXECUTABLE}" -c "import numpy; print (numpy.get_include()); print (numpy.version.version)"
+ OUTPUT_VARIABLE NUMPY_OUTPUT
+ ERROR_VARIABLE NUMPY_ERROR)
+
+IF(NOT NUMPY_ERROR)
+ STRING(REPLACE "\n" ";" NUMPY_OUTPUT ${NUMPY_OUTPUT})
+ LIST(GET NUMPY_OUTPUT 0 NUMPY_INCLUDE_DIR)
+ LIST(GET NUMPY_OUTPUT 1 NUMPY_VERSION)
+ENDIF(NOT NUMPY_ERROR)
+
+INCLUDE(FindPackageHandleStandardArgs)
+
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(NumPy DEFAULT_MSG NUMPY_VERSION NUMPY_INCLUDE_DIR)
+
+MARK_AS_ADVANCED(NUMPY_INCLUDE_DIR)
+
+INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR})
diff --git a/PyUQTk/numpy/numpy.i b/PyUQTk/numpy/numpy.i
new file mode 100644
index 00000000..18162505
--- /dev/null
+++ b/PyUQTk/numpy/numpy.i
@@ -0,0 +1,3083 @@
+/* -*- C -*- (not really, but good for syntax highlighting) */
+#ifdef SWIGPYTHON
+
+%{
+#ifndef SWIG_FILE_WITH_INIT
+#define NO_IMPORT_ARRAY
+#endif
+#include "stdio.h"
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#include <numpy/arrayobject.h>
+%}
+
+/**********************************************************************/
+
+%fragment("NumPy_Backward_Compatibility", "header")
+{
+%#if NPY_API_VERSION < 0x00000007
+%#define NPY_ARRAY_DEFAULT NPY_DEFAULT
+%#define NPY_ARRAY_FARRAY NPY_FARRAY
+%#define NPY_FORTRANORDER NPY_FORTRAN
+%#endif
+}
+
+/**********************************************************************/
+
+/* The following code originally appeared in
+ * enthought/kiva/agg/src/numeric.i written by Eric Jones. It was
+ * translated from C++ to C by John Hunter. Bill Spotz has modified
+ * it to fix some minor bugs, upgrade from Numeric to numpy (all
+ * versions), add some comments and functionality, and convert from
+ * direct code insertion to SWIG fragments.
+ */
+
+%fragment("NumPy_Macros", "header")
+{
+/* Macros to extract array attributes.
+ */
+%#if NPY_API_VERSION < 0x00000007
+%#define is_array(a) ((a) && PyArray_Check((PyArrayObject*)a))
+%#define array_type(a) (int)(PyArray_TYPE((PyArrayObject*)a))
+%#define array_numdims(a) (((PyArrayObject*)a)->nd)
+%#define array_dimensions(a) (((PyArrayObject*)a)->dimensions)
+%#define array_size(a,i) (((PyArrayObject*)a)->dimensions[i])
+%#define array_strides(a) (((PyArrayObject*)a)->strides)
+%#define array_stride(a,i) (((PyArrayObject*)a)->strides[i])
+%#define array_data(a) (((PyArrayObject*)a)->data)
+%#define array_descr(a) (((PyArrayObject*)a)->descr)
+%#define array_flags(a) (((PyArrayObject*)a)->flags)
+%#define array_enableflags(a,f) (((PyArrayObject*)a)->flags) = f
+%#else
+%#define is_array(a) ((a) && PyArray_Check(a))
+%#define array_type(a) PyArray_TYPE((PyArrayObject*)a)
+%#define array_numdims(a) PyArray_NDIM((PyArrayObject*)a)
+%#define array_dimensions(a) PyArray_DIMS((PyArrayObject*)a)
+%#define array_strides(a) PyArray_STRIDES((PyArrayObject*)a)
+%#define array_stride(a,i) PyArray_STRIDE((PyArrayObject*)a,i)
+%#define array_size(a,i) PyArray_DIM((PyArrayObject*)a,i)
+%#define array_data(a) PyArray_DATA((PyArrayObject*)a)
+%#define array_descr(a) PyArray_DESCR((PyArrayObject*)a)
+%#define array_flags(a) PyArray_FLAGS((PyArrayObject*)a)
+%#define array_enableflags(a,f) PyArray_ENABLEFLAGS((PyArrayObject*)a,f)
+%#endif
+%#define array_is_contiguous(a) (PyArray_ISCONTIGUOUS((PyArrayObject*)a))
+%#define array_is_native(a) (PyArray_ISNOTSWAPPED((PyArrayObject*)a))
+%#define array_is_fortran(a) (PyArray_ISFORTRAN((PyArrayObject*)a))
+}
+
+/**********************************************************************/
+
+%fragment("NumPy_Utilities",
+ "header")
+{
+ /* Given a PyObject, return a string describing its type.
+ */
+ const char* pytype_string(PyObject* py_obj)
+ {
+ if (py_obj == NULL ) return "C NULL value";
+ if (py_obj == Py_None ) return "Python None" ;
+ if (PyCallable_Check(py_obj)) return "callable" ;
+ if (PyString_Check( py_obj)) return "string" ;
+ if (PyInt_Check( py_obj)) return "int" ;
+ if (PyFloat_Check( py_obj)) return "float" ;
+ if (PyDict_Check( py_obj)) return "dict" ;
+ if (PyList_Check( py_obj)) return "list" ;
+ if (PyTuple_Check( py_obj)) return "tuple" ;
+%#if PY_MAJOR_VERSION < 3
+ if (PyFile_Check( py_obj)) return "file" ;
+ if (PyModule_Check( py_obj)) return "module" ;
+ if (PyInstance_Check(py_obj)) return "instance" ;
+%#endif
+
+ return "unkown type";
+ }
+
+ /* Given a NumPy typecode, return a string describing the type.
+ */
+ const char* typecode_string(int typecode)
+ {
+ static const char* type_names[25] = {"bool",
+ "byte",
+ "unsigned byte",
+ "short",
+ "unsigned short",
+ "int",
+ "unsigned int",
+ "long",
+ "unsigned long",
+ "long long",
+ "unsigned long long",
+ "float",
+ "double",
+ "long double",
+ "complex float",
+ "complex double",
+ "complex long double",
+ "object",
+ "string",
+ "unicode",
+ "void",
+ "ntypes",
+ "notype",
+ "char",
+ "unknown"};
+ return typecode < 24 ? type_names[typecode] : type_names[24];
+ }
+
+ /* Make sure input has correct numpy type. This now just calls
+ PyArray_EquivTypenums().
+ */
+ int type_match(int actual_type,
+ int desired_type)
+ {
+ return PyArray_EquivTypenums(actual_type, desired_type);
+ }
+
+%#ifdef SWIGPY_USE_CAPSULE
+ void free_cap(PyObject * cap)
+ {
+ void* array = (void*) PyCapsule_GetPointer(cap,SWIGPY_CAPSULE_NAME);
+ if (array != NULL) free(array);
+ }
+%#endif
+
+
+}
+
+/**********************************************************************/
+
+%fragment("NumPy_Object_to_Array",
+ "header",
+ fragment="NumPy_Backward_Compatibility",
+ fragment="NumPy_Macros",
+ fragment="NumPy_Utilities")
+{
+ /* Given a PyObject pointer, cast it to a PyArrayObject pointer if
+ * legal. If not, set the python error string appropriately and
+ * return NULL.
+ */
+ PyArrayObject* obj_to_array_no_conversion(PyObject* input,
+ int typecode)
+ {
+ PyArrayObject* ary = NULL;
+ if (is_array(input) && (typecode == NPY_NOTYPE ||
+ PyArray_EquivTypenums(array_type(input), typecode)))
+ {
+ ary = (PyArrayObject*) input;
+ }
+ else if is_array(input)
+ {
+ const char* desired_type = typecode_string(typecode);
+ const char* actual_type = typecode_string(array_type(input));
+ PyErr_Format(PyExc_TypeError,
+ "Array of type '%s' required. Array of type '%s' given",
+ desired_type, actual_type);
+ ary = NULL;
+ }
+ else
+ {
+ const char* desired_type = typecode_string(typecode);
+ const char* actual_type = pytype_string(input);
+ PyErr_Format(PyExc_TypeError,
+ "Array of type '%s' required. A '%s' was given",
+ desired_type,
+ actual_type);
+ ary = NULL;
+ }
+ return ary;
+ }
+
+ /* Convert the given PyObject to a NumPy array with the given
+ * typecode. On success, return a valid PyArrayObject* with the
+ * correct type. On failure, the python error string will be set and
+ * the routine returns NULL.
+ */
+ PyArrayObject* obj_to_array_allow_conversion(PyObject* input,
+ int typecode,
+ int* is_new_object)
+ {
+ PyArrayObject* ary = NULL;
+ PyObject* py_obj;
+ if (is_array(input) && (typecode == NPY_NOTYPE ||
+ PyArray_EquivTypenums(array_type(input),typecode)))
+ {
+ ary = (PyArrayObject*) input;
+ *is_new_object = 0;
+ }
+ else
+ {
+ py_obj = PyArray_FROMANY(input, typecode, 0, 0, NPY_ARRAY_DEFAULT);
+ /* If NULL, PyArray_FromObject will have set python error value.*/
+ ary = (PyArrayObject*) py_obj;
+ *is_new_object = 1;
+ }
+ return ary;
+ }
+
+ /* Given a PyArrayObject, check to see if it is contiguous. If so,
+ * return the input pointer and flag it as not a new object. If it is
+ * not contiguous, create a new PyArrayObject using the original data,
+ * flag it as a new object and return the pointer.
+ */
+ PyArrayObject* make_contiguous(PyArrayObject* ary,
+ int* is_new_object,
+ int min_dims,
+ int max_dims)
+ {
+ PyArrayObject* result;
+ if (array_is_contiguous(ary))
+ {
+ result = ary;
+ *is_new_object = 0;
+ }
+ else
+ {
+ result = (PyArrayObject*) PyArray_ContiguousFromObject((PyObject*)ary,
+ array_type(ary),
+ min_dims,
+ max_dims);
+ *is_new_object = 1;
+ }
+ return result;
+ }
+
+ /* Given a PyArrayObject, check to see if it is Fortran-contiguous.
+   * If so, return the input pointer and flag it as not a new
+ * object. If it is not Fortran-contiguous, create a new
+ * PyArrayObject using the original data, flag it as a new object
+ * and return the pointer.
+ */
+ PyArrayObject* make_fortran(PyArrayObject* ary,
+ int* is_new_object)
+ {
+ PyArrayObject* result;
+ if (array_is_fortran(ary))
+ {
+ result = ary;
+ *is_new_object = 0;
+ }
+ else
+ {
+ Py_INCREF(array_descr(ary));
+ result = (PyArrayObject*) PyArray_FromArray(ary,
+ array_descr(ary),
+ NPY_FORTRANORDER);
+ *is_new_object = 1;
+ }
+ return result;
+ }
+
+ /* Convert a given PyObject to a contiguous PyArrayObject of the
+ * specified type. If the input object is not a contiguous
+ * PyArrayObject, a new one will be created and the new object flag
+ * will be set.
+ */
+ PyArrayObject* obj_to_array_contiguous_allow_conversion(PyObject* input,
+ int typecode,
+ int* is_new_object)
+ {
+ int is_new1 = 0;
+ int is_new2 = 0;
+ PyArrayObject* ary2;
+ PyArrayObject* ary1 = obj_to_array_allow_conversion(input,
+ typecode,
+ &is_new1);
+ if (ary1)
+ {
+ ary2 = make_contiguous(ary1, &is_new2, 0, 0);
+ if ( is_new1 && is_new2)
+ {
+ Py_DECREF(ary1);
+ }
+ ary1 = ary2;
+ }
+ *is_new_object = is_new1 || is_new2;
+ return ary1;
+ }
+
+ /* Convert a given PyObject to a Fortran-ordered PyArrayObject of the
+ * specified type. If the input object is not a Fortran-ordered
+ * PyArrayObject, a new one will be created and the new object flag
+ * will be set.
+ */
+ PyArrayObject* obj_to_array_fortran_allow_conversion(PyObject* input,
+ int typecode,
+ int* is_new_object)
+ {
+ int is_new1 = 0;
+ int is_new2 = 0;
+ PyArrayObject* ary2;
+ PyArrayObject* ary1 = obj_to_array_allow_conversion(input,
+ typecode,
+ &is_new1);
+ if (ary1)
+ {
+ ary2 = make_fortran(ary1, &is_new2);
+ if (is_new1 && is_new2)
+ {
+ Py_DECREF(ary1);
+ }
+ ary1 = ary2;
+ }
+ *is_new_object = is_new1 || is_new2;
+ return ary1;
+ }
+} /* end fragment */
+
+/**********************************************************************/
+
+%fragment("NumPy_Array_Requirements",
+ "header",
+ fragment="NumPy_Backward_Compatibility",
+ fragment="NumPy_Macros")
+{
+ /* Test whether a python object is contiguous. If array is
+ * contiguous, return 1. Otherwise, set the python error string and
+ * return 0.
+ */
+ int require_contiguous(PyArrayObject* ary)
+ {
+ int contiguous = 1;
+ if (!array_is_contiguous(ary))
+ {
+ PyErr_SetString(PyExc_TypeError,
+ "Array must be contiguous. A non-contiguous array was given");
+ contiguous = 0;
+ }
+ return contiguous;
+ }
+
+ /* Require that a numpy array is not byte-swapped. If the array is
+ * not byte-swapped, return 1. Otherwise, set the python error string
+ * and return 0.
+ */
+ int require_native(PyArrayObject* ary)
+ {
+ int native = 1;
+ if (!array_is_native(ary))
+ {
+ PyErr_SetString(PyExc_TypeError,
+ "Array must have native byteorder. "
+ "A byte-swapped array was given");
+ native = 0;
+ }
+ return native;
+ }
+
+ /* Require the given PyArrayObject to have a specified number of
+ * dimensions. If the array has the specified number of dimensions,
+ * return 1. Otherwise, set the python error string and return 0.
+ */
+ int require_dimensions(PyArrayObject* ary,
+ int exact_dimensions)
+ {
+ int success = 1;
+ if (array_numdims(ary) != exact_dimensions)
+ {
+ PyErr_Format(PyExc_TypeError,
+ "Array must have %d dimensions. Given array has %d dimensions",
+ exact_dimensions,
+ array_numdims(ary));
+ success = 0;
+ }
+ return success;
+ }
+
+ /* Require the given PyArrayObject to have one of a list of specified
+ * number of dimensions. If the array has one of the specified number
+ * of dimensions, return 1. Otherwise, set the python error string
+ * and return 0.
+ */
+ int require_dimensions_n(PyArrayObject* ary,
+ int* exact_dimensions,
+ int n)
+ {
+ int success = 0;
+ int i;
+ char dims_str[255] = "";
+ char s[255];
+ for (i = 0; i < n && !success; i++)
+ {
+ if (array_numdims(ary) == exact_dimensions[i])
+ {
+ success = 1;
+ }
+ }
+ if (!success)
+ {
+ for (i = 0; i < n-1; i++)
+ {
+ sprintf(s, "%d, ", exact_dimensions[i]);
+ strcat(dims_str,s);
+ }
+ sprintf(s, " or %d", exact_dimensions[n-1]);
+ strcat(dims_str,s);
+ PyErr_Format(PyExc_TypeError,
+ "Array must have %s dimensions. Given array has %d dimensions",
+ dims_str,
+ array_numdims(ary));
+ }
+ return success;
+ }
+
+ /* Require the given PyArrayObject to have a specified shape. If the
+ * array has the specified shape, return 1. Otherwise, set the python
+ * error string and return 0.
+ */
+ int require_size(PyArrayObject* ary,
+ npy_intp* size,
+ int n)
+ {
+ int i;
+ int success = 1;
+ int len;
+ char desired_dims[255] = "[";
+ char s[255];
+ char actual_dims[255] = "[";
+ for(i=0; i < n;i++)
+ {
+ if (size[i] != -1 && size[i] != array_size(ary,i))
+ {
+ success = 0;
+ }
+ }
+ if (!success)
+ {
+ for (i = 0; i < n; i++)
+ {
+ if (size[i] == -1)
+ {
+ sprintf(s, "*,");
+ }
+ else
+ {
+ sprintf(s, "%ld,", (long int)size[i]);
+ }
+ strcat(desired_dims,s);
+ }
+ len = strlen(desired_dims);
+ desired_dims[len-1] = ']';
+ for (i = 0; i < n; i++)
+ {
+ sprintf(s, "%ld,", (long int)array_size(ary,i));
+ strcat(actual_dims,s);
+ }
+ len = strlen(actual_dims);
+ actual_dims[len-1] = ']';
+ PyErr_Format(PyExc_TypeError,
+ "Array must have shape of %s. Given array has shape of %s",
+ desired_dims,
+ actual_dims);
+ }
+ return success;
+ }
+
+  /* Require the given PyArrayObject to be Fortran ordered. If the
+   * PyArrayObject is already Fortran ordered, do nothing. Else,
+ * set the Fortran ordering flag and recompute the strides.
+ */
+ int require_fortran(PyArrayObject* ary)
+ {
+ int success = 1;
+ int nd = array_numdims(ary);
+ int i;
+ npy_intp * strides = array_strides(ary);
+ if (array_is_fortran(ary)) return success;
+ /* Set the Fortran ordered flag */
+ array_enableflags(ary,NPY_ARRAY_FARRAY);
+ /* Recompute the strides */
+ strides[0] = strides[nd-1];
+ for (i=1; i < nd; ++i)
+ strides[i] = strides[i-1] * array_size(ary,i-1);
+ return success;
+ }
+}
+
+/* Combine all NumPy fragments into one for convenience */
+%fragment("NumPy_Fragments",
+ "header",
+ fragment="NumPy_Backward_Compatibility",
+ fragment="NumPy_Macros",
+ fragment="NumPy_Utilities",
+ fragment="NumPy_Object_to_Array",
+ fragment="NumPy_Array_Requirements")
+{
+}
+
+/* End John Hunter translation (with modifications by Bill Spotz)
+ */
+
+/* %numpy_typemaps() macro
+ *
+ * This macro defines a family of 74 typemaps that allow C arguments
+ * of the form
+ *
+ * 1. (DATA_TYPE IN_ARRAY1[ANY])
+ * 2. (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1)
+ * 3. (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1)
+ *
+ * 4. (DATA_TYPE IN_ARRAY2[ANY][ANY])
+ * 5. (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+ * 6. (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2)
+ * 7. (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+ * 8. (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2)
+ *
+ * 9. (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY])
+ * 10. (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+ * 11. (DATA_TYPE** IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+ * 12. (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3)
+ * 13. (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+ * 14. (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3)
+ *
+ * 15. (DATA_TYPE IN_ARRAY4[ANY][ANY][ANY][ANY])
+ * 16. (DATA_TYPE* IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+ * 17. (DATA_TYPE** IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+ *     18. (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* IN_ARRAY4)
+ * 19. (DATA_TYPE* IN_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+ * 20. (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* IN_FARRAY4)
+ *
+ * 21. (DATA_TYPE INPLACE_ARRAY1[ANY])
+ * 22. (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1)
+ * 23. (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1)
+ *
+ * 24. (DATA_TYPE INPLACE_ARRAY2[ANY][ANY])
+ * 25. (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+ * 26. (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2)
+ * 27. (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+ * 28. (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2)
+ *
+ * 29. (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY])
+ * 30. (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+ * 31. (DATA_TYPE** INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+ * 32. (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3)
+ * 33. (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+ * 34. (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3)
+ *
+ * 35. (DATA_TYPE INPLACE_ARRAY4[ANY][ANY][ANY][ANY])
+ * 36. (DATA_TYPE* INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+ * 37. (DATA_TYPE** INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+ * 38. (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* INPLACE_ARRAY4)
+ * 39. (DATA_TYPE* INPLACE_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+ * 40. (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* INPLACE_FARRAY4)
+ *
+ * 41. (DATA_TYPE ARGOUT_ARRAY1[ANY])
+ * 42. (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1)
+ * 43. (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1)
+ *
+ * 44. (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY])
+ *
+ * 45. (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY])
+ *
+ * 46. (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY])
+ *
+ * 47. (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1)
+ * 48. (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1)
+ *
+ * 49. (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
+ * 50. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2)
+ * 51. (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
+ * 52. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2)
+ *
+ * 53. (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
+ * 54. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3)
+ * 55. (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
+ * 56. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3)
+ *
+ * 57. (DATA_TYPE** ARGOUTVIEW_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
+ * 58. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEW_ARRAY4)
+ * 59. (DATA_TYPE** ARGOUTVIEW_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
+ * 60. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEW_FARRAY4)
+ *
+ * 61. (DATA_TYPE** ARGOUTVIEWM_ARRAY1, DIM_TYPE* DIM1)
+ * 62. (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEWM_ARRAY1)
+ *
+ * 63. (DATA_TYPE** ARGOUTVIEWM_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
+ * 64. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_ARRAY2)
+ * 65. (DATA_TYPE** ARGOUTVIEWM_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
+ * 66. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_FARRAY2)
+ *
+ * 67. (DATA_TYPE** ARGOUTVIEWM_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
+ * 68. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEWM_ARRAY3)
+ * 69. (DATA_TYPE** ARGOUTVIEWM_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
+ * 70. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEWM_FARRAY3)
+ *
+ * 71. (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
+ * 72. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEWM_ARRAY4)
+ * 73. (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
+ * 74. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEWM_FARRAY4)
+ *
+ * where "DATA_TYPE" is any type supported by the NumPy module, and
+ * "DIM_TYPE" is any int-like type suitable for specifying dimensions.
+ * The difference between "ARRAY" typemaps and "FARRAY" typemaps is
+ * that the "FARRAY" typemaps expect Fortran ordering of
+ * multidimensional arrays. In python, the dimensions will not need
+ * to be specified (except for the "DATA_TYPE* ARGOUT_ARRAY1"
+ * typemaps). The IN_ARRAYs can be a numpy array or any sequence that
+ * can be converted to a numpy array of the specified type. The
+ * INPLACE_ARRAYs must be numpy arrays of the appropriate type. The
+ * ARGOUT_ARRAYs will be returned as new numpy arrays of the
+ * appropriate type.
+ *
+ * These typemaps can be applied to existing functions using the
+ * %apply directive. For example:
+ *
+ * %apply (double* IN_ARRAY1, int DIM1) {(double* series, int length)};
+ * double prod(double* series, int length);
+ *
+ * %apply (int DIM1, int DIM2, double* INPLACE_ARRAY2)
+ * {(int rows, int cols, double* matrix )};
+ * void floor(int rows, int cols, double* matrix, double f);
+ *
+ * %apply (double IN_ARRAY3[ANY][ANY][ANY])
+ * {(double tensor[2][2][2] )};
+ * %apply (double ARGOUT_ARRAY3[ANY][ANY][ANY])
+ * {(double low[2][2][2] )};
+ * %apply (double ARGOUT_ARRAY3[ANY][ANY][ANY])
+ * {(double upp[2][2][2] )};
+ * void luSplit(double tensor[2][2][2],
+ * double low[2][2][2],
+ * double upp[2][2][2] );
+ *
+ * or directly with
+ *
+ * double prod(double* IN_ARRAY1, int DIM1);
+ *
+ * void floor(int DIM1, int DIM2, double* INPLACE_ARRAY2, double f);
+ *
+ * void luSplit(double IN_ARRAY3[ANY][ANY][ANY],
+ * double ARGOUT_ARRAY3[ANY][ANY][ANY],
+ * double ARGOUT_ARRAY3[ANY][ANY][ANY]);
+ */
+
+%define %numpy_typemaps(DATA_TYPE, DATA_TYPECODE, DIM_TYPE)
+
+/************************/
+/* Input Array Typemaps */
+/************************/
+
+/* Typemap suite for (DATA_TYPE IN_ARRAY1[ANY])
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE IN_ARRAY1[ANY])
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE IN_ARRAY1[ANY])
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[1] = { $1_dim0 };
+ array = obj_to_array_contiguous_allow_conversion($input,
+ DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 1) ||
+ !require_size(array, size, 1)) SWIG_fail;
+ $1 = ($1_ltype) array_data(array);
+}
+%typemap(freearg)
+ (DATA_TYPE IN_ARRAY1[ANY])
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/* Typemap suite for (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1)
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1)
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[1] = { -1 };
+ array = obj_to_array_contiguous_allow_conversion($input,
+ DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 1) ||
+ !require_size(array, size, 1)) SWIG_fail;
+ $1 = (DATA_TYPE*) array_data(array);
+ $2 = (DIM_TYPE) array_size(array,0);
+}
+%typemap(freearg)
+ (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1)
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1)
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1)
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[1] = {-1};
+ array = obj_to_array_contiguous_allow_conversion($input,
+ DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 1) ||
+ !require_size(array, size, 1)) SWIG_fail;
+ $1 = (DIM_TYPE) array_size(array,0);
+ $2 = (DATA_TYPE*) array_data(array);
+}
+%typemap(freearg)
+ (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1)
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/* Typemap suite for (DATA_TYPE IN_ARRAY2[ANY][ANY])
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE IN_ARRAY2[ANY][ANY])
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE IN_ARRAY2[ANY][ANY])
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[2] = { $1_dim0, $1_dim1 };
+ array = obj_to_array_contiguous_allow_conversion($input,
+ DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 2) ||
+ !require_size(array, size, 2)) SWIG_fail;
+ $1 = ($1_ltype) array_data(array);
+}
+%typemap(freearg)
+ (DATA_TYPE IN_ARRAY2[ANY][ANY])
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/* Typemap suite for (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[2] = { -1, -1 };
+ array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 2) ||
+ !require_size(array, size, 2)) SWIG_fail;
+ $1 = (DATA_TYPE*) array_data(array);
+ $2 = (DIM_TYPE) array_size(array,0);
+ $3 = (DIM_TYPE) array_size(array,1);
+}
+%typemap(freearg)
+ (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2)
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2)
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[2] = { -1, -1 };
+ array = obj_to_array_contiguous_allow_conversion($input,
+ DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 2) ||
+ !require_size(array, size, 2)) SWIG_fail;
+ $1 = (DIM_TYPE) array_size(array,0);
+ $2 = (DIM_TYPE) array_size(array,1);
+ $3 = (DATA_TYPE*) array_data(array);
+}
+%typemap(freearg)
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2)
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/* Typemap suite for (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[2] = { -1, -1 };
+ array = obj_to_array_fortran_allow_conversion($input,
+ DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 2) ||
+ !require_size(array, size, 2) || !require_fortran(array)) SWIG_fail;
+ $1 = (DATA_TYPE*) array_data(array);
+ $2 = (DIM_TYPE) array_size(array,0);
+ $3 = (DIM_TYPE) array_size(array,1);
+}
+%typemap(freearg)
+ (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2)
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2)
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[2] = { -1, -1 };
+ array = obj_to_array_fortran_allow_conversion($input,
+ DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 2) ||
+ !require_size(array, size, 2) || !require_fortran(array)) SWIG_fail;
+ $1 = (DIM_TYPE) array_size(array,0);
+ $2 = (DIM_TYPE) array_size(array,1);
+ $3 = (DATA_TYPE*) array_data(array);
+}
+%typemap(freearg)
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2)
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/* Typemap suite for (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY])
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY])
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY])
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[3] = { $1_dim0, $1_dim1, $1_dim2 };
+ array = obj_to_array_contiguous_allow_conversion($input,
+ DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 3) ||
+ !require_size(array, size, 3)) SWIG_fail;
+ $1 = ($1_ltype) array_data(array);
+}
+%typemap(freearg)
+ (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY])
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/* Typemap suite for (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2,
+ * DIM_TYPE DIM3)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[3] = { -1, -1, -1 };
+ array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 3) ||
+ !require_size(array, size, 3)) SWIG_fail;
+ $1 = (DATA_TYPE*) array_data(array);
+ $2 = (DIM_TYPE) array_size(array,0);
+ $3 = (DIM_TYPE) array_size(array,1);
+ $4 = (DIM_TYPE) array_size(array,2);
+}
+%typemap(freearg)
+ (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/* Typemap suite for (DATA_TYPE** IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2,
+ * DIM_TYPE DIM3)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE** IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+{
+ /* for now, only concerned with lists */
+ $1 = PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE** IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+ (DATA_TYPE** array=NULL, PyArrayObject** object_array=NULL, int* is_new_object_array=NULL)
+{
+ npy_intp size[2] = { -1, -1 };
+ PyArrayObject* temp_array;
+ Py_ssize_t i;
+ int is_new_object;
+
+ /* length of the list */
+ $2 = PyList_Size($input);
+
+ /* the arrays */
+ array = (DATA_TYPE **)malloc($2*sizeof(DATA_TYPE *));
+ object_array = (PyArrayObject **)calloc($2,sizeof(PyArrayObject *));
+ is_new_object_array = (int *)calloc($2,sizeof(int));
+
+ if (array == NULL || object_array == NULL || is_new_object_array == NULL)
+ {
+ SWIG_fail;
+ }
+
+ for (i=0; i<$2; i++)
+ {
+ temp_array = obj_to_array_contiguous_allow_conversion(PySequence_GetItem($input,i), DATA_TYPECODE, &is_new_object);
+
+ /* the new array must be stored so that it can be destroyed in freearg */
+ object_array[i] = temp_array;
+ is_new_object_array[i] = is_new_object;
+
+ if (!temp_array || !require_dimensions(temp_array, 2)) SWIG_fail;
+
+ /* store the size of the first array in the list, then use that for comparison. */
+ if (i == 0)
+ {
+ size[0] = array_size(temp_array,0);
+ size[1] = array_size(temp_array,1);
+ }
+
+ if (!require_size(temp_array, size, 2)) SWIG_fail;
+
+ array[i] = (DATA_TYPE*) array_data(temp_array);
+ }
+
+ $1 = (DATA_TYPE**) array;
+ $3 = (DIM_TYPE) size[0];
+ $4 = (DIM_TYPE) size[1];
+}
+%typemap(freearg)
+ (DATA_TYPE** IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+{
+ Py_ssize_t i;
+
+ if (array$argnum!=NULL) free(array$argnum);
+
+ /* free the individual arrays if needed */
+ if (object_array$argnum!=NULL)
+ {
+ if (is_new_object_array$argnum!=NULL)
+ {
+ for (i=0; i<$2; i++)
+ {
+ if (object_array$argnum[i] != NULL && is_new_object_array$argnum[i])
+ { Py_DECREF(object_array$argnum[i]); }
+ }
+ free(is_new_object_array$argnum);
+ }
+ free(object_array$argnum);
+ }
+}
+
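+/* Usage sketch (hypothetical names): unlike the single-pointer variants, the
+ * DATA_TYPE** form above accepts a Python list of equally shaped 2D arrays:
+ *
+ *   %apply (double** IN_ARRAY3, int DIM1, int DIM2, int DIM3)
+ *          {(double** frames, int nframes, int rows, int cols)};
+ *   double sum_frames(double** frames, int nframes, int rows, int cols);
+ *
+ * DIM1 receives the list length; DIM2 and DIM3 the per-element shape, which
+ * every element must share with the first.
+ */
+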
+/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3,
+ * DATA_TYPE* IN_ARRAY3)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3)
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3)
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[3] = { -1, -1, -1 };
+ array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 3) ||
+ !require_size(array, size, 3)) SWIG_fail;
+ $1 = (DIM_TYPE) array_size(array,0);
+ $2 = (DIM_TYPE) array_size(array,1);
+ $3 = (DIM_TYPE) array_size(array,2);
+ $4 = (DATA_TYPE*) array_data(array);
+}
+%typemap(freearg)
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3)
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/* Typemap suite for (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2,
+ * DIM_TYPE DIM3)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[3] = { -1, -1, -1 };
+ array = obj_to_array_fortran_allow_conversion($input, DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 3) ||
+ !require_size(array, size, 3) || !require_fortran(array)) SWIG_fail;
+ $1 = (DATA_TYPE*) array_data(array);
+ $2 = (DIM_TYPE) array_size(array,0);
+ $3 = (DIM_TYPE) array_size(array,1);
+ $4 = (DIM_TYPE) array_size(array,2);
+}
+%typemap(freearg)
+ (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3,
+ * DATA_TYPE* IN_FARRAY3)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3)
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3)
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[3] = { -1, -1, -1 };
+ array = obj_to_array_fortran_allow_conversion($input,
+ DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 3) ||
+ !require_size(array, size, 3) || !require_fortran(array)) SWIG_fail;
+ $1 = (DIM_TYPE) array_size(array,0);
+ $2 = (DIM_TYPE) array_size(array,1);
+ $3 = (DIM_TYPE) array_size(array,2);
+ $4 = (DATA_TYPE*) array_data(array);
+}
+%typemap(freearg)
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3)
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/* Typemap suite for (DATA_TYPE IN_ARRAY4[ANY][ANY][ANY][ANY])
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE IN_ARRAY4[ANY][ANY][ANY][ANY])
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE IN_ARRAY4[ANY][ANY][ANY][ANY])
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[4] = { $1_dim0, $1_dim1, $1_dim2 , $1_dim3};
+ array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 4) ||
+ !require_size(array, size, 4)) SWIG_fail;
+ $1 = ($1_ltype) array_data(array);
+}
+%typemap(freearg)
+ (DATA_TYPE IN_ARRAY4[ANY][ANY][ANY][ANY])
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/* Typemap suite for (DATA_TYPE* IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2,
+ * DIM_TYPE DIM3, DIM_TYPE DIM4)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE* IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE* IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[4] = { -1, -1, -1, -1 };
+ array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 4) ||
+ !require_size(array, size, 4)) SWIG_fail;
+ $1 = (DATA_TYPE*) array_data(array);
+ $2 = (DIM_TYPE) array_size(array,0);
+ $3 = (DIM_TYPE) array_size(array,1);
+ $4 = (DIM_TYPE) array_size(array,2);
+ $5 = (DIM_TYPE) array_size(array,3);
+}
+%typemap(freearg)
+ (DATA_TYPE* IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
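+/* Usage sketch (hypothetical names):
+ *
+ *   %apply (double* IN_ARRAY4, int DIM1, int DIM2, int DIM3, int DIM4)
+ *          {(double* vol, int nt, int nz, int ny, int nx)};
+ *   double total(double* vol, int nt, int nz, int ny, int nx);
+ *
+ * Any nested sequence convertible to a contiguous 4D array of the requested
+ * dtype is accepted; temporary conversions are freed in freearg.
+ */
+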
+/* Typemap suite for (DATA_TYPE** IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2,
+ * DIM_TYPE DIM3, DIM_TYPE DIM4)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE** IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+{
+ /* for now, only concerned with lists */
+ $1 = PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE** IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+ (DATA_TYPE** array=NULL, PyArrayObject** object_array=NULL, int* is_new_object_array=NULL)
+{
+ npy_intp size[3] = { -1, -1, -1 };
+ PyArrayObject* temp_array;
+ Py_ssize_t i;
+ int is_new_object;
+
+ /* length of the list */
+ $2 = PyList_Size($input);
+
+ /* the arrays */
+ array = (DATA_TYPE **)malloc($2*sizeof(DATA_TYPE *));
+ object_array = (PyArrayObject **)calloc($2,sizeof(PyArrayObject *));
+ is_new_object_array = (int *)calloc($2,sizeof(int));
+
+ if (array == NULL || object_array == NULL || is_new_object_array == NULL)
+ {
+ SWIG_fail;
+ }
+
+ for (i=0; i<$2; i++)
+ {
+ temp_array = obj_to_array_contiguous_allow_conversion(PySequence_GetItem($input,i), DATA_TYPECODE, &is_new_object);
+
+ /* the new array must be stored so that it can be destroyed in freearg */
+ object_array[i] = temp_array;
+ is_new_object_array[i] = is_new_object;
+
+ if (!temp_array || !require_dimensions(temp_array, 3)) SWIG_fail;
+
+ /* store the size of the first array in the list, then use that for comparison. */
+ if (i == 0)
+ {
+ size[0] = array_size(temp_array,0);
+ size[1] = array_size(temp_array,1);
+ size[2] = array_size(temp_array,2);
+ }
+
+ if (!require_size(temp_array, size, 3)) SWIG_fail;
+
+ array[i] = (DATA_TYPE*) array_data(temp_array);
+ }
+
+ $1 = (DATA_TYPE**) array;
+ $3 = (DIM_TYPE) size[0];
+ $4 = (DIM_TYPE) size[1];
+ $5 = (DIM_TYPE) size[2];
+}
+%typemap(freearg)
+ (DATA_TYPE** IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+{
+ Py_ssize_t i;
+
+ if (array$argnum!=NULL) free(array$argnum);
+
+ /* free the individual arrays if needed */
+ if (object_array$argnum!=NULL)
+ {
+ if (is_new_object_array$argnum!=NULL)
+ {
+ for (i=0; i<$2; i++)
+ {
+ if (object_array$argnum[i] != NULL && is_new_object_array$argnum[i])
+ { Py_DECREF(object_array$argnum[i]); }
+ }
+ free(is_new_object_array$argnum);
+ }
+ free(object_array$argnum);
+ }
+}
+
+/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4,
+ * DATA_TYPE* IN_ARRAY4)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* IN_ARRAY4)
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* IN_ARRAY4)
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[4] = { -1, -1, -1 , -1};
+ array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 4) ||
+ !require_size(array, size, 4)) SWIG_fail;
+ $1 = (DIM_TYPE) array_size(array,0);
+ $2 = (DIM_TYPE) array_size(array,1);
+ $3 = (DIM_TYPE) array_size(array,2);
+ $4 = (DIM_TYPE) array_size(array,3);
+ $5 = (DATA_TYPE*) array_data(array);
+}
+%typemap(freearg)
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* IN_ARRAY4)
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/* Typemap suite for (DATA_TYPE* IN_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2,
+ * DIM_TYPE DIM3, DIM_TYPE DIM4)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE* IN_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE* IN_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[4] = { -1, -1, -1, -1 };
+ array = obj_to_array_fortran_allow_conversion($input, DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 4) ||
+ !require_size(array, size, 4) || !require_fortran(array)) SWIG_fail;
+ $1 = (DATA_TYPE*) array_data(array);
+ $2 = (DIM_TYPE) array_size(array,0);
+ $3 = (DIM_TYPE) array_size(array,1);
+ $4 = (DIM_TYPE) array_size(array,2);
+ $5 = (DIM_TYPE) array_size(array,3);
+}
+%typemap(freearg)
+ (DATA_TYPE* IN_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4,
+ * DATA_TYPE* IN_FARRAY4)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* IN_FARRAY4)
+{
+ $1 = is_array($input) || PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* IN_FARRAY4)
+ (PyArrayObject* array=NULL, int is_new_object=0)
+{
+ npy_intp size[4] = { -1, -1, -1 , -1 };
+ array = obj_to_array_fortran_allow_conversion($input, DATA_TYPECODE,
+ &is_new_object);
+ if (!array || !require_dimensions(array, 4) ||
+ !require_size(array, size, 4) || !require_fortran(array)) SWIG_fail;
+ $1 = (DIM_TYPE) array_size(array,0);
+ $2 = (DIM_TYPE) array_size(array,1);
+ $3 = (DIM_TYPE) array_size(array,2);
+ $4 = (DIM_TYPE) array_size(array,3);
+ $5 = (DATA_TYPE*) array_data(array);
+}
+%typemap(freearg)
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* IN_FARRAY4)
+{
+ if (is_new_object$argnum && array$argnum)
+ { Py_DECREF(array$argnum); }
+}
+
+/***************************/
+/* In-Place Array Typemaps */
+/***************************/
+
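+/* Unlike the IN typemaps above, the INPLACE typemaps use
+ * obj_to_array_no_conversion: the Python argument must already be a
+ * contiguous, native-endian NumPy array of exactly DATA_TYPECODE, and the
+ * wrapped function reads and writes through its buffer with no copy made.
+ */
+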
+/* Typemap suite for (DATA_TYPE INPLACE_ARRAY1[ANY])
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE INPLACE_ARRAY1[ANY])
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE INPLACE_ARRAY1[ANY])
+ (PyArrayObject* array=NULL)
+{
+ npy_intp size[1] = { $1_dim0 };
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,1) || !require_size(array, size, 1) ||
+ !require_contiguous(array) || !require_native(array)) SWIG_fail;
+ $1 = ($1_ltype) array_data(array);
+}
+
+/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1)
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1)
+ (PyArrayObject* array=NULL, int i=1)
+{
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,1) || !require_contiguous(array)
+ || !require_native(array)) SWIG_fail;
+ $1 = (DATA_TYPE*) array_data(array);
+ $2 = 1;
+ for (i=0; i < array_numdims(array); ++i) $2 *= array_size(array,i);
+}
+
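+/* Usage sketch (hypothetical names):
+ *
+ *   %apply (double* INPLACE_ARRAY1, int DIM1) {(double* x, int n)};
+ *   void scale(double* x, int n);   /* modifies the caller's array directly */
+ *
+ * The argument must be a one-dimensional array; DIM1 receives its length.
+ */
+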
+/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1)
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1)
+ (PyArrayObject* array=NULL, int i=0)
+{
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,1) || !require_contiguous(array)
+ || !require_native(array)) SWIG_fail;
+ $1 = 1;
+ for (i=0; i < array_numdims(array); ++i) $1 *= array_size(array,i);
+ $2 = (DATA_TYPE*) array_data(array);
+}
+
+/* Typemap suite for (DATA_TYPE INPLACE_ARRAY2[ANY][ANY])
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE INPLACE_ARRAY2[ANY][ANY])
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE INPLACE_ARRAY2[ANY][ANY])
+ (PyArrayObject* array=NULL)
+{
+ npy_intp size[2] = { $1_dim0, $1_dim1 };
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,2) || !require_size(array, size, 2) ||
+ !require_contiguous(array) || !require_native(array)) SWIG_fail;
+ $1 = ($1_ltype) array_data(array);
+}
+
+/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+ (PyArrayObject* array=NULL)
+{
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,2) || !require_contiguous(array)
+ || !require_native(array)) SWIG_fail;
+ $1 = (DATA_TYPE*) array_data(array);
+ $2 = (DIM_TYPE) array_size(array,0);
+ $3 = (DIM_TYPE) array_size(array,1);
+}
+
+/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2)
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2)
+ (PyArrayObject* array=NULL)
+{
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,2) || !require_contiguous(array) ||
+ !require_native(array)) SWIG_fail;
+ $1 = (DIM_TYPE) array_size(array,0);
+ $2 = (DIM_TYPE) array_size(array,1);
+ $3 = (DATA_TYPE*) array_data(array);
+}
+
+/* Typemap suite for (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
+ (PyArrayObject* array=NULL)
+{
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,2) || !require_contiguous(array)
+ || !require_native(array) || !require_fortran(array)) SWIG_fail;
+ $1 = (DATA_TYPE*) array_data(array);
+ $2 = (DIM_TYPE) array_size(array,0);
+ $3 = (DIM_TYPE) array_size(array,1);
+}
+
+/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2)
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2)
+ (PyArrayObject* array=NULL)
+{
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,2) || !require_contiguous(array) ||
+ !require_native(array) || !require_fortran(array)) SWIG_fail;
+ $1 = (DIM_TYPE) array_size(array,0);
+ $2 = (DIM_TYPE) array_size(array,1);
+ $3 = (DATA_TYPE*) array_data(array);
+}
+
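+/* Usage sketch (hypothetical names): the FARRAY2 in-place variants add a
+ * require_fortran step, so the buffer is presented to C in column-major
+ * order:
+ *
+ *   %apply (double* INPLACE_FARRAY2, int DIM1, int DIM2)
+ *          {(double* a, int m, int n)};
+ *   void factor_inplace(double* a, int m, int n);
+ */
+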
+/* Typemap suite for (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY])
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY])
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY])
+ (PyArrayObject* array=NULL)
+{
+ npy_intp size[3] = { $1_dim0, $1_dim1, $1_dim2 };
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,3) || !require_size(array, size, 3) ||
+ !require_contiguous(array) || !require_native(array)) SWIG_fail;
+ $1 = ($1_ltype) array_data(array);
+}
+
+/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2,
+ * DIM_TYPE DIM3)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+ (PyArrayObject* array=NULL)
+{
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,3) || !require_contiguous(array) ||
+ !require_native(array)) SWIG_fail;
+ $1 = (DATA_TYPE*) array_data(array);
+ $2 = (DIM_TYPE) array_size(array,0);
+ $3 = (DIM_TYPE) array_size(array,1);
+ $4 = (DIM_TYPE) array_size(array,2);
+}
+
+/* Typemap suite for (DATA_TYPE** INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2,
+ * DIM_TYPE DIM3)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE** INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+{
+ $1 = PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE** INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+ (DATA_TYPE** array=NULL, PyArrayObject** object_array=NULL)
+{
+ npy_intp size[2] = { -1, -1 };
+ PyArrayObject* temp_array;
+ Py_ssize_t i;
+
+ /* length of the list */
+ $2 = PyList_Size($input);
+
+ /* the arrays */
+ array = (DATA_TYPE **)malloc($2*sizeof(DATA_TYPE *));
+ object_array = (PyArrayObject **)calloc($2,sizeof(PyArrayObject *));
+
+ if (array == NULL || object_array == NULL)
+ {
+ SWIG_fail;
+ }
+
+ for (i=0; i<$2; i++)
+ {
+ temp_array = obj_to_array_no_conversion(PySequence_GetItem($input,i), DATA_TYPECODE);
+
+ /* the new array must be stored so that it can be destroyed in freearg */
+ object_array[i] = temp_array;
+
+ if ( !temp_array || !require_dimensions(temp_array, 2) ||
+ !require_contiguous(temp_array) ||
+ !require_native(temp_array) ||
+ !PyArray_EquivTypenums(array_type(temp_array), DATA_TYPECODE)
+ ) SWIG_fail;
+
+ /* store the size of the first array in the list, then use that for comparison. */
+ if (i == 0)
+ {
+ size[0] = array_size(temp_array,0);
+ size[1] = array_size(temp_array,1);
+ }
+
+ if (!require_size(temp_array, size, 2)) SWIG_fail;
+
+ array[i] = (DATA_TYPE*) array_data(temp_array);
+ }
+
+ $1 = (DATA_TYPE**) array;
+ $3 = (DIM_TYPE) size[0];
+ $4 = (DIM_TYPE) size[1];
+}
+%typemap(freearg)
+ (DATA_TYPE** INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+{
+ if (array$argnum!=NULL) free(array$argnum);
+ if (object_array$argnum!=NULL) free(object_array$argnum);
+}
+
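+/* Usage sketch (hypothetical names): as with the IN variant, the DATA_TYPE**
+ * in-place form takes a Python list of equally shaped 2D arrays, each of
+ * which is modified through its own buffer:
+ *
+ *   %apply (double** INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3)
+ *          {(double** imgs, int nimg, int rows, int cols)};
+ *   void normalize_all(double** imgs, int nimg, int rows, int cols);
+ */
+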
+/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3,
+ * DATA_TYPE* INPLACE_ARRAY3)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3)
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3)
+ (PyArrayObject* array=NULL)
+{
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,3) || !require_contiguous(array)
+ || !require_native(array)) SWIG_fail;
+ $1 = (DIM_TYPE) array_size(array,0);
+ $2 = (DIM_TYPE) array_size(array,1);
+ $3 = (DIM_TYPE) array_size(array,2);
+ $4 = (DATA_TYPE*) array_data(array);
+}
+
+/* Typemap suite for (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2,
+ * DIM_TYPE DIM3)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
+ (PyArrayObject* array=NULL)
+{
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,3) || !require_contiguous(array) ||
+ !require_native(array) || !require_fortran(array)) SWIG_fail;
+ $1 = (DATA_TYPE*) array_data(array);
+ $2 = (DIM_TYPE) array_size(array,0);
+ $3 = (DIM_TYPE) array_size(array,1);
+ $4 = (DIM_TYPE) array_size(array,2);
+}
+
+/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3,
+ * DATA_TYPE* INPLACE_FARRAY3)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3)
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3)
+ (PyArrayObject* array=NULL)
+{
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,3) || !require_contiguous(array)
+ || !require_native(array) || !require_fortran(array)) SWIG_fail;
+ $1 = (DIM_TYPE) array_size(array,0);
+ $2 = (DIM_TYPE) array_size(array,1);
+ $3 = (DIM_TYPE) array_size(array,2);
+ $4 = (DATA_TYPE*) array_data(array);
+}
+
+/* Typemap suite for (DATA_TYPE INPLACE_ARRAY4[ANY][ANY][ANY][ANY])
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE INPLACE_ARRAY4[ANY][ANY][ANY][ANY])
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE INPLACE_ARRAY4[ANY][ANY][ANY][ANY])
+ (PyArrayObject* array=NULL)
+{
+ npy_intp size[4] = { $1_dim0, $1_dim1, $1_dim2 , $1_dim3 };
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,4) || !require_size(array, size, 4) ||
+ !require_contiguous(array) || !require_native(array)) SWIG_fail;
+ $1 = ($1_ltype) array_data(array);
+}
+
+/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2,
+ * DIM_TYPE DIM3, DIM_TYPE DIM4)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE* INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE* INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+ (PyArrayObject* array=NULL)
+{
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,4) || !require_contiguous(array) ||
+ !require_native(array)) SWIG_fail;
+ $1 = (DATA_TYPE*) array_data(array);
+ $2 = (DIM_TYPE) array_size(array,0);
+ $3 = (DIM_TYPE) array_size(array,1);
+ $4 = (DIM_TYPE) array_size(array,2);
+ $5 = (DIM_TYPE) array_size(array,3);
+}
+
+/* Typemap suite for (DATA_TYPE** INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2,
+ * DIM_TYPE DIM3, DIM_TYPE DIM4)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE** INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+{
+ $1 = PySequence_Check($input);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE** INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+ (DATA_TYPE** array=NULL, PyArrayObject** object_array=NULL)
+{
+ npy_intp size[3] = { -1, -1, -1 };
+ PyArrayObject* temp_array;
+ Py_ssize_t i;
+
+ /* length of the list */
+ $2 = PyList_Size($input);
+
+ /* the arrays */
+ array = (DATA_TYPE **)malloc($2*sizeof(DATA_TYPE *));
+ object_array = (PyArrayObject **)calloc($2,sizeof(PyArrayObject *));
+
+ if (array == NULL || object_array == NULL)
+ {
+ SWIG_fail;
+ }
+
+ for (i=0; i<$2; i++)
+ {
+ temp_array = obj_to_array_no_conversion(PySequence_GetItem($input,i), DATA_TYPECODE);
+
+ /* the new array must be stored so that it can be destroyed in freearg */
+ object_array[i] = temp_array;
+
+ if ( !temp_array || !require_dimensions(temp_array, 3) ||
+ !require_contiguous(temp_array) ||
+ !require_native(temp_array) ||
+ !PyArray_EquivTypenums(array_type(temp_array), DATA_TYPECODE)
+ ) SWIG_fail;
+
+ /* store the size of the first array in the list, then use that for comparison. */
+ if (i == 0)
+ {
+ size[0] = array_size(temp_array,0);
+ size[1] = array_size(temp_array,1);
+ size[2] = array_size(temp_array,2);
+ }
+
+ if (!require_size(temp_array, size, 3)) SWIG_fail;
+
+ array[i] = (DATA_TYPE*) array_data(temp_array);
+ }
+
+ $1 = (DATA_TYPE**) array;
+ $3 = (DIM_TYPE) size[0];
+ $4 = (DIM_TYPE) size[1];
+ $5 = (DIM_TYPE) size[2];
+}
+%typemap(freearg)
+ (DATA_TYPE** INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+{
+ if (array$argnum!=NULL) free(array$argnum);
+ if (object_array$argnum!=NULL) free(object_array$argnum);
+}
+
+/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4,
+ * DATA_TYPE* INPLACE_ARRAY4)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* INPLACE_ARRAY4)
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* INPLACE_ARRAY4)
+ (PyArrayObject* array=NULL)
+{
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,4) || !require_contiguous(array)
+ || !require_native(array)) SWIG_fail;
+ $1 = (DIM_TYPE) array_size(array,0);
+ $2 = (DIM_TYPE) array_size(array,1);
+ $3 = (DIM_TYPE) array_size(array,2);
+ $4 = (DIM_TYPE) array_size(array,3);
+ $5 = (DATA_TYPE*) array_data(array);
+}
+
+/* Typemap suite for (DATA_TYPE* INPLACE_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2,
+ * DIM_TYPE DIM3, DIM_TYPE DIM4)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DATA_TYPE* INPLACE_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE* INPLACE_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
+ (PyArrayObject* array=NULL)
+{
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,4) || !require_contiguous(array) ||
+ !require_native(array) || !require_fortran(array)) SWIG_fail;
+ $1 = (DATA_TYPE*) array_data(array);
+ $2 = (DIM_TYPE) array_size(array,0);
+ $3 = (DIM_TYPE) array_size(array,1);
+ $4 = (DIM_TYPE) array_size(array,2);
+ $5 = (DIM_TYPE) array_size(array,3);
+}
+
+/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4,
+ * DATA_TYPE* INPLACE_FARRAY4)
+ */
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
+ fragment="NumPy_Macros")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* INPLACE_FARRAY4)
+{
+ $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
+ DATA_TYPECODE);
+}
+%typemap(in,
+ fragment="NumPy_Fragments")
+ (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* INPLACE_FARRAY4)
+ (PyArrayObject* array=NULL)
+{
+ array = obj_to_array_no_conversion($input, DATA_TYPECODE);
+ if (!array || !require_dimensions(array,4) || !require_contiguous(array)
+ || !require_native(array) || !require_fortran(array)) SWIG_fail;
+ $1 = (DIM_TYPE) array_size(array,0);
+ $2 = (DIM_TYPE) array_size(array,1);
+ $3 = (DIM_TYPE) array_size(array,2);
+ $4 = (DIM_TYPE) array_size(array,3);
+ $5 = (DATA_TYPE*) array_data(array);
+}
+
+/*************************/
+/* Argout Array Typemaps */
+/*************************/
+
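+/* ARGOUT typemaps allocate a fresh NumPy array inside the wrapper, hand its
+ * buffer to the C routine as an output argument, and append the array to the
+ * Python return value. A hedged sketch (names hypothetical):
+ *
+ *   %apply (double ARGOUT_ARRAY1[ANY]) {(double rgb[3])};
+ *   void to_rgb(double rgb[3]);   /* Python: rgb = to_rgb() */
+ */
+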
+/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY1[ANY])
+ */
+%typemap(in,numinputs=0,
+ fragment="NumPy_Backward_Compatibility,NumPy_Macros")
+ (DATA_TYPE ARGOUT_ARRAY1[ANY])
+ (PyObject* array = NULL)
+{
+ npy_intp dims[1] = { $1_dim0 };
+ array = PyArray_SimpleNew(1, dims, DATA_TYPECODE);
+ if (!array) SWIG_fail;
+ $1 = ($1_ltype) array_data(array);
+}
+%typemap(argout)
+ (DATA_TYPE ARGOUT_ARRAY1[ANY])
+{
+ $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+}
+
+/* Typemap suite for (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1)
+ */
+%typemap(in,numinputs=1,
+ fragment="NumPy_Fragments")
+ (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1)
+ (PyObject* array = NULL)
+{
+ npy_intp dims[1];
+ if (!PyInt_Check($input))
+ {
+ const char* typestring = pytype_string($input);
+ PyErr_Format(PyExc_TypeError,
+ "Int dimension expected. '%s' given.",
+ typestring);
+ SWIG_fail;
+ }
+ $2 = (DIM_TYPE) PyInt_AsLong($input);
+ dims[0] = (npy_intp) $2;
+ array = PyArray_SimpleNew(1, dims, DATA_TYPECODE);
+ if (!array) SWIG_fail;
+ $1 = (DATA_TYPE*) array_data(array);
+}
+%typemap(argout)
+ (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1)
+{
+ $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+}
+
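+/* Usage sketch (hypothetical names): for this variant the single Python-side
+ * argument is the requested length, and the wrapper returns the new array:
+ *
+ *   %apply (double* ARGOUT_ARRAY1, int DIM1) {(double* out, int n)};
+ *   void fill_ramp(double* out, int n);   /* Python: y = fill_ramp(100) */
+ */
+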
+/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1)
+ */
+%typemap(in,numinputs=1,
+ fragment="NumPy_Fragments")
+ (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1)
+ (PyObject* array = NULL)
+{
+ npy_intp dims[1];
+ if (!PyInt_Check($input))
+ {
+ const char* typestring = pytype_string($input);
+ PyErr_Format(PyExc_TypeError,
+ "Int dimension expected. '%s' given.",
+ typestring);
+ SWIG_fail;
+ }
+ $1 = (DIM_TYPE) PyInt_AsLong($input);
+ dims[0] = (npy_intp) $1;
+ array = PyArray_SimpleNew(1, dims, DATA_TYPECODE);
+ if (!array) SWIG_fail;
+ $2 = (DATA_TYPE*) array_data(array);
+}
+%typemap(argout)
+ (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1)
+{
+ $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+}
+
+/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY])
+ */
+%typemap(in,numinputs=0,
+ fragment="NumPy_Backward_Compatibility,NumPy_Macros")
+ (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY])
+ (PyObject* array = NULL)
+{
+ npy_intp dims[2] = { $1_dim0, $1_dim1 };
+ array = PyArray_SimpleNew(2, dims, DATA_TYPECODE);
+ if (!array) SWIG_fail;
+ $1 = ($1_ltype) array_data(array);
+}
+%typemap(argout)
+ (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY])
+{
+ $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+}
+
+/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY])
+ */
+%typemap(in,numinputs=0,
+ fragment="NumPy_Backward_Compatibility,NumPy_Macros")
+ (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY])
+ (PyObject* array = NULL)
+{
+ npy_intp dims[3] = { $1_dim0, $1_dim1, $1_dim2 };
+ array = PyArray_SimpleNew(3, dims, DATA_TYPECODE);
+ if (!array) SWIG_fail;
+ $1 = ($1_ltype) array_data(array);
+}
+%typemap(argout)
+ (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY])
+{
+ $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+}
+
+/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY])
+ */
+%typemap(in,numinputs=0,
+ fragment="NumPy_Backward_Compatibility,NumPy_Macros")
+ (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY])
+ (PyObject* array = NULL)
+{
+ npy_intp dims[4] = { $1_dim0, $1_dim1, $1_dim2, $1_dim3 };
+ array = PyArray_SimpleNew(4, dims, DATA_TYPECODE);
+ if (!array) SWIG_fail;
+ $1 = ($1_ltype) array_data(array);
+}
+%typemap(argout)
+ (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY])
+{
+ $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+}
+
+/*****************************/
+/* Argoutview Array Typemaps */
+/*****************************/
+
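+/* ARGOUTVIEW typemaps wrap a pointer supplied by the C library in a NumPy
+ * array without copying and without taking ownership; the library must keep
+ * the buffer alive for as long as the view is used. Sketch (hypothetical
+ * names):
+ *
+ *   %apply (double** ARGOUTVIEW_ARRAY1, int* DIM1) {(double** data, int* n)};
+ *   void get_buffer(double** data, int* n);   /* Python: view = get_buffer() */
+ */
+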
+/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1)
+ */
+%typemap(in,numinputs=0)
+ (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1 )
+ (DATA_TYPE* data_temp = NULL , DIM_TYPE dim_temp)
+{
+ $1 = &data_temp;
+ $2 = &dim_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility")
+ (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1)
+{
+ npy_intp dims[1] = { *$2 };
+ PyObject* obj = PyArray_SimpleNewFromData(1, dims, DATA_TYPECODE, (void*)(*$1));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array) SWIG_fail;
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1)
+ */
+%typemap(in,numinputs=0)
+ (DIM_TYPE* DIM1 , DATA_TYPE** ARGOUTVIEW_ARRAY1)
+ (DIM_TYPE dim_temp, DATA_TYPE* data_temp = NULL )
+{
+ $1 = &dim_temp;
+ $2 = &data_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility")
+ (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1)
+{
+ npy_intp dims[1] = { *$1 };
+ PyObject* obj = PyArray_SimpleNewFromData(1, dims, DATA_TYPECODE, (void*)(*$2));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array) SWIG_fail;
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
+ */
+%typemap(in,numinputs=0)
+ (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 )
+ (DATA_TYPE* data_temp = NULL , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp)
+{
+ $1 = &data_temp;
+ $2 = &dim1_temp;
+ $3 = &dim2_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility")
+ (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
+{
+ npy_intp dims[2] = { *$2, *$3 };
+ PyObject* obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$1));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array) SWIG_fail;
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2)
+ */
+%typemap(in,numinputs=0)
+ (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DATA_TYPE** ARGOUTVIEW_ARRAY2)
+ (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DATA_TYPE* data_temp = NULL )
+{
+ $1 = &dim1_temp;
+ $2 = &dim2_temp;
+ $3 = &data_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility")
+ (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2)
+{
+ npy_intp dims[2] = { *$1, *$2 };
+ PyObject* obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$3));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array) SWIG_fail;
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
+ */
+%typemap(in,numinputs=0)
+ (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 )
+ (DATA_TYPE* data_temp = NULL , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp)
+{
+ $1 = &data_temp;
+ $2 = &dim1_temp;
+ $3 = &dim2_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
+ (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
+{
+ npy_intp dims[2] = { *$2, *$3 };
+ PyObject* obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$1));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array || !require_fortran(array)) SWIG_fail;
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2)
+ */
+%typemap(in,numinputs=0)
+ (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DATA_TYPE** ARGOUTVIEW_FARRAY2)
+ (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DATA_TYPE* data_temp = NULL )
+{
+ $1 = &dim1_temp;
+ $2 = &dim2_temp;
+ $3 = &data_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
+ (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2)
+{
+ npy_intp dims[2] = { *$1, *$2 };
+ PyObject* obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$3));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array || !require_fortran(array)) SWIG_fail;
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
+ DIM_TYPE* DIM3)
+ */
+%typemap(in,numinputs=0)
+ (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 )
+ (DATA_TYPE* data_temp = NULL , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp)
+{
+ $1 = &data_temp;
+ $2 = &dim1_temp;
+ $3 = &dim2_temp;
+ $4 = &dim3_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility")
+ (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
+{
+ npy_intp dims[3] = { *$2, *$3, *$4 };
+ PyObject* obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$1));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array) SWIG_fail;
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3,
+ DATA_TYPE** ARGOUTVIEW_ARRAY3)
+ */
+%typemap(in,numinputs=0)
+ (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3)
+ (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DATA_TYPE* data_temp = NULL)
+{
+ $1 = &dim1_temp;
+ $2 = &dim2_temp;
+ $3 = &dim3_temp;
+ $4 = &data_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility")
+ (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3)
+{
+ npy_intp dims[3] = { *$1, *$2, *$3 };
+ PyObject* obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$4));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array) SWIG_fail;
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
+ DIM_TYPE* DIM3)
+ */
+%typemap(in,numinputs=0)
+ (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 )
+ (DATA_TYPE* data_temp = NULL , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp)
+{
+ $1 = &data_temp;
+ $2 = &dim1_temp;
+ $3 = &dim2_temp;
+ $4 = &dim3_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
+ (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
+{
+ npy_intp dims[3] = { *$2, *$3, *$4 };
+ PyObject* obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$1));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array || !require_fortran(array)) SWIG_fail;
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3,
+ DATA_TYPE** ARGOUTVIEW_FARRAY3)
+ */
+%typemap(in,numinputs=0)
+ (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 , DATA_TYPE** ARGOUTVIEW_FARRAY3)
+ (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DATA_TYPE* data_temp = NULL )
+{
+ $1 = &dim1_temp;
+ $2 = &dim2_temp;
+ $3 = &dim3_temp;
+ $4 = &data_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
+ (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3)
+{
+ npy_intp dims[3] = { *$1, *$2, *$3 };
+ PyObject* obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$4));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array || !require_fortran(array)) SWIG_fail;
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
+ DIM_TYPE* DIM3, DIM_TYPE* DIM4)
+ */
+%typemap(in,numinputs=0)
+ (DATA_TYPE** ARGOUTVIEW_ARRAY4, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 , DIM_TYPE* DIM4 )
+ (DATA_TYPE* data_temp = NULL , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp)
+{
+ $1 = &data_temp;
+ $2 = &dim1_temp;
+ $3 = &dim2_temp;
+ $4 = &dim3_temp;
+ $5 = &dim4_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility")
+ (DATA_TYPE** ARGOUTVIEW_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
+{
+ npy_intp dims[4] = { *$2, *$3, *$4 , *$5 };
+ PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$1));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array) SWIG_fail;
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
+ DATA_TYPE** ARGOUTVIEW_ARRAY4)
+ */
+%typemap(in,numinputs=0)
+ (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 , DIM_TYPE* DIM4 , DATA_TYPE** ARGOUTVIEW_ARRAY4)
+ (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp, DATA_TYPE* data_temp = NULL )
+{
+ $1 = &dim1_temp;
+ $2 = &dim2_temp;
+ $3 = &dim3_temp;
+ $4 = &dim4_temp;
+ $5 = &data_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility")
+ (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEW_ARRAY4)
+{
+ npy_intp dims[4] = { *$1, *$2, *$3 , *$4 };
+ PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$5));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array) SWIG_fail;
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
+ DIM_TYPE* DIM3, DIM_TYPE* DIM4)
+ */
+%typemap(in,numinputs=0)
+ (DATA_TYPE** ARGOUTVIEW_FARRAY4, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 , DIM_TYPE* DIM4 )
+ (DATA_TYPE* data_temp = NULL , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp)
+{
+ $1 = &data_temp;
+ $2 = &dim1_temp;
+ $3 = &dim2_temp;
+ $4 = &dim3_temp;
+ $5 = &dim4_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
+ (DATA_TYPE** ARGOUTVIEW_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
+{
+ npy_intp dims[4] = { *$2, *$3, *$4 , *$5 };
+ PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$1));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array || !require_fortran(array)) SWIG_fail;
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
+ DATA_TYPE** ARGOUTVIEW_FARRAY4)
+ */
+%typemap(in,numinputs=0)
+ (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 , DIM_TYPE* DIM4 , DATA_TYPE** ARGOUTVIEW_FARRAY4)
+ (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp, DATA_TYPE* data_temp = NULL )
+{
+ $1 = &dim1_temp;
+ $2 = &dim2_temp;
+ $3 = &dim3_temp;
+ $4 = &dim4_temp;
+ $5 = &data_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
+ (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEW_FARRAY4)
+{
+ npy_intp dims[4] = { *$1, *$2, *$3 , *$4 };
+ PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$5));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array || !require_fortran(array)) SWIG_fail;
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/*************************************/
+/* Managed Argoutview Array Typemaps */
+/*************************************/
+
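+/* The ARGOUTVIEWM ("managed") variants behave like ARGOUTVIEW but also
+ * attach a capsule whose destructor frees the data pointer, so ownership of
+ * a malloc'ed buffer transfers to the returned array. Sketch (hypothetical
+ * names):
+ *
+ *   %apply (double** ARGOUTVIEWM_ARRAY1, int* DIM1) {(double** data, int* n)};
+ *   void make_buffer(double** data, int* n);   /* *data = malloc(...) */
+ */
+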
+/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY1, DIM_TYPE* DIM1)
+ */
+%typemap(in,numinputs=0)
+ (DATA_TYPE** ARGOUTVIEWM_ARRAY1, DIM_TYPE* DIM1 )
+ (DATA_TYPE* data_temp = NULL , DIM_TYPE dim_temp)
+{
+ $1 = &data_temp;
+ $2 = &dim_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Utilities")
+ (DATA_TYPE** ARGOUTVIEWM_ARRAY1, DIM_TYPE* DIM1)
+{
+ npy_intp dims[1] = { *$2 };
+ PyObject* obj = PyArray_SimpleNewFromData(1, dims, DATA_TYPECODE, (void*)(*$1));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array) SWIG_fail;
+
+%#ifdef SWIGPY_USE_CAPSULE
+ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
+%#else
+ PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
+%#endif
+
+%#if NPY_API_VERSION < 0x00000007
+ PyArray_BASE(array) = cap;
+%#else
+ PyArray_SetBaseObject(array,cap);
+%#endif
+
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEWM_ARRAY1)
+ */
+%typemap(in,numinputs=0)
+ (DIM_TYPE* DIM1 , DATA_TYPE** ARGOUTVIEWM_ARRAY1)
+ (DIM_TYPE dim_temp, DATA_TYPE* data_temp = NULL )
+{
+ $1 = &dim_temp;
+ $2 = &data_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Utilities")
+ (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEWM_ARRAY1)
+{
+ npy_intp dims[1] = { *$1 };
+ PyObject* obj = PyArray_SimpleNewFromData(1, dims, DATA_TYPECODE, (void*)(*$2));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array) SWIG_fail;
+
+%#ifdef SWIGPY_USE_CAPSULE
+ PyObject* cap = PyCapsule_New((void*)(*$2), SWIGPY_CAPSULE_NAME, free_cap);
+%#else
+ PyObject* cap = PyCObject_FromVoidPtr((void*)(*$2), free);
+%#endif
+
+%#if NPY_API_VERSION < 0x00000007
+ PyArray_BASE(array) = cap;
+%#else
+ PyArray_SetBaseObject(array,cap);
+%#endif
+
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
+ */
+%typemap(in,numinputs=0)
+ (DATA_TYPE** ARGOUTVIEWM_ARRAY2, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 )
+ (DATA_TYPE* data_temp = NULL , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp)
+{
+ $1 = &data_temp;
+ $2 = &dim1_temp;
+ $3 = &dim2_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Utilities")
+ (DATA_TYPE** ARGOUTVIEWM_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
+{
+ npy_intp dims[2] = { *$2, *$3 };
+ PyObject* obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$1));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array) SWIG_fail;
+
+%#ifdef SWIGPY_USE_CAPSULE
+ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
+%#else
+ PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
+%#endif
+
+%#if NPY_API_VERSION < 0x00000007
+ PyArray_BASE(array) = cap;
+%#else
+ PyArray_SetBaseObject(array,cap);
+%#endif
+
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_ARRAY2)
+ */
+%typemap(in,numinputs=0)
+ (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DATA_TYPE** ARGOUTVIEWM_ARRAY2)
+ (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DATA_TYPE* data_temp = NULL )
+{
+ $1 = &dim1_temp;
+ $2 = &dim2_temp;
+ $3 = &data_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Utilities")
+ (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_ARRAY2)
+{
+ npy_intp dims[2] = { *$1, *$2 };
+ PyObject* obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$3));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array) SWIG_fail;
+
+%#ifdef SWIGPY_USE_CAPSULE
+ PyObject* cap = PyCapsule_New((void*)(*$3), SWIGPY_CAPSULE_NAME, free_cap);
+%#else
+ PyObject* cap = PyCObject_FromVoidPtr((void*)(*$3), free);
+%#endif
+
+%#if NPY_API_VERSION < 0x00000007
+ PyArray_BASE(array) = cap;
+%#else
+ PyArray_SetBaseObject(array,cap);
+%#endif
+
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
+ */
+%typemap(in,numinputs=0)
+ (DATA_TYPE** ARGOUTVIEWM_FARRAY2, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 )
+ (DATA_TYPE* data_temp = NULL , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp)
+{
+ $1 = &data_temp;
+ $2 = &dim1_temp;
+ $3 = &dim2_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements,NumPy_Utilities")
+ (DATA_TYPE** ARGOUTVIEWM_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
+{
+ npy_intp dims[2] = { *$2, *$3 };
+ PyObject* obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$1));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array || !require_fortran(array)) SWIG_fail;
+
+%#ifdef SWIGPY_USE_CAPSULE
+ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
+%#else
+ PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
+%#endif
+
+%#if NPY_API_VERSION < 0x00000007
+ PyArray_BASE(array) = cap;
+%#else
+ PyArray_SetBaseObject(array,cap);
+%#endif
+
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_FARRAY2)
+ */
+%typemap(in,numinputs=0)
+ (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DATA_TYPE** ARGOUTVIEWM_FARRAY2)
+ (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DATA_TYPE* data_temp = NULL )
+{
+ $1 = &dim1_temp;
+ $2 = &dim2_temp;
+ $3 = &data_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements,NumPy_Utilities")
+ (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_FARRAY2)
+{
+ npy_intp dims[2] = { *$1, *$2 };
+ PyObject* obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$3));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array || !require_fortran(array)) SWIG_fail;
+
+%#ifdef SWIGPY_USE_CAPSULE
+ PyObject* cap = PyCapsule_New((void*)(*$3), SWIGPY_CAPSULE_NAME, free_cap);
+%#else
+ PyObject* cap = PyCObject_FromVoidPtr((void*)(*$3), free);
+%#endif
+
+%#if NPY_API_VERSION < 0x00000007
+ PyArray_BASE(array) = cap;
+%#else
+ PyArray_SetBaseObject(array,cap);
+%#endif
+
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
+ DIM_TYPE* DIM3)
+ */
+%typemap(in,numinputs=0)
+ (DATA_TYPE** ARGOUTVIEWM_ARRAY3, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 )
+ (DATA_TYPE* data_temp = NULL , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp)
+{
+ $1 = &data_temp;
+ $2 = &dim1_temp;
+ $3 = &dim2_temp;
+ $4 = &dim3_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Utilities")
+ (DATA_TYPE** ARGOUTVIEWM_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
+{
+ npy_intp dims[3] = { *$2, *$3, *$4 };
+ PyObject* obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$1));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array) SWIG_fail;
+
+%#ifdef SWIGPY_USE_CAPSULE
+ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
+%#else
+ PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
+%#endif
+
+%#if NPY_API_VERSION < 0x00000007
+ PyArray_BASE(array) = cap;
+%#else
+ PyArray_SetBaseObject(array,cap);
+%#endif
+
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3,
+ DATA_TYPE** ARGOUTVIEWM_ARRAY3)
+ */
+%typemap(in,numinputs=0)
+ (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 , DATA_TYPE** ARGOUTVIEWM_ARRAY3)
+ (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DATA_TYPE* data_temp = NULL )
+{
+ $1 = &dim1_temp;
+ $2 = &dim2_temp;
+ $3 = &dim3_temp;
+ $4 = &data_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Utilities")
+ (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEWM_ARRAY3)
+{
+ npy_intp dims[3] = { *$1, *$2, *$3 };
+ PyObject* obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$4));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array) SWIG_fail;
+
+%#ifdef SWIGPY_USE_CAPSULE
+ PyObject* cap = PyCapsule_New((void*)(*$4), SWIGPY_CAPSULE_NAME, free_cap);
+%#else
+ PyObject* cap = PyCObject_FromVoidPtr((void*)(*$4), free);
+%#endif
+
+%#if NPY_API_VERSION < 0x00000007
+ PyArray_BASE(array) = cap;
+%#else
+ PyArray_SetBaseObject(array,cap);
+%#endif
+
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
+ DIM_TYPE* DIM3)
+ */
+%typemap(in,numinputs=0)
+ (DATA_TYPE** ARGOUTVIEWM_FARRAY3, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 )
+ (DATA_TYPE* data_temp = NULL , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp)
+{
+ $1 = &data_temp;
+ $2 = &dim1_temp;
+ $3 = &dim2_temp;
+ $4 = &dim3_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements,NumPy_Utilities")
+ (DATA_TYPE** ARGOUTVIEWM_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
+{
+ npy_intp dims[3] = { *$2, *$3, *$4 };
+ PyObject* obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$1));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array || !require_fortran(array)) SWIG_fail;
+
+%#ifdef SWIGPY_USE_CAPSULE
+ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
+%#else
+ PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
+%#endif
+
+%#if NPY_API_VERSION < 0x00000007
+ PyArray_BASE(array) = cap;
+%#else
+ PyArray_SetBaseObject(array,cap);
+%#endif
+
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3,
+ DATA_TYPE** ARGOUTVIEWM_FARRAY3)
+ */
+%typemap(in,numinputs=0)
+ (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 , DATA_TYPE** ARGOUTVIEWM_FARRAY3)
+ (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DATA_TYPE* data_temp = NULL )
+{
+ $1 = &dim1_temp;
+ $2 = &dim2_temp;
+ $3 = &dim3_temp;
+ $4 = &data_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements,NumPy_Utilities")
+ (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEWM_FARRAY3)
+{
+ npy_intp dims[3] = { *$1, *$2, *$3 };
+ PyObject* obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$4));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array || !require_fortran(array)) SWIG_fail;
+
+%#ifdef SWIGPY_USE_CAPSULE
+ PyObject* cap = PyCapsule_New((void*)(*$4), SWIGPY_CAPSULE_NAME, free_cap);
+%#else
+ PyObject* cap = PyCObject_FromVoidPtr((void*)(*$4), free);
+%#endif
+
+%#if NPY_API_VERSION < 0x00000007
+ PyArray_BASE(array) = cap;
+%#else
+ PyArray_SetBaseObject(array,cap);
+%#endif
+
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
+ DIM_TYPE* DIM3, DIM_TYPE* DIM4)
+ */
+%typemap(in,numinputs=0)
+ (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 , DIM_TYPE* DIM4 )
+ (DATA_TYPE* data_temp = NULL , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp)
+{
+ $1 = &data_temp;
+ $2 = &dim1_temp;
+ $3 = &dim2_temp;
+ $4 = &dim3_temp;
+ $5 = &dim4_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Utilities")
+ (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
+{
+ npy_intp dims[4] = { *$2, *$3, *$4 , *$5 };
+ PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$1));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array) SWIG_fail;
+
+%#ifdef SWIGPY_USE_CAPSULE
+ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
+%#else
+ PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
+%#endif
+
+%#if NPY_API_VERSION < 0x00000007
+ PyArray_BASE(array) = cap;
+%#else
+ PyArray_SetBaseObject(array,cap);
+%#endif
+
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
+ DATA_TYPE** ARGOUTVIEWM_ARRAY4)
+ */
+%typemap(in,numinputs=0)
+ (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 , DIM_TYPE* DIM4 , DATA_TYPE** ARGOUTVIEWM_ARRAY4)
+ (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp, DATA_TYPE* data_temp = NULL )
+{
+ $1 = &dim1_temp;
+ $2 = &dim2_temp;
+ $3 = &dim3_temp;
+ $4 = &dim4_temp;
+ $5 = &data_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Utilities")
+ (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEWM_ARRAY4)
+{
+ npy_intp dims[4] = { *$1, *$2, *$3 , *$4 };
+ PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$5));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array) SWIG_fail;
+
+%#ifdef SWIGPY_USE_CAPSULE
+  PyObject* cap = PyCapsule_New((void*)(*$5), SWIGPY_CAPSULE_NAME, free_cap);
+%#else
+  PyObject* cap = PyCObject_FromVoidPtr((void*)(*$5), free);
+%#endif
+
+%#if NPY_API_VERSION < 0x00000007
+ PyArray_BASE(array) = cap;
+%#else
+ PyArray_SetBaseObject(array,cap);
+%#endif
+
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
+ DIM_TYPE* DIM3, DIM_TYPE* DIM4)
+ */
+%typemap(in,numinputs=0)
+ (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 , DIM_TYPE* DIM4 )
+ (DATA_TYPE* data_temp = NULL , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp)
+{
+ $1 = &data_temp;
+ $2 = &dim1_temp;
+ $3 = &dim2_temp;
+ $4 = &dim3_temp;
+ $5 = &dim4_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements,NumPy_Utilities")
+  (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
+{
+ npy_intp dims[4] = { *$2, *$3, *$4 , *$5 };
+ PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$1));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array || !require_fortran(array)) SWIG_fail;
+
+%#ifdef SWIGPY_USE_CAPSULE
+ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
+%#else
+ PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
+%#endif
+
+%#if NPY_API_VERSION < 0x00000007
+ PyArray_BASE(array) = cap;
+%#else
+ PyArray_SetBaseObject(array,cap);
+%#endif
+
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
+ DATA_TYPE** ARGOUTVIEWM_FARRAY4)
+ */
+%typemap(in,numinputs=0)
+ (DIM_TYPE* DIM1 , DIM_TYPE* DIM2 , DIM_TYPE* DIM3 , DIM_TYPE* DIM4 , DATA_TYPE** ARGOUTVIEWM_FARRAY4)
+ (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp, DATA_TYPE* data_temp = NULL )
+{
+ $1 = &dim1_temp;
+ $2 = &dim2_temp;
+ $3 = &dim3_temp;
+ $4 = &dim4_temp;
+ $5 = &data_temp;
+}
+%typemap(argout,
+ fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements,NumPy_Utilities")
+ (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEWM_FARRAY4)
+{
+ npy_intp dims[4] = { *$1, *$2, *$3 , *$4 };
+ PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$5));
+ PyArrayObject* array = (PyArrayObject*) obj;
+
+ if (!array || !require_fortran(array)) SWIG_fail;
+
+%#ifdef SWIGPY_USE_CAPSULE
+  PyObject* cap = PyCapsule_New((void*)(*$5), SWIGPY_CAPSULE_NAME, free_cap);
+%#else
+  PyObject* cap = PyCObject_FromVoidPtr((void*)(*$5), free);
+%#endif
+
+%#if NPY_API_VERSION < 0x00000007
+ PyArray_BASE(array) = cap;
+%#else
+ PyArray_SetBaseObject(array,cap);
+%#endif
+
+ $result = SWIG_Python_AppendOutput($result,obj);
+}
+
+%enddef /* %numpy_typemaps() macro */
+/* *************************************************************** */
+
+/* Concrete instances of the %numpy_typemaps() macro: Each invocation
+ * below applies all of the typemaps above to the specified data type.
+ */
+%numpy_typemaps(signed char , NPY_BYTE , int)
+%numpy_typemaps(unsigned char , NPY_UBYTE , int)
+%numpy_typemaps(short , NPY_SHORT , int)
+%numpy_typemaps(unsigned short , NPY_USHORT , int)
+%numpy_typemaps(int , NPY_INT , int)
+%numpy_typemaps(unsigned int , NPY_UINT , int)
+%numpy_typemaps(long , NPY_LONG , int)
+%numpy_typemaps(unsigned long , NPY_ULONG , int)
+%numpy_typemaps(long long , NPY_LONGLONG , int)
+%numpy_typemaps(unsigned long long, NPY_ULONGLONG, int)
+%numpy_typemaps(float , NPY_FLOAT , int)
+%numpy_typemaps(double , NPY_DOUBLE , int)
+
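[Note: to illustrate what the memory-managed (ARGOUTVIEWM) typemaps instantiated above buy on the Python side, here is a minimal hypothetical sketch. The names mymod and eval_grid are assumptions for illustration only, not part of this patch: the C routine allocates the buffer, the capsule hands ownership to NumPy, and the memory is freed when the array is garbage-collected.

    # Hypothetical: a C function 'void eval_grid(double** out, int* n1, int* n2, int* n3)'
    # wrapped after '%apply (double** ARGOUTVIEWM_ARRAY3, int* DIM1, int* DIM2, int* DIM3)'
    import mymod
    a = mymod.eval_grid()   # takes no Python arguments; dims and data are outputs
    print a.shape           # (n1, n2, n3); the buffer is freed when 'a' is collected
]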
+/* ***************************************************************
+ * The following macro expansion does not work, because C++ bool is 4
+ * bytes and NPY_BOOL is 1 byte
+ *
+ * %numpy_typemaps(bool, NPY_BOOL, int)
+ */
+
+/* ***************************************************************
+ * On my Mac, I get the following warning for this macro expansion:
+ * 'swig/python detected a memory leak of type 'long double *', no destructor found.'
+ *
+ * %numpy_typemaps(long double, NPY_LONGDOUBLE, int)
+ */
+
+#ifdef __cplusplus
+
+%include <std_complex.i>
+
+%numpy_typemaps(std::complex<float>,  NPY_CFLOAT , int)
+%numpy_typemaps(std::complex<double>, NPY_CDOUBLE, int)
+
+#endif
+
+#endif /* SWIGPYTHON */
diff --git a/PyUQTk/pce/CMakeLists.txt b/PyUQTk/pce/CMakeLists.txt
new file mode 100644
index 00000000..3722c39c
--- /dev/null
+++ b/PyUQTk/pce/CMakeLists.txt
@@ -0,0 +1,67 @@
+FIND_PACKAGE(SWIG REQUIRED)
+INCLUDE(${SWIG_USE_FILE})
+
+FIND_PACKAGE(PythonLibs)
+INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR})
+INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_PATH})
+INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_PATH}/../../Extras/lib/python/numpy/core/include)
+
+#include source files
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/) # array classes, array input output, and array tools
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/include/) # utilities like error handlers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/) # tools like multindex, etc.
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/quad/) # quad class
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/kle/) # kle class
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/pce/) # PCSet and PCBasis classes
+
+# include dependencies
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/lapack/) # lapack library headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/blas/) # blas library headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/dsfmt/) # dsfmt
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/figtree/) # figtree
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/slatec/) # slatec headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/cvode-2.7.0/include) # cvode
+INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/dep/cvode-2.7.0/include)
+INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/dep/cvode-2.7.0/include/nvector)
+
+# INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../numpy/) # numpy headers
+
+SET(CMAKE_SWIG_FLAGS "")
+SET_SOURCE_FILES_PROPERTIES(pce.i PROPERTIES CPLUSPLUS ON)
+
+# compile swig with cpp extensions
+SWIG_ADD_MODULE(
+ pce python pce.i
+ # array tools needed to compile misc tools source files
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/arrayio.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/arraytools.cpp
+
+ # source code for quad and kle class
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/quad/quad.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/kle/kle.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/pce/PCBasis.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/pce/PCSet.cpp
+
+ # source code for tools
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/combin.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/gq.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/minmax.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/multiindex.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/pcmaps.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/probability.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/rosenblatt.cpp
+)
+
+# link python and 3rd party libraries, e.g., gfortran and blas
+if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+ # using GCC
+ SWIG_LINK_LIBRARIES(pce uqtkquad uqtkarray uqtktools depnvec depslatec deplapack depblas depdsfmt depfigtree depann depcvode gfortran ${PYTHON_LIBRARIES})
+ #SWIG_LINK_LIBRARIES(pce uqtkquad uqtkarray depnvec depslatec uqtktools deplapack depblas depdsfmt depann depfigtree depcvode gfortran ${PYTHON_LIBRARIES})
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
+ # using Intel
+ SWIG_LINK_LIBRARIES(pce uqtktools uqtkquad uqtkarray depnvec deplapack depblas depslatec depdsfmt depann depfigtree depcvode ifcore ${PYTHON_LIBRARIES})
+endif()
+
+INSTALL(TARGETS _pce DESTINATION PyUQTk/)
+INSTALL(FILES ${CMAKE_BINARY_DIR}/${outdir}PyUQTk/pce/pce.py DESTINATION PyUQTk)
diff --git a/PyUQTk/pce/pce.i b/PyUQTk/pce/pce.i
new file mode 100644
index 00000000..b576de84
--- /dev/null
+++ b/PyUQTk/pce/pce.i
@@ -0,0 +1,136 @@
+%module(directors="1") pce
+//=====================================================================================
+// The UQ Toolkit (UQTk) version 3.0.4
+// Copyright (2017) Sandia Corporation
+// http://www.sandia.gov/UQToolkit/
+//
+// Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+// with Sandia Corporation, the U.S. Government retains certain rights in this software.
+//
+// This file is part of The UQ Toolkit (UQTk)
+//
+// UQTk is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// UQTk is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+//
+// Questions? Contact Bert Debusschere
+// Sandia National Laboratories, Livermore, CA, USA
+//=====================================================================================
+
+%{
+#define SWIG_FILE_WITH_INIT
+#include
+#include
+#include
+#include
+#include
+// #include "../../cpp/lib/array/Array1D.h"
+// #include "../../cpp/lib/array/Array2D.h"
+// #include "../../cpp/lib/array/arrayio.h"
+// #include "../../cpp/lib/array/arraytools.h"
+// #include "../../cpp/lib/tools/combin.h"
+// #include "../../cpp/lib/tools/gq.h"
+// #include "../../cpp/lib/tools/minmax.h"
+// #include "../../cpp/lib/tools/multiindex.h"
+// #include "../../cpp/lib/tools/pcmaps.h"
+// #include "../../cpp/lib/tools/probability.h"
+// #include "../../cpp/lib/tools/rosenblatt.h"
+
+// #include "../../cpp/lib/quad/quad.h"
+// #include "../../cpp/lib/kle/kle.h"
+#include "../../cpp/lib/pce/PCBasis.h"
+#include "../../cpp/lib/pce/PCSet.h"
+
+%}
+
+/*************************************************************
+// Standard SWIG Templates
+*************************************************************/
+
+// Include standard SWIG templates
+// Numpy array templates and wrapping
+%include "pyabc.i"
+%include "../numpy/numpy.i"
+%include "std_vector.i"
+%include "std_string.i"
+%include "cpointer.i"
+
+%init %{
+ import_array();
+%}
+
+%pointer_functions(double, doublep);
+
+/*************************************************************
+// Numpy SWIG Interface files
+*************************************************************/
+
+// // Basic typemap for an Arrays and its length.
+// // Must come before %include statement below
+
+// // For Array1D setnumpyarray4py function
+// %apply (long* IN_ARRAY1, int DIM1) {(long* inarray, int n)}
+// %apply (double* IN_ARRAY1, int DIM1) {(double* inarray, int n)}
+// // get numpy int and double array
+// %apply (long* INPLACE_ARRAY1, int DIM1) {(long* outarray, int n)}
+// %apply (double* INPLACE_ARRAY1, int DIM1) {(double* outarray, int n)}
+
+// // For Array2D numpysetarray4py function
+// %apply (double* IN_FARRAY2, int DIM1, int DIM2) {(double* inarray, int n1, int n2)}
+// // get numpy array (must be FARRAY)
+// %apply (double* INPLACE_FARRAY2, int DIM1, int DIM2) {(double* outarray, int n1, int n2)}
+// // For Array2D numpysetarray4py function
+// %apply (long* IN_FARRAY2, int DIM1, int DIM2) {(long* inarray, int n1, int n2)}
+// // get numpy array (must be FARRAY)
+// %apply (long* INPLACE_FARRAY2, int DIM1, int DIM2) {(long* outarray, int n1, int n2)}
+
+
+// // For mcmc test to get log probabilities
+// %apply (double* INPLACE_ARRAY1, int DIM1) {(double* l, int n)}
+
+/*************************************************************
+// Include header files
+*************************************************************/
+
+// // The above typemap is applied to header files below
+// %include "../../cpp/lib/array/Array1D.h"
+// %include "../../cpp/lib/array/Array2D.h"
+// %include "../../cpp/lib/array/arrayio.h"
+// %include "../../cpp/lib/array/arraytools.h"
+// %include "../../cpp/lib/tools/combin.h"
+// %include "../../cpp/lib/tools/gq.h"
+// %include "../../cpp/lib/tools/minmax.h"
+// %include "../../cpp/lib/tools/multiindex.h"
+// %include "../../cpp/lib/tools/pcmaps.h"
+// %include "../../cpp/lib/tools/probability.h"
+// %include "../../cpp/lib/tools/rosenblatt.h"
+
+// %include "../../cpp/lib/quad/quad.h"
+// %include "../../cpp/lib/kle/kle.h"
+%include "../../cpp/lib/pce/PCBasis.h"
+%include "../../cpp/lib/pce/PCSet.h"
+
+// Typemaps for standard vector
+// Needed to prevent a memory leak due to lack of destructor
+// must use namespace std
+// namespace std{
+// 	%template(dblVector) vector<double>;
+// 	%template(intVector) vector<int>;
+// 	%template(strVector) vector<string>;
+
+// }
+
+
+// %include "swigi/arrayext.i"
+
+
+
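[Note: a minimal usage sketch of the module this interface builds, assuming the PCSet constructor takes (method, order, dimension, PC type) as declared in the wrapped header; the argument values are illustrative only.

    import pce
    pc = pce.PCSet("NISP", 2, 3, "LU")   # 2nd-order, 3-dim Legendre-Uniform expansion
    print pc.GetNumberPCTerms()          # number of basis terms: (2+3)!/(2!3!) = 10
]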
diff --git a/PyUQTk/plotting/CMakeLists.txt b/PyUQTk/plotting/CMakeLists.txt
new file mode 100644
index 00000000..6c97882a
--- /dev/null
+++ b/PyUQTk/plotting/CMakeLists.txt
@@ -0,0 +1,11 @@
+project (UQTk)
+
+SET(copy_FILES
+ __init__.py
+ surrogate.py
+ inout.py
+ )
+
+INSTALL(FILES ${copy_FILES}
+ PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
+ DESTINATION PyUQTk/plotting)
diff --git a/PyUQTk/plotting/__init__.py b/PyUQTk/plotting/__init__.py
new file mode 100755
index 00000000..ed35c1cd
--- /dev/null
+++ b/PyUQTk/plotting/__init__.py
@@ -0,0 +1,28 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+import surrogate
+import inout
diff --git a/PyUQTk/plotting/inout.py b/PyUQTk/plotting/inout.py
new file mode 100644
index 00000000..0b7823c5
--- /dev/null
+++ b/PyUQTk/plotting/inout.py
@@ -0,0 +1,239 @@
+#!/usr/bin/env python
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+import os
+import shutil
+import sys
+
+try:
+ import numpy as np
+except ImportError:
+ print "Numpy was not found. "
+
+try:
+ import matplotlib
+except ImportError:
+ print "Matplotlib was not found. "
+
+try:
+ from scipy import stats, mgrid, reshape, random
+except ImportError:
+ print "Scipy was not found. "
+
+import math
+import matplotlib.pyplot as plt
+from mpl_toolkits.mplot3d import Axes3D
+from pylab import *
+
+sys.path.append(os.environ['UQTK_INS'])
+import PyUQTk.utils.colors as ut
+
+rc('legend',loc='upper left', fontsize=12)
+rc('lines', linewidth=1, color='r')
+rc('axes',linewidth=3,grid=True,labelsize=22)
+rc('xtick',labelsize=20)
+rc('ytick',labelsize=20)
+
+#############################################################
+def parallel_coordinates(parnames, values, labels, savefig=[]):
+ """
+ Plots parallel coordinates.
+ Arguments:
+ * parnames : list of d parameter names
+ * values : (d,N) array of N data points with d parameters
+ * labels : list of N labels/categories, one per point
+ * savefig : figure name to save. If [], then show the plot
+ """
+
+ # Start the figure
+ fig=figure(figsize=(14,7))
+ fig.add_axes([0.1,0.15,0.8,0.8])
+ ax = gca()
+
+ # Categorize
+ ulabels = np.unique(labels)
+ n_labels = len(ulabels)
+
+ # Set colors
+ cmap = plt.get_cmap('prism')
+ colors = cmap(np.arange(n_labels)*cmap.N/(n_labels+1))
+
+ # Plot
+ class_id = np.searchsorted(ulabels, labels)
+ lines = plt.plot(values[:,:], 'ko-',ms=6,linewidth=0.7)
+ [ l.set_color(colors[c]) for c,l in zip(class_id, lines) ]
+
+ # Gridification
+ ax.spines['top'].set_visible(False)
+ ax.spines['bottom'].set_position(('outward', 5))
+ ax.spines['bottom'].set_visible(False)
+ ax.spines['right'].set_visible(False)
+ ax.spines['left'].set_visible(False)
+ ax.yaxis.set_ticks_position('both')
+ ax.xaxis.set_ticks_position('none')
+
+ plt.xticks(np.arange(len(parnames)), parnames)
+ plt.grid(axis='x', ls='-')
+
+ leg_handlers = [ lines[np.where(class_id==id)[0][0]]
+ for id in range(n_labels)]
+ ax.legend(leg_handlers, ulabels, frameon=False, loc='upper left',
+ ncol=len(labels),
+ bbox_to_anchor=(0, -0.03, 1, 0))
+
+ # Show or save
+ if (savefig==[]):
+ plt.show()
+ else:
+ plt.savefig(savefig)
+ plt.clf()
+
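[Note: a small illustrative call for parallel_coordinates() with synthetic data; note that values is shaped (d,N), one row per parameter and one column per data point.

    import numpy as np
    d, N = 3, 20
    values = np.random.rand(d, N)              # d parameters, N points
    labels = ['good']*10 + ['bad']*10          # one category per point
    parallel_coordinates(['p1','p2','p3'], values, labels, savefig='pc.eps')
]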
+
+#############################################################
+
+def plot_xx(d1,d2,parnames, values, labels, savefig=[]): #(x1,x2,inputs,labels,pnames,outfigdir='.'):
+ """
+ Plots one-dimension versus another with various labels.
+ Arguments:
+ * d1 : first dimension to plot
+ * d2 : second dimension to plot
+ * parnames : list of d parameter names
+	* values   : (N,d) array of N data points with d parameters (one row per point)
+ * labels : list of N labels/categories, one per point
+ * savefig : figure name to save. If [], then show the plot
+ """
+
+ # Start the figure
+ fig=figure(figsize=(12,12))
+ fig.add_axes([0.1,0.15,0.8,0.8])
+ ax = gca()
+
+ # Categorize
+ ulabels = np.unique(labels)
+ n_labels = len(ulabels)
+
+ # Set colors
+ cmap = plt.get_cmap('prism')
+ colors = cmap(np.arange(n_labels)*cmap.N/(n_labels+1))
+
+ # Plot
+ class_id = np.searchsorted(ulabels, labels)
+ for id in range(n_labels):
+ plt.plot(values[class_id==id,d1],values[class_id==id,d2], 'o',color=colors[id],ms=7,label=ulabels[id])
+
+
+
+ ax.legend(frameon=False, loc='upper left',
+ ncol=len(labels),
+ bbox_to_anchor=(0, -0.06, 1, 0))
+
+ ax.set_xlabel(parnames[d1])
+ ax.set_ylabel(parnames[d2])
+
+ # Show or save
+ if (savefig==[]):
+ plt.show()
+ else:
+ plt.savefig(savefig)
+
+ return fig
+
+#############################################################
+
+def plot_xy(x,y,pname, outname, label='', savefig=[]):
+ """
+ Plots one array versus another.
+ Arguments:
+ * x : array for x-axis
+ * y : array for y-axis
+ * pname : xlabel
+ * outname : ylabel
+ * label : legend
+ * savefig : figure name to save. If [], then show the plot
+ """
+
+ # Start the figure
+ fig=figure(figsize=(12,8))
+ ax = gca()
+
+ # Plot
+ plt.plot(x,y,'o',label=label)
+
+ # Set labels
+ ax.set_xlabel(pname)
+ ax.set_ylabel(outname)
+
+ # Show or save
+ if (savefig==[]):
+ plt.show()
+ else:
+ plt.savefig(savefig)
+ #plt.clf()
+
+
+ return fig
+
+#############################################################
+
+def plot_xxy(x1,x2,y,pnames, outname, label='', savefig=[]):
+ """
+	Plots one output array against two input arrays as a 3d scatter.
+ Arguments:
+ * x1 : array for x1-axis
+ * x2 : array for x2-axis
+ * y : array for y-axis
+ * pnames : list of xlabels
+ * outname : ylabel (vertical axis)
+ * label : legend
+ * savefig : figure name to save. If [], then show the plot
+ """
+
+ # Start the figure
+ fig=figure(figsize=(12,8))
+ ax = fig.add_subplot(111, projection='3d')
+ ax.scatter(x1,x2,y,c='k',label=label)
+
+ # Set labels
+ ax.set_xlabel(pnames[0])
+ ax.set_ylabel(pnames[1])
+ ax.set_zlabel(outname)
+
+ # Show or save
+ if (savefig==[]):
+ plt.show()
+ else:
+ plt.savefig(savefig)
+ #plt.clf()
+
+
+ return fig
+
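[Note: illustrative calls for the two helpers above, with synthetic data:

    import numpy as np
    x = np.linspace(0., 1., 50)
    plot_xy(x, x**2, 'x', 'f(x)', label='quadratic', savefig='xy.eps')
    x2 = np.random.rand(50)
    plot_xxy(x, x2, x + x2, ['x1','x2'], 'f(x1,x2)', savefig='xxy.eps')
]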
+
+
+
+
+
diff --git a/PyUQTk/plotting/surrogate.py b/PyUQTk/plotting/surrogate.py
new file mode 100644
index 00000000..694a9969
--- /dev/null
+++ b/PyUQTk/plotting/surrogate.py
@@ -0,0 +1,673 @@
+#!/usr/bin/env python
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+import os
+import shutil
+import sys
+
+try:
+ import numpy as np
+except ImportError:
+ print "Numpy was not found. "
+
+try:
+ import matplotlib
+except ImportError:
+ print "Matplotlib was not found. "
+
+try:
+ from scipy import stats, mgrid, reshape, random
+except ImportError:
+ print "Scipy was not found. "
+import math
+import matplotlib.pyplot as plt
+from mpl_toolkits.mplot3d import Axes3D
+from matplotlib.patches import Circle, Wedge, Polygon
+from itertools import combinations
+
+from pylab import *
+
+sys.path.append(os.environ['UQTK_SRC'])
+import PyUQTk.utils.colors as ut
+
+uqtkbin=os.environ['UQTK_INS']+"/bin/"
+
+
+rc('legend',loc='upper left', fontsize=12)
+rc('lines', linewidth=1, color='r')
+rc('axes',linewidth=3,grid=True,labelsize=22)
+rc('xtick',labelsize=20)
+rc('ytick',labelsize=20)
+
+#############################################################
+def saveplot(figname):
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ savefig(figname)
+ gcf().clf()
+
+#############################################################
+def plot_pcpdf(pctype,mindex,cfs,nsam,custom_xlabel,figname='pcdens.eps',showplot=False):
+
+ np.savetxt('mi',mindex,fmt='%d')
+ np.savetxt('cfs',cfs)
+ dim=mindex.shape[1]
+ cmd=uqtkbin+"pce_rv -w'PCmi' -n"+str(nsam)+" -p"+str(dim)+" -f'cfs' -m'mi' -x"+pctype+" > pcrv.log"
+ os.system(cmd)
+
+ cmd=uqtkbin+"pdf_cl -i rvar.dat -g 1000 > pdfcl.log"
+ os.system(cmd)
+ xtarget=np.loadtxt('dens.dat')[:,:-1]
+ dens=np.loadtxt('dens.dat')[:,-1:]
+
+
+ #rv=np.loadtxt('rvar.dat')
+ #xtarget=np.linspace(rv.min(),rv.max(),100)
+ #kernlin=stats.kde.gaussian_kde(rv)
+ #dens=kernlin.evaluate(xtarget)
+
+ np.savetxt('pcdens.dat',np.vstack((xtarget,dens)).T)
+
+ figure(figsize=(12,8))
+ plot(xtarget,dens)
+ xlabel(custom_xlabel)
+ ylabel('PDF')
+
+ saveplot(figname)
+ if showplot:
+ show()
+
+ #cleanup
+ os.system('rm mi cfs rvar.dat')
+
+#############################################################
+
+def plot_mindex(mindex,varfrac,varname,figname='mindex.eps',showplot=False):
+
+ custom_xlabel='Param i'
+ custom_ylabel='Param j'
+
+ npc=mindex.shape[0]
+ ndim=mindex.shape[1]
+ plt.figure(figsize=(14,10))
+ ij=[]
+ acfs=[]
+ k=0
+ for basis in mindex:
+ nzeros=np.nonzero(basis)[0]
+ if nzeros.shape[0]==0:
+ ij.append((0,0))
+ acfs.append(varfrac[k])
+ elif nzeros.shape[0]==1:
+ ijcur=(0,nzeros[0]+1)
+ if ijcur in ij:
+ acfs[ij.index(ijcur)]+=varfrac[k]
+ else:
+ ij.append(ijcur)
+ acfs.append(varfrac[k])
+ elif nzeros.shape[0]==2:
+ ijcur=(nzeros[0]+1,nzeros[1]+1)
+ if ijcur in ij:
+ acfs[ij.index(ijcur)]+=varfrac[k]
+ else:
+ ij.append(ijcur)
+ acfs.append(varfrac[k])
+ else:
+ #print "More than a couple detected!", nzeros+1
+ for cc in combinations(nzeros, 2):
+ ijcur=(cc[0]+1,cc[1]+1) #[ic+1 for ic in cc]
+ #print "adding to ",ijcur
+ if ijcur in ij:
+ acfs[ij.index(ijcur)]+=varfrac[k]
+ else:
+ ij.append(ijcur)
+ acfs.append(varfrac[k])
+ k+=1
+ ija=np.array(ij)
+ #print "ija",ija,acfs
+
+ pad=0.1
+ plt.fill_between(range(ndim+2),-pad,range(ndim+2),color='lightgrey')
+ plt.plot([-pad,ndim+pad],[-pad,ndim+pad],'k-',lw=1)
+ if varfrac==[]:
+ #plot(mindex[:,0],mindex[:,1],'bo',markersize=13)
+ plt.scatter(ija[:,0],ija[:,1],s=320, marker = 'o',cmap = cm.jet )
+ else:
+ #print cfs
+ plt.scatter(ija[:,0],ija[:,1],s=320, c=acfs, marker = 'o',vmin=0.0,vmax=max(acfs))
+
+ #cp=get_cp()
+ #cs=plt.gva().pcolor(abs(cfs),cmap=cp);
+ plt.colorbar(pad=0.05,shrink=0.9)
+ plt.gca().set_aspect('equal')
+ #cbar=plt.colorbar(ticks=[0.,0.6,1.0])
+ #cbar.ax.set_ylim([0.0,1.0])
+ #cbar.set_ticklabels(['0.0', '0.6','1.0'])
+ #plt.tight_layout()
+
+
+ #plt.gca().xaxis.set_label_position('top')
+ #plt.gca().xaxis.tick_top()
+ plt.gca().set_xticks(range(ndim+1), minor=False)
+ plt.gca().set_yticks(range(ndim+1), minor=False)
+ plt.gca().xaxis.grid(True, which='major')
+ plt.gca().yaxis.grid(True, which='major')
+
+ plt.xlabel(custom_xlabel)
+ plt.ylabel(custom_ylabel)
+ plt.xlim([-pad, ndim+pad])
+ plt.ylim([ndim+pad,-pad])
+
+ plt.gca().spines["top"].set_visible(False)
+ plt.gca().spines["right"].set_visible(False)
+
+ plt.text(int(ndim/2),int(ndim/3),varname,fontsize=37)
+
+
+ saveplot(figname)
+ if showplot:
+ show()
+
+
+def plot_micf(mindex,cfs=[],figname='micf.eps',showplot=False):
+ """Plots 2d or 3d multiindices"""
+
+ custom_xlabel='Dim 1'
+ custom_ylabel='Dim 2'
+ custom_zlabel='Dim 3'
+
+ npc=mindex.shape[0]
+ ndim=mindex.shape[1]
+
+ if (ndim==2):
+ if cfs==[]:
+ #plot(mindex[:,0],mindex[:,1],'bo',markersize=13)
+ scatter(mindex[:,0],mindex[:,1],s=150, marker = 'o',cmap = cm.jet )
+ else:
+ scatter(mindex[:,0],mindex[:,1],s=150, c=cfs, marker = 'o',cmap = cm.jet )
+ elif (ndim==3):
+ ax = figure().add_subplot(111, projection='3d')
+ if cfs==[]:
+ ax.scatter(mindex[:,0],mindex[:,1],mindex[:,2],s=50)
+ else:
+ ax.scatter(mindex[:,0],mindex[:,1],mindex[:,2],c=cfs,s=50)
+
+ ax.set_zlabel(custom_zlabel)
+ ax.set_zlim([-.5, max(mindex[:,2])+.5])
+
+ else:
+ raise NameError("Multi-index should be 2d or 3d")
+
+ xlabel(custom_xlabel)
+ ylabel(custom_ylabel)
+ xlim([-.5, max(mindex[:,0])+.5])
+ ylim([-.5, max(mindex[:,1])+.5])
+
+ saveplot(figname)
+ if showplot:
+ show()
+
+
+
+#############################################################
+def plot_idm(data,model,errbar,sort='none',figname='idm.eps',showplot=False):
+ """Plots data and model on the same axis"""
+ erb=True
+
+ axes_labels=['Run Id','Model / Surrogate']
+
+ custom_xlabel=axes_labels[0]
+ custom_ylabel=axes_labels[1]
+
+ figure(figsize=(12,8))
+
+ npts=data.shape[0]
+ neach=1
+ if (data.ndim>1):
+ neach=data.shape[1]
+
+ erbl,erbh=errbar
+
+ if (sort=='model'):
+ ind=argsort(model)
+ elif (sort=='data'):
+ ind=argsort(data)
+ elif (sort=='none'):
+ ind=range(npts)
+
+
+ ddata=data.reshape(npts,neach)
+
+ if (erb):
+ errorbar(range(1,npts+1),model[ind],yerr=[erbl,erbh],fmt='o', markersize=2,ecolor='grey')
+
+ if (sort!='model'):
+ plot(range(1,npts+1),model[ind], 'bo', label='Surrogate')
+ for j in range(neach):
+ plot(range(1,npts+1),ddata[ind,j], 'ro',label='Model')
+ if (sort=='model'):
+ plot(range(1,npts+1),model[ind], 'bo', label='Surrogate')
+
+ xlabel(custom_xlabel)
+ ylabel(custom_ylabel)
+ #title('Data vs Model')
+ legend()
+
+
+ saveplot(figname)
+ if showplot:
+ show()
+
+#############################################################
+def plot_dm(datas,models,errorbars=[],labels=[],axes_labels=['Model','Apprx'],figname='dm.eps',showplot=False):
+ """Plots data-vs-model and overlays y=x"""
+ if errorbars==[]:
+ erb=False
+ else:
+ erb=True
+
+
+ custom_xlabel=axes_labels[0]
+ custom_ylabel=axes_labels[1]
+
+ figure(figsize=(10,10))
+
+ ncase=len(datas)
+ if labels==[]:
+ labels=['']*ncase
+
+
+ # Create colors list
+ colors=ut.set_colors(ncase)
+ yy=np.empty((0,1))
+ for i in range(ncase):
+ data=datas[i]
+ model=models[i]
+ if erb:
+ erbl,erbh=errorbars[i]
+ npts=data.shape[0]
+ neach=1
+ if (data.ndim>1):
+ neach=data.shape[1]
+
+ #neb=model.shape[1]-1# errbars not implemented yet
+
+
+
+ ddata=data.reshape(npts,neach)
+
+
+ for j in range(neach):
+ yy=np.append(yy,ddata[:,j])
+ if (erb):
+ errorbar(ddata[:,j],model,yerr=[erbl,erbh],fmt='o', markersize=2,ecolor='grey')
+ plot(ddata[:,j],model, 'o',color=colors[i],label=labels[i])
+
+ delt=0.1*(yy.max()-yy.min())
+ minmax=[yy.min()-delt, yy.max()+delt]
+ plot(minmax,minmax,'k--',linewidth=1.5,label='y=x')
+
+ xlabel(custom_xlabel)
+ ylabel(custom_ylabel)
+ #title('Data vs Model')
+ legend()
+
+ #xscale('log')
+ #yscale('log')
+
+ #gca().set_aspect('equal', adjustable='box')
+ plt.axis('scaled')
+ # Trying to make sure both axis have the same number of ticks
+ gca().xaxis.set_major_locator(MaxNLocator(7))
+ gca().yaxis.set_major_locator(MaxNLocator(7))
+ if showplot:
+ show()
+
+ saveplot(figname)
+
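[Note: an illustrative call for plot_dm(): datas and models are parallel lists of 1d arrays, one entry per case, and the dashed y=x line is the parity reference.

    import numpy as np
    truth = np.random.rand(40)
    approx = truth + 0.05*np.random.randn(40)   # surrogate with small error
    plot_dm([truth], [approx], labels=['case 1'],
            axes_labels=['Model', 'Surrogate'], figname='dm.eps')
]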
+
+#############################################################
+
+def plot_sens(sensdata,pars,cases,vis="bar",reverse=False,par_labels=[],case_labels=[],colors=[],ncol=4,grid_show=True,xlbl='',legend_show=2,xdatatick=None,figname='sens.eps',showplot=False):
+ """Plots sensitivity for multiple observables"""
+
+ ncases=sensdata.shape[0]
+ npar=sensdata.shape[1]
+
+ wd=0.6
+ ylbl='Sensitivity'
+
+
+ assert set(pars) <= set(range(npar))
+ assert set(cases) <= set(range(ncases))
+
+ # Set up the figure
+ # TODO need to scale figure size according to the expected amount of legends
+ xticklabel_size=25
+ if ncases>20:
+ xticklabel_size=1000/ncases
+ fig = plt.figure(figsize=(20,12))
+ #fig = plt.figure(figsize=(18,12))
+ fig.add_axes([0.1,0.3,0.8,0.65])
+ #########
+
+ # Default parameter names
+ if (par_labels==[]):
+ for i in range(npar):
+ par_labels.append(('par_'+str(i+1)))
+ # Default case names
+ if (case_labels==[]):
+ for i in range(ncases):
+ case_labels.append(('case_'+str(i+1)))
+
+
+ if(reverse):
+ tmp=par_labels
+ par_labels=case_labels
+ case_labels=tmp
+ tmp=pars
+ pars=cases
+ cases=tmp
+ sensdata=sensdata.transpose()
+ ##############################################################################
+
+ npar_=len(pars)
+ ncases_=len(cases)
+
+ # Create colors list
+ if colors==[]:
+ colors=ut.set_colors(npar_)
+
+
+ case_labels_=[]
+ for i in range(ncases_):
+ case_labels_.append(case_labels[cases[i]])
+
+	if xdatatick is None:
+ xflag=False
+ xdatatick=np.array(range(1,ncases_+1))
+ sc=1.
+ else:
+ xflag=True
+ sc=(xdatatick[-1]-xdatatick[0])/ncases_
+
+ if (vis=="graph"):
+ for i in range(npar_):
+            plot(xdatatick,sensdata[cases,i], '-o',color=colors[pars[i]], label=par_labels[pars[i]])
+ elif (vis=="bar"):
+ curr=np.zeros((ncases_))
+ #print pars,colors
+ for i in range(npar_):
+ bar(xdatatick,sensdata[cases,i], width=wd*sc,color=colors[pars[i]], bottom=curr, label=par_labels[pars[i]])
+ curr=sensdata[cases,i]+curr
+ if not xflag:
+ if ncases>20:
+ xticks(np.array(range(1,ncases_+1))+wd/2.,case_labels_,rotation='vertical')
+ else:
+ xticks(np.array(range(1,ncases_+1))+wd/2.,case_labels_)
+ xlim(xdatatick[0]-wd*sc/2.,xdatatick[-1]+wd*sc/2.)
+
+ #else:
+ # xticks(xdatatick)
+
+ ylabel(ylbl)
+ xlabel(xlbl)
+
+
+
+ maxsens=max(max(curr),1.0)
+ ylim([0,maxsens])
+ if legend_show==1:
+ legend()
+ elif (legend_show==2):
+ legend(bbox_to_anchor=(1.0, -0.05),fancybox=True, shadow=True,ncol=ncol,labelspacing=-0.1)
+ #legend(bbox_to_anchor=(0.0, -0.05),fancybox=True, shadow=True,ncol=5,labelspacing=-0.1)
+
+ if not xflag:
+ zed = [tick.label.set_fontsize(xticklabel_size) for tick in gca().xaxis.get_major_ticks()]
+
+ grid(grid_show)
+
+ saveplot(figname)
+ if showplot:
+ show()
+
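[Note: a minimal illustrative call for plot_sens(): rows of sensdata are cases (observables), columns are parameters, and pars/cases select which column and row indices to display.

    import numpy as np
    sensdata = np.random.rand(4, 3)
    sensdata /= sensdata.sum(axis=1)[:, np.newaxis]   # each row sums to one
    plot_sens(sensdata, range(3), range(4),
              par_labels=['a','b','c'], figname='sens.eps')
]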
+##################################################################################################
+
+
+def plot_senscirc(varname, msens,jsens,inpar_names,figname='senscirc.eps',showplot=False):
+
+ Nmain=min(len(np.nonzero(msens)[0]),6)
+ Nsec=Nmain-1
+ lwMax=10
+ lwCut=0.2
+ radMain=50
+ radOut=15
+ lext=0.4
+ verbose=1
+
+ nx,ny=jsens.shape
+
+
+ #jsens=np.log10(jsens);
+ #print msens
+ ind=msens.argsort()[::-1];
+ msensShort=msens[ind[0:Nmain]]
+ if verbose > 0:
+ for i in range(Nmain):
+ print "Variable ",ind[i],", main sensitivity ",msens[ind[i]]
+ fig = plt.figure(figsize=(10,8))
+ ax=fig.add_axes([0.05, 0.05, 0.9, 0.9],aspect='equal')
+ #circ=pylab.Circle((0,0),radius=0.5,color='r')
+ circ=Wedge((0.0,0.0),1.01, 0, 360, width=0.02,color='r')
+ ax.add_patch(circ)
+ maxJfr=-1.e10;
+ for i in range(Nmain):
+ jfr_i=np.array(np.zeros(nx))
+ iord=ind[i]
+ for j in range(iord):
+ jfr_i[j]=jsens[j,iord]
+ for j in range(iord+1,nx):
+ jfr_i[j]=jsens[iord,j]
+ ind_j=jfr_i.argsort()[::-1];
+ if jfr_i[ind_j[0]] > maxJfr: maxJfr = jfr_i[ind_j[0]];
+ if verbose > 1:
+ for j in range(Nsec):
+ print iord," ",ind_j[j],jfr_i[ind_j[j]]
+ if verbose > 1:
+ print "Maximum joint sensitivity :",maxJfr
+ gopar=[]
+ for i in range(Nmain):
+ jfr_i=np.array(np.zeros(nx))
+ iord=ind[i]
+ for j in range(iord):
+ jfr_i[j]=jsens[j,iord]
+ for j in range(iord+1,nx):
+ jfr_i[j]=jsens[iord,j]
+ ind_j=jfr_i.argsort()[::-1];
+ elst=[]
+ for j in range(Nsec):
+ if maxJfr>1.e-16 and jfr_i[ind_j[j]]/maxJfr >= lwCut:
+ posj=[k for k,x in enumerate(ind[:Nmain]) if x == ind_j[j]]
+ if verbose > 2:
+ print j," ",posj
+ if len(posj) > 0 :
+ x1=np.cos(0.5*np.pi+(2.0*np.pi*posj[0])/Nmain)
+ x2=np.cos(0.5*np.pi+(2.0*np.pi*i )/Nmain)
+ y1=np.sin(0.5*np.pi+(2.0*np.pi*posj[0])/Nmain)
+ y2=np.sin(0.5*np.pi+(2.0*np.pi*i )/Nmain)
+ lw=lwMax*jfr_i[ind_j[j]]/maxJfr
+ plt.plot([x1,x2],[y1,y2],'g-',linewidth=lw)
+ if ( verbose > 2 ):
+ print iord," ",ind[posj[0]]
+ else:
+ elst.append(j)
+ if len(elst) > 0:
+ asft=[0,-1,1]
+ for k in range(min(len(elst),3)):
+ ang=0.5*np.pi+(2.0*np.pi*i)/Nmain+2*np.pi/12*asft[k]
+ x2=np.cos(0.5*np.pi+(2.0*np.pi*i)/Nmain)
+ y2=np.sin(0.5*np.pi+(2.0*np.pi*i)/Nmain)
+ x1=x2+lext*np.cos(ang)
+ y1=y2+lext*np.sin(ang)
+ lw=lwMax*jfr_i[ind_j[elst[k]]]/maxJfr
+ plt.plot([x1,x2],[y1,y2],'g-',linewidth=lw)
+ plt.plot([x1],[y1],"wo",markersize=radOut,markeredgecolor='k',
+ markeredgewidth=2)
+ if ( ind_j[elst[k]] > 32 ):
+ ltext=str(ind_j[elst[k]]+3)
+ elif ( ind_j[elst[k]] > 30 ):
+ ltext=str(ind_j[elst[k]]+2)
+ else:
+ ltext=str(ind_j[elst[k]]+1)
+ plt.text(x1+(0.15)*np.cos(ang),y1+(0.15)*np.sin(ang),ltext,
+ ha='center',va='center',fontsize=16)
+ posj=[k1 for k1,x in enumerate(gopar) if x == ind_j[elst[k]]]
+ if len(posj)==0:
+ gopar.append(ind_j[elst[k]])
+ if ( verbose > 2 ):
+ print "------------------------"
+ for i in range(Nmain):
+ angl=0.5*np.pi+(2.0*np.pi*i)/Nmain
+ xc=np.cos(angl);
+ yc=np.sin(angl);
+ msize=radMain*msens[ind[i]]/msens[ind[0]]
+ plt.plot([xc],[yc],"bo",markersize=msize,markeredgecolor='k',markeredgewidth=2)
+ da=1.0
+ lab=0.2
+ llab=lab*msens[ind[i]]/msens[ind[0]]
+
+ ltext=str(ind[i]+1)
+ lleg=ltext+" - "+inpar_names[ind[i]]
+ plt.text(xc+(0.08+llab)*np.cos(angl+da),yc+(0.08+llab)*np.sin(angl+da),ltext,
+ ha='center',va='center',fontsize=16)
+ plt.text(1.6,1.2-0.15*i,lleg,fontsize=16)
+ for k in range(len(gopar)):
+ lleg=str(gopar[k]+1)+" - "+inpar_names[gopar[k]]
+ plt.text(1.6,1.2-0.15*Nmain-0.15*k,lleg,fontsize=16)
+
+ plt.text(0.9,-1.2,varname,fontsize=27)
+
+ ax.set_xlim([-1-1.6*lext,1.8+1.6*lext])
+ ax.set_ylim([-1-1.6*lext,1+1.6*lext])
+ ax.set_xticks([])
+ ax.set_yticks([])
+
+ saveplot(figname)
+ if showplot:
+ show()
+
+
+##################################################################################################
+
+
+def plot_sensmat(sensdata,pars,cases,vis="bar",reverse=False,par_labels=[],case_labels=[],figname='sensmat.eps',showplot=False):
+
+ cdict = cm.jet._segmentdata.copy()
+ cdict['red']=tuple([tuple([0.0, 1, 1 ]),
+ tuple([0.01, 0, 0 ]),
+ tuple([0.35, 0, 0 ]),
+ tuple([0.66, 1, 1 ]),
+ tuple([0.89, 1, 1 ]),
+ tuple([1, 0.5, 0.5])
+ ]
+ )
+ cdict['green']=tuple([tuple([0.0, 1, 1]),
+ tuple([0.01, 0, 0]),
+ tuple([0.125, 0, 0]),
+ tuple([0.375, 1, 1]),
+ tuple([0.64, 1, 1]),
+ tuple([0.91, 0, 0]),
+ tuple([1, 0, 0])
+ ]
+ )
+ cdict['blue']=tuple([tuple([0, 1.0,1.0]),
+ tuple([0.01, 0.5,0.5]),
+ tuple([0.11, 1, 1 ]),
+ tuple([0.34, 1, 1 ]),
+ tuple([0.65, 0, 0 ]),
+ tuple([1, 0, 0 ])
+ ]
+ )
+
+ cp=matplotlib.colors.LinearSegmentedColormap('colormap',cdict,64)
+
+ # Read varfrac files and retain indices of important params
+ vlst=[]
+ allSens=[]
+ for nm in range(len(cases)):
+ #vfr=np.array(column(readfile("varfrac."+nm+".dat")[0],0))
+ vfr=sensdata[nm,:] #np.array(column(readfile(nm+".vf.dat")[0],0))
+ allSens.append(vfr)
+ vlst.append([ n for n,i in enumerate(vfr) if i>0.1 ])
+ # Get union
+ allV=[]
+ for i in range(len(vlst)):
+ allV=list(set(allV) | set(vlst[i]))
+ allV=np.sort(allV)
+ # Create matrix, populate, and rescale
+ nobs=len(cases);
+ npar=len(allV);
+ print "Number of observables plotted = ", nobs
+ print "Number of parameters plotted = ", npar
+ jsens=np.array(np.zeros([nobs,npar]));
+ for i in range(nobs):
+ for j in range(npar):
+ jsens[i,j]=allSens[i][allV[j]];
+ #for i in range(nobs):
+ # jsens[i]=jsens[i]/jsens[i].max();
+ jsens[np.where(jsens==0)]=0.5*jsens[np.where(jsens>0)].min();
+ #for i in range(nobs):
+ # for j in range(npar):
+ # jsens[i,j]=np.log10(jsens[i,j]);
+
+ par_labels_sorted=[];
+ for i in allV:
+ par_labels_sorted.append(par_labels[i]);
+ # make fig
+ fs1=13;
+ fig = plt.figure(figsize=(10,3.9));
+ ax=fig.add_axes([0.12, 0.27, 0.88, 0.68]);
+ cs=ax.pcolor(jsens,cmap=cp);
+ #cs=ax.pcolor(jsens,cmap=cm.jet)
+ ax.set_xlim([0,npar]);
+ ax.set_ylim([0,nobs]);
+ ax.set_xticks([0.5+i for i in range(npar)]);
+ ax.set_yticks([0.4+i for i in range(nobs)]);
+ ax.set_yticklabels([case_labels[i] for i in range(nobs)],fontsize=fs1);
+ ax.set_xticklabels([par_labels_sorted[i] for i in range(npar)],rotation=45,fontsize=fs1);
+ ax.tick_params(length=0.0)
+ cbar=plt.colorbar(cs)
+ #cbar.set_ticks(range(-13,1,1))
+ #cbar.set_ticklabels(['$10^{'+str(i)+'}$' for i in range(-13,1,1)])
+
+
+ saveplot(figname)
+ if showplot:
+ show()
+
+
diff --git a/PyUQTk/pytests/.!99843!quadpnts.pdf b/PyUQTk/pytests/.!99843!quadpnts.pdf
new file mode 100644
index 00000000..9ab49ee0
Binary files /dev/null and b/PyUQTk/pytests/.!99843!quadpnts.pdf differ
diff --git a/PyUQTk/pytests/.!99844!quadpnts.pdf b/PyUQTk/pytests/.!99844!quadpnts.pdf
new file mode 100644
index 00000000..9ab49ee0
Binary files /dev/null and b/PyUQTk/pytests/.!99844!quadpnts.pdf differ
diff --git a/PyUQTk/pytests/.!99845!quadweights.pdf b/PyUQTk/pytests/.!99845!quadweights.pdf
new file mode 100644
index 00000000..9ab49ee0
Binary files /dev/null and b/PyUQTk/pytests/.!99845!quadweights.pdf differ
diff --git a/PyUQTk/pytests/.!99846!quadweights.pdf b/PyUQTk/pytests/.!99846!quadweights.pdf
new file mode 100644
index 00000000..9ab49ee0
Binary files /dev/null and b/PyUQTk/pytests/.!99846!quadweights.pdf differ
diff --git a/PyUQTk/pytests/CMakeLists.txt b/PyUQTk/pytests/CMakeLists.txt
new file mode 100644
index 00000000..dfdbca7e
--- /dev/null
+++ b/PyUQTk/pytests/CMakeLists.txt
@@ -0,0 +1,22 @@
+project (UQTk)
+
+set(PYTHON_EXECUTABLE python)
+
+SET( CMAKE_SWIG_OUTDIR "${PROJECT_BINARY_DIR}" )
+
+# Add python tests and run without "make install"
+configure_file( PyModTest.py "${CMAKE_SWIG_OUTDIR}/PyModTest.py" COPYONLY )
+add_test( NAME PyModTest COMMAND ${PYTHON_EXECUTABLE} PyModTest.py WORKING_DIRECTORY ${CMAKE_SWIG_OUTDIR} )
+
+configure_file( PyArrayTest.py "${CMAKE_SWIG_OUTDIR}/PyArrayTest.py" COPYONLY )
+add_test( NAME PyArrayTest COMMAND ${PYTHON_EXECUTABLE} PyArrayTest.py WORKING_DIRECTORY ${CMAKE_SWIG_OUTDIR} )
+
+configure_file( PyArrayTest2.py "${CMAKE_SWIG_OUTDIR}/PyArrayTest2.py" COPYONLY )
+add_test( NAME PyArrayTest2 COMMAND ${PYTHON_EXECUTABLE} PyArrayTest2.py WORKING_DIRECTORY ${CMAKE_SWIG_OUTDIR} )
+
+configure_file( PyQuadTest.py "${CMAKE_SWIG_OUTDIR}/PyQuadTest.py" COPYONLY )
+add_test( NAME PyQuadTest COMMAND ${PYTHON_EXECUTABLE} PyQuadTest.py WORKING_DIRECTORY ${CMAKE_SWIG_OUTDIR} )
+
+configure_file( PyBCSTest.py "${CMAKE_SWIG_OUTDIR}/PyBCSTest.py" COPYONLY )
+add_test( NAME PyBCSTest COMMAND ${PYTHON_EXECUTABLE} PyBCSTest.py WORKING_DIRECTORY ${CMAKE_SWIG_OUTDIR} )
+
diff --git a/PyUQTk/pytests/PyArrayTest.py b/PyUQTk/pytests/PyArrayTest.py
new file mode 100644
index 00000000..c98cc7de
--- /dev/null
+++ b/PyUQTk/pytests/PyArrayTest.py
@@ -0,0 +1,109 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+# include path to include PyUQTk
+# only necessary for cmake tests, so that the user doesn't have to "make install" to run
+# python tests
+import sys
+sys.path.append('../uqtkarray/')
+
+# try to import numpy and matplotlib
+try:
+ from numpy import *
+ from matplotlib.pyplot import *
+except ImportError:
+	print "Need numpy and matplotlib to test PyUQTk"
+
+# try to import uqtk array library and
+# functions to convert between uqtk and numpy arrays
+try:
+ import uqtkarray
+ from uqtkarray import numpy2uqtk
+ from uqtkarray import uqtk2numpy
+except ImportError:
+ print "PyUQTk array module not found"
+ print "If installing in a directory other than the build directory, make sure PYTHONPATH includes the install directory"
+
+import unittest
+
+''' Test converting 1d numpy array to 1d uqtk array '''
+# create 1d array
+N = 35
+x = uqtkarray.dblArray1D(N,0)
+
+# create 1d numpy array
+x_np = random.randn(N)
+
+# set uqtk array to numpy array
+x.setnpdblArray(x_np)
+
+# test to make sure array elements are the same
+for i in range(N):
+ assert x[i] == x_np[i]
+
+''' Test converting 2d numpy array to 2d uqtk array '''
+# create 2d array in uqtk
+m = 100
+n = 3
+y = uqtkarray.dblArray2D(m,n,1)
+
+# set 2d array to numpy array
+# make sure to pass asfortranarray
+y_np = random.randn(m,n)
+y.setnpdblArray(asfortranarray(y_np))
+
+for i in range(m):
+ for j in range(n):
+ assert y[i,j] == y_np[i,j]
+
+''' alternative using uqtk2numpy and numpy2uqtk '''
+
+# test conversion from 1d numpy array to 1d uqtk array
+nn = 10
+x1 = random.rand(nn)
+y1 = numpy2uqtk(x1)
+z1 = uqtk2numpy(y1)
+for i in range(nn):
+ assert x1[i] == y1[i]
+
+# test conversion from 1d uqtk array to numpy
+for i in range(nn):
+ assert z1[i] == x1[i]
+
+# test for conversion from 2d numpy to 2d uqtk
+nn = 10
+mm = 5
+X1 = random.rand(mm,nn)
+Y1 = numpy2uqtk(X1)
+Z1 = uqtk2numpy(Y1)
+for i in range(mm):
+ for j in range(nn):
+ assert X1[i,j] == Y1[i,j]
+
+# test for conversion from 2d uqtk array to numpy array
+for i in range(mm):
+ for j in range(nn):
+ assert Z1[i,j] == X1[i,j]
diff --git a/PyUQTk/pytests/PyArrayTest2.py b/PyUQTk/pytests/PyArrayTest2.py
new file mode 100644
index 00000000..610f15ef
--- /dev/null
+++ b/PyUQTk/pytests/PyArrayTest2.py
@@ -0,0 +1,74 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+# include path to include PyUQTk
+import sys
+sys.path.append('../uqtkarray/')
+
+try:
+ import numpy as np
+except ImportError:
+	print "Need numpy to test PyUQTk"
+
+try:
+ import uqtkarray
+ from uqtkarray import numpy2uqtk
+ from uqtkarray import uqtk2numpy
+except ImportError:
+ print "PyUQTk array module not found"
+ print "If installing in a directory other than the build directory, make sure PYTHONPATH includes the install directory"
+
+'''
+This file tests to make sure conversion from numpy -> uqtkarray does
+not change the row-major (C contiguous) format of the original numpy array
+
+Also, when converting form uqtkarray-> numpy we want to make sure that the
+resulting numpy array is *only* row major (C contiguous)
+
+'''
+
+# create numpy matrix and show flags
+a_np = np.array([[0, 2.00],[0.1, 1],[1, 5.0]])
+print "flags for a_np to show whether C or F contiguous"
+print a_np.flags
+
+# get a uqtk array from a numpy array (memory is copied, not shared)
+a_uqtk = numpy2uqtk(a_np)
+print "\nflags for original numpy array to make sure it hasn't changed to F contiguous after converting"
+# verify that the original numpy array is only C contiguous
+assert a_np.flags['F_CONTIGUOUS'] == False
+assert a_np.flags['C_CONTIGUOUS'] == True
+
+print "\nConvert uqtk array back to numpy array and make sure C contiguous"
+b_np = uqtk2numpy(a_uqtk)
+# test to make sure new numpy array is *only* C contiguous (row - major)
+assert b_np.flags['F_CONTIGUOUS'] == False
+assert b_np.flags['C_CONTIGUOUS'] == True
+
+# test for the dot product
+print "\ncompute dot product which should be [2,1.1,6] (Note that if F contiguous, the dot product would be [.1,3,6]):"
+dp = np.dot(b_np,np.ones(2))
+assert np.alltrue( dp == np.array([2.,1.1,6.]))
diff --git a/PyUQTk/pytests/PyBCSTest.py b/PyUQTk/pytests/PyBCSTest.py
new file mode 100644
index 00000000..13c05a70
--- /dev/null
+++ b/PyUQTk/pytests/PyBCSTest.py
@@ -0,0 +1,104 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+# include path for PyUQTk.
+import sys
+sys.path.append('../bcs/') # imports as build lib so installing not needed
+sys.path.append('../uqtkarray/')
+sys.path.append('../tools/')
+sys.path.append('../pce/')
+
+import os
+dir_path = os.path.dirname(os.path.realpath(__file__))
+print dir_path
+
+try:
+ import numpy as np
+ import matplotlib.pyplot as mpl
+ import pdb
+except ImportError:
+ "Need numpy and matplotlib to test PyUQTk"
+
+try:
+ import uqtkarray as uqtkarray
+ import pce as uqtkpce
+ import tools as uqtktools
+ from bcs import bcsreg
+except ImportError:
+ print "PyUQTk array and quad module not found"
+
+'''
+This example uses BCS to fit
+
+f(x,y) = 1 + x + .5(3y^2-1)
+
+using 100 randomly generated training data points
+and 20 test data points. Sensitivity analysis is also
+performed post fitting.
+
+'''
+
+# set dimension
+ndim = 2
+
+# Create training data
+rn = np.random.RandomState(145)
+X = 2*rn.rand(100,ndim) - 1
+x1,x2 = X.T[0],X.T[1]
+f = lambda x1,x2: 1 + x1 + .5*(3*x2**2-1)
+y = f(x1,x2)
+
+# create test data
+Xtest = 2*rn.rand(20,ndim) - 1
+ytest = f(Xtest.T[0],Xtest.T[1])
+testdata = {'X': Xtest, 'y': ytest}
+
+# BCS hyperparameter definitions
+sigsq = None    # noise variance (unused below; the bcsreg default applies)
+pcorder = 2
+pctype = "LU"
+tol = 1e-12
+upit = 1
+
+# set up, fit, and predict with the BCS regression model
+regmodel = bcsreg(ndim=ndim, pcorder=pcorder, pctype=pctype)
+c, mindex = regmodel.fit(X,y,upit=upit,tol=tol)
+ypred = regmodel.predict(Xtest)
+
+# print mean squared prediction error
+mse = np.mean((ypred - ytest)**2)
+nmse = mse/np.mean(ytest)
+print "\nMSE is %.5g" %mse
+print "NMSE is %.5g" %nmse
+
+# print sensitivities
+print "\nSensitivities are ", regmodel.getsens()
+
+prec = 1e-7
+assert mse < prec, "BCS failed to recover the coefficients to desired precision :-("
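+
+# analytic cross-check (a soft, illustrative note): with x,y ~ U[-1,1],
+# Var f = Var(x1) + Var(.5(3*x2^2-1)) = 1/3 + 1/5 = 8/15, so the main-effect
+# sensitivities printed above should be near [5/8, 3/8] = [0.625, 0.375]
+print "Expected main sensitivities ~ [0.625, 0.375]"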
+
+
+
diff --git a/PyUQTk/pytests/PyHMCMCTest.py b/PyUQTk/pytests/PyHMCMCTest.py
new file mode 100644
index 00000000..b17333e9
--- /dev/null
+++ b/PyUQTk/pytests/PyHMCMCTest.py
@@ -0,0 +1,192 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+try:
+ from numpy import *
+ from matplotlib.pyplot import *
+ from acor import *
+except ImportError:
+ "Need numpy and matplotlib to test PyUQTk"
+
+try:
+ import PyUQTk.array as uqtkarray
+ import PyUQTk.mcmc as uqtkmcmc
+ from PyUQTk.inference.mcmc import *
+ from PyUQTk.inference.postproc import *
+except ImportError:
+ print "PyUQTk array module not found"
+ print "If installing in a directory other than the build directory, make sure PYTHONPATH includes the install directory"
+
+import time
+
+'''
+Use HMCMC to get samples from banana shaped function
+'''
+
+def U(q,a=1.0,b=100.0):
+ '''
+ U(q) = -log(prior(q)*Likelihood(q|data))
+ '''
+ q = copy(atleast_2d(q))
+ return b*(q[:,1] - q[:,0]**2)**2 + (a - q[:,0])**2
+
+def grad_U(q):
+ '''
+ grad_U(q) = gradient vector of U at q
+ '''
+ q = copy(atleast_2d(q))
+ dUdx = (-400*q[:,0]*(q[:,1] - q[:,0]**2) - 2*(1 - q[:,0]))[0]
+ dUdy = (200*(q[:,1] - q[:,0]**2))[0]
+ return array([dUdx,dUdy])
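+
+# quick sanity check of grad_U against a central finite difference at an
+# arbitrary point (an illustrative self-check; the step h is an ad hoc choice)
+_h = 1e-6
+_q0 = array([0.3, -0.2])
+_fd = array([(U(_q0 + _h*eye(2)[i])[0] - U(_q0 - _h*eye(2)[i])[0])/(2*_h)
+             for i in range(2)])
+assert allclose(grad_U(_q0), _fd, atol=1e-4), "grad_U disagrees with finite differences"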
+
+# def HMCMC(U,grad_U,eps,L,q):
+# current_q = copy(q) # save current
+
+# # generate current p
+# # propcov = 4*array([[ 0.01175383, 0.02065261],[ 0.02065261, 0.04296117]])
+# p = random.randn(len(current_q))
+# # p = random.multivariate_normal([0,0],propcov)
+# current_p = copy(p) # save current p
+
+# # make half step for momentum used for leap frog step
+# p = p - eps * grad_U(q)/2.0
+
+# for i in range(L):
+# # p = p - eps * grad_U(q)/2.0
+# q = q + eps*p
+# # p = p - eps * grad_U(q)/2.0
+# if (i != L-1): p = p - eps*grad_U(q)
+
+# # make a half step for momentum at the end
+# p = p - eps * grad_U(q)/2.0
+
+# # negate the momentum to make a symmetric proposal
+# p = -p
+
+# # Evaluate potential and kinetic energy
+# current_U = U(current_q)[0]
+# current_K = sum(current_p**2)/2.0
+# proposed_U = U(q)[0]
+# proposed_K = sum(p**2)/2.0
+
+# # Accept or reject the state at end of trajectory, returning either
+# # the position at the end of the trajectory or the initial position
+
+# if (log(random.rand()) < current_U-proposed_U+current_K-proposed_K):
+# return q
+# else:
+# alpha = 0
+# return current_q
+
+fig = figure()
+ax1 = fig.add_subplot(2,2,1)
+ax2 = fig.add_subplot(2,2,2)
+ax3 = fig.add_subplot(2,2,3)
+ax4 = fig.add_subplot(2,2,4)
+
+# Test U(q)
+N = 80
+qx = linspace(-1.5,2.5,N)
+qy = linspace(-.5,5,N)
+qx,qy = meshgrid(qx,qy)
+qz = exp(-U(array(zip(qx.flatten(),qy.flatten()))))
+qz.shape = (N,N)
+
+# Test grad_U
+dUdx = zeros((N,N))
+dUdy = zeros((N,N))
+for i in range(N):
+ for j in range(N):
+ dU = grad_U([qx[i,j],qy[i,j]])
+ dUdx[i,j] = dU[0]
+ dUdy[i,j] = dU[1]
+
+# Test HMCMC
+print '\n*****************\nTesting HMCMC\n*****************\n'
+samples1 = []
+qstart = array([1.0,1.0])
+q = copy(qstart)
+samples1.append(copy(q))
+eps = .01
+L = 150
+M = 500
+nburn = 300
+thin1 = 1
+for i in range(M):
+ q = HMCMC(U,grad_U,eps,L,q)
+ if i > 0:
+ samples1.append(copy(q))
+samples1 = array(samples1)
+
+# ax1.plot(qstart[0],qstart[0],'ok')
+# ax1.quiver(qx,qy,-dUdx,-dUdy,alpha=.1)
+# ax1.contour(qx,qy,qz,20,alpha=.4)
+# ax1.plot(samples1[nburn::thin1,0],samples1[nburn::thin1,1],'*k',alpha=.1)
+
+'''
+Use AMCMC for banana shaped function
+'''
+class pyLikelihood(uqtkmcmc.LikelihoodBase):
+ def eval(self,x):
+ x0 = x[0]
+ x1 = x[1]
+ return -(1-x0)*(1-x0) - 100*(x1 - x0*x0)*(x1 - x0*x0)
+
+# testing MCMC library
+print '\n*****************\nTesting AMCMC\n*****************\n'
+Like = pyLikelihood()
+xstart = uqtkarray.dblArray1D(2,1.0)
+mchain = uqtkmcmc.MCMC(Like)
+dim = 2
+mchain.setChainDim(dim)
+mchain.initMethod("am")
+g = uqtkarray.dblArray1D(dim,.1)
+mchain.initChainPropCovDiag(g)
+nCalls = L*M      # match the total number of likelihood evaluations used by HMCMC
+thin2 = thin1*L   # thin by L so both chains yield comparable sample counts
+mchain.setWriteFlag(0)
+mchain.setOutputInfo("txt","chain.dat",M,nCalls);
+mchain.runChain(nCalls,xstart);
+mchain.getSamples()
+samples = mchain.samples
+samples2 = zeros((dim,nCalls))
+samples.getnpdblArray(samples2)
+samples2 = samples2.T
+propcov = uqtkarray.dblArray2D(2,2,0)
+mchain.getChainPropCov(propcov)
+m1 = propcov[0,0]; m2 = propcov[1,1]
+
+# ax2.contour(qx,qy,qz,250,alpha=.4)
+# ax2.plot(samples2[L*nburn::thin2,0],samples2[L*nburn::thin2,1],'*g',alpha=.1)
+
+# plot mixing of samples
+ax3.plot(samples1[nburn::thin1,0],'k',alpha=.4)
+ax3.plot(samples2[L*nburn::thin2,0],'g',alpha=.4)
+ax4.plot(samples1[nburn::thin1,1],'k',alpha=.4)
+ax4.plot(samples2[L*nburn::thin2,1],'g',alpha=.4)
+
+print 'acor using HMCMC', acor(samples1[nburn::thin1,0])
+print 'acor using MCMC', acor(samples2[L*nburn::thin2,0])
diff --git a/PyUQTk/pytests/PyMCMC2dTest.py b/PyUQTk/pytests/PyMCMC2dTest.py
new file mode 100644
index 00000000..f66ed8a5
--- /dev/null
+++ b/PyUQTk/pytests/PyMCMC2dTest.py
@@ -0,0 +1,101 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+# add the PyUQTk build directory to the module search path
+import sys
+sys.path.append('../../')
+
+try:
+ from numpy import *
+ from matplotlib.pyplot import *
+except ImportError:
+ "Need numpy and matplotlib to test PyUQTk"
+
+try:
+ import PyUQTk.array as uqtkarray
+ import PyUQTk.mcmc as uqtkmcmc
+except ImportError:
+ print "PyUQTk array module not found"
+ print "If installing in a directory other than the build directory, make sure PYTHONPATH includes the install directory"
+
+import time
+
+# temp = random.randn(1000.)
+# a = uqtkarray.dblArray1D(1000,101.0)
+
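+# custom likelihoods subclass uqtkmcmc.LikelihoodBase (a SWIG director class,
+# so the C++ sampler can call back into Python) and override eval(), which
+# must return the log-posterior density at x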
+class pyLikelihood(uqtkmcmc.LikelihoodBase):
+ def eval(self,x):
+ # a.getnpdblarray(temp)
+ # temp = array(a.pint4py())
+ x0 = x[0]
+ x1 = x[1]
+ return -(1-x0)*(1-x0) - 100*(x1 - x0*x0)*(x1 - x0*x0)
+
+start = time.time()
+# testing MCMC library
+print '\n*****************\nTesting MCMC\n*****************\n'
+print 'Setting LogPosterior function, L'
+print 'L is the log of a Rosenbrock-shaped posterior (defined above)'
+L = pyLikelihood()
+print 'Testing logposterior function'
+xstart = uqtkarray.dblArray1D(2,0)
+print xstart
+print 'L.eval(x) = ', L.eval(xstart)
+
+print 'Setting up the sampler'
+mchain = uqtkmcmc.MCMC(L)
+print 'Setting chain dim, type (ss), initial proposal covariance'
+dim = 2
+mchain.setChainDim(dim)
+mchain.initMethod("ss")
+g = uqtkarray.dblArray1D(dim,.1)
+mchain.initChainPropCovDiag(g)
+print 'Chain Setup:'
+mchain.printChainSetup();
+print 'Running chain to chain.dat ...'
+nCalls = 100000
+mchain.setOutputInfo("txt","chain.dat",nCalls,nCalls);
+mchain.runChain(nCalls,xstart);
+print 'loading samples and plotting'
+thin = 25
+samples = loadtxt('chain.dat')[3001:-1:thin,1:3]
+figure()
+plot(samples[:,0],samples[:,1],'.')
+
+# get the likelihood information
+print 'Getting samples into numpy array...'
+# logprobs = zeros(nCalls);
+mchain.getSamples()
+samples = mchain.samples
+npsamples = zeros((dim,nCalls))
+samples.getnpdblArray(npsamples)
+figure()
+plot(npsamples[0][::thin],npsamples[1][::thin],'.g')
+
+end = time.time()
+print 'elapsed time (s):', end - start
+
+# show()
diff --git a/PyUQTk/pytests/PyMCMCTest.py b/PyUQTk/pytests/PyMCMCTest.py
new file mode 100644
index 00000000..c57aa567
--- /dev/null
+++ b/PyUQTk/pytests/PyMCMCTest.py
@@ -0,0 +1,142 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+# add the PyUQTk build directory to the module search path
+import sys
+sys.path.append('../../')
+
+try:
+ from numpy import *
+ from matplotlib.pyplot import *
+ import time
+except ImportError:
+ "Need numpy and matplotlib to test PyUQTk"
+
+try:
+ import PyUQTk.array as uqtkarray
+ import PyUQTk.quad as uqtkquad
+ import PyUQTk.mcmc as uqtkmcmc
+except ImportError:
+ print "PyUQTk array and quad module not found"
+
+# class pyLikelihood(uqtkmcmc.LikelihoodBase):
+# def eval(self,x):
+# x0 = x[0]
+# x1 = x[1]
+# return -(1-x0)*(1-x0) - 100*(x1 - x0*x0)*(x1 - x0*x0)
+# class pyLikelihood(uqtkmcmc.LikelihoodBase):
+# def eval(self,x):
+# '''
+# sample from 1./(abs(1-x**2))
+# '''
+# y1 = x[0]
+# y2 = x[1]
+# if sqrt(y1**2 + y2**2) > 1:
+# return -1e16
+# else:
+# return -.0001*log(1 - y2**2 - y1**2)
+class pyLikelihood(uqtkmcmc.LikelihoodBase):
+ def eval(self,x):
+        '''
+        sample from exp(-.5*(x**2/.1**2 + y**2/.8**2)),
+        i.e. independent zero-mean Gaussians with std devs .1 and .8
+        '''
+ y1 = x[0]
+ y2 = x[1]
+ return -.5*(y1**2/.1**2 + y2**2/.8**2)
+
+start = time.time()
+# testing MCMC library
+print '\n*****************\nTesting MCMC\n*****************\n'
+print 'Setting LogPosterior function, L'
+print 'L is the log of an anisotropic Gaussian (defined above)'
+L = pyLikelihood()
+print 'Testing logposterior function'
+xstart = uqtkarray.dblArray1D(2,0)
+print xstart
+print 'L.eval(x) = ', L.eval(xstart)
+
+print 'Setting up the sampler'
+mchain = uqtkmcmc.MCMC(L)
+
+print 'Setting chain dim, type (ss), initial proposal covariance'
+dim = 2
+mchain.setChainDim(dim)
+mchain.initMethod("am")
+g = uqtkarray.dblArray1D(dim,.5)
+mchain.initChainPropCovDiag(g)
+
+print 'Running chain to chain.dat ...'
+nCalls = 100000
+# mchain.setOutputInfo("txt","chain.dat",nCalls,nCalls);
+mchain.setWriteFlag(0)
+mchain.runChain(nCalls,xstart);
+
+print 'Getting samples into numpy array...'
+mchain.getSamples()
+samples = array(mchain.samples).T[3000::5,:]
+print 'std dev error vs (.1, .8):', std(samples,0) - array([.1,.8])
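+# soft accuracy check (illustrative; tolerance is loose to allow for MCMC noise)
+assert allclose(std(samples,0), array([.1,.8]), atol=.08), "marginal std devs far from target"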
+
+# print 'loading samples and plotting'
+# thin = 25
+# samples = loadtxt('chain.dat')[3001:-1:thin,1:3]
+# figure()
+# plot(samples[:,0],samples[:,1],'.')
+
+
+# # get quad points and weights
+# x = uqtkarray.dblArray2D()
+# w = uqtkarray.dblArray1D()
+
+# print 'Create an instance of Quad class'
+# ndim = 2
+# level = 5
+# q = uqtkquad.Quad('LU','sparse',ndim,level,0,1)
+# print 'Now set and get the quadrature rule...'
+# q.SetRule()
+# q.GetRule(x,w)
+
+# # print out x and w
+# print 'Displaying the quadrature points and weights:\n'
+# # print x
+# # print w
+# n = len(x)
+# print 'Number of quad points is ', n, '\n'
+
+# # now we plot the points
+# print 'Plotting the points (get points in column major order as a flattened vector)'
+# print 'need to use reshape with fortran ordering'
+# xpnts = zeros((n,ndim))
+# x.getnpdblArray(xpnts)
+# plot(xpnts[:,0], xpnts[:,1],'ob',ms=10,alpha=.25)
+# savefig('quadpnts.pdf')
+
+# # get quad weights
+# w_np = zeros(n)
+# w.getnpdblArray(w_np)
+# clf()
+# plot(w,'ro-',lw=4)
+# savefig('quadweights.pdf')
+
diff --git a/PyUQTk/pytests/PyModTest.py b/PyUQTk/pytests/PyModTest.py
new file mode 100644
index 00000000..91020377
--- /dev/null
+++ b/PyUQTk/pytests/PyModTest.py
@@ -0,0 +1,54 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+# add the PyUQTk build module paths before importing
+""" Test to make sure all Python modules load. """
+import sys
+
+print "Loading array module:"
+sys.path.append('../uqtkarray/')
+import uqtkarray
+
+print "Loading quadrature module:"
+sys.path.append('../quad/')
+import quad
+
+print "Loading bayesian compressed sensing module:"
+# sys.path.append('../bcs/')
+# import bcs
+
+print "Loading polynomial chaos module:"
+sys.path.append('../pce/')
+import pce
+
+print "Loading tools module:"
+sys.path.append('../tools')
+import tools
+
+#print "Loading dfi module:"
+#sys.path.append('../dfi/')
+#import dfi
+
diff --git a/PyUQTk/pytests/PyPCE1dTest.py b/PyUQTk/pytests/PyPCE1dTest.py
new file mode 100644
index 00000000..e6871c2b
--- /dev/null
+++ b/PyUQTk/pytests/PyPCE1dTest.py
@@ -0,0 +1,131 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+# add the PyUQTk build directory to the module search path
+import sys
+sys.path.append('../../')
+
+try:
+ from numpy import *
+ from matplotlib.pyplot import *
+except ImportError:
+ "Need numpy and matplotlib to test PyUQTk"
+
+try:
+ import PyUQTk.array as uqtkarray
+ import PyUQTk.quad as uqtkquad
+ import PyUQTk.pce as uqtkpce
+except ImportError:
+ print "PyUQTk array and quad module not found"
+
+
+# get quad points and weights
+x = uqtkarray.dblArray2D()
+w = uqtkarray.dblArray1D()
+
+print 'Create an instance of Quad class'
+ndim = 1
+level = 8
+q = uqtkquad.Quad('LU','full',ndim,level)
+print 'Now set and get the quadrature rule...'
+q.SetRule()
+q.GetRule(x,w)
+
+# print out x and w
+print 'Quadrature rule set (printing of points/weights disabled)\n'
+# print x
+# print w
+n = len(x)
+print 'Number of quad points is ', n, '\n'
+
+# convert to numpy arrays
+x_np = zeros((len(x),1))
+w_np = zeros(len(x))
+x.getnpdblArray(x_np)
+w.getnpdblArray(w_np)
+
+# define the (Runge) test function for evaluation over [-1,1]
+f = lambda x: 1./(1 + x**2)
+y = f(x_np)
+y.shape = (len(y),) # make 1d array
+
+# convert numpy y to 1d array
+ydata = uqtkarray.dblArray1D(len(y),0)
+ydata.setnpdblArray(y)
+
+'''
+Define PCSet object
+'''
+# Instantiate object
+nord = 8
+chaos_type = "LEG"
+pcmodel = uqtkpce.PCSet('NISPnoq', nord, ndim, chaos_type) # quad rule attached below via SetQuadRule
+
+# set quad rule for pc model
+pcmodel.SetQuadRule(q)
+nup = pcmodel.GetNumberPCTerms()-1
+totquad = pcmodel.GetNQuadPoints()
+
+# Get the multiindex for postprocessing
+mindex = uqtkarray.intArray2D();
+pcmodel.GetMultiIndex(mindex);
+
+# get the coefficients using the quadrature rule
+# to calculate the projections
+ck = uqtkarray.dblArray1D(nup+1,0.0)
+pcmodel.GalerkProjection(ydata,ck);
+c_np = zeros(len(ck))
+ck.getnpdblArray(c_np)
+
+'''
+Evaluate PC Model at random points
+'''
+xall = linspace(-1,1,1000); xall.shape = (len(xall),1)
+yeval = uqtkarray.dblArray1D(len(xall),0.0)
+xeval = uqtkarray.dblArray2D(len(xall),1,0.0)
+xeval.setnpdblArray(asfortranarray(xall))
+pcmodel.EvalPCAtCustPoints(yeval,xeval,ck)
+
+y_exact = f(xall)
+y_pce = array(yeval.flatten())
+plot(xall,y_exact,'k',lw=2,alpha=.4)
+plot(xeval,y_pce,'--r',lw=1)
+
+'''
+Evaluate PC Model at quad points
+'''
+yevalq = uqtkarray.dblArray1D(len(x),0.0)
+pcmodel.EvalPCAtCustPoints(yevalq,x,ck)
+y_pceq = array(yevalq.flatten())
+plot(x,y_pceq,'or',lw=1)
+
+'''
+Plot the pointwise absolute error
+'''
+figure()
+plot(xeval,abs(y_pce - y_exact[:,0]),'k')
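+
+# soft accuracy check (illustrative): 1/(1+x^2) is analytic on [-1,1], so the
+# order-8 Legendre projection should be accurate to well under 0.05 in max error
+maxerr = abs(y_pce - y_exact[:,0]).max()
+print 'max abs PCE error:', maxerr
+assert maxerr < 0.05, "PCE error larger than expected"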
+
+
diff --git a/PyUQTk/pytests/PyPCE2dTest.py b/PyUQTk/pytests/PyPCE2dTest.py
new file mode 100644
index 00000000..8da37cb7
--- /dev/null
+++ b/PyUQTk/pytests/PyPCE2dTest.py
@@ -0,0 +1,148 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+# add the PyUQTk build directory to the module search path
+import sys
+sys.path.append('../../')
+
+try:
+ from numpy import *
+ from matplotlib.pyplot import *
+except ImportError:
+ "Need numpy and matplotlib to test PyUQTk"
+
+try:
+ import PyUQTk.array as uqtkarray
+ import PyUQTk.quad as uqtkquad
+ import PyUQTk.pce as uqtkpce
+except ImportError:
+ print "PyUQTk array and quad module not found"
+
+
+# get quad points and weights
+x = uqtkarray.dblArray2D()
+w = uqtkarray.dblArray1D()
+
+print 'Create an instance of Quad class'
+ndim = 2
+level = 8
+q = uqtkquad.Quad('LU','full',ndim,level)
+print 'Now set and get the quadrature rule...'
+q.SetRule()
+q.GetRule(x,w)
+
+# print out x and w
+print 'Displaying the quadrature points and weights:\n'
+print x
+print w
+n = len(x)
+print 'Number of quad points is ', n, '\n'
+
+# convert to numpy arrays
+x_np = zeros((n,2))
+w_np = zeros(len(x))
+x.getnpdblArray(x_np)
+w.getnpdblArray(w_np)
+
+# define function for evaluation over [-1,1]
+f = lambda x: x[:,0]*x[:,1] + x[:,0]**2 + sqrt(abs(x[:,1]))
+y = f(x_np)
+y.shape = (len(y),) # make 1d array
+
+# convert numpy y to 1d array
+ydata = uqtkarray.dblArray1D(len(y),0)
+ydata.setnpdblArray(y)
+
+'''
+Define PCSet object
+'''
+# Instantiate object
+nord = 4
+chaos_type = "LEG"
+pcmodel = uqtkpce.PCSet('NISPnoq', nord, ndim, chaos_type) # quad rule attached below via SetQuadRule
+
+# set quad rule for pc model
+pcmodel.SetQuadRule(q)
+nup = pcmodel.GetNumberPCTerms()-1
+totquad = pcmodel.GetNQuadPoints()
+
+# Get the multiindex for postprocessing
+mindex = uqtkarray.intArray2D();
+pcmodel.GetMultiIndex(mindex);
+
+# get the coefficients using the quadrature rule
+# to calculate the projections
+ck = uqtkarray.dblArray1D(nup+1,0.0)
+pcmodel.GalerkProjection(ydata,ck);
+c_np = zeros(len(ck))
+ck.getnpdblArray(c_np)
+
+# compute main sensitivities
+mainsens = uqtkarray.dblArray1D(ndim,0)
+pcmodel.ComputeMainSens(ck,mainsens)
+
+# compute total sensitivity
+totsens = uqtkarray.dblArray1D(ndim,0)
+pcmodel.ComputeTotSens(ck,totsens)
+
+#compute joint sensitivity
+jointsens = uqtkarray.dblArray2D(ndim,ndim,0)
+pcmodel.ComputeJointSens(ck,jointsens)
+
+print mainsens, totsens, jointsens
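+
+# sanity check (illustrative): a main-effect index can never exceed the
+# total-effect index, since the total sums a superset of squared PC terms
+ms = zeros(ndim); ts = zeros(ndim)
+mainsens.getnpdblArray(ms)
+totsens.getnpdblArray(ts)
+assert all(ms <= ts + 1e-12), "main sensitivities exceed total sensitivities"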
+
+# '''
+# Evaluate PC Model at random points
+# '''
+# xall = linspace(-1,1,1000); xall.shape = (len(xall),1)
+# yeval = uqtkarray.dblArray1D(len(xall),0.0)
+# xeval = uqtkarray.dblArray2D(len(xall),1,0.0)
+# xeval.setnpdblArray(asfortranarray(xall))
+# pcmodel.EvalPCAtCustPoints(yeval,xeval,ck)
+
+# y_exact = f(xall)
+# y_pce = array(yeval.flatten())
+# plot(xall,y_exact,'k',lw=2,alpha=.4)
+# plot(xeval,y_pce,'--r',lw=1)
+
+# '''
+# Evaluate PC Model at quad points
+# '''
+# yevalq = uqtkarray.dblArray1D(len(x),0.0)
+# pcmodel.EvalPCAtCustPoints(yevalq,x,ck)
+# y_pceq = array(yevalq.flatten())
+# plot(x,y_pceq,'or',lw=1)
+
+# '''
+# Plot the pointwise absolute error
+# '''
+# figure()
+# plot(xeval,abs(y_pce - y_exact[:,0]),'k')
+
+
+
+
+
diff --git a/PyUQTk/pytests/PyQuadTest.py b/PyUQTk/pytests/PyQuadTest.py
new file mode 100644
index 00000000..b69977c9
--- /dev/null
+++ b/PyUQTk/pytests/PyQuadTest.py
@@ -0,0 +1,139 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+# add the PyUQTk build module paths to the search path
+import sys
+sys.path.append('../uqtkarray/')
+sys.path.append('../quad/')
+
+try:
+ from numpy import *
+ from matplotlib.pyplot import *
+except ImportError:
+ "Need numpy and matplotlib to test PyUQTk"
+
+try:
+ import uqtkarray
+ from uqtkarray import numpy2uqtk
+ from uqtkarray import uqtk2numpy
+ import quad as uqtkquad
+except ImportError:
+ print "PyUQTk array and quad module not found"
+
+'''
+This file tests the PyUQTk quadrature routines
+'''
+
+# true quad points for sparse LU with ndim = 2 and level = 3
+qpnts_ref = array([[-9.681602395076263079e-01, 0.000000000000000000e+00],
+[-9.061798459386638527e-01, -7.745966692414832933e-01],
+[-9.061798459386638527e-01, 0.000000000000000000e+00],
+[-9.061798459386638527e-01, 7.745966692414834043e-01],
+[-8.360311073266358806e-01, 0.000000000000000000e+00],
+[-7.745966692414832933e-01, -9.061798459386638527e-01],
+[-7.745966692414832933e-01, -7.745966692414832933e-01],
+[-7.745966692414832933e-01, -5.384693101056832187e-01],
+[-7.745966692414832933e-01, 0.000000000000000000e+00],
+[-7.745966692414832933e-01, 5.384693101056829967e-01],
+[-7.745966692414832933e-01, 7.745966692414834043e-01],
+[-7.745966692414832933e-01, 9.061798459386638527e-01],
+[-6.133714327005902467e-01, 0.000000000000000000e+00],
+[-5.384693101056832187e-01, -7.745966692414832933e-01],
+[-5.384693101056832187e-01, 0.000000000000000000e+00],
+[-5.384693101056832187e-01, 7.745966692414834043e-01],
+[-3.242534234038090268e-01, 0.000000000000000000e+00],
+[0.000000000000000000e+00, -9.681602395076263079e-01],
+[0.000000000000000000e+00, -9.061798459386638527e-01],
+[0.000000000000000000e+00, -8.360311073266358806e-01],
+[0.000000000000000000e+00, -7.745966692414832933e-01],
+[0.000000000000000000e+00, -6.133714327005902467e-01],
+[0.000000000000000000e+00, -5.384693101056832187e-01],
+[0.000000000000000000e+00, -3.242534234038090268e-01],
+[0.000000000000000000e+00, 0.000000000000000000e+00],
+[0.000000000000000000e+00, 3.242534234038088048e-01],
+[0.000000000000000000e+00, 5.384693101056829967e-01],
+[0.000000000000000000e+00, 6.133714327005905798e-01],
+[0.000000000000000000e+00, 7.745966692414834043e-01],
+[0.000000000000000000e+00, 8.360311073266353254e-01],
+[0.000000000000000000e+00, 9.061798459386638527e-01],
+[0.000000000000000000e+00, 9.681602395076263079e-01],
+[3.242534234038088048e-01, 0.000000000000000000e+00],
+[5.384693101056829967e-01, -7.745966692414832933e-01],
+[5.384693101056829967e-01, 0.000000000000000000e+00],
+[5.384693101056829967e-01, 7.745966692414834043e-01],
+[6.133714327005905798e-01, 0.000000000000000000e+00],
+[7.745966692414834043e-01, -9.061798459386638527e-01],
+[7.745966692414834043e-01, -7.745966692414832933e-01],
+[7.745966692414834043e-01, -5.384693101056832187e-01],
+[7.745966692414834043e-01, 0.000000000000000000e+00],
+[7.745966692414834043e-01, 5.384693101056829967e-01],
+[7.745966692414834043e-01, 7.745966692414834043e-01],
+[7.745966692414834043e-01, 9.061798459386638527e-01],
+[8.360311073266353254e-01, 0.000000000000000000e+00],
+[9.061798459386638527e-01, -7.745966692414832933e-01],
+[9.061798459386638527e-01, 0.000000000000000000e+00],
+[9.061798459386638527e-01, 7.745966692414834043e-01],
+[9.681602395076263079e-01, 0.000000000000000000e+00]])
+
+# initiate uqtk arrays for quad points and weights
+x = uqtkarray.dblArray2D()
+w = uqtkarray.dblArray1D()
+
+# create instance of quad class and output
+# points and weights
+print 'Create an instance of Quad class'
+ndim = 2
+level = 3
+q = uqtkquad.Quad('LU','sparse',ndim,level,0,1)
+print 'Now set and get the quadrature rule...'
+q.SetRule()
+q.GetRule(x,w)
+
+# print out x and w
+print 'Displaying the quadrature points and weights:\n'
+x_np = uqtk2numpy(x)
+print x_np
+n = len(x)
+print 'Number of quad points is ', n, '\n'
+
+# extract the quadrature points into a numpy array (plotting is disabled below)
+print 'Extracting the quadrature points into a numpy array'
+xpnts = zeros((n,ndim))
+x.getnpdblArray(xpnts)
+# plot(xpnts[:,0], xpnts[:,1],'ob',ms=10,alpha=.25)
+# show()
+
+# convert the quad weights to numpy arrays
+w_np = zeros(n)
+w.getnpdblArray(w_np)
+
+# assert the quadrature points match the reference values; compare to a tight
+# absolute tolerance rather than exact float equality for cross-platform safety
+assert allclose(x_np, qpnts_ref, rtol=0, atol=1e-14), "quad points differ from reference"
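+
+# the uqtk2numpy helper imported above gives a one-call alternative to
+# getnpdblArray (assuming it supports 1D arrays as it does the 2D points)
+w_np2 = uqtk2numpy(w)
+assert allclose(w_np, w_np2)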
+
diff --git a/PyUQTk/pytests/quadpnts.pdf b/PyUQTk/pytests/quadpnts.pdf
new file mode 100644
index 00000000..92663dd7
Binary files /dev/null and b/PyUQTk/pytests/quadpnts.pdf differ
diff --git a/PyUQTk/pytests/quadweights.pdf b/PyUQTk/pytests/quadweights.pdf
new file mode 100644
index 00000000..be8fdb05
Binary files /dev/null and b/PyUQTk/pytests/quadweights.pdf differ
diff --git a/PyUQTk/quad/CMakeLists.txt b/PyUQTk/quad/CMakeLists.txt
new file mode 100644
index 00000000..931965f1
--- /dev/null
+++ b/PyUQTk/quad/CMakeLists.txt
@@ -0,0 +1,57 @@
+FIND_PACKAGE(SWIG REQUIRED)
+INCLUDE(${SWIG_USE_FILE})
+
+FIND_PACKAGE(PythonLibs)
+INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR})
+INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_PATH})
+INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_PATH}/../../Extras/lib/python/numpy/core/include) # numpy headers on macOS system Python; adjust for other layouts
+
+#include source files
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/) # array classes, array input output, and array tools
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/include/) # utilities like error handlers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/) # tools like multindex, etc.
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/quad/) # quad class
+
+# include dependencies
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/blas/)    # blas library headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/lapack/)  # lapack library headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/dsfmt/)   # dsfmt library headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/figtree/) # figtree library headers
+# INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../numpy/) # numpy headers
+
+SET(CMAKE_SWIG_FLAGS "")
+SET_SOURCE_FILES_PROPERTIES(quad.i PROPERTIES CPLUSPLUS ON)
+
+# compile swig with cpp extensions
+SWIG_ADD_MODULE(
+ quad python quad.i
+ # # array tools needed to compile misc tools source files
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/arrayio.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/arraytools.cpp
+
+ # source code for quad class
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/quad/quad.cpp
+
+ # # source code for tools
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/combin.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/gq.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/minmax.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/multiindex.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/pcmaps.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/probability.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/rosenblatt.cpp
+)
+
+# link python and 3rd party libraries, e.g., fortran and blas
+if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+ # using GCC
+ SWIG_LINK_LIBRARIES(quad uqtkarray uqtktools deplapack depdsfmt depblas depfigtree depann gfortran ${PYTHON_LIBRARIES})
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
+ # using Intel
+ SWIG_LINK_LIBRARIES(quad uqtkarray uqtktools deplapack depdsfmt depblas depfigtree depann ifcore ${PYTHON_LIBRARIES})
+endif()
+
+
+INSTALL(TARGETS _quad DESTINATION PyUQTk/)
+INSTALL(FILES ${CMAKE_BINARY_DIR}/${outdir}PyUQTk/quad/quad.py DESTINATION PyUQTk)
diff --git a/PyUQTk/quad/quad.i b/PyUQTk/quad/quad.i
new file mode 100644
index 00000000..65bb7dc7
--- /dev/null
+++ b/PyUQTk/quad/quad.i
@@ -0,0 +1,130 @@
+%module(directors="1") quad
+//=====================================================================================
+// The UQ Toolkit (UQTk) version 3.0.4
+// Copyright (2017) Sandia Corporation
+// http://www.sandia.gov/UQToolkit/
+//
+// Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+// with Sandia Corporation, the U.S. Government retains certain rights in this software.
+//
+// This file is part of The UQ Toolkit (UQTk)
+//
+// UQTk is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// UQTk is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+//
+// Questions? Contact Bert Debusschere
+// Sandia National Laboratories, Livermore, CA, USA
+//=====================================================================================
+
+%{
+#define SWIG_FILE_WITH_INIT
+#include
+#include
+#include
+#include
+#include
+// #include "../../cpp/lib/array/Array1D.h"
+// #include "../../cpp/lib/array/Array2D.h"
+// #include "../../cpp/lib/array/arrayio.h"
+// #include "../../cpp/lib/array/arraytools.h"
+// #include "../../cpp/lib/tools/combin.h"
+// #include "../../cpp/lib/tools/gq.h"
+// #include "../../cpp/lib/tools/minmax.h"
+// #include "../../cpp/lib/tools/multiindex.h"
+// #include "../../cpp/lib/tools/pcmaps.h"
+// #include "../../cpp/lib/tools/probability.h"
+// #include "../../cpp/lib/tools/rosenblatt.h"
+
+#include "../../cpp/lib/quad/quad.h"
+
+%}
+
+/*************************************************************
+// Standard SWIG Templates
+*************************************************************/
+
+// Include standard SWIG templates
+// Numpy array templates and wrapping
+%include "pyabc.i"
+%include "../numpy/numpy.i"
+%include "std_vector.i"
+%include "std_string.i"
+%include "cpointer.i"
+
+%init %{
+ import_array();
+%}
+
+%pointer_functions(double, doublep);
+
+/*************************************************************
+// Numpy SWIG Interface files
+*************************************************************/
+
+// // Basic typemap for an Arrays and its length.
+// // Must come before %include statement below
+
+// // For Array1D setnumpyarray4py function
+// %apply (long* IN_ARRAY1, int DIM1) {(long* inarray, int n)}
+// %apply (double* IN_ARRAY1, int DIM1) {(double* inarray, int n)}
+// // get numpy int and double array
+// %apply (long* INPLACE_ARRAY1, int DIM1) {(long* outarray, int n)}
+// %apply (double* INPLACE_ARRAY1, int DIM1) {(double* outarray, int n)}
+
+// // For Array2D numpysetarray4py function
+// %apply (double* IN_FARRAY2, int DIM1, int DIM2) {(double* inarray, int n1, int n2)}
+// // get numpy array (must be FARRAY)
+// %apply (double* INPLACE_FARRAY2, int DIM1, int DIM2) {(double* outarray, int n1, int n2)}
+// // For Array2D numpysetarray4py function
+// %apply (long* IN_FARRAY2, int DIM1, int DIM2) {(long* inarray, int n1, int n2)}
+// // get numpy array (must be FARRAY)
+// %apply (long* INPLACE_FARRAY2, int DIM1, int DIM2) {(long* outarray, int n1, int n2)}
+
+
+// // For mcmc test to get log probabilities
+// %apply (double* INPLACE_ARRAY1, int DIM1) {(double* l, int n)}
+
+/*************************************************************
+// Include header files
+*************************************************************/
+
+// // The above typemap is applied to header files below
+// %include "../../cpp/lib/array/Array1D.h"
+// %include "../../cpp/lib/array/Array2D.h"
+// %include "../../cpp/lib/array/arrayio.h"
+// %include "../../cpp/lib/array/arraytools.h"
+// %include "../../cpp/lib/tools/combin.h"
+// %include "../../cpp/lib/tools/gq.h"
+// %include "../../cpp/lib/tools/minmax.h"
+// %include "../../cpp/lib/tools/multiindex.h"
+// %include "../../cpp/lib/tools/pcmaps.h"
+// %include "../../cpp/lib/tools/probability.h"
+// %include "../../cpp/lib/tools/rosenblatt.h"
+
+%include "../../cpp/lib/quad/quad.h"
+
+// // Typemaps for standard vector
+// // Needed to prevent to memory leak due to lack of destructor
+// // must use namespace std
+// namespace std{
+// %template(dblVector) vector<double>;
+// %template(intVector) vector<int>;
+// %template(strVector) vector<string>;
+
+// }
+
+
+// %include "arrayext.i"
+
+
+
diff --git a/PyUQTk/sens/CMakeLists.txt b/PyUQTk/sens/CMakeLists.txt
new file mode 100644
index 00000000..5a186774
--- /dev/null
+++ b/PyUQTk/sens/CMakeLists.txt
@@ -0,0 +1,10 @@
+project (UQTk)
+
+SET(copy_FILES
+ __init__.py
+ gsalib.py
+ )
+
+INSTALL(FILES ${copy_FILES}
+ PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
+ DESTINATION PyUQTk/sens)
diff --git a/PyUQTk/sens/__init__.py b/PyUQTk/sens/__init__.py
new file mode 100755
index 00000000..f34768cb
--- /dev/null
+++ b/PyUQTk/sens/__init__.py
@@ -0,0 +1,27 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+import gsalib
diff --git a/PyUQTk/sens/gsalib.py b/PyUQTk/sens/gsalib.py
new file mode 100644
index 00000000..c66cb4ee
--- /dev/null
+++ b/PyUQTk/sens/gsalib.py
@@ -0,0 +1,284 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+try:
+ import numpy as npy
+except ImportError:
+ print "gsalib requires numpy package -> Exit"
+ quit()
+
+import os.path
+
+def genSpl_Si(nspl,ndim,abrng,**kwargs):
+ # get default values for optional arguments
+ splout = kwargs.get('splout', "gsaSplSi.dat") # samples file
+ matfile = kwargs.get('matfile',"mat12.npz") # intermediary matrices
+ verb = kwargs.get('verb', 0) # verbosity
+ nd = kwargs.get('nd', 18) # no. of significant digits in samples output
+ # Test nd values
+ if (nd<6) or (nd>18):
+ raise ValueError("Number of digits should be between 6 and 18")
+ #------------------------------------------------------------------------------------
+ # create nspl uniform samples in [a_i,b_i], i=1,ndim
+ #------------------------------------------------------------------------------------
+ if verb>0:
+ print "Create ensemble of input parameters"
+ mat1=npy.random.random_sample((nspl,ndim))
+ mat2=npy.random.random_sample((nspl,ndim))
+ for i in range(ndim):
+ mat1[:,i] = mat1[:,i]*(abrng[i,1]-abrng[i,0])+abrng[i,0]
+ mat2[:,i] = mat2[:,i]*(abrng[i,1]-abrng[i,0])+abrng[i,0]
+ # save temporary matrices
+ npy.savez(matfile, mat1=mat1, mat2=mat2)
+ # assemble the big matrix for main sensitivities directly to a file
+ if os.path.isfile(splout):
+ os.remove(splout)
+    f_handle = open(splout, 'a')
+ npy.savetxt(f_handle, mat1, fmt="%."+str(nd)+"e", delimiter=' ', newline='\n')
+ for idim in range(ndim):
+ if verb>0:
+ print " - working on parameter %d"%idim
+ matj=mat2.copy();
+ matj[:,idim]=mat1[:,idim]
+ npy.savetxt(f_handle, matj, fmt="%."+str(nd)+"e", delimiter=' ', newline='\n')
+ npy.savetxt(f_handle, mat2, fmt="%."+str(nd)+"e", delimiter=' ', newline='\n')
+ f_handle.close()
+
+def genSens_Si(modeval,ndim,**kwargs):
+ # get optional arguments
+ verb = kwargs.get('verb', 0) # verbosity
+ #------------------------------------------------------------------------------------
+ # load model evaluations and compute main sensitivities
+ #------------------------------------------------------------------------------------
+ ymod = npy.genfromtxt(modeval)
+ nspl = ymod.shape[0]/(ndim+2)
+ if verb > 0:
+ print "Compute sensitivities, no. of samples:",nspl
+ sobolSi = npy.zeros(ndim)
+ yMat1 = ymod[:nspl]
+ yMat2 = ymod[nspl*(ndim+1):]
+ mean12 = npy.mean(yMat1*yMat2)
+ vv1 = npy.var(yMat1,ddof=1)
+ for idim in range(1,ndim+1):
+ vari = npy.sum(yMat1*ymod[idim*nspl:(idim+1)*nspl])
+ sobolSi[idim-1] = (vari/(nspl-1.0)-mean12)/vv1
+ if verb > 1:
+ print " - parameter ",idim,": ",sobolSi[idim-1]
+ if verb > 0:
+ print " - total first order sensitivity: ",npy.sum(sobolSi)
+ return sobolSi
+
+def genSpl_SiT(nspl,ndim,abrng,**kwargs):
+ # get optional arguments
+ splout = kwargs.get('splout', "gsaSplSiT.dat") # samples file
+ matfile = kwargs.get('matfile', "mat12.npz") # intermediary matrices
+ verb = kwargs.get('verb', 0) # verbosity
+ nd = kwargs.get('nd', 18) # no. of significant digits in samples output
+ # Test nd values
+ if (nd<6) or (nd>18):
+ raise ValueError("Number of digits should be between 6 and 18")
+ #------------------------------------------------------------------------------------
+ # create nspl uniform samples in [a_i,b_i], i=1,ndim
+ #------------------------------------------------------------------------------------
+ if verb>0:
+ print "Create ensemble of input parameters"
+ mat1=npy.random.random_sample((nspl,ndim))
+ mat2=npy.random.random_sample((nspl,ndim))
+ for i in range(ndim):
+ mat1[:,i] = mat1[:,i]*(abrng[i,1]-abrng[i,0])+abrng[i,0]
+ mat2[:,i] = mat2[:,i]*(abrng[i,1]-abrng[i,0])+abrng[i,0]
+ # save temporary matrices
+ npy.savez(matfile, mat1=mat1, mat2=mat2)
+    # assemble the big matrix for total sensitivities
+ if os.path.isfile(splout):
+ os.remove(splout)
+    f_handle = open(splout, 'a')
+ npy.savetxt(f_handle, mat1, fmt="%."+str(nd)+"e", delimiter=' ', newline='\n')
+ for idim in range(ndim):
+ if verb>0:
+ print " - working on parameter %d"%idim
+ matj=mat1.copy();
+ matj[:,idim]=mat2[:,idim]
+ npy.savetxt(f_handle, matj, fmt="%."+str(nd)+"e", delimiter=' ', newline='\n')
+ npy.savetxt(f_handle, mat2, fmt="%."+str(nd)+"e", delimiter=' ', newline='\n')
+ f_handle.close()
+ return
+
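+# genSens_SiT below supports two total-effect estimators, selected via type=:
+#   'type1' (Homma-Saltelli): S_Ti = 1 - ( E[yA*yCi] - E[yA]^2 ) / var(yA)
+#   otherwise (Jansen):       S_Ti = (1/(2N)) * sum_k (yA_k - yCi_k)^2 / var(yA)
+# with yCi evaluated on the hybrid matrix whose column i is resampled from mat2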
+def genSens_SiT(modeval,ndim,**kwargs):
+ # get optional arguments
+ verb = kwargs.get('verb', 0) # verbosity
+    siTmethod = kwargs.get('type', 'type1') # estimator type ('type1' or Jansen otherwise)
+ #------------------------------------------------------------------------------------
+ # load model evaluations and compute main sensitivities
+ #------------------------------------------------------------------------------------
+ ymod = npy.genfromtxt(modeval)
+ nspl = ymod.shape[0]/(ndim+2)
+ if verb > 0:
+ print "Compute sensitivities, no. of samples:",nspl
+ sobolSiT = npy.zeros(ndim)
+ yMat1 = ymod[:nspl]
+ vv1 = npy.var(yMat1,ddof=1)
+ Ey = npy.average(yMat1)
+ for idim in range(1,ndim+1):
+ if (siTmethod == "type1"):
+ ssqrs=0.0
+ for i in range(nspl):
+ ssqrs = ssqrs+yMat1[i]*ymod[idim*nspl+i]
+ vari = ssqrs/(nspl-1.0)
+ sobolSiT[idim-1] = 1-(vari-Ey**2)/(vv1);
+ else:
+ vari = npy.sum(npy.power(yMat1-ymod[idim*nspl:(idim+1)*nspl],2))/nspl
+ sobolSiT[idim-1] = vari/(2.0*vv1);
+ if verb > 1:
+ print " - parameter ",idim,": ",sobolSiT[idim-1]
+ if verb > 0:
+ print " - total main sensitivity: ",npy.sum(sobolSiT)
+ return sobolSiT
+
+def genSpl_SiTcust(nspl,ndim,abrng,collst,**kwargs):
+ # get optional arguments
+ splout = kwargs.get('splout', "gsaSplSiT.dat") # samples file
+ verb = kwargs.get('verb', 0) # verbosity
+ nd = kwargs.get('nd', 18) # no. of significant digits in samples output
+ if (nd<6) or (nd>18):
+ raise ValueError("Number of digits should be between 6 and 18")
+ #------------------------------------------------------------------------------------
+ # create nspl uniform samples in [a_i,b_i], i=1,ndim
+ #------------------------------------------------------------------------------------
+ if verb>0:
+ print "Create ensemble of input parameters"
+ mat1=npy.random.random_sample((nspl,ndim))
+ mat2=npy.random.random_sample((nspl,ndim))
+ for i in range(ndim):
+ mat1[:,i] = mat1[:,i]*(abrng[i,1]-abrng[i,0])+abrng[i,0]
+ mat2[:,i] = mat2[:,i]*(abrng[i,1]-abrng[i,0])+abrng[i,0]
+    # assemble the big matrix for total sensitivities
+ if os.path.isfile(splout):
+ os.remove(splout)
+    f_handle = open(splout, 'a')
+ npy.savetxt(f_handle, mat1, fmt="%."+str(nd)+"e", delimiter=' ', newline='\n')
+ for j in collst:
+ print j
+ matj = mat1.copy()
+ matj[:,j] = mat2[:,j].copy()
+ npy.savetxt(f_handle, matj, fmt="%."+str(nd)+"e", delimiter=' ', newline='\n')
+ f_handle.close()
+ return
+
+def genSens_SiTcust(modeval,ndim,collst,**kwargs):
+ # get optional arguments
+ verb = kwargs.get('verb', 0) # verbosity
+    siTmethod = kwargs.get('type', 'type1') # estimator type ('type1' or Jansen otherwise)
+ #------------------------------------------------------------------------------------
+    # load model evaluations and compute total sensitivities
+ #------------------------------------------------------------------------------------
+ ymod = npy.genfromtxt(modeval)
+ nspl = ymod.shape[0]/(1+len(collst))
+ print nspl
+ if verb > 0:
+ print "Compute sensitivities, no. of samples:",nspl
+ sobolSiT = npy.zeros(len(collst))
+ yMat1 = ymod[:nspl]
+ Ey = npy.average(yMat1)
+ vv1 = npy.var(yMat1,ddof=1)
+ for j in range(len(collst)):
+ print j,collst[j]
+ if (siTmethod == "type1"):
+ ssqrs=0.0
+ for i in range(nspl):
+ ssqrs = ssqrs+yMat1[i]*ymod[(j+1)*nspl+i]
+ vari = ssqrs/(nspl-1.0)
+ sobolSiT[j] = 1.0-(vari-Ey**2)/(vv1);
+ else:
+ vari = npy.sum(npy.power(yMat1-ymod[(j+1)*nspl:(j+2)*nspl],2))/nspl
+ sobolSiT[j] = vari/(2.0*vv1);
+ if verb > 0:
+ print " - total sensitivities: ",sobolSiT
+ return sobolSiT
+
+def genSpl_Sij(ndim,**kwargs):
+ # get optional arguments
+ splout = kwargs.get('splout', "gsaSplSij.dat") # samples file
+ matfile = kwargs.get('matfile', "mat12.npz") # intermediary matrices
+ verb = kwargs.get('verb', 0) # verbosity
+ nd = kwargs.get('nd', 18) # no. of significant digits in samples output
+ if verb > 0:
+ print "Load intermediary matrices of input parameters"
+ if os.path.isfile(matfile):
+ m12=npy.load(matfile)
+ else:
+        raise IOError("Could not load samples")
+ mat1=m12["mat1"]
+ mat2=m12["mat2"]
+    # assemble the big matrix for joint sensitivities
+ if os.path.isfile(splout):
+ os.remove(splout)
+    f_handle = open(splout, 'a')
+ npy.savetxt(f_handle, mat1, fmt="%."+str(nd)+"e", delimiter=' ', newline='\n')
+ for idim in range(ndim-1):
+ for jdim in range(idim+1,ndim):
+ if verb>1:
+ print " - working on pair ",idim,jdim
+ matj=mat2.copy();
+ matj[:,idim]=mat1[:,idim]
+ matj[:,jdim]=mat1[:,jdim]
+ npy.savetxt(f_handle, matj, fmt="%."+str(nd)+"e", delimiter=' ', newline='\n')
+ npy.savetxt(f_handle, mat2, fmt="%."+str(nd)+"e", delimiter=' ', newline='\n')
+ f_handle.close()
+ return
+
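+# genSens_Sij below computes the closed second-order index from the pair-hybrid
+# matrix (columns i and j taken from mat1) with the same estimator as genSens_Si,
+# then subtracts S_i and S_j to leave the pure interaction index S_ij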
+def genSens_Sij(sobolSi,modeval,**kwargs):
+ # get optional arguments
+ verb = kwargs.get('verb', 0) # verbosity
+ #------------------------------------------------------------------------------------
+ # joint sensitivities
+ #------------------------------------------------------------------------------------
+ ndim = len(sobolSi)
+ ymod = npy.genfromtxt(modeval)
+ nspl = ymod.shape[0]/(ndim*(ndim-1)/2+2)
+ if verb > 0:
+ print "No. of samples, no. of dimensions:",nspl,ndim
+ sobolSij = npy.array(npy.zeros((ndim,ndim)))
+ yMat1 = ymod[:nspl]
+ yMat2 = ymod[nspl*(ndim*(ndim-1)/2+1):]
+ mean12 = npy.mean(yMat1*yMat2)
+ vv1 = npy.var(yMat1,ddof=1)
+ ijd = 0
+ for idim in range(1,ndim):
+ for jdim in range(idim+1,ndim+1):
+ ijd += 1;
+ vari = npy.sum(yMat1*ymod[ijd*nspl:(ijd+1)*nspl])
+ sobolSij[idim-1,jdim-1] = (vari/(nspl-1.0)-mean12)/vv1-sobolSi[idim-1]-sobolSi[jdim-1]
+ if verb > 1:
+ print " - pair ",idim,jdim,": ",sobolSij[idim-1,jdim-1]
+ if verb > 0:
+ print " - total Sij: ",npy.sum(sobolSij[:,:])
+ return sobolSij
+
+
+
+
diff --git a/PyUQTk/sens/gsatest.py b/PyUQTk/sens/gsatest.py
new file mode 100644
index 00000000..81b850ff
--- /dev/null
+++ b/PyUQTk/sens/gsatest.py
@@ -0,0 +1,194 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+try:
+ import numpy as npy
+except ImportError:
+ print "gsalib requires numpy package -> Exit"
+ quit()
+
+try:
+ import matplotlib
+ foundMPL = True
+except ImportError:
+ foundMPL = False
+
+import os
+from gsalib import genSpl_Si, genSens_Si, genSpl_Sij, genSens_Sij, genSpl_SiT, genSens_SiT
+if foundMPL:
+ import matplotlib.pyplot as plt
+
+def func1(x):
+ return npy.sum(x)
+
+def func2(x):
+ f = npy.sum(x)
+ for i in range(len(x)-1):
+ f = f+(i+1)*(i+1)*x[i]*x[i+1]
+ return f
+
+#--------------------------------------------------------------------------------
+# Setup
+#--------------------------------------------------------------------------------
+nruns= 10
+ndim = 4
+nspl = 10000
+abr = npy.zeros((ndim,2))
+abr[:,1]=1.0; abr[0,1]=3.0
+
+#--------------------------------------------------------------------------------
+# Theoretical values for Sobol indices
+#--------------------------------------------------------------------------------
+Simath=npy.array([0.14908, 0.14908, 0.41411, 0.222699])
+Sijmath=npy.array([0.00552147, 0.00981595, 0.04969339])
+SiTmath=Simath.copy()
+for i in range(ndim-1):
+ SiTmath[i] = SiTmath[i] +Sijmath[i]
+ SiTmath[i+1] = SiTmath[i+1]+Sijmath[i]
+
+
+#--------------------------------------------------------------------------------
+# First-order and joint Sobol indices
+#--------------------------------------------------------------------------------
+runSi=[]
+runSij=[]
+print "Computing first-order and joint Sobol indices"
+for irun in range(nruns):
+ print " run",irun+1," out of",nruns
+ # Generate samples for Si
+ genSpl_Si(nspl,ndim,abr,nd=12)
+ # Load samples, run model, save model evaluations
+ gsaens=npy.genfromtxt('gsaSplSi.dat')
+ modelRuns=npy.array([func2(gsaens[i]) for i in range(gsaens.shape[0])])
+ npy.savetxt('modelSi.dat', modelRuns, fmt="%.12e", delimiter=' ', newline='\n')
+ # Compute first order sensitivity indices
+ Si=genSens_Si('modelSi.dat',ndim,verb=0)
+ # Generate samples for Sij
+ genSpl_Sij(ndim,matfile='mat12.npz',nd=12)
+ # Load samples, run model, save model evaluations
+ gsaens=npy.genfromtxt('gsaSplSij.dat')
+ modelRuns=npy.array([func2(gsaens[i]) for i in range(gsaens.shape[0])])
+ npy.savetxt('modelSij.dat', modelRuns, fmt="%.12e", delimiter=' ', newline='\n')
+ # Compute joint sensitivity indices
+ Sij=genSens_Sij(Si,'modelSij.dat',verb=0)
+ runSi.append(Si)
+ runSij.append(Sij)
+
+
+runSi=npy.array(runSi)
+runSij=npy.array(runSij)
+
+if foundMPL:
+ width = 0.4
+ ind = npy.arange(ndim)+0.5-width/2.0
+ fs1 = 24
+ # Si
+ fig=plt.figure(figsize=(8,6))
+ ax=fig.add_axes([0.15,0.10,0.8,0.85])
+ Simean = npy.array([npy.average(runSi[:,i]) for i in range(ndim)])
+ Sistd = npy.array([npy.std(runSi[:,i]) for i in range(ndim)])
+ rects1 = ax.bar(ind, Simean, width, color='r', yerr=Sistd,error_kw=dict(linewidth=3, color='b',capsize=5) )
+ plt.plot(ind+width/2.0,Simath,'o',ms=8,mfc='k')
+ ax.set_xlim([0,ndim])
+ ax.set_ylim([0,0.5])
+ ax.set_ylabel(r'$S_i$',fontsize=fs1)
+ ax.set_xticks(ind+width/2)
+ ax.set_yticks([0,0.1,0.2,0.3,0.4,0.5])
+ ax.set_yticklabels( ('$0$', '$0.1$', '$0.2$', '$0.3$', '$0.4$', '$0.5$') ,fontsize=fs1-6)
+ ax.set_xticklabels( ('$x_1$', '$x_2$', '$x_3$', '$x_4$') ,fontsize=fs1)
+ plt.savefig('gsaspl_Si.pdf')
+ #plt.show()
+ # Sij
+ width = 0.4
+ ind = npy.arange(ndim-1)+0.5-width/2.0
+ fs1 = 24
+ fig=plt.figure(figsize=(8,6))
+ ax=fig.add_axes([0.15,0.10,0.8,0.85])
+ Simean = npy.array([npy.average(runSij[:,i,i+1]) for i in range(ndim-1)])
+ Sistd = npy.array([npy.std(runSij[:,i,i+1]) for i in range(ndim-1)])
+ rects1 = ax.bar(ind, Simean, width, color='r', yerr=Sistd,error_kw=dict(linewidth=3, color='b',capsize=5) )
+ plt.plot(ind+width/2.0,Sijmath,'o',ms=8,mfc='k')
+ ax.set_xlim([0,ndim-1])
+ ax.set_ylim([0,0.07])
+ ax.set_ylabel(r'$S_{ij}$',fontsize=fs1)
+ ax.set_xticks(ind+width/2)
+ ax.set_yticks([0,0.02,0.04,0.06])
+ ax.set_yticklabels( ('$0$', '$0.02$', '$0.04$', '$0.06$') ,fontsize=fs1-6)
+ ax.set_xticklabels( ('$(x_1,x_2)$', '$(x_2,x_3)$', '$(x_3,x_4)$') ,fontsize=fs1)
+ plt.savefig('gsaspl_Sij.pdf')
+ #plt.show()
+else:
+ # could not find matplotlib, saving Sobol indices to file
+ npy.savez("gsaSi_Sij.npz",Si=runSi,Sij=runSij)
+
+#--------------------------------------------------------------------------------
+# Total-order Sobol indices
+#--------------------------------------------------------------------------------
+runSiT_1=[]
+runSiT_2=[]
+print "Computing total-order Sobol indices"
+for irun in range(nruns):
+ print " run",irun+1," out of",nruns
+ # Generate samples for SiT
+ genSpl_SiT(nspl,ndim,abr,nd=12)
+ # Load samples, run model, save model evaluations
+ gsaens=npy.genfromtxt('gsaSplSiT.dat')
+ modelRuns=npy.array([func2(gsaens[i]) for i in range(gsaens.shape[0])])
+ npy.savetxt('modelSiT.dat', modelRuns, fmt="%.12e", delimiter=' ', newline='\n')
+ # Compute total sensitivity indices
+ runSiT_1.append(genSens_SiT('modelSiT.dat',ndim,type='type1',verb=0))
+ runSiT_2.append(genSens_SiT('modelSiT.dat',ndim,type='type2',verb=0))
+
+runSiT_1=npy.array(runSiT_1)
+runSiT_2=npy.array(runSiT_2)
+
+if foundMPL:
+ width = 0.4
+ ind = npy.arange(ndim)+0.5-width/2.0
+ fs1 = 24
+ # SiT
+ fig=plt.figure(figsize=(8,6))
+ ax=fig.add_axes([0.15,0.10,0.8,0.85])
+ SiT1mn = npy.array([npy.average(runSiT_1[:,i]) for i in range(ndim)])
+ SiT1std = npy.array([npy.std(runSiT_1[:,i]) for i in range(ndim)])
+ rects1 = ax.bar(ind, SiT1mn, width/2.0, color='r', yerr=SiT1std,error_kw=dict(linewidth=3, color='b',capsize=5),label="Est.1")
+ SiT2mn = npy.array([npy.average(runSiT_2[:,i]) for i in range(ndim)])
+ SiT2std = npy.array([npy.std(runSiT_2[:,i]) for i in range(ndim)])
+ rects2 = ax.bar(ind+width/2.0, SiT2mn, width/2.0, color='y', yerr=SiT2std,error_kw=dict(linewidth=3, color='b',capsize=5),label="Est.2")
+ plt.plot(ind+width/2.0,SiTmath,'o',ms=8,mfc='k',label="Exact")
+ ax.set_xlim([0,ndim])
+ ax.set_ylim([0,0.55])
+ ax.set_ylabel(r'$S_i^T$',fontsize=fs1)
+ ax.set_xticks(ind+width/2)
+ ax.set_yticks([0,0.1,0.2,0.3,0.4,0.5])
+ ax.set_yticklabels( ('$0$', '$0.1$', '$0.2$', '$0.3$', '$0.4$', '$0.5$') ,fontsize=fs1-6)
+ ax.set_xticklabels( ('$x_1$', '$x_2$', '$x_3$', '$x_4$'),fontsize=fs1)
+ plt.legend(loc=2,prop={'size':fs1})
+ plt.savefig('gsaspl_SiT.pdf')
+ #plt.show()
+else:
+ # could not find matplotlib, saving Sobol indices to file
+ npy.savez("gsaSiT.npz",SiT1=runSiT_1,SiT2=runSiT_2)
diff --git a/PyUQTk/tools/CMakeLists.txt b/PyUQTk/tools/CMakeLists.txt
new file mode 100644
index 00000000..75bb4056
--- /dev/null
+++ b/PyUQTk/tools/CMakeLists.txt
@@ -0,0 +1,57 @@
+enable_language(Fortran)
+
+FIND_PACKAGE(SWIG REQUIRED)
+INCLUDE(${SWIG_USE_FILE})
+
+FIND_PACKAGE(PythonLibs)
+INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR})
+INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_PATH})
+INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_PATH}/../../Extras/lib/python/numpy/core/include)
+
+#include source files
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/) # array classes, array input output, and array tools
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/include/) # utilities like error handlers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/) # tools like multindex, etc.
+
+# include dependencies
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/blas/) # blas library headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/lapack/) # lapack library headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/dsfmt/) # dsfmt library headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/figtree/) # figtree library headers
+# INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../numpy/) # numpy headers
+
+SET(CMAKE_SWIG_FLAGS "")
+SET_SOURCE_FILES_PROPERTIES(tools.i PROPERTIES CPLUSPLUS ON)
+
+# compile swig with cpp extensions
+SWIG_ADD_MODULE(
+ tools python tools.i
+ # array tools needed to compile misc tools source files
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/arrayio.cpp
+ # ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/arraytools.cpp
+
+ # source code for tools
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/combin.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/gq.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/minmax.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/multiindex.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/pcmaps.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/probability.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/rosenblatt.cpp
+
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/tools/toolsf.f
+)
+
+# link python and 3rd party libraries, e.g., gfortran and blas
+if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+ # using GCC
+ SWIG_LINK_LIBRARIES(tools uqtkarray deplapack depdsfmt depblas depfigtree depann gfortran ${PYTHON_LIBRARIES})
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
+ # using Intel
+ SWIG_LINK_LIBRARIES(tools uqtkarray deplapack depdsfmt depblas depfigtree depann ifcore ${PYTHON_LIBRARIES})
+endif()
+
+
+INSTALL(TARGETS _tools DESTINATION PyUQTk/)
+INSTALL(FILES ${CMAKE_BINARY_DIR}/${outdir}PyUQTk/tools/tools.py DESTINATION PyUQTk)
diff --git a/PyUQTk/tools/tools.i b/PyUQTk/tools/tools.i
new file mode 100644
index 00000000..1dc3dc0d
--- /dev/null
+++ b/PyUQTk/tools/tools.i
@@ -0,0 +1,125 @@
+%module(directors="1") tools
+//=====================================================================================
+// The UQ Toolkit (UQTk) version 3.0.4
+// Copyright (2017) Sandia Corporation
+// http://www.sandia.gov/UQToolkit/
+//
+// Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+// with Sandia Corporation, the U.S. Government retains certain rights in this software.
+//
+// This file is part of The UQ Toolkit (UQTk)
+//
+// UQTk is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// UQTk is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+//
+// Questions? Contact Bert Debusschere
+// Sandia National Laboratories, Livermore, CA, USA
+//=====================================================================================
+
+%{
+#define SWIG_FILE_WITH_INIT
+#include <iostream>
+#include <iomanip>
+#include <sstream>
+#include <vector>
+#include <math.h>
+// #include "../../cpp/lib/array/Array1D.h"
+// #include "../../cpp/lib/array/Array2D.h"
+// #include "../../cpp/lib/array/arrayio.h"
+// #include "../../cpp/lib/array/arraytools.h"
+#include "../../cpp/lib/tools/combin.h"
+#include "../../cpp/lib/tools/gq.h"
+#include "../../cpp/lib/tools/minmax.h"
+#include "../../cpp/lib/tools/multiindex.h"
+#include "../../cpp/lib/tools/pcmaps.h"
+#include "../../cpp/lib/tools/probability.h"
+#include "../../cpp/lib/tools/rosenblatt.h"
+%}
+
+/*************************************************************
+// Standard SWIG Templates
+*************************************************************/
+
+// Include standard SWIG templates
+// Numpy array templates and wrapping
+%include "pyabc.i"
+%include "../numpy/numpy.i"
+%include "std_vector.i"
+%include "std_string.i"
+%include "cpointer.i"
+
+%init %{
+ import_array();
+%}
+
+%pointer_functions(double, doublep);
+
+/*************************************************************
+// Numpy SWIG Interface files
+*************************************************************/
+
+// // Basic typemap for an Arrays and its length.
+// // Must come before %include statement below
+
+// // For Array1D setnumpyarray4py function
+// %apply (long* IN_ARRAY1, int DIM1) {(long* inarray, int n)}
+// %apply (double* IN_ARRAY1, int DIM1) {(double* inarray, int n)}
+// // get numpy int and double array
+// %apply (long* INPLACE_ARRAY1, int DIM1) {(long* outarray, int n)}
+// %apply (double* INPLACE_ARRAY1, int DIM1) {(double* outarray, int n)}
+
+// // For Array2D numpysetarray4py function
+// %apply (double* IN_FARRAY2, int DIM1, int DIM2) {(double* inarray, int n1, int n2)}
+// // get numpy array (must be FARRAY)
+// %apply (double* INPLACE_FARRAY2, int DIM1, int DIM2) {(double* outarray, int n1, int n2)}
+// // For Array2D numpysetarray4py function
+// %apply (long* IN_FARRAY2, int DIM1, int DIM2) {(long* inarray, int n1, int n2)}
+// // get numpy array (must be FARRAY)
+// %apply (long* INPLACE_FARRAY2, int DIM1, int DIM2) {(long* outarray, int n1, int n2)}
+
+
+// // For mcmc test to get log probabilities
+// %apply (double* INPLACE_ARRAY1, int DIM1) {(double* l, int n)}
+
+/*************************************************************
+// Include header files
+*************************************************************/
+
+// // The above typemap is applied to header files below
+// %include "../../cpp/lib/array/Array1D.h"
+// %include "../../cpp/lib/array/Array2D.h"
+// %include "../../cpp/lib/array/arrayio.h"
+// %include "../../cpp/lib/array/arraytools.h"
+%include "../../cpp/lib/tools/combin.h"
+%include "../../cpp/lib/tools/gq.h"
+%include "../../cpp/lib/tools/minmax.h"
+%include "../../cpp/lib/tools/multiindex.h"
+%include "../../cpp/lib/tools/pcmaps.h"
+%include "../../cpp/lib/tools/probability.h"
+%include "../../cpp/lib/tools/rosenblatt.h"
+
+// // Typemaps for standard vector
+// // Needed to prevent to memory leak due to lack of destructor
+// // must use namespace std
+// namespace std{
+// %template(dblVector) vector<double>;
+// %template(intVector) vector<int>;
+// %template(strVector) vector<string>;
+
+// }
+
+
+// %include "swigi/arrayext.i"
+
+
+
diff --git a/PyUQTk/uqtkarray/CMakeLists.txt b/PyUQTk/uqtkarray/CMakeLists.txt
new file mode 100644
index 00000000..f9c6495a
--- /dev/null
+++ b/PyUQTk/uqtkarray/CMakeLists.txt
@@ -0,0 +1,39 @@
+FIND_PACKAGE(SWIG REQUIRED)
+INCLUDE(${SWIG_USE_FILE})
+
+FIND_PACKAGE(PythonLibs)
+INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR})
+INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR})
+INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR}/../../Extras/lib/python/numpy/core/include)
+
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/) # array classes, array input output, and array tools
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/include/) # utilities like error handlers
+
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/blas/) # blas library headers
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../dep/lapack/) # blas library headers
+# INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../numpy/) # numpy headers
+
+SET(CMAKE_SWIG_FLAGS "")
+SET_SOURCE_FILES_PROPERTIES(uqtkarray.i PROPERTIES CPLUSPLUS ON)
+
+# compile swig with cpp extensions
+SWIG_ADD_MODULE(uqtkarray python uqtkarray.i
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/arrayio.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/lib/array/arraytools.cpp
+)
+
+# link python and 3rd party libraries, e.g., gfortran and blas
+# Link fortran libraries
+if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+ # using GCC
+ SWIG_LINK_LIBRARIES(uqtkarray deplapack depblas gfortran ${PYTHON_LIBRARIES})
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
+ # using Intel
+ SWIG_LINK_LIBRARIES(uqtkarray deplapack depblas ifcore ${PYTHON_LIBRARIES})
+endif()
+
+
+INSTALL(TARGETS _uqtkarray DESTINATION PyUQTk/)
+INSTALL(FILES ${CMAKE_BINARY_DIR}/${outdir}PyUQTk/uqtkarray/uqtkarray.py
+ DESTINATION PyUQTk)
diff --git a/PyUQTk/uqtkarray/arrayext.i b/PyUQTk/uqtkarray/arrayext.i
new file mode 100644
index 00000000..44143e8b
--- /dev/null
+++ b/PyUQTk/uqtkarray/arrayext.i
@@ -0,0 +1,409 @@
+//=====================================================================================
+// The UQ Toolkit (UQTk) version 3.0.4
+// Copyright (2017) Sandia Corporation
+// http://www.sandia.gov/UQToolkit/
+//
+// Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+// with Sandia Corporation, the U.S. Government retains certain rights in this software.
+//
+// This file is part of The UQ Toolkit (UQTk)
+//
+// UQTk is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// UQTk is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+//
+// Questions? Contact Bert Debusschere
+// Sandia National Laboratories, Livermore, CA, USA
+//=====================================================================================
+/*************************************************************
+// Templates for Array1D and 2D types
+*************************************************************/
+%template(intArray1D) Array1D<int>;
+%template(dblArray1D) Array1D<double>;
+%template(strArray1D) Array1D<string>;
+%template(intArray2D) Array2D<int>;
+%template(dblArray2D) Array2D<double>;
+
+/*************************************************************
+// Extend Array1D class for easy python use
+*************************************************************/
+
+// Array 1D
+%define Array1DExtend(name, T)
+%extend name{
+ T __getitem__(int index) {
+ return (*self)[index];
+ }
+ Array1D<T> __getitem__(PyObject *slice) {
+ Py_ssize_t start, stop, step;
+ Py_ssize_t length = (*self).Length();
+ Py_ssize_t slicelength;
+ PySlice_GetIndicesEx((PySliceObject*)slice,length,&start,&stop,&step,&slicelength);
+
+ Array1D<T> vnew(slicelength);
+ int place = 0;
+ for (int i = start; i < stop; i=i+step){
+ vnew(place) = (*self)[i];
+ place += 1;
+ }
+ return vnew;
+ }
+ int __len__(){
+ return (*self).Length();
+ }
+ void __setitem__(int i, T j){
+ (*self)[i] = j;
+ }
+ void __setitem__(vector<int> index, vector<T> vin){
+ // multiple index items to in vector at one time
+ // In python, both index and in must be lists
+ for (int i = 0; i < index.size(); i++){
+ (*self)[index[i]] = vin[i];
+ }
+ }
+ Array1D<T> __mul__(T a){
+ int l = (*self).Length();
+ Array1D<T> newArray(l,0);
+ for (int i = 0; i < l; i++){
+ newArray[i] = a*(*self)[i];
+ }
+ return newArray;
+ }
+ Array1D<T> __rmul__(T a){
+ int l = (*self).Length();
+ Array1D<T> newArray(l,0);
+ for (int i = 0; i < l; i++){
+ newArray[i] = a*(*self)[i];
+ }
+ return newArray;
+ }
+ Array1D<T> __add__(Array1D<T> y){
+ int l = (*self).Length();
+ Array1D<T> newArray(l,0);
+ for (int i = 0; i < l; i++){
+ newArray[i] = (*self)[i] + y[i];
+ }
+ return newArray;
+ }
+ Array1D<T> __add__(T y){
+ int l = (*self).Length();
+ Array1D<T> newArray(l,0);
+ for (int i = 0; i < l; i++){
+ newArray[i] = (*self)[i] + y;
+ }
+ return newArray;
+ }
+ Array1D<T> __sub__(Array1D<T> y){
+ int l = (*self).Length();
+ Array1D<T> newArray(l,0);
+ for (int i = 0; i < l; i++){
+ newArray[i] = (*self)[i] - y[i];
+ }
+ return newArray;
+ }
+ Array1D<T> __div__(Array1D<T> y){
+ int l = (*self).Length();
+ Array1D<T> newArray(l,0);
+ for (int i = 0; i < l; i++){
+ newArray[i] = (*self)[i]/y[i];
+ }
+ return newArray;
+ }
+ Array1D<T> __pow__(double p){
+ int l = (*self).Length();
+ Array1D<T> newArray(l,0);
+ for (int i = 0; i < l; i++){
+ newArray[i] = pow((*self)[i],p);
+ }
+ return newArray;
+ }
+ Array1D<T> copy(){
+ int l = (*self).Length();
+ Array1D<T> newArray(l,0);
+ for (int i = 0; i < l; i++){
+ newArray[i] = (*self)[i];
+ }
+ return newArray;
+ }
+ string __repr__(){
+ stringstream ss;
+ // print contents of array as strings
+ int l = (*self).Length();
+ ss << "Array1D<" << (*self).type() << ">(";
+ ss << (*self).Length() << ")" << endl;
+ int imax = 10;
+
+ ss << "[";
+ for (int i = 0; i < l-1; i++){
+ // ss << setw(8) << (*self)[i] << ", ";
+ ss << (*self)[i] << ", ";
+ if (i == imax){
+ ss << "..., ";
+ break;
+ }
+ }
+ int m = min(4,l-imax);
+ if (l >= imax){
+ for (int i = l-m+1; i < l-1; i++){
+ ss << (*self)[i] << ", ";
+ }
+ }
+ if (l > 0){ss << (*self)[l-1];}
+ ss << "]";
+
+ return ss.str();
+ }
+ vector<int> shape(){
+ vector<int> s(1,0);
+ s[0] = (*self).XSize();
+ return s;
+ }
+
+
+}
+%enddef
+
+// Array 1D
+%define Array1DStrExtend(name, T)
+%extend name{
+ T __getitem__(int index) {
+ return (*self)[index];
+ }
+ int __len__(){
+ return (*self).Length();
+ }
+ void __setitem__(int i, T j){
+ (*self)[i] = j;
+ }
+ void __setitem__(vector<int> index, vector<T> vin){
+ // multiple index items to in vector at one time
+ // In python, both index and in must be lists
+ for (int i = 0; i < index.size(); i++){
+ (*self)[index[i]] = vin[i];
+ }
+ }
+ Array1D<T> copy(){
+ int l = (*self).Length();
+ Array1D<T> newArray(l);
+ for (int i = 0; i < l; i++){
+ newArray[i] = (*self)[i];
+ }
+ return newArray;
+ }
+ string __repr__(){
+ stringstream ss;
+ // print contents of array as strings
+ int l = (*self).Length();
+ ss << "Array1D<" << (*self).type() << ">(";
+ ss << (*self).Length() << ")" << endl;
+ int imax = 10;
+
+ ss << "[";
+ for (int i = 0; i < l-1; i++){
+ ss << (*self)[i] << ", ";
+ if (i == imax){
+ ss << "..., ";
+ break;
+ }
+ }
+ int m = min(4,l-imax);
+ if (l >= imax){
+ for (int i = l-m+1; i < l-1; i++){
+ ss << (*self)[i] << ", ";
+ }
+ }
+ if (l > 0){ss << (*self)[l-1];}
+ ss << "]";
+
+ return ss.str();
+ }
+ vector<int> shape(){
+ vector<int> s(1,0);
+ s[0] = (*self).XSize();
+ return s;
+ }
+}
+%enddef
+
+Array1DExtend(Array1D<int>, int);
+Array1DExtend(Array1D<double>, double);
+Array1DStrExtend(Array1D<string>, string);
+
+/*************************************************************
+// Extend Array2D classes for easy python use
+*************************************************************/
+
+// Array2D
+%define Array2DExtend(name, T)
+%extend name{
+ T __getitem__(vector<int> v) {
+ return (*self)[v[0]][v[1]];
+ }
+ Array2D<T> __getitem__(PyObject *slices){
+ PyObject* slice1;
+ PyObject* slice2;
+ slice1 = PyTuple_GetItem(slices, 0);
+ slice2 = PyTuple_GetItem(slices, 1);
+ PySliceObject *s1 = (PySliceObject*)slice1; // recast pointer to proper type
+ PySliceObject *s2 = (PySliceObject*)slice2; // recast pointer to proper type
+
+ Py_ssize_t start1 = 0, stop1 = 0, step1 = 0, slicelength1 = 0;
+ Py_ssize_t start2 = 0, stop2 = 0, step2 = 0, slicelength2 = 0;
+ Py_ssize_t len1 = (*self).XSize();
+ Py_ssize_t len2 = (*self).YSize();
+ PySlice_GetIndicesEx(s1,len1,&start1,&stop1,&step1,&slicelength1);
+ PySlice_GetIndicesEx(s2,len2,&start2,&stop2,&step2,&slicelength2);
+
+ Array2D<T> vnew(slicelength1,slicelength2);
+ int p1 = 0, p2 = 0;
+ for (int i=start1; i<stop1; i+=step1){
+ p2 = 0;
+ for (int j=start2; j<stop2; j+=step2){
+ vnew(p1,p2) = (*self)(i,j);
+ p2 += 1;
+ }
+ p1 += 1;
+ }
+ return vnew;
+ }
+ Array1D<T> __getitem__(int row) {
+ (*self).getRow(row);
+ return (*self).arraycopy;
+ }
+ int __len__(){
+ return (*self).XSize();
+ }
+ void __setitem__(vector<int> v, T j){
+ (*self)(v[0],v[1]) = j;
+ }
+ vector<int> shape(){
+ vector<int> s(2,0);
+ s[0] = (*self).XSize();
+ s[1] = (*self).YSize();
+ return s;
+ }
+ string __repr__(){
+ stringstream ss;
+ stringstream sstemp;
+ // print contents of array as strings
+ int lx = (*self).XSize();
+ int ly = (*self).YSize();
+ ss << "Array2D<" << (*self).type() << ">(";
+ ss << lx << ", ";
+ ss << ly << ")" << endl;
+
+ //find # digits for number of rows, lx
+ int digits = 1, pten=10;
+ while ( pten <= lx ) { digits++; pten*=10; }
+
+ // find max width (number of digits) for printing
+ double test = 0.0;
+ int w0 = 1, w1 = 1;
+ for (int k = 0; k < lx*ly; k++){
+ test = (*self).data_[k];
+ sstemp.str("");
+ sstemp << test;
+ w1 = sstemp.str().length();
+ w0 = max(w0,w1);
+ }
+ int w = w0;
+
+ // size of columns for printing, dependent on width
+ int imax, jmax;
+ if (w >= 8){
+ imax = 10;
+ jmax = 8-1;
+ }
+ else if (w < 8){
+ imax = 10;
+ jmax = 12-1;
+ }
+
+ // print array
+ for (int i = 0; i < lx; i++){
+ // print row number
+ ss << "[" << setw(digits) << i;
+ ss << "] ";
+ ss << setw(2) << "[";
+ for (int j=0; j<ly-1; j++){
+ ss << setw(w) << (*self)[i][j] << ", ";
+ if (j == jmax){
+ ss << "..., ";
+ break;
+ }
+ }
+ int m = min(4,ly-jmax);
+ if (ly >= jmax){
+ for (int j=ly-m+1; j<ly-1; j++){
+ ss << setw(w) << (*self)[i][j] << ", ";
+ }
+ }
+ //print last entry if # of columns > 1
+ if (ly > 1){
+ ss << setw(w) << (*self)[i][ly-1] << "]";
+ }
+ //print only if # of columns is 1
+ else if (ly == 1){
+ ss << setw(w) << (*self)[i][ly-1] << "]";
+ }
+ if (i == imax){
+ ss << "\n";
+ ss << setw(w) << "...," << endl;
+ break;
+ }
+ ss << "\n";
+ }
+
+ // print last 4 rows
+ int mx = min(4,lx-imax);
+ if (lx >= imax){
+ for (int i = lx-mx+1; i < lx; i++){
+ ss << "[" << setw(digits) << i;
+ ss << "] ";
+ ss << setw(2) << "[";
+ for (int j=0; j<ly-1; j++){
+ ss << setw(w) << (*self)[i][j] << ", ";
+ if (j == jmax){
+ ss << "..., ";
+ break;
+ }
+ }
+ int m = min(4,ly-jmax);
+ if (ly >= jmax){
+ for (int j=ly-m+1; j<ly-1; j++){
+ ss << setw(w) << (*self)[i][j] << ", ";
+ }
+ }
+ if (ly > 1){
+ ss << setw(w) << (*self)[i][ly-1] << "]";
+ }
+ else if (ly == 1){
+ ss << setw(w) << (*self)[i][ly-1] << "]";
+ }
+ ss << "\n";
+ }
+ }
+
+ return ss.str();
+ }
+}
+%enddef
+
+Array2DExtend(Array2D<int>, int);
+Array2DExtend(Array2D<double>, double);
+
diff --git a/PyUQTk/uqtkarray/uqtkarray.i b/PyUQTk/uqtkarray/uqtkarray.i
new file mode 100644
index 00000000..bb735105
--- /dev/null
+++ b/PyUQTk/uqtkarray/uqtkarray.i
@@ -0,0 +1,163 @@
+%module(directors="1") uqtkarray
+//=====================================================================================
+// The UQ Toolkit (UQTk) version 3.0.4
+// Copyright (2017) Sandia Corporation
+// http://www.sandia.gov/UQToolkit/
+//
+// Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+// with Sandia Corporation, the U.S. Government retains certain rights in this software.
+//
+// This file is part of The UQ Toolkit (UQTk)
+//
+// UQTk is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// UQTk is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+//
+// Questions? Contact Bert Debusschere
+// Sandia National Laboratories, Livermore, CA, USA
+//=====================================================================================
+
+%feature("autodoc", "3");
+%rename(Assign) *::operator=;
+%ignore *::operator[];
+
+%{
+#define SWIG_FILE_WITH_INIT
+#include <iostream>
+#include <iomanip>
+#include <sstream>
+#include <vector>
+#include <math.h>
+#include "../../cpp/lib/array/Array1D.h"
+#include "../../cpp/lib/array/Array2D.h"
+#include "../../cpp/lib/array/arrayio.h"
+#include "../../cpp/lib/array/arraytools.h"
+%}
+
+/*************************************************************
+// Standard SWIG Templates
+*************************************************************/
+
+// Include standard SWIG templates
+// Numpy array templates and wrapping
+%include "pyabc.i"
+%include "../numpy/numpy.i"
+%include "std_vector.i"
+%include "std_string.i"
+%include "cpointer.i"
+
+%init %{
+ import_array();
+%}
+
+%pointer_functions(double, doublep);
+
+
+/*************************************************************
+// Numpy SWIG Interface files
+*************************************************************/
+
+// Basic typemap for an Arrays and its length.
+// Must come before %include statement below
+
+// For Array1D setnumpyarray4py function
+%apply (long* IN_ARRAY1, int DIM1) {(long* inarray, int n)}
+%apply (double* IN_ARRAY1, int DIM1) {(double* inarray, int n)}
+// get numpy int and double array
+%apply (long* INPLACE_ARRAY1, int DIM1) {(long* outarray, int n)}
+%apply (double* INPLACE_ARRAY1, int DIM1) {(double* outarray, int n)}
+
+// For Array2D numpysetarray4py function
+%apply (double* IN_FARRAY2, int DIM1, int DIM2) {(double* inarray, int n1, int n2)}
+// get numpy array (must be FARRAY)
+%apply (double* INPLACE_FARRAY2, int DIM1, int DIM2) {(double* outarray, int n1, int n2)}
+// For Array2D numpysetarray4py function
+%apply (long* IN_FARRAY2, int DIM1, int DIM2) {(long* inarray, int n1, int n2)}
+// get numpy array (must be FARRAY)
+%apply (long* INPLACE_FARRAY2, int DIM1, int DIM2) {(long* outarray, int n1, int n2)}
+
+
+// For mcmc test to get log probabilities
+%apply (double* INPLACE_ARRAY1, int DIM1) {(double* l, int n)}
+
+/*************************************************************
+// Include header files
+*************************************************************/
+
+// // The above typemap is applied to header files below
+%include "../../cpp/lib/array/Array1D.h"
+%include "../../cpp/lib/array/Array2D.h"
+%include "../../cpp/lib/array/arrayio.h"
+%include "../../cpp/lib/array/arraytools.h"
+
+// Typemaps for standard vector
+// Needed to prevent to memory leak due to lack of destructor
+// must use namespace std
+namespace std{
+ %template(dblVector) vector<double>;
+ %template(intVector) vector<int>;
+ %template(strVector) vector<string>;
+
+}
+
+%template(subMatrix_row_int) subMatrix_row<int>;
+%template(subMatrix_row_dbl) subMatrix_row<double>;
+
+%include "arrayext.i"
+
+%pythoncode %{
+import numpy as np
+def uqtk2numpy(x):
+ if x.type() == 'int':
+ s = x.shape()
+ imin = np.argmin(s)
+ if len(s) == 1:
+ n = s[0]
+ y = np.zeros(n,dtype='int64')
+ x.getnpintArray(y)
+ if len(s) == 2 and np.amin(s) > 1:
+ n = s[0]
+ m = s[1]
+ y = np.zeros((n,m),dtype='int64')
+ x.getnpintArray(y)
+ if len(s) == 2 and np.amin(s) == 1:
+ y = np.array(x.flatten())
+ return y.copy()
+ else:
+ s = x.shape()
+ imin = np.argmin(s)
+ if len(s) == 1:
+ n = s[0]
+ y = np.zeros(n)
+ x.getnpdblArray(y)
+ if len(s) == 2 and np.amin(s) > 1:
+ n = s[0]
+ m = s[1]
+ y = np.zeros((n,m))
+ x.getnpdblArray(y)
+ if len(s) == 2 and np.amin(s) == 1:
+ y = np.array(x.flatten())
+ return y.copy()
+
+def numpy2uqtk(y):
+ s = np.shape(y)
+ if len(s) == 1:
+ n = s[0]
+ x = dblArray1D(n)
+ if len(s) == 2:
+ n = s[0]
+ m = s[1]
+ x = dblArray2D(n,m)
+ x.setnpdblArray(np.asfortranarray(y.copy()))
+ return x
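+
+# Illustrative round trip (sketch, not part of the original interface file):
+#
+#   a = numpy2uqtk(np.random.rand(5,3))   # dblArray2D
+#   b = uqtk2numpy(a)                     # back to a 5x3 numpy array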
+%}
+
diff --git a/PyUQTk/utils/CMakeLists.txt b/PyUQTk/utils/CMakeLists.txt
new file mode 100644
index 00000000..7e771573
--- /dev/null
+++ b/PyUQTk/utils/CMakeLists.txt
@@ -0,0 +1,15 @@
+project (UQTk)
+
+SET(copy_FILES
+ __init__.py
+ colors.py
+ crps.py
+ pdf_kde.py
+ multiindex.py
+ regr.py
+ func.py
+ )
+
+INSTALL(FILES ${copy_FILES}
+ PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
+ DESTINATION PyUQTk/utils)
diff --git a/PyUQTk/utils/__init__.py b/PyUQTk/utils/__init__.py
new file mode 100755
index 00000000..3da35896
--- /dev/null
+++ b/PyUQTk/utils/__init__.py
@@ -0,0 +1,32 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+import colors
+import crps
+import pdf_kde
+import multiindex
+import regr
+import func
diff --git a/PyUQTk/utils/colors.py b/PyUQTk/utils/colors.py
new file mode 100644
index 00000000..0434bd50
--- /dev/null
+++ b/PyUQTk/utils/colors.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+
+"""
+Utilities for defining color lists.
+Note that Python provides standard color palettes as well; this is an alternative
+that often produces enough variability in colors for eye-pleasing results.
+"""
+
+try:
+ import numpy as np
+except ImportError:
+ print "Numpy was not found. "
+
+
+
+def set_colors(npar):
+ """ Sets a list of different colors of requested length, as rgb triples"""
+ colors = []
+ pp=1+npar/6
+ for i in range(npar):
+ c=1-(float) (i/6)/pp
+ b=np.empty((3))
+ for jj in range(3):
+ b[jj]=c*int(i%3==jj)
+ a=int(i%6)/3
+ colors.append(((1-a)*b[2]+a*(c-b[2]),(1-a)*b[1]+a*(c-b[1]),(1-a)*b[0]+a*(c-b[0])))
+
+ return colors
+
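+# Illustrative usage (sketch, not part of the original module):
+#
+#   clrs = set_colors(8)                  # list of 8 rgb triples
+#   # e.g. with matplotlib: plt.plot(x, y, color=clrs[0])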
diff --git a/PyUQTk/utils/crps.py b/PyUQTk/utils/crps.py
new file mode 100644
index 00000000..7848449a
--- /dev/null
+++ b/PyUQTk/utils/crps.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+
+try:
+ import numpy as npy
+except ImportError:
+ print "Numpy was not found. "
+
+def CRPSinteg(s1,s2):
+ """ Computes integral of squared difference between two CDFs """
+ Ns1 = s1.shape[0];
+ Ns2 = s2.shape[0];
+ ds1 = 1.0/Ns1;
+ ds2 = 1.0/Ns2;
+ # Combine samples and sort
+ s12=npy.sort(npy.concatenate((s1,s2)));
+ CRPS = 0.0;
+ j1 = 0;
+ j2 = 0;
+ for i in range(Ns1+Ns2-1):
+ if s12[i+1] <= s1[0]:
+ fs1 = 0.0;
+ elif s12[i] >= s1[Ns1-1]:
+ fs1 = 1.0;
+ else:
+ j1 = j1+npy.argmax(s1[j1:]>s12[i])-1
+ fs1 = (j1+1)*ds1;
+ if s12[i+1] <= s2[0]:
+ fs2 = 0.0;
+ elif s12[i] >= s2[Ns2-1]:
+ fs2 = 1.0;
+ else:
+ j2 = j2+npy.argmax(s2[j2:]>s12[i])-1
+ fs2 = (j2+1)*ds2;
+ CRPS = CRPS + (s12[i+1]-s12[i])*(fs1-fs2)**2;
+ return CRPS
+
+def CRPS(s1,s2):
+ """ Computes CRPS score """
+ nsamples = s1.shape[0]
+ if nsamples != s2.shape[0]:
+ print "The number of realizations in s1 and s2 is not the same:",nsamples,s2.shape[0]
+ return (-1.0);
+ crps = npy.zeros(nsamples)
+ for i in range(nsamples):
+ crps[i] = CRPSinteg(s1[i],s2[i])
+ return crps.mean()
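+
+# Illustrative usage (sketch, not part of the original module); the data below
+# are made up. CRPSinteg treats its arguments as sorted samples, so each
+# realization is sorted first:
+#
+#   s1 = npy.sort(npy.random.randn(10,100))   # 10 realizations, 100 samples each
+#   s2 = npy.sort(npy.random.randn(10,100))
+#   print CRPS(s1,s2)                         # near 0 for matching ensembles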
diff --git a/PyUQTk/utils/func.py b/PyUQTk/utils/func.py
new file mode 100755
index 00000000..172f5698
--- /dev/null
+++ b/PyUQTk/utils/func.py
@@ -0,0 +1,291 @@
+#!/usr/bin/env python
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+
+"""
+Generic tools for evaluation of standard functions
+and their integrals
+"""
+
+try:
+ import numpy as np
+except ImportError:
+ "Need numpy"
+
+import sys
+from math import *
+import random as rnd
+import itertools
+
+###################################################################################################
+
+
+def func(xdata,model,func_params):
+ """Generic function evaluator.
+ Note:
+ * Currently only genz functions are implemented.
+ * Note that conventional Genz arguments are in [0,1]; here the expected input is in [-1,1]
+ Arguments:
+ * xdata : Nxd numpy array of input, should be in [-1,1]^d
+ * model : Model name, options are 'genz_osc', 'genz_exp', 'genz_cont', 'genz_gaus', 'genz_cpeak', 'genz_ppeak'
+ * func_params : Auxiliary parameters
+ : For genz functions, an array of size d+1, the first entry being the shift, and the rest of the entries are the weights.
+ : See UQTk Manual for Genz formulae.
+ Returns:
+ * ydata : An array of outputs of size N.
+ """
+
+ # Get the input size
+ sam=xdata.shape[0]
+ dim=xdata.shape[1]
+
+ # Check the function types and evaluate
+ if model == 'genz_osc':
+ xdata=0.5*(xdata+1.)
+ ydata=np.empty((sam,))
+ gcf=func_params[1:]
+ xtmp=np.dot(xdata,gcf)
+ for j in range(sam):
+ ydata[j]=cos(2.*pi*func_params[0]+xtmp[j])
+
+ elif model == 'genz_exp':
+ xdata=0.5*(xdata+1.)
+ ydata=np.empty((sam,))
+ ww=func_params[0]
+ gcf=func_params[1:]
+
+ xtmp=np.dot(xdata-ww,gcf)
+ for j in range(sam):
+ ydata[j]=exp(xtmp[j])
+
+ elif model == 'genz_cont':
+ xdata=0.5*(xdata+1.)
+ ydata=np.empty((sam,))
+ ww=func_params[0]
+ gcf=func_params[1:]
+
+ xtmp=np.dot(abs(xdata-ww),gcf)
+ for j in range(sam):
+ ydata[j]=exp(-xtmp[j])
+
+ elif model == 'genz_gaus':
+ xdata=0.5*(xdata+1.)
+ ydata=np.empty((sam,))
+ ww=func_params[0]
+ gcf=func_params[1:]
+
+ xtmp=np.dot((xdata-ww)*(xdata-ww),gcf*gcf)
+ for j in range(sam):
+ ydata[j]=exp(-xtmp[j])
+
+ elif model == 'genz_cpeak':
+ xdata=0.5*(xdata+1.)
+ ydata=np.empty((sam,))
+ #ww=param[0]
+ gcf=func_params[1:]
+
+ xtmp=1.+(np.dot(xdata,gcf)) #use abs if defined on [-1,1]
+ for j in range(sam):
+ ydata[j]=exp(-(dim+1.)*log(xtmp[j]))
+
+ elif model == 'genz_ppeak':
+ xdata=0.5*(xdata+1.)
+ ydata=np.empty((sam,))
+ ww=func_params[0]
+ gcf=func_params[1:]
+
+ for j in range(sam):
+ prod=1.
+ for i in range(dim):
+ prod = prod / (1./(gcf[i]**2.)+(xdata[j,i]-ww)**2.)
+ ydata[j]=prod
+
+ elif model == 'ishigami':
+ assert(dim==3)
+ a=func_params[0]
+ b=func_params[1]
+ ydata=np.empty((sam,))
+
+ for j in range(sam):
+ ydata[j]=np.sin(xdata[j,0])+a*np.sin(xdata[j,1])**2+b*np.sin(xdata[j,0])*xdata[j,2]**4
+
+ elif model == 'sobol':
+ assert(dim==func_params.shape[0])
+ ydata=np.empty((sam,))
+ for j in range(sam):
+ val=1.
+ for k in range(dim):
+ val *= ( (abs(2*xdata[j,k])+func_params[k])/(1.+func_params[k]) )
+ ydata[j]=val
+
+ elif model == 'poly_exsens':
+ assert(dim==func_params[0])
+ ydata=np.empty((sam,))
+ for j in range(sam):
+ val=1.
+ for k in range(dim):
+ val *= ( (3./4.)*(xdata[j,k]+1.)**2+1. )/2.
+ ydata[j]=val
+
+ return ydata
+
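+# Illustrative usage (sketch, not part of the original module); the parameter
+# values below are made up:
+#
+#   xdata = 2.*np.random.rand(100,3)-1.       # 100 samples in [-1,1]^3
+#   fparam = np.array([0.1, 1., 2., 3.])      # shift + 3 Genz weights
+#   ydata = func(xdata, 'genz_osc', fparam)   # 100 model outputs
+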
+##################################################################################
+
+def integ_exact(model,func_params):
+ """Analytically available function integrals.
+ Note:
+ * Currently only genz functions are implemented.
+ * Note that conventional Genz arguments are in [0,1]; here the expected input is in [-1,1]
+ Arguments:
+ * model : Model name, options are 'genz_osc', 'genz_exp', 'genz_cont', 'genz_gaus', 'genz_cpeak', 'genz_ppeak'
+ * func_params : Auxiliary parameters
+ : For genz functions, an array of size d+1, the first entry being the shift, and the rest of the entries are the weights.
+ : See UQTk Manual for Genz integral formulae.
+ Returns:
+ * integ_ex : A real number that is the integral over [-1,1]^d
+ """
+
+
+ if (model=='genz_osc'):
+ gcf=func_params
+ dim=gcf.shape[0]-1
+ integ_ex=cos(2.*pi*gcf[0]+0.5*sum(gcf[1:]))
+ for i in range(1,dim+1):
+ integ_ex*=(2.*sin(gcf[i]/2.)/gcf[i])
+ elif (model=='genz_exp'):
+ gcf=func_params
+ dim=gcf.shape[0]-1
+ integ_ex=1.
+ for i in range(1,dim+1):
+ at1=exp(-gcf[i]*gcf[0])
+ at2=exp(gcf[i]*(1.-gcf[0]))
+ integ_ex*=((at2-at1)/(gcf[i]))
+ elif (model=='genz_cont'):
+ gcf=func_params
+ dim=gcf.shape[0]-1
+ integ_ex=1.
+ for i in range(1,dim+1):
+ integ_ex*= ((2.-exp(gcf[i]*(-gcf[0]))-exp(gcf[i]*(gcf[0]-1.)))/gcf[i])
+ elif (model=='genz_gaus'):
+ gcf=func_params
+ dim=gcf.shape[0]-1
+ integ_ex=1.
+ for i in range(1,dim+1):
+ at1=erf(-gcf[i]*gcf[0])
+ at2=erf(gcf[i]*(1.-gcf[0]))
+ integ_ex*=((at2-at1)*sqrt(pi)/(2.*gcf[i]))
+ elif (model=='genz_cpeak'):
+ gcf=func_params
+ dim=gcf.shape[0]-1
+ numer=0.0
+ count=1
+ denom=1.
+ for i in range(1,dim+1):
+ comb=list(itertools.combinations(range(1,dim+1),i))
+ for j in range(len(comb)):
+ assert(i==len(comb[j]))
+ #print i,j,pow(-1,i)
+ numer+=(pow(-1,i)/(1.+sum(gcf[list(comb[j])])))
+ count+=1
+ denom*=(i*gcf[i])
+ #print count, numer
+ integ_ex=(1.+numer)/denom
+ elif (model=='genz_ppeak'):
+ gcf=func_params
+ dim=gcf.shape[0]-1
+ integ_ex=1.
+ for i in range(1,dim+1):
+ at1=np.arctan(-gcf[i]*gcf[0])
+ at2=np.arctan(gcf[i]*(1.-gcf[0]))
+ integ_ex*=(gcf[i]*(at2-at1))
+
+ return integ_ex
+
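+# Illustrative check (sketch, not part of the original module): the mean of
+# func() over uniform samples on [-1,1]^d approaches this exact integral.
+#
+#   fparam = np.array([0.1, 1., 2., 3.])
+#   x = 2.*np.random.rand(100000,3)-1.
+#   print func(x,'genz_exp',fparam).mean(), integ_exact('genz_exp',fparam)
+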
+################################################################################
+################################################################################
+
+def mainsens_exact(model,func_params):
+ """Analytically available main sensitivities for some functions.
+ Note:
+ * Currently only sobol, ishigami and poly_exsens functions are implemented.
+ * Note that conventional sobol arguments are in [0,1]; here the expected input is in [-1,1]
+ Arguments:
+ * model : Model name, options are 'sobol', 'ishigami', 'poly_exsens'
+ * func_params : Auxiliary parameters
+ Returns:
+ * mainsens : Main effect Sobol sensitivity index
+ """
+ if (model=='sobol'):
+ dim=func_params.shape[0]
+ mainsens=np.empty((dim,))
+ var=1.0
+ for i in range(dim):
+ mainsens[i]=1./(3.*(1.+func_params[i])**2)
+ var*=(mainsens[i]+1.)
+ var-=1.0
+ mainsens/=var
+
+ elif (model=='ishigami'):
+ a=func_params[0]
+ b=func_params[1]
+ var=a**2/8.+b*np.pi**4/5.+b**2*np.pi**8/18.+0.5
+ mainsens=np.empty((3,))
+ mainsens[0]=b*np.pi**4/5.+b**2*np.pi**8/50.+0.5
+ mainsens[1]=a**2/8.
+ mainsens[2]=0.0
+ mainsens/=var
+
+ elif (model=='poly_exsens'):
+ dim=func_params[0]
+ mainsens=(0.2/(1.2**dim-1))*np.ones((dim,))
+
+ else:
+ print "No exact sensitivity available for this function. Exiting."
+ sys.exit(1)
+
+
+ return mainsens
+
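+# Illustrative usage (sketch, not part of the original module); a=7, b=0.1 is
+# a common Ishigami parameter choice:
+#
+#   print mainsens_exact('ishigami', np.array([7., 0.1]))
+#   print mainsens_exact('sobol', np.array([0., 0.5, 3., 9.]))
+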
+##################################################################################
+##################################################################################
+
+def main(arg):
+ modelname=arg[0]
+ input_file=arg[1]
+ output_file=arg[2]
+ auxparam=[]
+ if len(arg)>3:
+ auxparam_file=arg[3]
+ auxparam=np.loadtxt(auxparam_file,ndmin=1)
+
+ input=np.loadtxt(input_file,ndmin=2)
+
+ output=func(input,modelname,auxparam)
+ np.savetxt(output_file,output)
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/PyUQTk/utils/mindex_order.py b/PyUQTk/utils/mindex_order.py
new file mode 100644
index 00000000..3f9944e1
--- /dev/null
+++ b/PyUQTk/utils/mindex_order.py
@@ -0,0 +1,173 @@
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+
+"""
+Proof-of-concept functions to play around with several ordering
+sequences, e.g.
+ 1. lexicographical order (lex)
+ 2. colexicographic order (colex)
+ 3. reverse lexicographical order (revlex)
+ 4. reverse colexicographical order (revcolex)
+"""
+
+import os
+import sys
+
+try:
+ import numpy as npy
+except ImportError:
+ print "Numpy was not found. "
+
+def sort_lex(a,b):
+"""
+ Indicator function for lexicographical order
+"""
+ n=a.shape[0]
+ for i in range(n):
+ if (a[i]>b[i]):
+ return (1)
+ elif (b[i]>a[i]):
+ return (-1)
+ return(0);
+
+def sort_colex(a,b):
+"""
+ Indicator function for colexicographical order
+"""
+ n=a.shape[0]
+ for i in range(n-1,-1,-1):
+ if (a[i]>b[i]):
+ return (1)
+ elif (b[i]>a[i]):
+ return (-1)
+ return(0);
+
+def sort_revlex(a,b):
+"""
+ Indicator function for reverse lexicographical order
+"""
+ n=a.shape[0]
+ for i in range(n):
+ if (a[i] 0):
+ #-----------first order terms---------------------------
+ for idim in range(ndim):
+ iup+=1
+ mi[iup,idim] = 1;
+ if (norder > 1):
+ #-----------higher order terms--------------------------
+ for iord in range(2,norder+1):
+ lessiord = iup;
+ for idim in range(ndim):
+ for ii in range(idim+1,ndim):
+ ic[idim] += ic[ii];
+ for idimm in range(ndim):
+ for ii in range(lessiord-ic[idimm]+1,lessiord+1):
+ iup+=1
+ mi[iup]=mi[ii].copy()
+ mi[iup,idimm] += 1
+ if type == 'lex':
+ return npc,graded_sorted(mi,sort_lex)
+ elif type == 'colex':
+ return npc,graded_sorted(mi,sort_colex)
+ elif type == 'revlex':
+ return npc,graded_sorted(mi,sort_revlex)
+ elif type == 'revcolex':
+ return npc,graded_sorted(mi,sort_revcolex)
+ else:
+ print 'Unknown multi-index order type: ',type
+ return -1,mi
+
+
+
+
diff --git a/PyUQTk/utils/multiindex.py b/PyUQTk/utils/multiindex.py
new file mode 100755
index 00000000..1513e3a8
--- /dev/null
+++ b/PyUQTk/utils/multiindex.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+
+"""
+Scripts for managing multiindices.
+"""
+
+import os
+import sys
+
+try:
+ import numpy as np
+except ImportError:
+ print "Numpy was not found. "
+
+#############################################################
+#############################################################
+
+def gen_mi(mi_type,params):
+ """
+ Wrapper around the app gen_mi for generating multiindex sets
+ Arguments:
+ * mi_type : Multiindex type, options are 'TO', 'TP', 'HDMR'
+ * params : Parameters, a two-element tuple
+ : First element is the order ('TO'), list of orders per dimension ('TP'), or list of HDMR orders ('HDMR')
+ : Second element is dimensionality
+ Returns:
+ * mindex : A 2d array of multiindices.
+ """
+
+ # Total-Order truncation
+ if mi_type=='TO':
+ # Order
+ nord=params[0]
+ # Dimensionality
+ dim=params[1]
+ # Command for the app
+ cmd='gen_mi -x' + mi_type + ' -p' + str(nord) + ' -q' + str(dim)
+
+ # Tensor-product truncation
+ elif mi_type=='TP':
+ # A list of orders per dimension
+ orders=params[0]
+ # Dimensionality
+ dim=params[1]
+ assert(dim==len(orders))
+ # Save the per-dimension orders in a file
+ np.savetxt('orders.dat',np.array(orders),fmt='%d')
+ # Command for the app
+ cmd='gen_mi -x' + mi_type + ' -f orders.dat -q'+str(dim)
+
+ # HDMR truncation
+ elif mi_type=='HDMR':
+ # A list of per-variate orders
+ hdmr_dims=params[0]
+ # Dimensionality
+ dim=params[1]
+ # Save the HDMR dimensions in a file
+ np.savetxt('hdmr_dims.dat',np.array(hdmr_dims),fmt='%d')
+ # Command for the app
+ cmd='gen_mi -x' + mi_type + ' -f hdmr_dims.dat -q'+str(dim)
+
+ else:
+ print "Multiindex type is not recognized. Use 'TO', 'TP' or 'HDMR'. Exiting."
+ sys.exit(1)
+
+ # Run the app
+ os.system(cmd + ' > gen_mi.out')
+
+ # Load the generated multiindex file
+ mindex=np.loadtxt('mindex.dat',dtype=int).reshape(-1,dim)
+ return mindex
+
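+# Illustrative usage (sketch, not part of the original module); requires the
+# UQTk gen_mi app on the PATH and writes mindex.dat to the current directory:
+#
+#   mindex = gen_mi('TO', (3, 2))        # all multiindices of total order <= 3 in 2d
+#   mindex = gen_mi('TP', ([2, 4], 2))   # per-dimension orders 2 and 4
+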
+
+#############################################################
+#############################################################
+
+def mi_addfront_cons(mindex):
+ """
+ Adding a front to multiindex in a conservative way, i.e.
+ a multiindex is added only if *all* parents are in the current set
+ """
+
+ print "Adding multiindex front (conservative)"
+
+ npc=mindex.shape[0]
+ ndim=mindex.shape[1]
+ mindex_f=np.zeros((1,ndim),dtype=int)
+ mindex_add=np.zeros((1,ndim),dtype=int)
+ mindex_new=np.zeros((1,ndim),dtype=int)
+ for i in range(npc):
+ cur_mi=mindex[i,:]
+
+ fflag=True
+ for j in range(ndim):
+ test_mi=np.copy(cur_mi)
+ test_mi[j] += 1
+ #print "Trying test_mi", test_mi
+ fl=True
+
+
+ if not any(np.equal(mindex,test_mi).all(1)):
+ for k in range(ndim):
+ if(test_mi[k]!=0):
+ subt_mi=np.copy(test_mi)
+ subt_mi[k] -= 1
+
+ if any(np.equal(mindex,subt_mi).all(1)):
+ cfl=True
+ fl=cfl*fl
+
+ else:
+ fl=False
+ break
+
+
+ if (fl):
+ if not any(np.equal(mindex_add,test_mi).all(1)):
+ mindex_add=np.vstack((mindex_add,test_mi))
+ if fflag:
+ mindex_f=np.vstack((mindex_f,cur_mi))
+ fflag=False
+
+ mindex_f=mindex_f[1:]
+ mindex_add=mindex_add[1:]
+ mindex_new=np.vstack((mindex,mindex_add))
+
+ print "Multiindex resized from %d to %d." % (mindex.shape[0],mindex_new.shape[0])
+
+ # Returns the new multiindex, the added new multiindices,
+ # and the 'front', i.e. multiindices whose children are added
+ return [mindex_new,mindex_add,mindex_f]
+
+#############################################################
+#############################################################
+#############################################################
+
+def mi_addfront(mindex):
+ """
+ Adding a front to multiindex in a non-conservative way, i.e.
+ a multiindex is added only if *any* of the parents is in the current set
+ """
+
+ print "Adding multiindex front (non-conservative)"
+
+ npc=mindex.shape[0]
+ ndim=mindex.shape[1]
+
+ mindex_f=np.zeros((1,ndim),dtype=int)
+ mindex_add=np.zeros((1,ndim),dtype=int)
+ mindex_new=np.zeros((1,ndim),dtype=int)
+ for i in range(npc):
+ cur_mi=mindex[i,:]
+
+ fflag=True
+ for j in range(ndim):
+ test_mi=np.copy(cur_mi)
+ test_mi[j] += 1
+ if not any(np.equal(mindex,test_mi).all(1)):
+ if not any(np.equal(mindex_add,test_mi).all(1)):
+ mindex_add=np.vstack((mindex_add,test_mi))
+ if fflag:
+ mindex_f=np.vstack((mindex_f,cur_mi))
+ fflag=False
+
+ mindex_f=mindex_f[1:]
+ mindex_add=mindex_add[1:]
+ mindex_new=np.vstack((mindex,mindex_add))
+
+
+ print "Multiindex resized from %d to %d." % (mindex.shape[0],mindex_new.shape[0])
+
+ # Returns the new multiindex, the added new multiindices,
+ # and the 'front', i.e. multiindices whose children are added
+ return [mindex_new,mindex_add,mindex_f]
+
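+# Illustrative usage (sketch, not part of the original module), growing a
+# total-order set by one front:
+#
+#   mindex = gen_mi('TO', (2, 2))
+#   mindex_new, mindex_add, mindex_f = mi_addfront(mindex)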
+
+
+#####################################################################
diff --git a/PyUQTk/utils/pdf_kde.py b/PyUQTk/utils/pdf_kde.py
new file mode 100755
index 00000000..4fac5e3b
--- /dev/null
+++ b/PyUQTk/utils/pdf_kde.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+
+"""
+KDE PDF computation routines.
+"""
+
+import os
+import sys
+try:
+ from scipy import stats
+except ImportError:
+ print "Scipy was not found. "
+
+try:
+ import numpy as np
+except ImportError:
+ print "Numpy was not found. "
+
+
+#############################################################
+
+def get_pdf(data,target, method='UQTk',verbose=1):
+ """
+ Compute PDF given data at target points
+ with Python built-in method or with the UQTk app
+
+ Arguments:
+ * data : an N x d array of N samples in d dimensions
+ * target : an M x d array of target points
+ : can be an integer when method='UQTk', in which case it is interpreted as
+ : the number of grid points per dimension for a target grid
+ * method : 'UQTk' or 'Python'
+ * verbose: verbosity on the screen, 0,1, or 2
+
+ Returns:
+ * xtarget : target points (same as target, or a grid, if target is an integer)
+ * dens : PDF values at xtarget
+ """
+ np.savetxt('data',data)
+
+ # Wrapper around the UQTk app
+ if (method=='UQTk'):
+
+ if (verbose>1):
+ outstr=''
+ else:
+ outstr=' > pdfcl.log'
+
+ if(type(target)==int):
+ cmd='pdf_cl -i data -g '+str(target)+outstr
+ if (verbose>0):
+ print "Running ", cmd
+ os.system(cmd)
+
+ else:
+ np.savetxt('target',target)
+ cmd='pdf_cl -i data -x target'+outstr
+ if (verbose>0):
+ print "Running ", cmd
+
+ os.system(cmd)
+
+ xtarget=np.loadtxt('dens.dat')[:,:-1]
+ dens=np.loadtxt('dens.dat')[:,-1]
+
+ # Python Scipy built-in method of KDE
+ elif (method=='Python'):
+ assert (type(target)!=int)
+ np.savetxt('target',target)
+
+ kde_py=stats.kde.gaussian_kde(data.T)
+ dens=kde_py(target.T)
+ xtarget=target
+
+ else:
+ print "KDE computation method is not recognized (choose 'Python' or 'UQTk'). Exiting."
+ sys.exit()
+
+ # Return the target points and the probability density
+ return xtarget,dens
+
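+# Illustrative usage (sketch, not part of the original module); the 'UQTk'
+# branch shells out to the pdf_cl app, the 'Python' branch needs only scipy;
+# the data below are made up:
+#
+#   data = np.random.randn(1000,1)
+#   xt, dens = get_pdf(data, 100)        # UQTk app on a 100-point grid
+#   xt, dens = get_pdf(data, np.linspace(-3.,3.,100).reshape(-1,1), method='Python')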
diff --git a/PyUQTk/utils/regr.py b/PyUQTk/utils/regr.py
new file mode 100755
index 00000000..0413a733
--- /dev/null
+++ b/PyUQTk/utils/regr.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+
+"""
+Regression-related tools
+"""
+
+import os
+import sys
+
+try:
+ import numpy as np
+except ImportError:
+ print "Numpy was not found. "
+
+
+from multiindex import mi_addfront
+
+#############################################################
+#############################################################
+#############################################################
+
+def regression(xdata,ydata,mode,basisparams,regparams):
+ """
+ Polynomial regression given x- and y-data. A wrapper around the regression app
+
+ Arguments:
+ * xdata : N x d array of x-data
+ * ydata : N x e array of y-data
+ * mode : m (only mean), ms (mean and stdev) or msc (mean,stdev and cov)
+ * basisparams : tuple of two elements, (pctype,mindex)
+ : pctype - PC type
+ : mindex - multiindex array
+ * regparams : tuple of two elements, (method, methodpars)
+ : method - regression method, 'lsq' or 'wbcs'
+ : methodpars - parameters of the regression (regularization weights for wbcs)
+ : Note: regparams=np.array(methodpars) on output
+
+ Returns:
+ * cfs : Coefficient vector
+ * mindex : Multiindex array
+ * Sig : Coefficient covariance matrix
+ * used : Indices of retained multiindices
+ """
+
+ # Read input settings
+ pctype,mindex=basisparams
+ method,methodpars=regparams
+
+ # Turn regparams tuple into an array
+ regparams=np.array(methodpars).reshape(-1,1)
+
+ # Get the dimensionality
+ dim=mindex.shape[1]
+
+ # Save the appropriate files for the regression app
+ np.savetxt('xdata.dat',xdata)
+ np.savetxt('ydata.dat',ydata)
+ np.savetxt('mindex.dat',mindex,fmt='%d')
+ np.savetxt('regparams.dat',regparams,fmt='%24.16f')
+
+ # Regularization
+ lamstr=''
+ regstr=''
+ if method=='lsq':
+ lamstr='-l 0.0'
+ if method=='wbcs':
+ regstr='-w regparams.dat'
+
+ # Run the regression app
+ cmd='regression -c 1.e-5 -x xdata.dat -y ydata.dat -b PC_MI -s '+pctype+' -p mindex.dat -m '+mode+' -r '+method + ' '+regstr+' '+lamstr +' > regr.log'
+ print "Running "+cmd
+ os.system(cmd)
+
+ # Read the resulting files
+ cfs=np.loadtxt('coeff.dat')
+ used=np.loadtxt('selected.dat',dtype=int)
+ mindex=np.loadtxt('mindex.dat',dtype=int).reshape(-1,dim)
+ mindex=mindex[used]
+ if (mode=='msc'):
+ Sig=np.loadtxt('Sig.dat')
+ else:
+ Sig=[]
+
+ # Return coefficient, multiindex, coef. covariance matrix, and indices of used basis terms
+ return (cfs,mindex,Sig,used)
+
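+# Example sketch (illustrative values; assumes mindex.dat was produced by the
+# gen_mi app, e.g. 'gen_mi -x TO -p 3 -q 2'):
+#
+#   xdata=np.random.rand(100,2)                  # N x d inputs
+#   ydata=np.sum(xdata,axis=1).reshape(-1,1)     # N x 1 outputs
+#   mindex=np.loadtxt('mindex.dat',dtype=int)
+#   cfs,mindex,Sig,used=regression(xdata,ydata,'msc',('LU',mindex),('lsq',[1.0]))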
+#############################################################
+#############################################################
+#############################################################
+
+def regression_iter(xdata,ydata,mode,basisparams,regparams,iterparams):
+ """
+ Iterative regression involving multiindex growth.
+ See inputs and outputs of regression(), with additional argument
+
+ iterparams : a tuple (niter,eps,update_weights,update_mindex)
+ * niter : Number of iterations
+ * eps : Nugget for iterative reweighting
+ * update_weights : boolean flag whether to recompute the weights or not
+ * update_mindex : boolean flag whether to update multiindex or not
+
+ """
+
+ # Read the inputs
+ pctype,mindex=basisparams
+ method,methodpars=regparams
+ niter,eps,update_weights,update_mindex=iterparams
+
+ # Set the current parameters
+ basisparams_cur=[pctype,mindex]
+ regparams_cur=[method,methodpars]
+
+
+ nrange=np.arange(mindex.shape[0])
+ cur_used=nrange
+ npc=mindex.shape[0]
+ for i in range(niter):
+ print "Iteration %d / %d " % (i+1,niter)
+ print "Initial mindex size ", basisparams_cur[1].shape[0]
+ cfs_cur,mindex_cur,Sig,used=regression(xdata,ydata,mode,basisparams_cur,regparams_cur)
+ print "New mindex size ", mindex_cur.shape[0]
+
+
+ #tmp=cur_used[used]
+ #cur_used=tmp.copy()
+
+ npc_cur=mindex_cur.shape[0]
+
+ # Update weights or not
+ if (update_weights==True):
+ regparams_cur[1]=1./(abs(cfs_cur)+eps)
+ else:
+ tmp=regparams_cur[1]
+ regparams_cur[1]=tmp[list(used)] #read used.dat and replace it here
+
+ # Update multiindex or not
+ if (update_mindex==True and i<niter-1):
+ # Grow the basis by adding an admissible front to the multiindex
+ # (the exact return signature of mi_addfront is assumed in this completion)
+ mindex_cur=mi_addfront(mindex_cur)[0]
+
+ # Reset the current parameters for the next iteration
+ basisparams_cur=[pctype,mindex_cur]
+
+ return (cfs_cur,mindex_cur,Sig,used)
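+# Example sketch (illustrative values): five iterations of reweighted 'wbcs'
+# with multiindex growth, starting from initial weights w0 and multiindex mindex0:
+#
+#   cfs,mindex,Sig,used=regression_iter(xdata,ydata,'msc',
+#                                       ('LU',mindex0),('wbcs',w0),
+#                                       (5,1.e-3,True,True))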
diff --git a/config/config-gcc-Python.sh b/config/config-gcc-Python.sh
new file mode 100755
--- /dev/null
+++ b/config/config-gcc-Python.sh
+#!/bin/bash
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Need help with UQTk? Check out the resources on http://www.sandia.gov/UQToolkit/
+# or e-mail uqtk-users@software.sandia.gov
+# (subscription details listed at http://www.sandia.gov/UQToolkit/)
+# Other questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+
+usage ()
+{
+ echo "No command-line parameters, execute as is"
+ exit
+}
+
+while getopts ":h" opt; do
+ case $opt in
+ h) usage
+ ;;
+ \?) echo "Invalid option -$OPTARG" >&2; usage
+ ;;
+ esac
+done
+
+
+# Run this command from within a build directory.
+# Customize the macros below to your specific directory preferences
+UQTK_SRC_DIR=$PWD/../UQTk
+UQTK_INSTALL_DIR=$UQTK_SRC_DIR-install
+
+echo "This script assumes the UQTk source code is in $UQTK_SRC_DIR"
+echo "and will be installed in $UQTK_INSTALL_DIR"
+
+# Specify compiler and library paths as needed
+cmake -DCMAKE_INSTALL_PREFIX:PATH=$UQTK_INSTALL_DIR \
+ -DCMAKE_Fortran_COMPILER=gfortran \
+ -DCMAKE_C_COMPILER=gcc \
+ -DCMAKE_CXX_COMPILER=g++ \
+ -DPYTHON_EXECUTABLE:FILEPATH=/opt/local/bin/python \
+ -DPYTHON_LIBRARY:FILEPATH=/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/libpython2.7.dylib \
+ -DPyUQTk=ON \
+ $UQTK_SRC_DIR
diff --git a/config/config-gcc-base.sh b/config/config-gcc-base.sh
new file mode 100755
index 00000000..38d964c7
--- /dev/null
+++ b/config/config-gcc-base.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Need help with UQTk? Check out the resources on http://www.sandia.gov/UQToolkit/
+# or e-mail uqtk-users@software.sandia.gov
+# (subscription details listed at http://www.sandia.gov/UQToolkit/)
+# Other questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+
+usage ()
+{
+ echo "No command-line parameters, execute as is"
+ exit
+}
+
+while getopts ":h" opt; do
+ case $opt in
+ h) usage
+ ;;
+ \?) echo "Invalid option -$OPTARG" >&2; usage
+ ;;
+ esac
+done
+
+# Run this script from a build directory
+# Customize the paths below to reflect your directory preferences
+UQTK_SRC_DIR=$PWD/../UQTk
+UQTK_INSTALL_DIR=$UQTK_SRC_DIR-install
+
+echo "This script assumes the UQTk source code is in $UQTK_SRC_DIR"
+echo "and will be installed in $UQTK_INSTALL_DIR"
+
+# Specify compiler and library paths as needed
+cmake -DCMAKE_INSTALL_PREFIX:PATH=$UQTK_INSTALL_DIR \
+ -DCMAKE_Fortran_COMPILER=gfortran \
+ -DCMAKE_C_COMPILER=gcc \
+ -DCMAKE_CXX_COMPILER=g++ \
+ $UQTK_SRC_DIR
diff --git a/config/config-grover-intel.sh b/config/config-grover-intel.sh
new file mode 100755
index 00000000..6250da77
--- /dev/null
+++ b/config/config-grover-intel.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0.4
+# Copyright (2017) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+
+usage ()
+{
+ echo "No command-line parameters, execute as is"
+ exit
+}
+
+while getopts ":h" opt; do
+ case $opt in
+ h) usage
+ ;;
+ \?) echo "Invalid option -$OPTARG" >&2; usage
+ ;;
+ esac
+done
+
+cmake -DCMAKE_INSTALL_PREFIX:PATH=$PWD/../UQTk-install \
+ -DCMAKE_Fortran_COMPILER=/opt/intel/fc/Compiler/11.1/080/bin/ia64/ifort \
+ -DCMAKE_C_COMPILER=/opt/intel/cc/Compiler/11.1/080/bin/ia64/icc \
+ -DCMAKE_CXX_COMPILER=/opt/intel/cc/Compiler/11.1/080/bin/ia64/icpc \
+ -DIntelLibPath=/opt/intel/fc/Compiler/11.1/080/lib/ia64 \
+ -DPyUQTk=OFF \
+ ../UQTk
diff --git a/config/config-teton.sh b/config/config-teton.sh
new file mode 100755
index 00000000..c0b25d97
--- /dev/null
+++ b/config/config-teton.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+#=====================================================================================
+# The UQ Toolkit (UQTk) version 3.0
+# Copyright (2015) Sandia Corporation
+# http://www.sandia.gov/UQToolkit/
+#
+# Copyright (2015) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+# with Sandia Corporation, the U.S. Government retains certain rights in this software.
+#
+# This file is part of The UQ Toolkit (UQTk)
+#
+# UQTk is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UQTk is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+#
+# Questions? Contact Bert Debusschere
+# Sandia National Laboratories, Livermore, CA, USA
+#=====================================================================================
+
+usage ()
+{
+ echo "Usage : $0 -d -c -p -h"
+ exit
+}
+
+uqtksrc="${HOME}/Projects/UQTk/3.0/gitdir"
+pyintf="OFF"
+ctype="gnu"
+
+while getopts ":p:c:d:h" opt; do
+ case $opt in
+ p) pyintf="$OPTARG"
+ ;;
+ c) ctype="$OPTARG"
+ ;;
+ d) uqtksrc="$OPTARG"
+ ;;
+ h) usage
+ ;;
+ \?) echo "Invalid option -$OPTARG" >&2; usage
+ ;;
+ esac
+done
+
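+# Example (sketch): from a fresh build directory, configure with GCC 6.1 and
+# the Python interface enabled:
+#   ../UQTk/config/config-teton.sh -c gnu61 -p ON -d ${HOME}/Projects/UQTk/3.0/gitdir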
+echo "============================================"
+echo "Compiling UQTk with:"
+echo " - $ctype compilers"
+echo " - python interface $pyintf"
+echo "============================================"
+
+#PATH2MUQ=${HOME}/Projects/muq-install
+GNUROOT=/opt/local
+
+if [ "${ctype}" == "gnu53" ]; then
+cmake -DCMAKE_INSTALL_PREFIX:PATH=$PWD/../install_5.3 \
+ -DCMAKE_Fortran_COMPILER=/usr/local/opt/gcc53/bin/gfortran-5.3.0 \
+ -DCMAKE_C_COMPILER=/usr/local/opt/gcc53/bin/gcc-5.3.0 \
+ -DCMAKE_CXX_COMPILER=/usr/local/opt/gcc53/bin/g++-5.3.0 \
+ -DPATH2MUQ=${PATH2MUQ} \
+ -DPyUQTk=${pyintf} \
+ ${uqtksrc}
+elif [ "${ctype}" == "gnu61" ]; then
+cmake -DCMAKE_INSTALL_PREFIX:PATH=$PWD/../install_6.1 \
+ -DCMAKE_Fortran_COMPILER=$GNUROOT/gcc61/bin/gfortran-6.1.0 \
+ -DCMAKE_C_COMPILER=$GNUROOT/gcc61/bin/gcc-6.1.0 \
+ -DCMAKE_CXX_COMPILER=$GNUROOT/gcc61/bin/g++-6.1.0 \
+ -DPATH2MUQ=${PATH2MUQ} \
+ -DPyUQTk=${pyintf} \
+ ${uqtksrc}
+elif [ "${ctype}" == "gnu61m" ]; then
+cmake -DCMAKE_INSTALL_PREFIX:PATH=$PWD/../install_6.1m \
+ -DCMAKE_Fortran_COMPILER=$GNUROOT/gcc61/bin/mpif90 \
+ -DCMAKE_C_COMPILER=$GNUROOT/gcc61/bin/mpicc \
+ -DCMAKE_CXX_COMPILER=$GNUROOT/gcc61/bin/mpic++ \
+ -DPATH2MUQ=${PATH2MUQ} \
+ -DPyUQTk=${pyintf} \
+ ${uqtksrc}
+elif [ "${ctype}" == "gnu71" ]; then
+cmake -DCMAKE_INSTALL_PREFIX:PATH=$PWD/../install_7.1 \
+ -DCMAKE_Fortran_COMPILER=$GNUROOT/gcc71/bin/gfortran-7.1.0 \
+ -DCMAKE_C_COMPILER=$GNUROOT/gcc71/bin/gcc-7.1.0 \
+ -DCMAKE_CXX_COMPILER=$GNUROOT/gcc71/bin/g++-7.1.0 \
+ -DPATH2MUQ=${PATH2MUQ} \
+ -DPyUQTk=${pyintf} \
+ ${uqtksrc}
+elif [ "${ctype}" == "intel" ]; then
+cmake -DCMAKE_INSTALL_PREFIX:PATH=$PWD/../install_intel \
+ -DCMAKE_Fortran_COMPILER=/opt/intel/composerxe/bin/ifort \
+ -DCMAKE_C_COMPILER=/opt/intel/composerxe/bin/icc \
+ -DCMAKE_CXX_COMPILER=/opt/intel/composerxe/bin/icpc \
+ -DPyUQTk=${pyintf} \
+ ${uqtksrc}
+elif [ "${ctype}" == "clang" ]; then
+cmake -DCMAKE_INSTALL_PREFIX:PATH=$PWD/../install_clang \
+ -DCMAKE_Fortran_COMPILER=$GNUROOT/gcc61/bin/gfortran-6.1.0 \
+ -DCMAKE_C_COMPILER=clang \
+ -DCMAKE_CXX_COMPILER=clang++ \
+ -DClangLibPath=/opt/local/gcc61/lib \
+ -DPyUQTk=${pyintf} \
+ ${uqtksrc}
+else
+ echo "Unknown compiler: ${ctype}"
+fi
diff --git a/cpp/.!98082!.DS_Store b/cpp/.!98082!.DS_Store
new file mode 100644
index 00000000..e69de29b
diff --git a/cpp/.!98083!.DS_Store b/cpp/.!98083!.DS_Store
new file mode 100644
index 00000000..e69de29b
diff --git a/cpp/.DS_Store b/cpp/.DS_Store
new file mode 100644
index 00000000..45c70cb2
Binary files /dev/null and b/cpp/.DS_Store differ
diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt
new file mode 100644
index 00000000..852bdadc
--- /dev/null
+++ b/cpp/CMakeLists.txt
@@ -0,0 +1,5 @@
+project (UQTk)
+
+add_subdirectory (lib)
+add_subdirectory (app)
+add_subdirectory (tests)
diff --git a/cpp/app/CMakeLists.txt b/cpp/app/CMakeLists.txt
new file mode 100644
index 00000000..954032bf
--- /dev/null
+++ b/cpp/app/CMakeLists.txt
@@ -0,0 +1,13 @@
+add_subdirectory (gen_mi)
+add_subdirectory (generate_quad)
+add_subdirectory (model_inf)
+add_subdirectory (pce_eval)
+add_subdirectory (pce_quad)
+add_subdirectory (pce_resp)
+add_subdirectory (pce_rv)
+add_subdirectory (pce_sens)
+add_subdirectory (pdf_cl)
+add_subdirectory (sens)
+add_subdirectory (regression)
+add_subdirectory (gp_regr)
+add_subdirectory (gkpSparse)
diff --git a/cpp/app/gen_mi/CMakeLists.txt b/cpp/app/gen_mi/CMakeLists.txt
new file mode 100644
index 00000000..9002bb9a
--- /dev/null
+++ b/cpp/app/gen_mi/CMakeLists.txt
@@ -0,0 +1,47 @@
+
+add_executable (gen_mi gen_mi.cpp)
+
+target_link_libraries (gen_mi uqtkpce )
+target_link_libraries (gen_mi uqtkarray)
+target_link_libraries (gen_mi uqtktools)
+
+target_link_libraries (gen_mi depdsfmt )
+target_link_libraries (gen_mi deplapack)
+target_link_libraries (gen_mi depblas )
+target_link_libraries (gen_mi depfigtree )
+target_link_libraries (gen_mi depann )
+
+# Link fortran libraries
+if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+ # using GCC
+ if ("${GnuLibPath}" STREQUAL "")
+ target_link_libraries (gen_mi gfortran stdc++)
+ else()
+ target_link_libraries (gen_mi ${GnuLibPath}/libgfortran.a ${GnuLibPath}/libquadmath.a stdc++)
+ endif()
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
+ # using Intel
+ if ("${IntelLibPath}" STREQUAL "")
+ target_link_libraries (gen_mi ifcore)
+ else()
+ target_link_libraries (gen_mi ${IntelLibPath}/libifcore.a)
+ endif()
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+ # using Clang
+ if ("${ClangLibPath}" STREQUAL "")
+ target_link_libraries (gen_mi gfortran stdc++)
+ else()
+ target_link_libraries (gen_mi ${ClangLibPath}/libgfortran.dylib ${ClangLibPath}/libquadmath.dylib ${ClangLibPath}/libstdc++.dylib)
+ endif()
+endif()
+
+include_directories(../../lib/include)
+include_directories(../../lib/array )
+include_directories(../../lib/tools )
+
+include_directories(../../../dep/dsfmt)
+include_directories(../../../dep/figtree)
+
+
+INSTALL(TARGETS gen_mi DESTINATION bin)
+
diff --git a/cpp/app/gen_mi/gen_mi.cpp b/cpp/app/gen_mi/gen_mi.cpp
new file mode 100644
index 00000000..b48017de
--- /dev/null
+++ b/cpp/app/gen_mi/gen_mi.cpp
@@ -0,0 +1,193 @@
+/* =====================================================================================
+ The UQ Toolkit (UQTk) version 3.0.4
+ Copyright (2017) Sandia Corporation
+ http://www.sandia.gov/UQToolkit/
+
+ Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+ with Sandia Corporation, the U.S. Government retains certain rights in this software.
+
+ This file is part of The UQ Toolkit (UQTk)
+
+ UQTk is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ UQTk is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+
+ Questions? Contact Bert Debusschere
+ Sandia National Laboratories, Livermore, CA, USA
+===================================================================================== */
+/// \file gen_mi.cpp
+/// \author K. Sargsyan 2014 -
+/// \brief Command-line utility to generate multiindex
+
+#include "tools.h"
+#include "arrayio.h"
+#include "arraytools.h"
+#include <unistd.h>
+
+using namespace std;
+
+
+/// default multiindex type
+#define MI_TYPE "TO"
+/// default multiindex sequence
+#define MI_SEQ "NONE"
+/// default order
+#define ORD 1
+/// default dimensionality
+#define DIM 3
+/// default parameter filename
+#define PARAM_FILE "mi_param.dat"
+/// default verbosity
+#define VERBOSITY 1
+
+
+
+/******************************************************************************/
+/// Displays information about this program
+int usage(){
+ printf("This program to generate multiindex files given rules.\n");
+ printf("usage: gen_mi [-h] [-x] [-s] [-p] [-q] [-f] [-v ]\n");
+ printf(" -h : print out this help message \n");
+ printf(" -x : define the multiindex type (default=%s) \n",MI_TYPE);
+ printf(" -s : define the multiindex sequence (default=%s) \n",MI_SEQ);
+ printf(" -p : define the first parameter (default=%d) \n",ORD);
+ printf(" -q : define the second parameter (default=%d) \n",DIM);
+ printf(" -f : define the parameter filename for multiindex (default=%s) \n",PARAM_FILE);
+ printf(" -v : define verboosity 0-no output/1-output info (default=%d) \n",VERBOSITY);
+ printf("================================================================================\n");
+ printf("Input : None \n");
+ printf("Output : File 'mindex.dat'\n");
+ printf("--------------------------------------------------------------------------------\n");
+ printf("================================================================================\n");
+ exit(0);
+ return 0;
+}
+
+
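+// Example (sketch): 'gen_mi -x TO -p 3 -q 2' writes mindex.dat containing all
+// 2-dimensional multiindices of total order <= 3, i.e. (3+2 choose 2)=10 rows.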
+/// Main program: Generates multiindex of requested type with given parameters
+int main(int argc, char *argv[])
+{
+
+
+ /// Set the default values
+ int nord = ORD;
+ int ndim = DIM;
+ int verb = VERBOSITY;
+ char* param_file= (char *)PARAM_FILE;
+ char* mi_type = (char *)MI_TYPE;
+ char* mi_seq = (char *)MI_SEQ;
+
+ bool pflag = false;
+ bool sflag = false;
+ bool qflag = false;
+ bool fflag = false;
+
+ /// Read the user input
+ int c;
+
+ while ((c=getopt(argc,(char **)argv,"hx:s:p:q:f:v:"))!=-1){
+ switch (c) {
+ case 'h':
+ usage();
+ break;
+ case 'x':
+ mi_type = optarg;
+ break;
+ case 's':
+ mi_seq = optarg;
+ sflag=true;
+ break;
+ case 'p':
+ nord = strtol(optarg, (char **)NULL,0);
+ pflag=true;
+ break;
+ case 'q':
+ ndim = strtol(optarg, (char **)NULL,0);
+ qflag=true;
+ break;
+ case 'f':
+ param_file = optarg;
+ fflag=true;
+ break;
+ case 'v':
+ verb = strtol(optarg, (char **)NULL,0);
+ break;
+ default :
+ break;
+ }
+ }
+
+ /*----------------------------------------------------------------------------*/
+ /// Print the input information on screen
+ if ( verb > 0 ) {
+ fprintf(stdout,"mi_type = %s \n",mi_type);
+ if (sflag) fprintf(stdout,"mi_seq = %s \n",mi_seq);
+ if (qflag) fprintf(stdout,"ndim = %d \n",ndim);
+ if (pflag) fprintf(stdout,"nord = %d \n",nord);
+ if (fflag) fprintf(stdout,"param_file = %s \n",param_file);
+ }
+ /*----------------------------------------------------------------------------*/
+
+ if(fflag && (pflag)){
+ printf("gen_mi(): Can not specify both parameter file and order. Exiting.\n");
+ exit(1);
+ }
+
+ // Cast multiindex type as string
+ string mi_type_str(mi_type);
+
+ int npc;
+ Array2D<int> mindex;
+
+ // Choose between TO, TP or HDMR
+
+ // Total order
+ if (mi_type_str == "TO") {
+ if ( not sflag )
+ npc=computeMultiIndex(ndim,nord,mindex);
+ else
+ npc=computeMultiIndex(ndim,nord,mindex,string(mi_seq));
+ }
+
+ else if (mi_type_str == "TP") {
+ Array1D<int> maxorders;
+ Array2D<int> maxorders2d;
+ read_datafileVS(maxorders2d,param_file);
+ getCol(maxorders2d, 0, maxorders);
+ npc=computeMultiIndexTP(maxorders, mindex);
+ }
+
+ // HDMR ordering
+ else if(mi_type_str=="HDMR"){
+ Array1D<int> maxorders;
+ Array2D<int> maxorders2d;
+ read_datafileVS(maxorders2d,param_file);
+ getCol(maxorders2d, 0, maxorders);
+ npc=computeMultiIndexHDMR(ndim, maxorders, mindex);
+ }
+
+ else {
+ printf("gen_mi():: Multiindex type %s is not recognized. \n", mi_type);
+ exit(1);
+ }
+
+ /// Write to file mindex.dat
+ write_datafile(mindex, "mindex.dat");
+ if ( verb > 0 )
+ cout << "Generated multiindex of size " << npc
+ << " and stored in mindex.dat" << endl;
+
+ return 0;
+
+}
+
+
diff --git a/cpp/app/generate_quad/CMakeLists.txt b/cpp/app/generate_quad/CMakeLists.txt
new file mode 100644
index 00000000..d7ef6da1
--- /dev/null
+++ b/cpp/app/generate_quad/CMakeLists.txt
@@ -0,0 +1,48 @@
+
+add_executable (generate_quad generate_quad.cpp)
+
+target_link_libraries (generate_quad uqtkquad )
+target_link_libraries (generate_quad uqtktools)
+target_link_libraries (generate_quad uqtkarray)
+
+target_link_libraries (generate_quad depdsfmt )
+target_link_libraries (generate_quad deplapack)
+target_link_libraries (generate_quad depblas )
+target_link_libraries (generate_quad depfigtree )
+target_link_libraries (generate_quad depann )
+
+# Link fortran libraries
+if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+ # using GCC
+ if ("${GnuLibPath}" STREQUAL "")
+ target_link_libraries (generate_quad gfortran stdc++)
+ else()
+ target_link_libraries (generate_quad ${GnuLibPath}/libgfortran.a ${GnuLibPath}/libquadmath.a stdc++)
+ endif()
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
+ # using Intel C++
+ if ("${IntelLibPath}" STREQUAL "")
+ target_link_libraries (generate_quad ifcore)
+ else()
+ target_link_libraries (generate_quad ${IntelLibPath}/libifcore.a)
+ endif()
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+ # using Clang
+ if ("${ClangLibPath}" STREQUAL "")
+ target_link_libraries (generate_quad gfortran stdc++)
+ else()
+ target_link_libraries (generate_quad ${ClangLibPath}/libgfortran.dylib ${ClangLibPath}/libquadmath.dylib ${ClangLibPath}/libstdc++.dylib)
+ endif()
+endif()
+
+include_directories(../../lib/include)
+include_directories(../../lib/tools )
+include_directories(../../lib/quad )
+include_directories(../../lib/array )
+
+include_directories(../../../dep/dsfmt)
+include_directories(../../../dep/figtree)
+
+
+INSTALL(TARGETS generate_quad DESTINATION bin)
+
diff --git a/cpp/app/generate_quad/generate_quad.cpp b/cpp/app/generate_quad/generate_quad.cpp
new file mode 100644
index 00000000..002b610e
--- /dev/null
+++ b/cpp/app/generate_quad/generate_quad.cpp
@@ -0,0 +1,263 @@
+/* =====================================================================================
+ The UQ Toolkit (UQTk) version 3.0.4
+ Copyright (2017) Sandia Corporation
+ http://www.sandia.gov/UQToolkit/
+
+ Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+ with Sandia Corporation, the U.S. Government retains certain rights in this software.
+
+ This file is part of The UQ Toolkit (UQTk)
+
+ UQTk is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ UQTk is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+
+ Questions? Contact Bert Debusschere
+ Sandia National Laboratories, Livermore, CA, USA
+===================================================================================== */
+/// \file generate_quad.cpp
+/// \author K. Sargsyan 2013 -
+/// \brief Command-line utility to generate quadrature points
+
+#include <unistd.h>
+#include "quad.h"
+#include "tools.h"
+#include "arrayio.h"
+
+using namespace std;
+
+/// default value of parameter (level for sparse quadrature, or number of grid points for full quadrature)
+#define PARAM 3
+/// default data dimensionality
+#define DIM 2
+/// default sparseness type (full or sparse)
+#define FSTYPE "sparse"
+/// default quadrature type
+#define QUADTYPE "CC"
+/// default alpha parameter for chaos
+#define ALPHA 0.0
+/// default beta parameter for chaos
+#define BETA 1.0
+/// default domain file
+#define DOMAIN_FILE "param_domain.dat"
+/// default verbosity
+#define VERBOSITY 1
+
+/******************************************************************************/
+/// \brief Displays information about this program
+int usage(){
+ printf("usage: generate_quad [-h] [-r] [-d] [-g] [-x] [-p] [-a] [-b] [-s] [-v ]\n");
+ printf(" -h : print out this help message \n");
+ printf(" -r : use if building the next quadrature level on top of existing rule\n");
+ printf(" -d : define the data dimensionality (default=%d) \n",DIM);
+ printf(" -g : define the quad type, implemented 'CC','CCO','NC','NCO','LU','HG','JB','GLG','SW','pdf'. (default=%s) \n",QUADTYPE);
+ printf(" -x : define 'full' or 'sparse' (default=%s) \n",FSTYPE);
+ printf(" -p : define the level or nquad parameter(default=%d) \n",PARAM);
+ printf(" -a : define the alpha parameter of the quadrature (default=%lg) \n",ALPHA);
+ printf(" -b : define the beta parameter of the quadrature (default=%lg) \n",BETA);
+ printf(" -s : define the domain file for compact-support quadratures (default=%s) \n",DOMAIN_FILE);
+ printf(" -v : define verbosity 0-no output/1-output info (default=%d) \n",VERBOSITY);
+ printf("================================================================================\n");
+ printf("Input : If -r flagged, files qdpts.dat, wghts.dat, indices.dat required as quadrature will be built on top of them\n");
+ printf("Output : qdpts.dat, wghts.dat, indices.dat - quadrature points, weights, and indices w.r.t. default quadrature domain\n");
+ printf(" xqdpts.dat, xwghts.dat - quadrature points and weights w.r.t. given physical domain for compact domains,\n");
+ printf(" if the domain is given by -s\n");
+ printf(" *_new.dat - newly generated points/weights; if -r is not flagged these are the same as all points/wghts)\n");
+ printf("--------------------------------------------------------------------------------\n");
+ printf("Comments: -r flag may be activated only after a run with the SAME parameters, otherwise incremental addition does not make sense!\n");
+ printf("================================================================================\n");
+ exit(0);
+ return 0;
+}
+/******************************************************************************/
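+// Example (sketch): 'generate_quad -d 2 -g CC -x sparse -p 3' builds a level-3
+// sparse Clenshaw-Curtis rule on [-1,1]^2 and writes qdpts.dat and wghts.dat;
+// adding '-s param_domain.dat' also writes the rescaled xqdpts.dat/xwghts.dat.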
+
+
+
+/// Main program: Generates various kinds of quadrature points and weights
+int main (int argc, char *argv[])
+{
+ /// Set the default values
+ int verb = VERBOSITY ;
+ int ndim = DIM ;
+ char* quadType = (char *) QUADTYPE;
+ char* fsType = (char *) FSTYPE ;
+ int param = PARAM ;
+ double alpha = ALPHA;
+ double beta = BETA;
+ char* domain_file = (char *) DOMAIN_FILE;
+
+ /// Read the user input
+ int c;
+
+ bool rflag=false;
+ bool aflag=false;
+ bool bflag=false;
+ bool sflag=false;
+
+ while ((c=getopt(argc,(char **)argv,"hrd:g:x:p:a:b:s:v:"))!=-1){
+ switch (c) {
+ case 'h':
+ usage();
+ break;
+ case 'r':
+ rflag=true;
+ break;
+ case 'd':
+ ndim = strtol(optarg, (char **)NULL,0);
+ break;
+ case 'g':
+ quadType = optarg;
+ break;
+ case 'x':
+ fsType = optarg;
+ break;
+ case 'p':
+ param = strtol(optarg, (char **)NULL,0);
+ break;
+ case 'a':
+ aflag=true;
+ alpha = strtod(optarg, (char **)NULL);
+ break;
+ case 'b':
+ bflag=true;
+ beta = strtod(optarg, (char **)NULL);
+ break;
+ case 's':
+ sflag=true;
+ domain_file = optarg;
+ break;
+ case 'v':
+ verb = strtol(optarg, (char **)NULL,0);
+ break;
+ default :
+ break;
+ }
+ }
+
+ /// Print the input information on screen
+ if ( verb > 0 ) {
+ fprintf(stdout,"generate_quad() : parameters ================================= \n");
+ fprintf(stdout," ndim = %d \n",ndim);
+ fprintf(stdout," quadType = %s \n",quadType);
+ fprintf(stdout," fsType = %s \n",fsType);
+ fprintf(stdout," param = %d \n",param);
+ if (aflag)
+ fprintf(stdout," alpha = %lg \n",alpha);
+ if (bflag)
+ fprintf(stdout," beta = %lg \n",beta);
+ if (rflag)
+ fprintf(stdout,"generate_quad() : building on top of existing quad points\n");
+ if (sflag)
+ fprintf(stdout,"generate_quad() : domain file %s is provided\n",domain_file);
+ }
+ /*----------------------------------------------------------------------------*/
+
+ /// Parameter sanity checks
+ if (rflag && string(fsType)=="full")
+ throw Tantrum("Incremental addition makes sense only in the sparse mode!");
+ if (sflag && string(quadType)!="CC"
+ && string(quadType)!="CCO"
+ && string(quadType)!="NC"
+ && string(quadType)!="NCO"
+ && string(quadType)!="LU"
+ && string(quadType)!="JB")
+ throw Tantrum("Input domain should be provided only for compact-support quadratures!");
+
+
+ /// Declare the quadrature rule object
+ Array1D<string> quadtypes(ndim,string(quadType));
+
+ Array1D<double> alphas(ndim,alpha);
+ Array1D<double> betas(ndim,beta);
+ Array1D<int> params(ndim,param);
+
+ Quad spRule(quadtypes,fsType,params,alphas, betas);
+ spRule.SetVerbosity(verb);
+
+ // Declare arrays
+ Array1D<int> newPtInd;
+ Array2D<double> qdpts;
+ Array1D<double> wghts;
+
+ spRule.SetRule();
+
+ // DEBUG
+ //Array1D ind;
+ //spRule.compressRule(ind);
+
+ /// Extract the properties of the rule
+ spRule.GetRule(qdpts,wghts);
+ int nQdpts=qdpts.XSize();
+
+
+ /// Write-out to files
+ write_datafile(qdpts,"qdpts.dat");
+ write_datafile_1d(wghts,"wghts.dat");
+
+ /// Scale if domain is provided
+ if (sflag){
+ /// Set the domain
+ Array1D<double> aa(ndim,-1.e0);
+ Array1D<double> bb(ndim,1.e0);
+ Array2D<double> aabb(ndim,2,0.e0);
+
+ if(ifstream(domain_file)){
+ read_datafile(aabb,domain_file);
+ for (int i=0;i<ndim;i++){
+ aa(i)=aabb(i,0);
+ bb(i)=aabb(i,1);
+ }
+ }
+
+ // Declare arrays for the scaled rule
+ Array2D<double> xqdpts(nQdpts,ndim);
+ // Array2D<double> xqdpts_new(nNewQdpts,ndim);
+ Array1D<double> xwghts(nQdpts);
+ // Array1D<double> xwghts_new(nNewQdpts);
+
+ // Scale points according to the given domain
+ // (reconstruction: linear map from [-1,1]^d to [aa,bb]; weights are
+ // scaled by the Jacobian of the map)
+ double wfac=1.e0;
+ for(int id=0;id<ndim;id++) wfac*=0.5*(bb(id)-aa(id));
+ for(int it=0;it<nQdpts;it++){
+ for(int id=0;id<ndim;id++)
+ xqdpts(it,id)=aa(id)+0.5*(bb(id)-aa(id))*(qdpts(it,id)+1.e0);
+ xwghts(it)=wghts(it)*wfac;
+ }
+
+ // Write out the scaled points and weights
+ write_datafile(xqdpts,"xqdpts.dat");
+ write_datafile_1d(xwghts,"xwghts.dat");
+ }
+
+ if ( verb > 0 ) {
+ //fprintf(stdout,"generate_quad() : generated %d new quadrature points\n",nNewQdpts);
+ fprintf(stdout,"generate_quad() : total number of quadrature points: %d\n",nQdpts);
+ fprintf(stdout,"generate_quad() : done ========================================\n");
+ }
+
+ return 0;
+}
+
+
diff --git a/cpp/app/gkpSparse/CMakeLists.txt b/cpp/app/gkpSparse/CMakeLists.txt
new file mode 100644
index 00000000..28c07ba5
--- /dev/null
+++ b/cpp/app/gkpSparse/CMakeLists.txt
@@ -0,0 +1,47 @@
+enable_language(Fortran)
+enable_language(CXX)
+
+add_executable (gkpSparse gkpSparse.cpp gkpclib.cpp gkpflib.f)
+
+target_link_libraries (gkpSparse uqtktools)
+target_link_libraries (gkpSparse uqtkarray)
+
+target_link_libraries (gkpSparse depdsfmt )
+target_link_libraries (gkpSparse deplapack)
+target_link_libraries (gkpSparse depblas )
+target_link_libraries (gkpSparse depfigtree )
+target_link_libraries (gkpSparse depann )
+
+# Link fortran libraries
+if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+ # using GCC
+ if ("${GnuLibPath}" STREQUAL "")
+ target_link_libraries (gkpSparse gfortran stdc++)
+ else()
+ target_link_libraries (gkpSparse ${GnuLibPath}/libgfortran.a ${GnuLibPath}/libquadmath.a stdc++)
+ endif()
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
+ # using Intel C++
+ if ("${IntelLibPath}" STREQUAL "")
+ target_link_libraries (gkpSparse ifcore ifport)
+ else()
+ target_link_libraries (gkpSparse ${IntelLibPath}/libifcore.a ${IntelLibPath}/libifport.a)
+ endif()
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+ # using Clang
+ if ("${ClangLibPath}" STREQUAL "")
+ target_link_libraries (gkpSparse gfortran stdc++)
+ else()
+ target_link_libraries (gkpSparse ${ClangLibPath}/libgfortran.dylib ${ClangLibPath}/libquadmath.dylib ${ClangLibPath}/libstdc++.dylib)
+ endif()
+endif()
+
+include_directories(../../lib/include)
+include_directories(../../lib/tools )
+include_directories(../../lib/array )
+
+include_directories(../../../dep/dsfmt)
+include_directories(../../../dep/figtree)
+
+INSTALL(TARGETS gkpSparse DESTINATION bin)
+
diff --git a/cpp/app/gkpSparse/gkpSparse.cpp b/cpp/app/gkpSparse/gkpSparse.cpp
new file mode 100644
index 00000000..6736b6dd
--- /dev/null
+++ b/cpp/app/gkpSparse/gkpSparse.cpp
@@ -0,0 +1,164 @@
+/* =====================================================================================
+ The UQ Toolkit (UQTk) version 3.0.4
+ Copyright (2017) Sandia Corporation
+ http://www.sandia.gov/UQToolkit/
+
+ Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+ with Sandia Corporation, the U.S. Government retains certain rights in this software.
+
+ This file is part of The UQ Toolkit (UQTk)
+
+ UQTk is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ UQTk is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+
+ Questions? Contact Bert Debusschere
+ Sandia National Laboratories, Livermore, CA, USA
+===================================================================================== */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string>
+#include <iostream>
+#include <assert.h>
+
+#include "arrayio.h"
+#include "gkplib.h"
+#define NDIM 2
+#define NLEV 2
+#define VERBOSITY 1
+#define PDFTYPE "unif"
+
+/******************************************************************************/
+/// \brief Displays information about this program
+int usage(){
+ printf("usage: gkpSparse [-h] [-d] [-l] [-v ]\n");
+ printf(" -h : print out this help message \n");
+ printf(" -d : define the data dimensionality (default=%d) \n",NDIM);
+ printf(" -l : define the level or nquad parameter(default=%d) \n",NLEV);
+ printf(" -t : pdf type 'unif'/'norm'/'cc' (default=%s) \n",PDFTYPE);
+ printf(" -v : define verboosity 0-no output/1-output info (default=%d) \n",VERBOSITY);
+ printf("================================================================================\n");
+ printf("Output : qdpts.dat, wghts.dat - quadrature points, weights w.r.t. default quadrature domain\n");
+ printf("================================================================================\n");
+ exit(0);
+ return (0);
+}
+
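+/* Example (sketch): 'gkpSparse -d 2 -l 3 -t unif' builds a level-3
+   Gauss-Kronrod-Patterson sparse rule for the uniform pdf in 2 dimensions
+   and writes qdpts.dat and wghts.dat. */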
+int main(int argc, char *argv[]) {
+
+ double *qpts=NULL, *w=NULL;
+ int dim=NDIM, lev=NLEV, verb=VERBOSITY, nqpts;
+ char *pdftype = (char *) PDFTYPE;
+ bool anisFlag = false;
+
+ /// Read the user input
+ int c;
+ while ((c=getopt(argc,(char **)argv,"had:l:v:t:"))!=-1){
+ switch (c) {
+ case 'h':
+ usage();
+ break;
+ case 'd':
+ dim = strtol(optarg, (char **)NULL,0);
+ break;
+ case 'l':
+ lev = strtol(optarg, (char **)NULL,0);
+ break;
+ case 'v':
+ verb = strtol(optarg, (char **)NULL,0);
+ break;
+ case 't':
+ pdftype = optarg;
+ break;
+ case 'a':
+ anisFlag = true;
+ break;
+ default :
+ break;
+ }
+ }
+
+ if ( verb > 0 ) {
+ fprintf(stdout,"gkpSparse : parameters ================================= \n");
+ fprintf(stdout," ndim = %d \n",dim);
+ fprintf(stdout," nlev = %d \n",lev);
+ fprintf(stdout," pdf = %s \n",pdftype);
+ fprintf(stdout," anis = %d \n",anisFlag);
+ fprintf(stdout," verb = %d \n",verb);
+ }
+
+
+ Array2D<int> levList;
+ if (anisFlag) {
+ read_datafileVS(levList,"levList.dat");
+ assert(levList.XSize() == dim);
+ assert(levList.YSize() == 1 );
+ }
+
+ /* Get sparse quad */
+ if ( std::string(pdftype) == std::string("unif") ) {
+ if (anisFlag)
+ getSpgAnisQW ( getGKPunif, getOrderGKPunif, dim, levList.GetArrayPointer(), &nqpts, &qpts, &w ) ;
+ else
+ getSpgQW ( getGKPunif, getOrderGKPunif, dim, lev, &nqpts, &qpts, &w ) ;
+ }
+ else if ( std::string(pdftype) == std::string("norm") ) {
+ if (anisFlag)
+ getSpgAnisQW ( getGKPnorm, getOrderGKPnorm, dim, levList.GetArrayPointer(), &nqpts, &qpts, &w ) ;
+ else
+ getSpgQW ( getGKPnorm, getOrderGKPnorm, dim, lev, &nqpts, &qpts, &w ) ;
+ }
+ else if ( std::string(pdftype) == std::string("cc") ) {
+ if (anisFlag)
+ getSpgAnisQW ( getCC, getOrderCC, dim, levList.GetArrayPointer(), &nqpts, &qpts, &w ) ;
+ else
+ getSpgQW ( getCC, getOrderCC, dim, lev, &nqpts, &qpts, &w ) ;
+ }
+ else {
+ std::cout<<"Unknown quadrature type: "< 0 ) {
+ std::cout<<"No. of quadrature points: "<.
+
+ Questions? Contact Bert Debusschere
+ Sandia National Laboratories, Livermore, CA, USA
+===================================================================================== */
+#include "math.h"
+#include "tools.h"
+#include "gkplib.h"
+
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+
+
+/* (1) */
+static double x1[] = {0.0000000};
+static double w1[] = {2.0000000};
+
+/* (1+2) */
+static double x3[] = {-0.77459666924148337704,0.0, 0.77459666924148337704 };
+static double w3[] = {0.555555555555555555556,0.888888888888888888889,0.555555555555555555556};
+
+/* (1+2+4) */
+static double x7[] = {-0.96049126870802028342,-0.77459666924148337704,-0.43424374934680255800,
+ 0.0,
+ 0.43424374934680255800, 0.77459666924148337704, 0.96049126870802028342};
+static double w7[] = { 0.104656226026467265194,0.268488089868333440729,0.401397414775962222905,
+ 0.450916538658474142345,
+ 0.401397414775962222905,0.268488089868333440729,0.104656226026467265194};
+
+/* (1+2+4+8) */
+static double x15[] = {-0.99383196321275502221,-0.96049126870802028342,-0.88845923287225699889,
+ -0.77459666924148337704,-0.62110294673722640294,-0.43424374934680255800,
+ -0.22338668642896688163, 0.0, 0.22338668642896688163,
+ 0.43424374934680255800, 0.62110294673722640294, 0.77459666924148337704,
+ 0.88845923287225699889, 0.96049126870802028342, 0.99383196321275502221 };
+static double w15[] = {0.0170017196299402603390,0.0516032829970797396969,0.0929271953151245376859,
+ 0.134415255243784220360, 0.171511909136391380787, 0.200628529376989021034,
+ 0.219156858401587496404, 0.225510499798206687386, 0.219156858401587496404,
+ 0.200628529376989021034, 0.171511909136391380787, 0.134415255243784220360,
+ 0.0929271953151245376859,0.0516032829970797396969, 0.0170017196299402603390};
+
+/* (1+2+4+8+16) */
+static double x31[] = {-0.99909812496766759766,-0.99383196321275502221,-0.98153114955374010687,
+ -0.96049126870802028342,-0.92965485742974005667,-0.88845923287225699889,
+ -0.83672593816886873550,-0.77459666924148337704,-0.70249620649152707861,
+ -0.62110294673722640294,-0.53131974364437562397,-0.43424374934680255800,
+ -0.33113539325797683309,-0.22338668642896688163,-0.11248894313318662575,
+ 0.0,
+ 0.11248894313318662575, 0.22338668642896688163, 0.33113539325797683309,
+ 0.43424374934680255800, 0.53131974364437562397, 0.62110294673722640294,
+ 0.70249620649152707861, 0.77459666924148337704, 0.83672593816886873550,
+ 0.88845923287225699889, 0.92965485742974005667, 0.96049126870802028342,
+ 0.98153114955374010687, 0.99383196321275502221, 0.99909812496766759766 };
+static double w31[] = {0.00254478079156187441540,0.00843456573932110624631,0.0164460498543878109338,
+ 0.0258075980961766535646, 0.0359571033071293220968, 0.0464628932617579865414,
+ 0.0569795094941233574122, 0.0672077542959907035404, 0.0768796204990035310427,
+ 0.0857559200499903511542, 0.0936271099812644736167, 0.100314278611795578771,
+ 0.105669893580234809744, 0.109578421055924638237, 0.111956873020953456880,
+ 0.112755256720768691607,
+ 0.111956873020953456880, 0.109578421055924638237, 0.105669893580234809744,
+ 0.100314278611795578771, 0.0936271099812644736167, 0.0857559200499903511542,
+ 0.0768796204990035310427, 0.0672077542959907035404, 0.0569795094941233574122,
+ 0.0464628932617579865414, 0.0359571033071293220968, 0.0258075980961766535646,
+ 0.0164460498543878109338, 0.00843456573932110624631,0.00254478079156187441540 };
+
+/* (1+2+4+8+16+32) */
+static double x63[] = {-0.99987288812035761194,-0.99909812496766759766,-0.99720625937222195908,
+ -0.99383196321275502221,-0.98868475754742947994,-0.98153114955374010687,
+ -0.97218287474858179658,-0.96049126870802028342,-0.94634285837340290515,
+ -0.92965485742974005667,-0.91037115695700429250,-0.88845923287225699889,
+ -0.86390793819369047715,-0.83672593816886873550,-0.80694053195021761186,
+ -0.77459666924148337704,-0.73975604435269475868,-0.70249620649152707861,
+ -0.66290966002478059546,-0.62110294673722640294,-0.57719571005204581484,
+ -0.53131974364437562397,-0.48361802694584102756,-0.43424374934680255800,
+ -0.38335932419873034692,-0.33113539325797683309,-0.27774982202182431507,
+ -0.22338668642896688163,-0.16823525155220746498,-0.11248894313318662575,
+ -0.056344313046592789972,0.0, 0.056344313046592789972,
+ 0.11248894313318662575, 0.16823525155220746498, 0.22338668642896688163,
+ 0.27774982202182431507, 0.33113539325797683309, 0.38335932419873034692,
+ 0.43424374934680255800, 0.48361802694584102756, 0.53131974364437562397,
+ 0.57719571005204581484, 0.62110294673722640294, 0.66290966002478059546,
+ 0.70249620649152707861, 0.73975604435269475868, 0.77459666924148337704,
+ 0.80694053195021761186, 0.83672593816886873550, 0.86390793819369047715,
+ 0.88845923287225699889, 0.91037115695700429250, 0.92965485742974005667,
+ 0.94634285837340290515, 0.96049126870802028342, 0.97218287474858179658,
+ 0.98153114955374010687, 0.98868475754742947994, 0.99383196321275502221,
+ 0.99720625937222195908, 0.99909812496766759766, 0.99987288812035761194 };
+static double w63[] = {0.000363221481845530659694,0.00126515655623006801137,0.00257904979468568827243,
+ 0.00421763044155885483908, 0.00611550682211724633968,0.00822300795723592966926,
+ 0.0104982469096213218983, 0.0129038001003512656260, 0.0154067504665594978021,
+ 0.0179785515681282703329, 0.0205942339159127111492, 0.0232314466399102694433,
+ 0.0258696793272147469108, 0.0284897547458335486125, 0.0310735511116879648799,
+ 0.0336038771482077305417, 0.0360644327807825726401, 0.0384398102494555320386,
+ 0.0407155101169443189339, 0.0428779600250077344929, 0.0449145316536321974143,
+ 0.0468135549906280124026, 0.0485643304066731987159, 0.0501571393058995374137,
+ 0.0515832539520484587768, 0.0528349467901165198621, 0.0539054993352660639269,
+ 0.0547892105279628650322, 0.0554814043565593639878, 0.0559784365104763194076,
+ 0.0562776998312543012726, 0.0563776283603847173877, 0.0562776998312543012726,
+ 0.0559784365104763194076, 0.0554814043565593639878, 0.0547892105279628650322,
+ 0.0539054993352660639269, 0.0528349467901165198621, 0.0515832539520484587768,
+ 0.0501571393058995374137, 0.0485643304066731987159, 0.0468135549906280124026,
+ 0.0449145316536321974143, 0.0428779600250077344929, 0.0407155101169443189339,
+ 0.0384398102494555320386, 0.0360644327807825726401, 0.0336038771482077305417,
+ 0.0310735511116879648799, 0.0284897547458335486125, 0.0258696793272147469108,
+ 0.0232314466399102694433, 0.0205942339159127111492, 0.0179785515681282703329,
+ 0.0154067504665594978021, 0.0129038001003512656260, 0.0104982469096213218983,
+ 0.00822300795723592966926, 0.00611550682211724633968,0.00421763044155885483908,
+ 0.00257904979468568827243, 0.00126515655623006801137,0.000363221481845530659694 };
+
+/* (1) */
+static double xn1[] = {0.0000000000000000};
+static double wn1[] = {1.0000000000000000};
+
+/* (1+2) */
+static double xn3[] = {-1.73205080756887719, 0.000000000000000000, 1.73205080756887719};
+static double wn3[] = {0.166666666666666657, 0.66666666666666663, 0.166666666666666657};
+
+/* (1+2+6) */
+static double xn9[] = {-4.18495601767273229, -2.86127957605705818, -1.73205080756887719,
+ -0.741095349994540853, 0.00000000000000000, 0.741095349994540853,
+ 1.73205080756887719, 2.86127957605705818, 4.18495601767273229 };
+static double wn9[] = { 9.42694575565174701E-05, 0.00799632547089352934, 0.0948509485094851251,
+ 0.270074329577937755, 0.253968253968254065, 0.270074329577937755,
+ 0.0948509485094851251,0.00799632547089352934,9.42694575565174701E-05 };
+
+/* (1+2+6+10) */
+static double xn19[] = {-6.36339449433636961, -5.18701603991365623, -4.18495601767273229,
+ -3.20533379449919442, -2.86127957605705818, -2.59608311504920231,
+ -1.73205080756887719, -1.23042363402730603, -0.741095349994540853,
+ 0.0000000000000000,
+ 0.741095349994540853, 1.23042363402730603, 1.73205080756887719,
+ 2.59608311504920231, 2.86127957605705818, 3.20533379449919442,
+ 4.18495601767273229, 5.18701603991365623, 6.36339449433636961 };
+static double wn19[] = { 8.62968460222986318E-10, 6.09480873146898402E-07, 6.01233694598479965E-05,
+ 0.00288488043650675591, -0.00633722479337375712, 0.0180852342547984622,
+ 0.0640960546868076103, 0.0611517301252477163, 0.208324991649608771,
+ 0.303467199854206227,
+ 0.208324991649608771, 0.0611517301252477163, 0.0640960546868076103,
+ 0.0180852342547984622, -0.00633722479337375712, 0.00288488043650675591,
+ 6.01233694598479965E-05, 6.09480873146898402E-07,8.62968460222986318E-10 };
+
+/* (1+2+6+10+16) */
+static double xn35[] = {-9.0169397898903032, -7.98077179859056063, -7.12210670080461661,
+ -6.36339449433636961, -5.69817776848810986, -5.18701603991365623,
+ -4.73643308595229673, -4.18495601767273229, -3.63531851903727832,
+ -3.20533379449919442, -2.86127957605705818, -2.59608311504920231,
+ -2.23362606167694189, -1.73205080756887719, -1.23042363402730603,
+ -0.741095349994540853, -0.248992297579960609,
+ 0.00000000000000000,
+ 0.248992297579960609, 0.741095349994540853,
+ 1.23042363402730603, 1.73205080756887719, 2.23362606167694189,
+ 2.59608311504920231, 2.86127957605705818, 3.20533379449919442,
+ 3.63531851903727832, 4.18495601767273229, 4.73643308595229673,
+ 5.18701603991365623, 5.69817776848810986, 6.36339449433636961,
+ 7.12210670080461661, 7.98077179859056063, 9.0169397898903032 };
+static double wn35[] = { 1.05413265823340136E-18, 5.45004126506381281E-15, 3.09722235760629949E-12,
+ 4.60117603486559168E-10, 2.13941944795610622E-08, 2.46764213457981401E-07,
+ 2.73422068011878881E-06, 3.57293481989753322E-05, 0.000275242141167851312,
+ 0.000818953927502267349, 0.00231134524035220713, 0.00315544626918755127,
+ 0.015673473751851151, 0.0452736854651503914, 0.0923647267169863534,
+ 0.148070831155215854, 0.191760115888044341,
+ 0.000514894508069213769,
+ 0.191760115888044341, 0.148070831155215854,
+ 0.0923647267169863534, 0.0452736854651503914, 0.015673473751851151,
+ 0.00315544626918755127, 0.00231134524035220713, 0.000818953927502267349,
+ 0.000275242141167851312, 3.57293481989753322E-05, 2.73422068011878881E-06,
+ 2.46764213457981401E-07, 2.13941944795610622E-08, 4.60117603486559168E-10,
+ 3.09722235760629949E-12, 5.45004126506381281E-15, 1.05413265823340136E-18 };
+
+
+
+void getCC ( int n, int *nq, double **x, double **w ) {
+
+ if ((n-1)%2 != 0) std::cout<<"getCC() : invalid number of points "<<n<<std::endl;
+
+ /* (construction of the Clenshaw-Curtis rule, the GKP rule getters, and the
+ composition utility getCompNintoDim elided) */
+}
+
+void getSpgAnisQW ( void get1DQW ( int , int *, double **, double** ), int getOrder ( int ),
+ int dim, int *levList, int *nqpts, double **qpts, double **w ) {
+
+ /* Overall level is the largest requested level across dimensions */
+ int lev = 0;
+ for ( int j=0; j<dim; j++ ) if ( levList[j]>lev ) lev=levList[j];
+
+ /* Initial estimate for number of quad points */
+ (*nqpts) = getSpgSize ( getOrder, dim, lev );
+#ifdef DEBUG
+ std::cout<<(*nqpts)<levList[j]) {
+ goodElem = false;
+ std::cout<0);
+ assert(spgSize>1);
+
+ int isgn=0, i1=0, j1=0, index=0;
+
+ do {
+
+ heap_ext_(&spgSize,&isgn,&i1,&j1,&index);
+ if (index < 0) {
+ isgn = 0;
+ for ( int j = 0; j < dim; j++ ) {
+ if ( qpts[(i1-1)*dim+j] < qpts[(j1-1)*dim+j] ) {
+ isgn = -1; break;
+ }
+ else if ( qpts[(j1-1)*dim+j] < qpts[(i1-1)*dim+j] ) {
+ isgn = 1;
+ break;
+ }
+ }
+ }
+
+ if (index > 0) {
+ double dtmp ;
+ for ( int j = 0; j < dim; j++ ) {
+ double dtmp = qpts[(i1-1)*dim+j];
+ qpts[(i1-1)*dim+j] = qpts[(j1-1)*dim+j];
+ qpts[(j1-1)*dim+j] = dtmp;
+ }
+ dtmp = w[i1-1];
+ w[i1-1] = w[j1-1];
+ w[j1-1] = dtmp;
+ }
+
+ } while (index != 0);
+
+
+ return;
+
+}
+
+void getTensorProd(int dim, double *qpts, double *w, int *spgSize, int *n1D,
+ double **x1D, double **w1D, double qfac) {
+
+ int n1 = 1, n2 = 1;
+ for (int i=1; i<dim; i++) n2 *= n1D[i];
+
+ /* (tensorization of the 1D rules and the isotropic driver getSpgQW elided) */
+}
diff --git a/cpp/app/gkpSparse/gkpflib.f b/cpp/app/gkpSparse/gkpflib.f
new file mode 100644
--- /dev/null
+++ b/cpp/app/gkpSparse/gkpflib.f
+c$$$=====================================================================================
+c$$$ The UQ Toolkit (UQTk) version 3.0.4
+c$$$ Copyright (2017) Sandia Corporation
+c$$$ http://www.sandia.gov/UQToolkit/
+c$$$
+c$$$ Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+c$$$ with Sandia Corporation, the U.S. Government retains certain rights in this software.
+c$$$
+c$$$ This file is part of The UQ Toolkit (UQTk)
+c$$$
+c$$$ UQTk is free software: you can redistribute it and/or modify
+c$$$ it under the terms of the GNU Lesser General Public License as published by
+c$$$ the Free Software Foundation, either version 3 of the License, or
+c$$$ (at your option) any later version.
+c$$$
+c$$$ UQTk is distributed in the hope that it will be useful,
+c$$$ but WITHOUT ANY WARRANTY; without even the implied warranty of
+c$$$ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+c$$$ GNU Lesser General Public License for more details.
+c$$$
+c$$$ You should have received a copy of the GNU Lesser General Public License
+c$$$ along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+c$$$
+c$$$ Questions? Contact Bert Debusschere
+c$$$ Sandia National Laboratories, Livermore, CA, USA
+c$$$=====================================================================================
+ subroutine heap_ext(n,isgn,i,j,index)
+
+ implicit none
+ integer n,isgn,i,j,index
+
+ integer l,l1,n1
+
+ common /hpwrk/ l,l1,n1
+
+ if (index) 90,10,80
+ 10 n1 = n
+ l = 1+n/2
+ 20 l = l-1
+ 30 l1 = l
+ 40 i = l1+l1
+ if (i-n1) 50,60,70
+ 50 j = i+1
+ index = -2
+ return
+ 60 j = l1
+ l1 = i
+ index = -1
+ return
+ 70 if (l.gt.1) goto 20
+ if (n1.eq.1) goto 110
+ i = n1
+ n1 = n1-1
+ j = 1
+ index = 1
+ return
+ 80 if (index-1) 30,30,40
+ 90 if (index.eq.-1) goto 100
+ if (isgn.lt.0) i=i+1
+ goto 60
+ 100 if (isgn.le.0) goto 70
+ index = 2
+ return
+ 110 index = 0
+ return
+ end
+
+
diff --git a/cpp/app/gkpSparse/gkplib.h b/cpp/app/gkpSparse/gkplib.h
new file mode 100644
index 00000000..792248cf
--- /dev/null
+++ b/cpp/app/gkpSparse/gkplib.h
@@ -0,0 +1,89 @@
+/* =====================================================================================
+ The UQ Toolkit (UQTk) version 3.0.4
+ Copyright (2017) Sandia Corporation
+ http://www.sandia.gov/UQToolkit/
+
+ Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+ with Sandia Corporation, the U.S. Government retains certain rights in this software.
+
+ This file is part of The UQ Toolkit (UQTk)
+
+ UQTk is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ UQTk is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+
+ Questions? Contact Bert Debusschere
+ Sandia National Laboratories, Livermore, CA, USA
+===================================================================================== */
+#ifndef GKPLIB
+#define GKPLIB
+
+/** \file gkplib.h
+ * Functions related to Gauss-Kronrod-Patterson sparse quadrature construction
+ */
+
+/// \brief retrieve pointers to 1D Clenshaw-Curtis rules
+void getCC ( int n, int *nq, double **x, double **w );
+
+/// \brief get order of Clenshaw-Curtis rules based on level
+int getOrderCC ( int lev ) ;
+
+/// \brief retrieve pointers to 1D Gauss-Kronrod-Patterson rules for
+/// uniform pdf based on the quadrature level
+void getGKPunif ( int n, int *nq, double **x, double **w );
+
+/// \brief retrieve pointers to 1D Kronrod-Patterson rules for
+/// normal pdf based on the quadrature level
+void getGKPnorm ( int n, int *nq, double **x, double **w );
+
+/// \brief get order of uniform Gauss-Kronrod-Patterson rules based on level
+int getOrderGKPunif ( int lev ) ;
+
+/// \brief get order of normal Gauss-Kronrod-Patterson rules based on level
+int getOrderGKPnorm ( int lev ) ;
+
+/// \brief List of decompositions of 'n' into 'dim' parts. The
+/// implementation is based on Algorithm 5 of Combinatorial Algorithms
+/// by Albert Nijenhuis, Herbert Wilf
+void getCompNintoDim(int n, int dim, int *nelem, int **plist) ;
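+/// Example (sketch): n=3, dim=2 yields the four compositions
+/// (3,0),(2,1),(1,2),(0,3), i.e. *nelem is set to 4 and plist holds 4*dim entries.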
+
+/// \brief Initial estimate for sparse grid size
+int getSpgSize ( int getOrder ( int ), int dim, int lev );
+
+/// \brief Sort sparse grid in lexicographical order
+void sortSpg ( int dim, int spgSize, double *qpts, double *w );
+
+/// \brief compute dim-dimensional tensor grid based a series of 1D rules
+void getTensorProd(int dim, double *qpts, double *w, int *spgSize, int *n1D,
+ double **x1D, double **w1D, double qfac);
+
+/// \brief Main function that connects the user setup for pdftype,
+/// dimensionality, and quadrature level and various pieces of the
+/// sparse quadrature algorithm employing Gauss-Kronrod-Patterson rules
+void getSpgQW ( void get1DQW ( int , int *, double **, double** ), int getOrder ( int ),
+                int dim, int lev, int *nqpts, double **qpts, double **w );
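+/// Example (sketch): a level-3 uniform-GKP sparse rule in 3 dimensions:
+///   int nq; double *q=NULL, *w=NULL;
+///   getSpgQW( getGKPunif, getOrderGKPunif, 3, 3, &nq, &q, &w );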
+
+void getSpgAnisQW ( void get1DQW ( int , int *, double **, double** ), int getOrder ( int ),
+ int dim, int *levList, int *nqpts, double **qpts, double **w ) ;
+
+void getCC ( int n, int *nq, double **x, double **w );
+int getOrderCC ( int lev );
+
+/// \brief Fortran function for sorting an array of items. The array
+/// operations happen outside this function, based on a series of
+/// flags passed between the user code and this function. This
+/// implementation is based on Algorithm 15 of Combinatorial Algorithms
+/// by Albert Nijenhuis, Herbert Wilf
+extern "C" void heap_ext_(const int *,const int *, int *, int *, int *);
+
+#endif
diff --git a/cpp/app/gp_regr/CMakeLists.txt b/cpp/app/gp_regr/CMakeLists.txt
new file mode 100644
index 00000000..90a033a5
--- /dev/null
+++ b/cpp/app/gp_regr/CMakeLists.txt
@@ -0,0 +1,64 @@
+project (UQTk)
+
+add_executable (gp_regr gp_regr.cpp)
+
+target_link_libraries (gp_regr uqtkgproc )
+target_link_libraries (gp_regr uqtkpce )
+target_link_libraries (gp_regr uqtkbcs )
+target_link_libraries (gp_regr uqtkquad )
+target_link_libraries (gp_regr uqtktools)
+target_link_libraries (gp_regr uqtkarray)
+
+target_link_libraries (gp_regr depdsfmt )
+target_link_libraries (gp_regr deplbfgs )
+target_link_libraries (gp_regr depcvode )
+target_link_libraries (gp_regr depnvec )
+target_link_libraries (gp_regr depslatec)
+target_link_libraries (gp_regr deplapack)
+target_link_libraries (gp_regr depblas )
+target_link_libraries (gp_regr depfigtree )
+target_link_libraries (gp_regr depann )
+
+# Link fortran libraries
+if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+ # using GCC
+ if ("${GnuLibPath}" STREQUAL "")
+ target_link_libraries (gp_regr gfortran stdc++)
+ else()
+ target_link_libraries (gp_regr ${GnuLibPath}/libgfortran.a ${GnuLibPath}/libquadmath.a stdc++)
+ endif()
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
+ # using Intel
+ if ("${IntelLibPath}" STREQUAL "")
+ target_link_libraries (gp_regr ifcore)
+ target_link_libraries (gp_regr ifport)
+ else()
+ target_link_libraries (gp_regr ${IntelLibPath}/libifcore.a)
+ target_link_libraries (gp_regr ${IntelLibPath}/libifport.a)
+ endif()
+elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+ # using Clang
+ if ("${ClangLibPath}" STREQUAL "")
+ target_link_libraries (gp_regr gfortran stdc++)
+ else()
+ target_link_libraries (gp_regr ${ClangLibPath}/libgfortran.dylib ${ClangLibPath}/libquadmath.dylib ${ClangLibPath}/libstdc++.dylib)
+ endif()
+endif()
+
+include_directories(../../lib/pce )
+include_directories(../../lib/array )
+include_directories(../../lib/include)
+include_directories(../../lib/quad )
+include_directories(../../lib/tools )
+include_directories(../../lib/bcs )
+include_directories(../../lib/gproc )
+
+
+include_directories(../../../dep/dsfmt)
+include_directories(../../../dep/lbfgs)
+include_directories(../../../dep/figtree)
+include_directories(../../../dep/cvode-2.7.0/include)
+include_directories("${PROJECT_BINARY_DIR}/../../../dep/cvode-2.7.0/include")
+
+INSTALL(TARGETS gp_regr DESTINATION bin)
+
diff --git a/cpp/app/gp_regr/gp_regr.cpp b/cpp/app/gp_regr/gp_regr.cpp
new file mode 100644
index 00000000..5f92155c
--- /dev/null
+++ b/cpp/app/gp_regr/gp_regr.cpp
@@ -0,0 +1,296 @@
+/* =====================================================================================
+ The UQ Toolkit (UQTk) version 3.0.4
+ Copyright (2017) Sandia Corporation
+ http://www.sandia.gov/UQToolkit/
+
+ Copyright (2017) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000
+ with Sandia Corporation, the U.S. Government retains certain rights in this software.
+
+ This file is part of The UQ Toolkit (UQTk)
+
+ UQTk is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ UQTk is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with UQTk. If not, see <http://www.gnu.org/licenses/>.
+
+ Questions? Contact Bert Debusschere
+ Sandia National Laboratories, Livermore, CA, USA
+===================================================================================== */
+/// \file gp_regr.cpp
+/// \author K. Sargsyan 2015 -
+/// \brief Command-line utility for Gaussian Process regression
+
+#include <math.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sstream>
+#include <map>
+#include <iostream>
+#include <string>
+
+#include "Array1D.h"
+#include "Array2D.h"
+
+#include "PCSet.h"
+#include "error_handlers.h"
+#include "ftndefs.h"
+#include "gen_defs.h"
+#include "assert.h"
+#include "quad.h"
+#include "gproc.h"
+
+#include "arrayio.h"
+#include "tools.h"
+#include "arraytools.h"
+#include "dsfmt_add.h"
+
+using namespace std;
+
+
+
+/// default x-file
+#define XFILE "xdata.dat"
+/// default y-file
+#define YFILE "ydata.dat"
+/// default flag to output mean (m), mean+std (ms) or mean+std+cov (msc)
+#define MSC "ms"
+/// default PC order
+#define ORD 3
+
+/******************************************************************************/
+
+/// Displays information about this program
+int usage(){
+ printf("usage: gp_regr [-h] [-x] [-y] [-m] [-t] [-o] [-l] [-w] [-s]\n");
+ printf(" -h : print out this help message \n");
+ printf(" -x : xdata filename, matrix Nxd (default=%s) \n",XFILE);
+ printf(" -y : ydata filename, matrix Nxe (default=%s) \n",YFILE);
+ printf(" -m : flag to determine whether only mean is needed or not (default=%s) \n",MSC);
+ printf(" -t