Index: applgrid/tags/applgrid-01-06-36/bin/Makefile.am
===================================================================
--- applgrid/tags/applgrid-01-06-36/bin/Makefile.am	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/bin/Makefile.am	(revision 2010)
@@ -0,0 +1,3 @@
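+## applgrid-config is generated from applgrid-config.in by configure (via
+## config.status): distribute the template, install the resulting script.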
+EXTRA_DIST = applgrid-config.in 
+bin_SCRIPTS = applgrid-config
+
Index: applgrid/tags/applgrid-01-06-36/bin/applgrid-config.in
===================================================================
--- applgrid/tags/applgrid-01-06-36/bin/applgrid-config.in	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/bin/applgrid-config.in	(revision 2010)
@@ -0,0 +1,60 @@
+#! /usr/bin/env bash
+
+# These variables need to exist
+prefix=@prefix@
+# datadir = @datadir@
+
+
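+## Illustrative usage sketch (file names below are placeholders), e.g. in a
+## Makefile or on the command line:
+##   g++ $(applgrid-config --cxxflags) -c myanalysis.cxx
+##   g++ myanalysis.o $(applgrid-config --ldflags) -o myanalysis
+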
+if [[ $# -eq 0 || -n $( echo $* | egrep -- "--help|-h" ) ]]; then
+    echo "applgrid-config: configuration tool for the APPLgrid"
+    echo "                 fast cross section convolution code"
+    echo "                 http://projects.hepforge.org/applgrid/"
+    echo
+    echo "Usage: applgrid-config [[--help|-h] | [--prefix] | [...]]"
+    echo "Options:"
+    echo "  --help | -h    : this help"
+    echo
+    echo "  --prefix       : installation prefix (cf. autoconf)"
+    echo "  --incdir       : path to the APPLgrid header directory"
+    echo "  --libdir       : path to the APPLgrid library directory"
+    echo "  --cxxflags     : compiler flags for the C preprocessor"
+    echo "  --ldflags      : compiler flags for the linker just for c code"
+    echo "  --ldfflags     : compiler flags for the linker including the fortan interface"
+    echo "  --share        : path to APPLgrid pdf conbination config files"
+    echo
+    echo "  --version      : release version number"
+fi
+
+OUT=""
+
+tmp=$( echo "$*" | egrep -- '--\<prefix\>')
+test -n "$tmp" && OUT="$OUT @prefix@"
+
+tmp=$( echo "$*" | egrep -- '--\<incdir\>')
+test -n "$tmp" && OUT="$OUT @includedir@"
+
+tmp=$( echo "$*" | egrep -- '--\<cxxflags\>')
+test -n "$tmp" && OUT="$OUT -I@includedir@  @ROOT_CXXFLAGS@ @HOPPET_CXXFLAGS@ @CXXFLAGS@ "
+
+tmp=$( echo "$*" | egrep -- '--\<libdir\>')
+test -n "$tmp" && OUT="$OUT @exec_prefix@@libdir@"
+
+
+tmp=$( echo "$*" | egrep -- '--\<ldfflags\>')
+test -n "$tmp" && OUT="$OUT -L@exec_prefix@@libdir@ -lfAPPLgrid -lAPPLgrid @LIBS@ @ROOT_LDFLAGS@ @HOPPET_LDFLAGS@ @FRTLLIB@" 
+
+tmp=$( echo "$*" | egrep -- '--\<ldcflags\>')
+test -n "$tmp" && OUT="$OUT -L@exec_prefix@@libdir@ -lAPPLgrid @LIBS@ @ROOT_LDFLAGS@ @HOPPET_LDFLAGS@ @FRTLLIB@"
+
+tmp=$( echo "$*" | egrep -- '--\<ldflags\>')
+test -n "$tmp" && OUT="$OUT -L@exec_prefix@@libdir@ -lAPPLgrid @LIBS@ @ROOT_LDFLAGS@ @HOPPET_LDFLAGS@ @FRTLLIB@"
+
+tmp=$( echo "$*" | egrep -- '--\<share\>')
+test -n "$tmp" && OUT="$OUT @datarootdir@/@PACKAGE@"
+
+
+## Version
+tmp=$( echo "$*" | egrep -- '--\<version\>')
+test -n "$tmp" && OUT="$OUT @PACKAGE_VERSION@"
+
+echo $OUT
Index: applgrid/tags/applgrid-01-06-36/bin/Makefile.in
===================================================================
--- applgrid/tags/applgrid-01-06-36/bin/Makefile.in	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/bin/Makefile.in	(revision 2010)
@@ -0,0 +1,524 @@
+# Makefile.in generated by automake 1.16.5 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2021 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+  if test -z '$(MAKELEVEL)'; then \
+    false; \
+  elif test -n '$(MAKE_HOST)'; then \
+    true; \
+  elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+    true; \
+  else \
+    false; \
+  fi; \
+}
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = bin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/amconfig.h
+CONFIG_CLEAN_FILES = applgrid-config
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+  for p in $$list; do echo "$$p $$p"; done | \
+  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+    if (++n[$$2] == $(am__install_max)) \
+      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+    END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+  test -z "$$files" \
+    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+         $(am__cd) "$$dir" && rm -f $$files; }; \
+  }
+am__installdirs = "$(DESTDIR)$(bindir)"
+SCRIPTS = $(bin_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/applgrid-config.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPPFLAGS = @CPPFLAGS@
+CSCOPE = @CSCOPE@
+CTAGS = @CTAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ETAGS = @ETAGS@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FRTLLIB = @FRTLLIB@
+GREP = @GREP@
+HOPPETPATH = @HOPPETPATH@
+HOPPET_CXXFLAGS = @HOPPET_CXXFLAGS@
+HOPPET_LDFLAGS = @HOPPET_LDFLAGS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LHAPDFPATH = @LHAPDFPATH@
+LHAPDF_CXXFLAGS = @LHAPDF_CXXFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+ROOTPATH = @ROOTPATH@
+ROOT_CXXFLAGS = @ROOT_CXXFLAGS@
+ROOT_LDFLAGS = @ROOT_LDFLAGS@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+USEROOT = @USEROOT@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+EXTRA_DIST = applgrid-config.in 
+bin_SCRIPTS = applgrid-config
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in:  $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu bin/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu bin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure:  $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4):  $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+applgrid-config: $(top_builddir)/config.status $(srcdir)/applgrid-config.in
+	cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
+install-binSCRIPTS: $(bin_SCRIPTS)
+	@$(NORMAL_INSTALL)
+	@list='$(bin_SCRIPTS)'; test -n "$(bindir)" || list=; \
+	if test -n "$$list"; then \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \
+	fi; \
+	for p in $$list; do \
+	  if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+	  if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+	done | \
+	sed -e 'p;s,.*/,,;n' \
+	    -e 'h;s|.*|.|' \
+	    -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+	$(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+	  { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+	    if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+	      if (++n[d] == $(am__install_max)) { \
+		print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+	    else { print "f", d "/" $$4, $$1 } } \
+	  END { for (d in files) print "f", d, files[d] }' | \
+	while read type dir files; do \
+	     if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+	     test -z "$$files" || { \
+	       echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(bindir)$$dir'"; \
+	       $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \
+	     } \
+	; done
+
+uninstall-binSCRIPTS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(bin_SCRIPTS)'; test -n "$(bindir)" || exit 0; \
+	files=`for p in $$list; do echo "$$p"; done | \
+	       sed -e 's,.*/,,;$(transform)'`; \
+	dir='$(DESTDIR)$(bindir)'; $(am__uninstall_files_from_dir)
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+distdir: $(BUILT_SOURCES)
+	$(MAKE) $(AM_MAKEFLAGS) distdir-am
+
+distdir-am: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS)
+installdirs:
+	for dir in "$(DESTDIR)$(bindir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-binSCRIPTS
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-binSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic clean-libtool \
+	cscopelist-am ctags-am distclean distclean-generic \
+	distclean-libtool distdir dvi dvi-am html html-am info info-am \
+	install install-am install-binSCRIPTS install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic \
+	mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \
+	uninstall-am uninstall-binSCRIPTS
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
Index: applgrid/tags/applgrid-01-06-36/COPYING
===================================================================
--- applgrid/tags/applgrid-01-06-36/COPYING	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/COPYING	(revision 2010)
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Use with the GNU Affero General Public License.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
Index: applgrid/tags/applgrid-01-06-36/amconfig.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/amconfig.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/amconfig.h	(revision 2010)
@@ -0,0 +1,82 @@
+/* amconfig.h.  Generated from amconfig.in by configure.  */
+/* amconfig.in.  Generated from configure.ac by autoheader.  */
+
+/* Define to 1 if the CERN ROOT package is installed */
+#define HAVE_CERNROOT 1
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define to 1 if HOPPET is installed */
+#define HAVE_HOPPET 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if LHAPDF is installed */
+#define HAVE_LHAPDF 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdio.h> header file. */
+#define HAVE_STDIO_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the <utime.h> header file. */
+#define HAVE_UTIME_H 1
+
+/* Define to 1 if `utime(file, NULL)' sets file's timestamp to the present. */
+#define HAVE_UTIME_NULL 1
+
+/* LHAPDF version */
+#define LHAPDF_VERSION "6.4.1a"
+
+/* Define to the sub-directory where libtool stores uninstalled libraries. */
+#define LT_OBJDIR ".libs/"
+
+/* Name of package */
+#define PACKAGE "applgrid"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "sutt@cern.ch"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "APPLgrid"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "APPLgrid 1.6.36"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "applgrid"
+
+/* Define to the home page for this package. */
+#define PACKAGE_URL "https://applgrid.hepforge.org"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "1.6.36"
+
+/* Define to 1 if all of the C90 standard headers exist (not just the ones
+   required in a freestanding environment). This macro is provided for
+   backward compatibility; new code need not use it. */
+#define STDC_HEADERS 1
+
+/* Version number of package */
+#define VERSION "1.6.36"
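
The HAVE_* and *_VERSION switches above simply record what configure detected on
this particular build host.  A quick way to review them after configuring is a
plain grep over the generated header (a minimal sketch; it assumes amconfig.h
sits in the current build directory):

    # Show which optional packages (ROOT, HOPPET, LHAPDF) configure found
    grep -E '^#define HAVE_(CERNROOT|HOPPET|LHAPDF)' amconfig.h
    # Show the recorded LHAPDF and APPLgrid version strings
    grep -E '^#define (LHAPDF_VERSION|PACKAGE_VERSION)' amconfig.h
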
Index: applgrid/tags/applgrid-01-06-36/Makefile.am
===================================================================
--- applgrid/tags/applgrid-01-06-36/Makefile.am	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/Makefile.am	(revision 2010)
@@ -0,0 +1,23 @@
+AUTOMAKE_OPTIONS = foreign 
+ACLOCAL  = aclocal
+AUTOMAKE = automake
+
+docdir   = ${datadir}/doc/${PACKAGE}
+dist_doc_DATA   = README INSTALL COPYING
+
+sharedir = ${datadir}/${PACKAGE}
+dist_share_DATA = share/*.config 
+
+if USE_ROOT
+install-data-am:
+	cp src/*.pcm ${libdir}
+endif
+
+clean-local:
+	rm -rf autom4te.cache
+
+SUBDIRS = src bin
+
+## Clean out SVN files and ROOT dictionaries
+dist-hook:
+	rm -rf `find $(distdir) -name ".svn"` `find $(distdir) -name \*Dict\*`
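
Since Makefile.am drives the generated build files, the usual autotools round
trip applies after editing it.  A minimal sketch of that sequence, assuming
autoreconf is available and using an example install prefix:

    # Regenerate configure, Makefile.in and friends from Makefile.am/configure.ac
    autoreconf -i
    # Configure, build and install; the prefix below is only an example
    ./configure --prefix=$HOME/local/applgrid
    make && make install
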
Index: applgrid/tags/applgrid-01-06-36/missing
===================================================================
--- applgrid/tags/applgrid-01-06-36/missing	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/missing	(revision 2010)
@@ -0,0 +1,215 @@
+#! /bin/sh
+# Common wrapper for a few potentially missing GNU programs.
+
+scriptversion=2013-10-28.13; # UTC
+
+# Copyright (C) 1996-2014 Free Software Foundation, Inc.
+# Originally written by François Pinard <pinard@iro.umontreal.ca>, 1996.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+if test $# -eq 0; then
+  echo 1>&2 "Try '$0 --help' for more information"
+  exit 1
+fi
+
+case $1 in
+
+  --is-lightweight)
+    # Used by our autoconf macros to check whether the available missing
+    # script is modern enough.
+    exit 0
+    ;;
+
+  --run)
+    # Back-compat with the calling convention used by older automake.
+    shift
+    ;;
+
+  -h|--h|--he|--hel|--help)
+    echo "\
+$0 [OPTION]... PROGRAM [ARGUMENT]...
+
+Run 'PROGRAM [ARGUMENT]...', returning useful advice when this fails due
+to PROGRAM being missing or too old.
+
+Options:
+  -h, --help      display this help and exit
+  -v, --version   output version information and exit
+
+Supported PROGRAM values:
+  aclocal   autoconf  autoheader   autom4te  automake  makeinfo
+  bison     yacc      flex         lex       help2man
+
+Version suffixes to PROGRAM as well as the prefixes 'gnu-', 'gnu', and
+'g' are ignored when checking the name.
+
+Send bug reports to <bug-automake@gnu.org>."
+    exit $?
+    ;;
+
+  -v|--v|--ve|--ver|--vers|--versi|--versio|--version)
+    echo "missing $scriptversion (GNU Automake)"
+    exit $?
+    ;;
+
+  -*)
+    echo 1>&2 "$0: unknown '$1' option"
+    echo 1>&2 "Try '$0 --help' for more information"
+    exit 1
+    ;;
+
+esac
+
+# Run the given program, remember its exit status.
+"$@"; st=$?
+
+# If it succeeded, we are done.
+test $st -eq 0 && exit 0
+
+# Also exit now if it failed (or wasn't found), and '--version' was
+# passed; such an option is passed most likely to detect whether the
+# program is present and works.
+case $2 in --version|--help) exit $st;; esac
+
+# Exit code 63 means version mismatch.  This often happens when the user
+# tries to use an ancient version of a tool on a file that requires a
+# minimum version.
+if test $st -eq 63; then
+  msg="probably too old"
+elif test $st -eq 127; then
+  # Program was missing.
+  msg="missing on your system"
+else
+  # Program was found and executed, but failed.  Give up.
+  exit $st
+fi
+
+perl_URL=http://www.perl.org/
+flex_URL=http://flex.sourceforge.net/
+gnu_software_URL=http://www.gnu.org/software
+
+program_details ()
+{
+  case $1 in
+    aclocal|automake)
+      echo "The '$1' program is part of the GNU Automake package:"
+      echo "<$gnu_software_URL/automake>"
+      echo "It also requires GNU Autoconf, GNU m4 and Perl in order to run:"
+      echo "<$gnu_software_URL/autoconf>"
+      echo "<$gnu_software_URL/m4/>"
+      echo "<$perl_URL>"
+      ;;
+    autoconf|autom4te|autoheader)
+      echo "The '$1' program is part of the GNU Autoconf package:"
+      echo "<$gnu_software_URL/autoconf/>"
+      echo "It also requires GNU m4 and Perl in order to run:"
+      echo "<$gnu_software_URL/m4/>"
+      echo "<$perl_URL>"
+      ;;
+  esac
+}
+
+give_advice ()
+{
+  # Normalize program name to check for.
+  normalized_program=`echo "$1" | sed '
+    s/^gnu-//; t
+    s/^gnu//; t
+    s/^g//; t'`
+
+  printf '%s\n' "'$1' is $msg."
+
+  configure_deps="'configure.ac' or m4 files included by 'configure.ac'"
+  case $normalized_program in
+    autoconf*)
+      echo "You should only need it if you modified 'configure.ac',"
+      echo "or m4 files included by it."
+      program_details 'autoconf'
+      ;;
+    autoheader*)
+      echo "You should only need it if you modified 'acconfig.h' or"
+      echo "$configure_deps."
+      program_details 'autoheader'
+      ;;
+    automake*)
+      echo "You should only need it if you modified 'Makefile.am' or"
+      echo "$configure_deps."
+      program_details 'automake'
+      ;;
+    aclocal*)
+      echo "You should only need it if you modified 'acinclude.m4' or"
+      echo "$configure_deps."
+      program_details 'aclocal'
+      ;;
+   autom4te*)
+      echo "You might have modified some maintainer files that require"
+      echo "the 'autom4te' program to be rebuilt."
+      program_details 'autom4te'
+      ;;
+    bison*|yacc*)
+      echo "You should only need it if you modified a '.y' file."
+      echo "You may want to install the GNU Bison package:"
+      echo "<$gnu_software_URL/bison/>"
+      ;;
+    lex*|flex*)
+      echo "You should only need it if you modified a '.l' file."
+      echo "You may want to install the Fast Lexical Analyzer package:"
+      echo "<$flex_URL>"
+      ;;
+    help2man*)
+      echo "You should only need it if you modified a dependency" \
+           "of a man page."
+      echo "You may want to install the GNU Help2man package:"
+      echo "<$gnu_software_URL/help2man/>"
+    ;;
+    makeinfo*)
+      echo "You should only need it if you modified a '.texi' file, or"
+      echo "any other file indirectly affecting the aspect of the manual."
+      echo "You might want to install the Texinfo package:"
+      echo "<$gnu_software_URL/texinfo/>"
+      echo "The spurious makeinfo call might also be the consequence of"
+      echo "using a buggy 'make' (AIX, DU, IRIX), in which case you might"
+      echo "want to install GNU make:"
+      echo "<$gnu_software_URL/make/>"
+      ;;
+    *)
+      echo "You might have modified some files without having the proper"
+      echo "tools for further handling them.  Check the 'README' file, it"
+      echo "often tells you about the needed prerequisites for installing"
+      echo "this package.  You may also peek at any GNU archive site, in"
+      echo "case some other package contains this missing '$1' program."
+      ;;
+  esac
+}
+
+give_advice "$1" | sed -e '1s/^/WARNING: /' \
+                       -e '2,$s/^/         /' >&2
+
+# Propagate the correct exit status (expected to be 127 for a program
+# not found, 63 for a program that failed due to version mismatch).
+exit $st
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:

Property changes on: applgrid/tags/applgrid-01-06-36/missing
___________________________________________________________________
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
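
The 'missing' wrapper above is normally called from the generated Makefiles
rather than by hand, but it can be exercised directly to see the behaviour
described in its comments (a minimal sketch; 'automake' is just an example
PROGRAM argument):

    # Exits 0: the wrapper is modern enough for the configure machinery
    ./missing --is-lightweight
    # Runs automake through the wrapper; if automake is absent the wrapper
    # prints its advice on stderr and exits 127
    ./missing automake
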
Index: applgrid/tags/applgrid-01-06-36/aclocal.m4
===================================================================
--- applgrid/tags/applgrid-01-06-36/aclocal.m4	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/aclocal.m4	(revision 2010)
@@ -0,0 +1,10203 @@
+# generated automatically by aclocal 1.16.5 -*- Autoconf -*-
+
+# Copyright (C) 1996-2021 Free Software Foundation, Inc.
+
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])])
+m4_ifndef([AC_AUTOCONF_VERSION],
+  [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
+m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.71],,
+[m4_warning([this file was generated for autoconf 2.71.
+You have another version of autoconf.  It may work, but is not guaranteed to.
+If you have problems, you may need to regenerate the build system entirely.
+To do so, use the procedure documented by the package, typically 'autoreconf'.])])
+
+# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*-
+#
+#   Copyright (C) 1996-2001, 2003-2015 Free Software Foundation, Inc.
+#   Written by Gordon Matzigkeit, 1996
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+m4_define([_LT_COPYING], [dnl
+# Copyright (C) 2014 Free Software Foundation, Inc.
+# This is free software; see the source for copying conditions.  There is NO
+# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+# GNU Libtool is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program or library that is built
+# using GNU Libtool, you may include this file under the  same
+# distribution terms that you use for the rest of that program.
+#
+# GNU Libtool is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+])
+
+# serial 58 LT_INIT
+
+
+# LT_PREREQ(VERSION)
+# ------------------
+# Complain and exit if this libtool version is less than VERSION.
+m4_defun([LT_PREREQ],
+[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1,
+       [m4_default([$3],
+		   [m4_fatal([Libtool version $1 or higher is required],
+		             63)])],
+       [$2])])
+
+
+# _LT_CHECK_BUILDDIR
+# ------------------
+# Complain if the absolute build directory name contains unusual characters
+m4_defun([_LT_CHECK_BUILDDIR],
+[case `pwd` in
+  *\ * | *\	*)
+    AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;;
+esac
+])
+
+
+# LT_INIT([OPTIONS])
+# ------------------
+AC_DEFUN([LT_INIT],
+[AC_PREREQ([2.62])dnl We use AC_PATH_PROGS_FEATURE_CHECK
+AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl
+AC_BEFORE([$0], [LT_LANG])dnl
+AC_BEFORE([$0], [LT_OUTPUT])dnl
+AC_BEFORE([$0], [LTDL_INIT])dnl
+m4_require([_LT_CHECK_BUILDDIR])dnl
+
+dnl Autoconf doesn't catch unexpanded LT_ macros by default:
+m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl
+m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl
+dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4
+dnl unless we require an AC_DEFUNed macro:
+AC_REQUIRE([LTOPTIONS_VERSION])dnl
+AC_REQUIRE([LTSUGAR_VERSION])dnl
+AC_REQUIRE([LTVERSION_VERSION])dnl
+AC_REQUIRE([LTOBSOLETE_VERSION])dnl
+m4_require([_LT_PROG_LTMAIN])dnl
+
+_LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}])
+
+dnl Parse OPTIONS
+_LT_SET_OPTIONS([$0], [$1])
+
+# This can be used to rebuild libtool when needed
+LIBTOOL_DEPS=$ltmain
+
+# Always use our own libtool.
+LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+AC_SUBST(LIBTOOL)dnl
+
+_LT_SETUP
+
+# Only expand once:
+m4_define([LT_INIT])
+])# LT_INIT
+
+# Old names:
+AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT])
+AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_PROG_LIBTOOL], [])
+dnl AC_DEFUN([AM_PROG_LIBTOOL], [])
+
+
+# _LT_PREPARE_CC_BASENAME
+# -----------------------
+m4_defun([_LT_PREPARE_CC_BASENAME], [
+# Calculate cc_basename.  Skip known compiler wrappers and cross-prefix.
+func_cc_basename ()
+{
+    for cc_temp in @S|@*""; do
+      case $cc_temp in
+        compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;;
+        distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;;
+        \-*) ;;
+        *) break;;
+      esac
+    done
+    func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+}
+])# _LT_PREPARE_CC_BASENAME
+
+
+# _LT_CC_BASENAME(CC)
+# -------------------
+# It would be clearer to call AC_REQUIREs from _LT_PREPARE_CC_BASENAME,
+# but that macro is also expanded into generated libtool script, which
+# arranges for $SED and $ECHO to be set by different means.
+m4_defun([_LT_CC_BASENAME],
+[m4_require([_LT_PREPARE_CC_BASENAME])dnl
+AC_REQUIRE([_LT_DECL_SED])dnl
+AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl
+func_cc_basename $1
+cc_basename=$func_cc_basename_result
+])
+
+
+# _LT_FILEUTILS_DEFAULTS
+# ----------------------
+# It is okay to use these file commands and assume they have been set
+# sensibly after 'm4_require([_LT_FILEUTILS_DEFAULTS])'.
+m4_defun([_LT_FILEUTILS_DEFAULTS],
+[: ${CP="cp -f"}
+: ${MV="mv -f"}
+: ${RM="rm -f"}
+])# _LT_FILEUTILS_DEFAULTS
+
+
+# _LT_SETUP
+# ---------
+m4_defun([_LT_SETUP],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl
+AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl
+
+_LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl
+dnl
+_LT_DECL([], [host_alias], [0], [The host system])dnl
+_LT_DECL([], [host], [0])dnl
+_LT_DECL([], [host_os], [0])dnl
+dnl
+_LT_DECL([], [build_alias], [0], [The build system])dnl
+_LT_DECL([], [build], [0])dnl
+_LT_DECL([], [build_os], [0])dnl
+dnl
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([LT_PATH_LD])dnl
+AC_REQUIRE([LT_PATH_NM])dnl
+dnl
+AC_REQUIRE([AC_PROG_LN_S])dnl
+test -z "$LN_S" && LN_S="ln -s"
+_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl
+dnl
+AC_REQUIRE([LT_CMD_MAX_LEN])dnl
+_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl
+_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl
+dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_CHECK_SHELL_FEATURES])dnl
+m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl
+m4_require([_LT_CMD_RELOAD])dnl
+m4_require([_LT_CHECK_MAGIC_METHOD])dnl
+m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl
+m4_require([_LT_CMD_OLD_ARCHIVE])dnl
+m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl
+m4_require([_LT_WITH_SYSROOT])dnl
+m4_require([_LT_CMD_TRUNCATE])dnl
+
+_LT_CONFIG_LIBTOOL_INIT([
+# See if we are running on zsh, and set the options that allow our
+# commands through without removal of \ escapes INIT.
+if test -n "\${ZSH_VERSION+set}"; then
+   setopt NO_GLOB_SUBST
+fi
+])
+if test -n "${ZSH_VERSION+set}"; then
+   setopt NO_GLOB_SUBST
+fi
+
+_LT_CHECK_OBJDIR
+
+m4_require([_LT_TAG_COMPILER])dnl
+
+case $host_os in
+aix3*)
+  # AIX sometimes has problems with the GCC collect2 program.  For some
+  # reason, if we set the COLLECT_NAMES environment variable, the problems
+  # vanish in a puff of smoke.
+  if test set != "${COLLECT_NAMES+set}"; then
+    COLLECT_NAMES=
+    export COLLECT_NAMES
+  fi
+  ;;
+esac
+
+# Global variables:
+ofile=libtool
+can_build_shared=yes
+
+# All known linkers require a '.a' archive for static linking (except MSVC,
+# which needs '.lib').
+libext=a
+
+with_gnu_ld=$lt_cv_prog_gnu_ld
+
+old_CC=$CC
+old_CFLAGS=$CFLAGS
+
+# Set sane defaults for various variables
+test -z "$CC" && CC=cc
+test -z "$LTCC" && LTCC=$CC
+test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS
+test -z "$LD" && LD=ld
+test -z "$ac_objext" && ac_objext=o
+
+_LT_CC_BASENAME([$compiler])
+
+# Only perform the check for file, if the check method requires it
+test -z "$MAGIC_CMD" && MAGIC_CMD=file
+case $deplibs_check_method in
+file_magic*)
+  if test "$file_magic_cmd" = '$MAGIC_CMD'; then
+    _LT_PATH_MAGIC
+  fi
+  ;;
+esac
+
+# Use C for the default configuration in the libtool script
+LT_SUPPORTED_TAG([CC])
+_LT_LANG_C_CONFIG
+_LT_LANG_DEFAULT_CONFIG
+_LT_CONFIG_COMMANDS
+])# _LT_SETUP
+
+
+# _LT_PREPARE_SED_QUOTE_VARS
+# --------------------------
+# Define a few sed substitutions that help us do robust quoting.
+m4_defun([_LT_PREPARE_SED_QUOTE_VARS],
+[# Backslashify metacharacters that are still active within
+# double-quoted strings.
+sed_quote_subst='s/\([["`$\\]]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\([["`\\]]\)/\\\1/g'
+
+# Sed substitution to delay expansion of an escaped shell variable in a
+# double_quote_subst'ed string.
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
+
+# Sed substitution to delay expansion of an escaped single quote.
+delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g'
+
+# Sed substitution to avoid accidental globbing in evaled expressions
+no_glob_subst='s/\*/\\\*/g'
+])
+
+# _LT_PROG_LTMAIN
+# ---------------
+# Note that this code is called both from 'configure', and 'config.status'
+# now that we use AC_CONFIG_COMMANDS to generate libtool.  Notably,
+# 'config.status' has no value for ac_aux_dir unless we are using Automake,
+# so we pass a copy along to make sure it has a sensible value anyway.
+m4_defun([_LT_PROG_LTMAIN],
+[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl
+_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir'])
+ltmain=$ac_aux_dir/ltmain.sh
+])# _LT_PROG_LTMAIN
+
+
+
+# So that we can recreate a full libtool script including additional
+# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS
+# in macros and then make a single call at the end using the 'libtool'
+# label.
+
+
+# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS])
+# ----------------------------------------
+# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later.
+m4_define([_LT_CONFIG_LIBTOOL_INIT],
+[m4_ifval([$1],
+          [m4_append([_LT_OUTPUT_LIBTOOL_INIT],
+                     [$1
+])])])
+
+# Initialize.
+m4_define([_LT_OUTPUT_LIBTOOL_INIT])
+
+
+# _LT_CONFIG_LIBTOOL([COMMANDS])
+# ------------------------------
+# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later.
+m4_define([_LT_CONFIG_LIBTOOL],
+[m4_ifval([$1],
+          [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS],
+                     [$1
+])])])
+
+# Initialize.
+m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS])
+
+
+# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS])
+# -----------------------------------------------------
+m4_defun([_LT_CONFIG_SAVE_COMMANDS],
+[_LT_CONFIG_LIBTOOL([$1])
+_LT_CONFIG_LIBTOOL_INIT([$2])
+])
+
+
+# _LT_FORMAT_COMMENT([COMMENT])
+# -----------------------------
+# Add leading comment marks to the start of each line, and a trailing
+# full-stop to the whole comment if one is not present already.
+m4_define([_LT_FORMAT_COMMENT],
+[m4_ifval([$1], [
+m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])],
+              [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.])
+)])
+
+
+
+
+
+# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?])
+# -------------------------------------------------------------------
+# CONFIGNAME is the name given to the value in the libtool script.
+# VARNAME is the (base) name used in the configure script.
+# VALUE may be 0, 1 or 2 for a computed quote escaped value based on
+# VARNAME.  Any other value will be used directly.
+m4_define([_LT_DECL],
+[lt_if_append_uniq([lt_decl_varnames], [$2], [, ],
+    [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name],
+	[m4_ifval([$1], [$1], [$2])])
+    lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3])
+    m4_ifval([$4],
+	[lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])])
+    lt_dict_add_subkey([lt_decl_dict], [$2],
+	[tagged?], [m4_ifval([$5], [yes], [no])])])
+])
+
+
+# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION])
+# --------------------------------------------------------
+m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])])
+
+
+# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...])
+# ------------------------------------------------
+m4_define([lt_decl_tag_varnames],
+[_lt_decl_filter([tagged?], [yes], $@)])
+
+
+# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..])
+# ---------------------------------------------------------
+m4_define([_lt_decl_filter],
+[m4_case([$#],
+  [0], [m4_fatal([$0: too few arguments: $#])],
+  [1], [m4_fatal([$0: too few arguments: $#: $1])],
+  [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)],
+  [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)],
+  [lt_dict_filter([lt_decl_dict], $@)])[]dnl
+])
+
+
+# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...])
+# --------------------------------------------------
+m4_define([lt_decl_quote_varnames],
+[_lt_decl_filter([value], [1], $@)])
+
+
+# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...])
+# ---------------------------------------------------
+m4_define([lt_decl_dquote_varnames],
+[_lt_decl_filter([value], [2], $@)])
+
+
+# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...])
+# ---------------------------------------------------
+m4_define([lt_decl_varnames_tagged],
+[m4_assert([$# <= 2])dnl
+_$0(m4_quote(m4_default([$1], [[, ]])),
+    m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]),
+    m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))])
+m4_define([_lt_decl_varnames_tagged],
+[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])])
+
+
+# lt_decl_all_varnames([SEPARATOR], [VARNAME1...])
+# ------------------------------------------------
+m4_define([lt_decl_all_varnames],
+[_$0(m4_quote(m4_default([$1], [[, ]])),
+     m4_if([$2], [],
+	   m4_quote(lt_decl_varnames),
+	m4_quote(m4_shift($@))))[]dnl
+])
+m4_define([_lt_decl_all_varnames],
+[lt_join($@, lt_decl_varnames_tagged([$1],
+			lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl
+])
+
+
+# _LT_CONFIG_STATUS_DECLARE([VARNAME])
+# ------------------------------------
+# Quote a variable value, and forward it to 'config.status' so that its
+# declaration there will have the same value as in 'configure'.  VARNAME
+# must have a single quote delimited value for this to work.
+m4_define([_LT_CONFIG_STATUS_DECLARE],
+[$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`'])
+
+
+# _LT_CONFIG_STATUS_DECLARATIONS
+# ------------------------------
+# We delimit libtool config variables with single quotes, so when
+# we write them to config.status, we have to be sure to quote all
+# embedded single quotes properly.  In configure, this macro expands
+# each variable declared with _LT_DECL (and _LT_TAGDECL) into:
+#
+#    <var>='`$ECHO "$<var>" | $SED "$delay_single_quote_subst"`'
+m4_defun([_LT_CONFIG_STATUS_DECLARATIONS],
+[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames),
+    [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])])
+
+
+# _LT_LIBTOOL_TAGS
+# ----------------
+# Output comment and list of tags supported by the script
+m4_defun([_LT_LIBTOOL_TAGS],
+[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl
+available_tags='_LT_TAGS'dnl
+])
+
+
+# _LT_LIBTOOL_DECLARE(VARNAME, [TAG])
+# -----------------------------------
+# Extract the dictionary values for VARNAME (optionally with TAG) and
+# expand to a commented shell variable setting:
+#
+#    # Some comment about what VAR is for.
+#    visible_name=$lt_internal_name
+m4_define([_LT_LIBTOOL_DECLARE],
+[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1],
+					   [description])))[]dnl
+m4_pushdef([_libtool_name],
+    m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl
+m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])),
+    [0], [_libtool_name=[$]$1],
+    [1], [_libtool_name=$lt_[]$1],
+    [2], [_libtool_name=$lt_[]$1],
+    [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl
+m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl
+])
+
+
+# _LT_LIBTOOL_CONFIG_VARS
+# -----------------------
+# Produce commented declarations of non-tagged libtool config variables
+# suitable for insertion in the LIBTOOL CONFIG section of the 'libtool'
+# script.  Tagged libtool config variables (even for the LIBTOOL CONFIG
+# section) are produced by _LT_LIBTOOL_TAG_VARS.
+m4_defun([_LT_LIBTOOL_CONFIG_VARS],
+[m4_foreach([_lt_var],
+    m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)),
+    [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])])
+
+
+# _LT_LIBTOOL_TAG_VARS(TAG)
+# -------------------------
+m4_define([_LT_LIBTOOL_TAG_VARS],
+[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames),
+    [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])])
+
+
+# _LT_TAGVAR(VARNAME, [TAGNAME])
+# ------------------------------
+m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])])
+
+
+# _LT_CONFIG_COMMANDS
+# -------------------
+# Send accumulated output to $CONFIG_STATUS.  Thanks to the lists of
+# variables for single and double quote escaping we saved from calls
+# to _LT_DECL, we can put quote escaped variables declarations
+# into 'config.status', and then the shell code to quote escape them in
+# for loops in 'config.status'.  Finally, any additional code accumulated
+# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded.
+m4_defun([_LT_CONFIG_COMMANDS],
+[AC_PROVIDE_IFELSE([LT_OUTPUT],
+	dnl If the libtool generation code has been placed in $CONFIG_LT,
+	dnl instead of duplicating it all over again into config.status,
+	dnl then we will have config.status run $CONFIG_LT later, so it
+	dnl needs to know what name is stored there:
+        [AC_CONFIG_COMMANDS([libtool],
+            [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])],
+    dnl If the libtool generation code is destined for config.status,
+    dnl expand the accumulated commands and init code now:
+    [AC_CONFIG_COMMANDS([libtool],
+        [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])])
+])#_LT_CONFIG_COMMANDS
+
+
+# Initialize.
+m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT],
+[
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+sed_quote_subst='$sed_quote_subst'
+double_quote_subst='$double_quote_subst'
+delay_variable_subst='$delay_variable_subst'
+_LT_CONFIG_STATUS_DECLARATIONS
+LTCC='$LTCC'
+LTCFLAGS='$LTCFLAGS'
+compiler='$compiler_DEFAULT'
+
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+  eval 'cat <<_LTECHO_EOF
+\$[]1
+_LTECHO_EOF'
+}
+
+# Quote evaled strings.
+for var in lt_decl_all_varnames([[ \
+]], lt_decl_quote_varnames); do
+    case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+    *[[\\\\\\\`\\"\\\$]]*)
+      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes
+      ;;
+    *)
+      eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+      ;;
+    esac
+done
+
+# Double-quote double-evaled strings.
+for var in lt_decl_all_varnames([[ \
+]], lt_decl_dquote_varnames); do
+    case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+    *[[\\\\\\\`\\"\\\$]]*)
+      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes
+      ;;
+    *)
+      eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+      ;;
+    esac
+done
+
+_LT_OUTPUT_LIBTOOL_INIT
+])
+
+# _LT_GENERATED_FILE_INIT(FILE, [COMMENT])
+# ------------------------------------
+# Generate a child script FILE with all initialization necessary to
+# reuse the environment learned by the parent script, and make the
+# file executable.  If COMMENT is supplied, it is inserted after the
+# '#!' sequence but before initialization text begins.  After this
+# macro, additional text can be appended to FILE to form the body of
+# the child script.  The macro ends with non-zero status if the
+# file could not be fully written (such as if the disk is full).
+m4_ifdef([AS_INIT_GENERATED],
+[m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])],
+[m4_defun([_LT_GENERATED_FILE_INIT],
+[m4_require([AS_PREPARE])]dnl
+[m4_pushdef([AS_MESSAGE_LOG_FD])]dnl
+[lt_write_fail=0
+cat >$1 <<_ASEOF || lt_write_fail=1
+#! $SHELL
+# Generated by $as_me.
+$2
+SHELL=\${CONFIG_SHELL-$SHELL}
+export SHELL
+_ASEOF
+cat >>$1 <<\_ASEOF || lt_write_fail=1
+AS_SHELL_SANITIZE
+_AS_PREPARE
+exec AS_MESSAGE_FD>&1
+_ASEOF
+test 0 = "$lt_write_fail" && chmod +x $1[]dnl
+m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT
+
+# LT_OUTPUT
+# ---------
+# This macro allows early generation of the libtool script (before
+# AC_OUTPUT is called), in case it is used in configure for compilation
+# tests.
+AC_DEFUN([LT_OUTPUT],
+[: ${CONFIG_LT=./config.lt}
+AC_MSG_NOTICE([creating $CONFIG_LT])
+_LT_GENERATED_FILE_INIT(["$CONFIG_LT"],
+[# Run this file to recreate a libtool stub with the current configuration.])
+
+cat >>"$CONFIG_LT" <<\_LTEOF
+lt_cl_silent=false
+exec AS_MESSAGE_LOG_FD>>config.log
+{
+  echo
+  AS_BOX([Running $as_me.])
+} >&AS_MESSAGE_LOG_FD
+
+lt_cl_help="\
+'$as_me' creates a local libtool stub from the current configuration,
+for use in further configure time tests before the real libtool is
+generated.
+
+Usage: $[0] [[OPTIONS]]
+
+  -h, --help      print this help, then exit
+  -V, --version   print version number, then exit
+  -q, --quiet     do not print progress messages
+  -d, --debug     don't remove temporary files
+
+Report bugs to <bug-libtool@gnu.org>."
+
+lt_cl_version="\
+m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl
+m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION])
+configured by $[0], generated by m4_PACKAGE_STRING.
+
+Copyright (C) 2011 Free Software Foundation, Inc.
+This config.lt script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+while test 0 != $[#]
+do
+  case $[1] in
+    --version | --v* | -V )
+      echo "$lt_cl_version"; exit 0 ;;
+    --help | --h* | -h )
+      echo "$lt_cl_help"; exit 0 ;;
+    --debug | --d* | -d )
+      debug=: ;;
+    --quiet | --q* | --silent | --s* | -q )
+      lt_cl_silent=: ;;
+
+    -*) AC_MSG_ERROR([unrecognized option: $[1]
+Try '$[0] --help' for more information.]) ;;
+
+    *) AC_MSG_ERROR([unrecognized argument: $[1]
+Try '$[0] --help' for more information.]) ;;
+  esac
+  shift
+done
+
+if $lt_cl_silent; then
+  exec AS_MESSAGE_FD>/dev/null
+fi
+_LTEOF
+
+cat >>"$CONFIG_LT" <<_LTEOF
+_LT_OUTPUT_LIBTOOL_COMMANDS_INIT
+_LTEOF
+
+cat >>"$CONFIG_LT" <<\_LTEOF
+AC_MSG_NOTICE([creating $ofile])
+_LT_OUTPUT_LIBTOOL_COMMANDS
+AS_EXIT(0)
+_LTEOF
+chmod +x "$CONFIG_LT"
+
+# configure is writing to config.log, but config.lt does its own redirection,
+# appending to config.log, which fails on DOS, as config.log is still kept
+# open by configure.  Here we exec the FD to /dev/null, effectively closing
+# config.log, so it can be properly (re)opened and appended to by config.lt.
+lt_cl_success=:
+test yes = "$silent" &&
+  lt_config_lt_args="$lt_config_lt_args --quiet"
+exec AS_MESSAGE_LOG_FD>/dev/null
+$SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false
+exec AS_MESSAGE_LOG_FD>>config.log
+$lt_cl_success || AS_EXIT(1)
+])# LT_OUTPUT
+
+
+# _LT_CONFIG(TAG)
+# ---------------
+# If TAG is the built-in tag, create an initial libtool script with a
+# default configuration from the untagged config vars.  Otherwise add code
+# to config.status for appending the configuration named by TAG from the
+# matching tagged config vars.
+m4_defun([_LT_CONFIG],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+_LT_CONFIG_SAVE_COMMANDS([
+  m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl
+  m4_if(_LT_TAG, [C], [
+    # See if we are running on zsh, and set the options that allow our
+    # commands through without removal of \ escapes.
+    if test -n "${ZSH_VERSION+set}"; then
+      setopt NO_GLOB_SUBST
+    fi
+
+    cfgfile=${ofile}T
+    trap "$RM \"$cfgfile\"; exit 1" 1 2 15
+    $RM "$cfgfile"
+
+    cat <<_LT_EOF >> "$cfgfile"
+#! $SHELL
+# Generated automatically by $as_me ($PACKAGE) $VERSION
+# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+# NOTE: Changes made to this file will be lost: look at ltmain.sh.
+
+# Provide generalized library-building support services.
+# Written by Gordon Matzigkeit, 1996
+
+_LT_COPYING
+_LT_LIBTOOL_TAGS
+
+# Configured defaults for sys_lib_dlsearch_path munging.
+: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"}
+
+# ### BEGIN LIBTOOL CONFIG
+_LT_LIBTOOL_CONFIG_VARS
+_LT_LIBTOOL_TAG_VARS
+# ### END LIBTOOL CONFIG
+
+_LT_EOF
+
+    cat <<'_LT_EOF' >> "$cfgfile"
+
+# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE
+
+_LT_PREPARE_MUNGE_PATH_LIST
+_LT_PREPARE_CC_BASENAME
+
+# ### END FUNCTIONS SHARED WITH CONFIGURE
+
+_LT_EOF
+
+  case $host_os in
+  aix3*)
+    cat <<\_LT_EOF >> "$cfgfile"
+# AIX sometimes has problems with the GCC collect2 program.  For some
+# reason, if we set the COLLECT_NAMES environment variable, the problems
+# vanish in a puff of smoke.
+if test set != "${COLLECT_NAMES+set}"; then
+  COLLECT_NAMES=
+  export COLLECT_NAMES
+fi
+_LT_EOF
+    ;;
+  esac
+
+  _LT_PROG_LTMAIN
+
+  # We use sed instead of cat because bash on DJGPP gets confused if
+  # it finds mixed CR/LF and LF-only lines.  Since sed operates in
+  # text mode, it properly converts lines to CR/LF.  This bash problem
+  # is reportedly fixed, but why not run on old versions too?
+  sed '$q' "$ltmain" >> "$cfgfile" \
+     || (rm -f "$cfgfile"; exit 1)
+
+   mv -f "$cfgfile" "$ofile" ||
+    (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile")
+  chmod +x "$ofile"
+],
+[cat <<_LT_EOF >> "$ofile"
+
+dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded
+dnl in a comment (ie after a #).
+# ### BEGIN LIBTOOL TAG CONFIG: $1
+_LT_LIBTOOL_TAG_VARS(_LT_TAG)
+# ### END LIBTOOL TAG CONFIG: $1
+_LT_EOF
+])dnl /m4_if
+],
+[m4_if([$1], [], [
+    PACKAGE='$PACKAGE'
+    VERSION='$VERSION'
+    RM='$RM'
+    ofile='$ofile'], [])
+])dnl /_LT_CONFIG_SAVE_COMMANDS
+])# _LT_CONFIG
+
+
+# LT_SUPPORTED_TAG(TAG)
+# ---------------------
+# Trace this macro to discover what tags are supported by the libtool
+# --tag option, using:
+#    autoconf --trace 'LT_SUPPORTED_TAG:$1'
+AC_DEFUN([LT_SUPPORTED_TAG], [])
+
+
+# C support is built-in for now
+m4_define([_LT_LANG_C_enabled], [])
+m4_define([_LT_TAGS], [])
+
+
+# LT_LANG(LANG)
+# -------------
+# Enable libtool support for the given language if not already enabled.
+AC_DEFUN([LT_LANG],
+[AC_BEFORE([$0], [LT_OUTPUT])dnl
+m4_case([$1],
+  [C],			[_LT_LANG(C)],
+  [C++],		[_LT_LANG(CXX)],
+  [Go],			[_LT_LANG(GO)],
+  [Java],		[_LT_LANG(GCJ)],
+  [Fortran 77],		[_LT_LANG(F77)],
+  [Fortran],		[_LT_LANG(FC)],
+  [Windows Resource],	[_LT_LANG(RC)],
+  [m4_ifdef([_LT_LANG_]$1[_CONFIG],
+    [_LT_LANG($1)],
+    [m4_fatal([$0: unsupported language: "$1"])])])dnl
+])# LT_LANG
+
+
+# _LT_LANG(LANGNAME)
+# ------------------
+m4_defun([_LT_LANG],
+[m4_ifdef([_LT_LANG_]$1[_enabled], [],
+  [LT_SUPPORTED_TAG([$1])dnl
+  m4_append([_LT_TAGS], [$1 ])dnl
+  m4_define([_LT_LANG_]$1[_enabled], [])dnl
+  _LT_LANG_$1_CONFIG($1)])dnl
+])# _LT_LANG
+
+
+m4_ifndef([AC_PROG_GO], [
+# NOTE: This macro has been submitted for inclusion into   #
+#  GNU Autoconf as AC_PROG_GO.  When it is available in    #
+#  a released version of Autoconf we should remove this    #
+#  macro and use it instead.                               #
+m4_defun([AC_PROG_GO],
+[AC_LANG_PUSH(Go)dnl
+AC_ARG_VAR([GOC],     [Go compiler command])dnl
+AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl
+_AC_ARG_VAR_LDFLAGS()dnl
+AC_CHECK_TOOL(GOC, gccgo)
+if test -z "$GOC"; then
+  if test -n "$ac_tool_prefix"; then
+    AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo])
+  fi
+fi
+if test -z "$GOC"; then
+  AC_CHECK_PROG(GOC, gccgo, gccgo, false)
+fi
+])#m4_defun
+])#m4_ifndef
+
+
+# _LT_LANG_DEFAULT_CONFIG
+# -----------------------
+m4_defun([_LT_LANG_DEFAULT_CONFIG],
+[AC_PROVIDE_IFELSE([AC_PROG_CXX],
+  [LT_LANG(CXX)],
+  [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])])
+
+AC_PROVIDE_IFELSE([AC_PROG_F77],
+  [LT_LANG(F77)],
+  [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])])
+
+AC_PROVIDE_IFELSE([AC_PROG_FC],
+  [LT_LANG(FC)],
+  [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])])
+
+dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal
+dnl pulling things in needlessly.
+AC_PROVIDE_IFELSE([AC_PROG_GCJ],
+  [LT_LANG(GCJ)],
+  [AC_PROVIDE_IFELSE([A][M_PROG_GCJ],
+    [LT_LANG(GCJ)],
+    [AC_PROVIDE_IFELSE([LT_PROG_GCJ],
+      [LT_LANG(GCJ)],
+      [m4_ifdef([AC_PROG_GCJ],
+	[m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])])
+       m4_ifdef([A][M_PROG_GCJ],
+	[m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])])
+       m4_ifdef([LT_PROG_GCJ],
+	[m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])])
+
+AC_PROVIDE_IFELSE([AC_PROG_GO],
+  [LT_LANG(GO)],
+  [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])])
+
+AC_PROVIDE_IFELSE([LT_PROG_RC],
+  [LT_LANG(RC)],
+  [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])])
+])# _LT_LANG_DEFAULT_CONFIG
+
+# Obsolete macros:
+AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)])
+AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)])
+AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)])
+AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)])
+AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_CXX], [])
+dnl AC_DEFUN([AC_LIBTOOL_F77], [])
+dnl AC_DEFUN([AC_LIBTOOL_FC], [])
+dnl AC_DEFUN([AC_LIBTOOL_GCJ], [])
+dnl AC_DEFUN([AC_LIBTOOL_RC], [])
+
+
+# _LT_TAG_COMPILER
+# ----------------
+m4_defun([_LT_TAG_COMPILER],
+[AC_REQUIRE([AC_PROG_CC])dnl
+
+_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl
+_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl
+_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl
+_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+])# _LT_TAG_COMPILER
+
+
+# _LT_COMPILER_BOILERPLATE
+# ------------------------
+# Check for compiler boilerplate output or warnings with
+# the simple compiler test code.
+m4_defun([_LT_COMPILER_BOILERPLATE],
+[m4_require([_LT_DECL_SED])dnl
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_compile_test_code" >conftest.$ac_ext
+eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_compiler_boilerplate=`cat conftest.err`
+$RM conftest*
+])# _LT_COMPILER_BOILERPLATE
+
+
+# _LT_LINKER_BOILERPLATE
+# ----------------------
+# Check for linker boilerplate output or warnings with
+# the simple link test code.
+m4_defun([_LT_LINKER_BOILERPLATE],
+[m4_require([_LT_DECL_SED])dnl
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_link_test_code" >conftest.$ac_ext
+eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_linker_boilerplate=`cat conftest.err`
+$RM -r conftest*
+])# _LT_LINKER_BOILERPLATE
+
+# _LT_REQUIRED_DARWIN_CHECKS
+# -------------------------
+m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[
+  case $host_os in
+    rhapsody* | darwin*)
+    AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:])
+    AC_CHECK_TOOL([NMEDIT], [nmedit], [:])
+    AC_CHECK_TOOL([LIPO], [lipo], [:])
+    AC_CHECK_TOOL([OTOOL], [otool], [:])
+    AC_CHECK_TOOL([OTOOL64], [otool64], [:])
+    _LT_DECL([], [DSYMUTIL], [1],
+      [Tool to manipulate archived DWARF debug symbol files on Mac OS X])
+    _LT_DECL([], [NMEDIT], [1],
+      [Tool to change global to local symbols on Mac OS X])
+    _LT_DECL([], [LIPO], [1],
+      [Tool to manipulate fat objects and archives on Mac OS X])
+    _LT_DECL([], [OTOOL], [1],
+      [ldd/readelf like tool for Mach-O binaries on Mac OS X])
+    _LT_DECL([], [OTOOL64], [1],
+      [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4])
+
+    AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod],
+      [lt_cv_apple_cc_single_mod=no
+      if test -z "$LT_MULTI_MODULE"; then
+	# By default we will add the -single_module flag. You can override
+	# by either setting the environment variable LT_MULTI_MODULE
+	# non-empty at configure time, or by adding -multi_module to the
+	# link flags.
+	rm -rf libconftest.dylib*
+	echo "int foo(void){return 1;}" > conftest.c
+	echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+-dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD
+	$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+	  -dynamiclib -Wl,-single_module conftest.c 2>conftest.err
+        _lt_result=$?
+	# If there is a non-empty error log, and "single_module"
+	# appears in it, assume the flag caused a linker warning
+        if test -s conftest.err && $GREP single_module conftest.err; then
+	  cat conftest.err >&AS_MESSAGE_LOG_FD
+	# Otherwise, if the output was created with a 0 exit code from
+	# the compiler, it worked.
+	elif test -f libconftest.dylib && test 0 = "$_lt_result"; then
+	  lt_cv_apple_cc_single_mod=yes
+	else
+	  cat conftest.err >&AS_MESSAGE_LOG_FD
+	fi
+	rm -rf libconftest.dylib*
+	rm -f conftest.*
+      fi])
+
+    AC_CACHE_CHECK([for -exported_symbols_list linker flag],
+      [lt_cv_ld_exported_symbols_list],
+      [lt_cv_ld_exported_symbols_list=no
+      save_LDFLAGS=$LDFLAGS
+      echo "_main" > conftest.sym
+      LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym"
+      AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])],
+	[lt_cv_ld_exported_symbols_list=yes],
+	[lt_cv_ld_exported_symbols_list=no])
+	LDFLAGS=$save_LDFLAGS
+    ])
+
+    AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load],
+      [lt_cv_ld_force_load=no
+      cat > conftest.c << _LT_EOF
+int forced_loaded() { return 2;}
+_LT_EOF
+      echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD
+      $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD
+      echo "$AR cru libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD
+      $AR cru libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD
+      echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD
+      $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD
+      cat > conftest.c << _LT_EOF
+int main() { return 0;}
+_LT_EOF
+      echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD
+      $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err
+      _lt_result=$?
+      if test -s conftest.err && $GREP force_load conftest.err; then
+	cat conftest.err >&AS_MESSAGE_LOG_FD
+      elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then
+	lt_cv_ld_force_load=yes
+      else
+	cat conftest.err >&AS_MESSAGE_LOG_FD
+      fi
+        rm -f conftest.err libconftest.a conftest conftest.c
+        rm -rf conftest.dSYM
+    ])
+    case $host_os in
+    rhapsody* | darwin1.[[012]])
+      _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;;
+    darwin1.*)
+      _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;;
+    darwin*)
+      case $MACOSX_DEPLOYMENT_TARGET,$host in
+	10.[[012]],*|,*powerpc*-darwin[[5-8]]*)
+	  _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;;
+	*)
+	  _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;;
+      esac
+    ;;
+  esac
+    if test yes = "$lt_cv_apple_cc_single_mod"; then
+      _lt_dar_single_mod='$single_module'
+    fi
+    if test yes = "$lt_cv_ld_exported_symbols_list"; then
+      _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym'
+    else
+      _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib'
+    fi
+    if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then
+      _lt_dsymutil='~$DSYMUTIL $lib || :'
+    else
+      _lt_dsymutil=
+    fi
+    ;;
+  esac
+])
+
+
+# _LT_DARWIN_LINKER_FEATURES([TAG])
+# ---------------------------------
+# Checks for linker and compiler features on darwin
+m4_defun([_LT_DARWIN_LINKER_FEATURES],
+[
+  m4_require([_LT_REQUIRED_DARWIN_CHECKS])
+  _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+  _LT_TAGVAR(hardcode_direct, $1)=no
+  _LT_TAGVAR(hardcode_automatic, $1)=yes
+  _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
+  if test yes = "$lt_cv_ld_force_load"; then
+    _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+    m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes],
+                  [FC],  [_LT_TAGVAR(compiler_needs_object, $1)=yes])
+  else
+    _LT_TAGVAR(whole_archive_flag_spec, $1)=''
+  fi
+  _LT_TAGVAR(link_all_deplibs, $1)=yes
+  _LT_TAGVAR(allow_undefined_flag, $1)=$_lt_dar_allow_undefined
+  case $cc_basename in
+     ifort*|nagfor*) _lt_dar_can_shared=yes ;;
+     *) _lt_dar_can_shared=$GCC ;;
+  esac
+  if test yes = "$_lt_dar_can_shared"; then
+    output_verbose_link_cmd=func_echo_all
+    _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil"
+    _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil"
+    _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil"
+    _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil"
+    m4_if([$1], [CXX],
+[   if test yes != "$lt_cv_apple_cc_single_mod"; then
+      _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil"
+      _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil"
+    fi
+],[])
+  else
+  _LT_TAGVAR(ld_shlibs, $1)=no
+  fi
+])
+
+# _LT_SYS_MODULE_PATH_AIX([TAGNAME])
+# ----------------------------------
+# Links a minimal program and checks the executable
+# for the system default hardcoded library path. In most cases,
+# this is /usr/lib:/lib, but when the MPI compilers are used
+# the locations of the communication and MPI libs are included too.
+# If we don't find anything, use the default library path according
+# to the aix ld manual.
+# Store the results from the different compilers for each TAGNAME.
+# Allow them to be overridden for all tags through lt_cv_aix_libpath.
+m4_defun([_LT_SYS_MODULE_PATH_AIX],
+[m4_require([_LT_DECL_SED])dnl
+if test set = "${lt_cv_aix_libpath+set}"; then
+  aix_libpath=$lt_cv_aix_libpath
+else
+  AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])],
+  [AC_LINK_IFELSE([AC_LANG_PROGRAM],[
+  lt_aix_libpath_sed='[
+      /Import File Strings/,/^$/ {
+	  /^0/ {
+	      s/^0  *\([^ ]*\) *$/\1/
+	      p
+	  }
+      }]'
+  _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  # Check for a 64-bit object if we didn't find anything.
+  if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then
+    _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  fi],[])
+  if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then
+    _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=/usr/lib:/lib
+  fi
+  ])
+  aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])
+fi
+])# _LT_SYS_MODULE_PATH_AIX
+
+
+# _LT_SHELL_INIT(ARG)
+# -------------------
+m4_define([_LT_SHELL_INIT],
+[m4_divert_text([M4SH-INIT], [$1
+])])# _LT_SHELL_INIT
+
+
+
+# _LT_PROG_ECHO_BACKSLASH
+# -----------------------
+# Find how we can fake an echo command that does not interpret backslash.
+# In particular, with Autoconf 2.60 or later we add some code to the start
+# of the generated configure script that will find a shell with a builtin
+# printf (that we can use as an echo command).
+m4_defun([_LT_PROG_ECHO_BACKSLASH],
+[ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
+ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
+
+AC_MSG_CHECKING([how to print strings])
+# Test print first, because it will be a builtin if present.
+if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \
+   test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then
+  ECHO='print -r --'
+elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then
+  ECHO='printf %s\n'
+else
+  # Use this function as a fallback that always works.
+  func_fallback_echo ()
+  {
+    eval 'cat <<_LTECHO_EOF
+$[]1
+_LTECHO_EOF'
+  }
+  ECHO='func_fallback_echo'
+fi
+
+# func_echo_all arg...
+# Invoke $ECHO with all args, space-separated.
+func_echo_all ()
+{
+    $ECHO "$*"
+}
+
+case $ECHO in
+  printf*) AC_MSG_RESULT([printf]) ;;
+  print*) AC_MSG_RESULT([print -r]) ;;
+  *) AC_MSG_RESULT([cat]) ;;
+esac
+
+m4_ifdef([_AS_DETECT_SUGGESTED],
+[_AS_DETECT_SUGGESTED([
+  test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || (
+    ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+    ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
+    ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
+    PATH=/empty FPATH=/empty; export PATH FPATH
+    test "X`printf %s $ECHO`" = "X$ECHO" \
+      || test "X`print -r -- $ECHO`" = "X$ECHO" )])])
+
+_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts])
+_LT_DECL([], [ECHO], [1], [An echo program that protects backslashes])
+])# _LT_PROG_ECHO_BACKSLASH
+
+
+# _LT_WITH_SYSROOT
+# ----------------
+AC_DEFUN([_LT_WITH_SYSROOT],
+[AC_MSG_CHECKING([for sysroot])
+AC_ARG_WITH([sysroot],
+[AS_HELP_STRING([--with-sysroot@<:@=DIR@:>@],
+  [Search for dependent libraries within DIR (or the compiler's sysroot
+   if not specified).])],
+[], [with_sysroot=no])
+
+dnl lt_sysroot will always be passed unquoted.  We quote it here
+dnl in case the user passed a directory name.
+lt_sysroot=
+case $with_sysroot in #(
+ yes)
+   if test yes = "$GCC"; then
+     lt_sysroot=`$CC --print-sysroot 2>/dev/null`
+   fi
+   ;; #(
+ /*)
+   lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"`
+   ;; #(
+ no|'')
+   ;; #(
+ *)
+   AC_MSG_RESULT([$with_sysroot])
+   AC_MSG_ERROR([The sysroot must be an absolute path.])
+   ;;
+esac
+
+ AC_MSG_RESULT([${lt_sysroot:-no}])
+_LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl
+[dependent libraries, and where our libraries should be installed.])])
+
+# _LT_ENABLE_LOCK
+# ---------------
+m4_defun([_LT_ENABLE_LOCK],
+[AC_ARG_ENABLE([libtool-lock],
+  [AS_HELP_STRING([--disable-libtool-lock],
+    [avoid locking (might break parallel builds)])])
+test no = "$enable_libtool_lock" || enable_libtool_lock=yes
+
+# Some flags need to be propagated to the compiler or linker for good
+# libtool support.
+case $host in
+ia64-*-hpux*)
+  # Find out what ABI is being produced by ac_compile, and set mode
+  # options accordingly.
+  echo 'int i;' > conftest.$ac_ext
+  if AC_TRY_EVAL(ac_compile); then
+    case `/usr/bin/file conftest.$ac_objext` in
+      *ELF-32*)
+	HPUX_IA64_MODE=32
+	;;
+      *ELF-64*)
+	HPUX_IA64_MODE=64
+	;;
+    esac
+  fi
+  rm -rf conftest*
+  ;;
+*-*-irix6*)
+  # Find out what ABI is being produced by ac_compile, and set linker
+  # options accordingly.
+  echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext
+  if AC_TRY_EVAL(ac_compile); then
+    if test yes = "$lt_cv_prog_gnu_ld"; then
+      case `/usr/bin/file conftest.$ac_objext` in
+	*32-bit*)
+	  LD="${LD-ld} -melf32bsmip"
+	  ;;
+	*N32*)
+	  LD="${LD-ld} -melf32bmipn32"
+	  ;;
+	*64-bit*)
+	  LD="${LD-ld} -melf64bmip"
+	;;
+      esac
+    else
+      case `/usr/bin/file conftest.$ac_objext` in
+	*32-bit*)
+	  LD="${LD-ld} -32"
+	  ;;
+	*N32*)
+	  LD="${LD-ld} -n32"
+	  ;;
+	*64-bit*)
+	  LD="${LD-ld} -64"
+	  ;;
+      esac
+    fi
+  fi
+  rm -rf conftest*
+  ;;
+
+mips64*-*linux*)
+  # Find out what ABI is being produced by ac_compile, and set linker
+  # options accordingly.
+  echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext
+  if AC_TRY_EVAL(ac_compile); then
+    emul=elf
+    case `/usr/bin/file conftest.$ac_objext` in
+      *32-bit*)
+	emul="${emul}32"
+	;;
+      *64-bit*)
+	emul="${emul}64"
+	;;
+    esac
+    case `/usr/bin/file conftest.$ac_objext` in
+      *MSB*)
+	emul="${emul}btsmip"
+	;;
+      *LSB*)
+	emul="${emul}ltsmip"
+	;;
+    esac
+    case `/usr/bin/file conftest.$ac_objext` in
+      *N32*)
+	emul="${emul}n32"
+	;;
+    esac
+    LD="${LD-ld} -m $emul"
+  fi
+  rm -rf conftest*
+  ;;
+
+x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \
+s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
+  # Find out what ABI is being produced by ac_compile, and set linker
+  # options accordingly.  Note that the listed cases only cover the
+  # situations where additional linker options are needed (such as when
+  # doing 32-bit compilation for a host where ld defaults to 64-bit, or
+  # vice versa); the common cases where no linker options are needed do
+  # not appear in the list.
+  echo 'int i;' > conftest.$ac_ext
+  if AC_TRY_EVAL(ac_compile); then
+    case `/usr/bin/file conftest.o` in
+      *32-bit*)
+	case $host in
+	  x86_64-*kfreebsd*-gnu)
+	    LD="${LD-ld} -m elf_i386_fbsd"
+	    ;;
+	  x86_64-*linux*)
+	    case `/usr/bin/file conftest.o` in
+	      *x86-64*)
+		LD="${LD-ld} -m elf32_x86_64"
+		;;
+	      *)
+		LD="${LD-ld} -m elf_i386"
+		;;
+	    esac
+	    ;;
+	  powerpc64le-*linux*)
+	    LD="${LD-ld} -m elf32lppclinux"
+	    ;;
+	  powerpc64-*linux*)
+	    LD="${LD-ld} -m elf32ppclinux"
+	    ;;
+	  s390x-*linux*)
+	    LD="${LD-ld} -m elf_s390"
+	    ;;
+	  sparc64-*linux*)
+	    LD="${LD-ld} -m elf32_sparc"
+	    ;;
+	esac
+	;;
+      *64-bit*)
+	case $host in
+	  x86_64-*kfreebsd*-gnu)
+	    LD="${LD-ld} -m elf_x86_64_fbsd"
+	    ;;
+	  x86_64-*linux*)
+	    LD="${LD-ld} -m elf_x86_64"
+	    ;;
+	  powerpcle-*linux*)
+	    LD="${LD-ld} -m elf64lppc"
+	    ;;
+	  powerpc-*linux*)
+	    LD="${LD-ld} -m elf64ppc"
+	    ;;
+	  s390*-*linux*|s390*-*tpf*)
+	    LD="${LD-ld} -m elf64_s390"
+	    ;;
+	  sparc*-*linux*)
+	    LD="${LD-ld} -m elf64_sparc"
+	    ;;
+	esac
+	;;
+    esac
+  fi
+  rm -rf conftest*
+  ;;
+
+*-*-sco3.2v5*)
+  # On SCO OpenServer 5, we need -belf to get full-featured binaries.
+  SAVE_CFLAGS=$CFLAGS
+  CFLAGS="$CFLAGS -belf"
+  AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf,
+    [AC_LANG_PUSH(C)
+     AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no])
+     AC_LANG_POP])
+  if test yes != "$lt_cv_cc_needs_belf"; then
+    # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
+    CFLAGS=$SAVE_CFLAGS
+  fi
+  ;;
+*-*solaris*)
+  # Find out what ABI is being produced by ac_compile, and set linker
+  # options accordingly.
+  echo 'int i;' > conftest.$ac_ext
+  if AC_TRY_EVAL(ac_compile); then
+    case `/usr/bin/file conftest.o` in
+    *64-bit*)
+      case $lt_cv_prog_gnu_ld in
+      yes*)
+        case $host in
+        i?86-*-solaris*|x86_64-*-solaris*)
+          LD="${LD-ld} -m elf_x86_64"
+          ;;
+        sparc*-*-solaris*)
+          LD="${LD-ld} -m elf64_sparc"
+          ;;
+        esac
+        # GNU ld 2.21 introduced _sol2 emulations.  Use them if available.
+        if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then
+          LD=${LD-ld}_sol2
+        fi
+        ;;
+      *)
+	if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then
+	  LD="${LD-ld} -64"
+	fi
+	;;
+      esac
+      ;;
+    esac
+  fi
+  rm -rf conftest*
+  ;;
+esac
+
+need_locks=$enable_libtool_lock
+])# _LT_ENABLE_LOCK
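The ABI probe above amounts to compiling a one-line source file and pattern-matching what file(1) reports about the resulting object. A minimal stand-alone sketch of the same idea in plain bash (the compiler default and the emulation names in the messages are illustrative, not taken from the macro):

    #!/usr/bin/env bash
    # Guess the default object ABI the way _LT_ENABLE_LOCK does: compile a
    # trivial translation unit and inspect what file(1) says about it.
    set -e
    tmpdir=$(mktemp -d)
    trap 'rm -rf "$tmpdir"' EXIT
    echo 'int i;' > "$tmpdir/conftest.c"
    ${CC:-cc} -c "$tmpdir/conftest.c" -o "$tmpdir/conftest.o"
    case $(/usr/bin/file "$tmpdir/conftest.o") in
      *32-bit*) echo "32-bit objects (e.g. LD='ld -m elf_i386')" ;;
      *64-bit*) echo "64-bit objects (e.g. LD='ld -m elf_x86_64')" ;;
      *)        echo "object format not recognised by file(1)" ;;
    esac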
+
+
+# _LT_PROG_AR
+# -----------
+m4_defun([_LT_PROG_AR],
+[AC_CHECK_TOOLS(AR, [ar], false)
+: ${AR=ar}
+: ${AR_FLAGS=cru}
+_LT_DECL([], [AR], [1], [The archiver])
+_LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive])
+
+AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file],
+  [lt_cv_ar_at_file=no
+   AC_COMPILE_IFELSE([AC_LANG_PROGRAM],
+     [echo conftest.$ac_objext > conftest.lst
+      lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD'
+      AC_TRY_EVAL([lt_ar_try])
+      if test 0 -eq "$ac_status"; then
+	# Ensure the archiver fails upon bogus file names.
+	rm -f conftest.$ac_objext libconftest.a
+	AC_TRY_EVAL([lt_ar_try])
+	if test 0 -ne "$ac_status"; then
+          lt_cv_ar_at_file=@
+        fi
+      fi
+      rm -f conftest.* libconftest.a
+     ])
+  ])
+
+if test no = "$lt_cv_ar_at_file"; then
+  archiver_list_spec=
+else
+  archiver_list_spec=$lt_cv_ar_at_file
+fi
+_LT_DECL([], [archiver_list_spec], [1],
+  [How to feed a file listing to the archiver])
+])# _LT_PROG_AR
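Outside autoconf, the @FILE probe reduces to two archiver runs: one with a list that names a real object (which must succeed) and one after that object has been removed (which must fail, proving the list was actually read). A rough bash sketch, assuming a GNU-style ar:

    # Check whether the archiver accepts an @FILE response list.
    echo 'int i;' > conftest.c
    ${CC:-cc} -c conftest.c -o conftest.o
    echo conftest.o > conftest.lst
    at_file_support=no
    if ar cru libconftest.a @conftest.lst 2>/dev/null; then
      rm -f conftest.o libconftest.a
      # A bogus entry in the list must now make the archiver fail.
      ar cru libconftest.a @conftest.lst 2>/dev/null || at_file_support=yes
    fi
    rm -f conftest.* libconftest.a
    echo "archiver @FILE support: $at_file_support"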
+
+
+# _LT_CMD_OLD_ARCHIVE
+# -------------------
+m4_defun([_LT_CMD_OLD_ARCHIVE],
+[_LT_PROG_AR
+
+AC_CHECK_TOOL(STRIP, strip, :)
+test -z "$STRIP" && STRIP=:
+_LT_DECL([], [STRIP], [1], [A symbol stripping program])
+
+AC_CHECK_TOOL(RANLIB, ranlib, :)
+test -z "$RANLIB" && RANLIB=:
+_LT_DECL([], [RANLIB], [1],
+    [Commands used to install an old-style archive])
+
+# Determine commands to create old-style static archives.
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs'
+old_postinstall_cmds='chmod 644 $oldlib'
+old_postuninstall_cmds=
+
+if test -n "$RANLIB"; then
+  case $host_os in
+  bitrig* | openbsd*)
+    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib"
+    ;;
+  *)
+    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib"
+    ;;
+  esac
+  old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib"
+fi
+
+case $host_os in
+  darwin*)
+    lock_old_archive_extraction=yes ;;
+  *)
+    lock_old_archive_extraction=no ;;
+esac
+_LT_DECL([], [old_postinstall_cmds], [2])
+_LT_DECL([], [old_postuninstall_cmds], [2])
+_LT_TAGDECL([], [old_archive_cmds], [2],
+    [Commands used to build an old-style archive])
+_LT_DECL([], [lock_old_archive_extraction], [0],
+    [Whether to use a lock for old archive extraction])
+])# _LT_CMD_OLD_ARCHIVE
+
+
+# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS,
+#		[OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE])
+# ----------------------------------------------------------------
+# Check whether the given compiler option works
+AC_DEFUN([_LT_COMPILER_OPTION],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_SED])dnl
+AC_CACHE_CHECK([$1], [$2],
+  [$2=no
+   m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4])
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+   lt_compiler_flag="$3"  ## exclude from sc_useless_quotes_in_assignment
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   # The option is referenced via a variable to avoid confusing sed.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
+   (eval "$lt_compile" 2>conftest.err)
+   ac_status=$?
+   cat conftest.err >&AS_MESSAGE_LOG_FD
+   echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
+   if (exit $ac_status) && test -s "$ac_outfile"; then
+     # The compiler can only warn and ignore the option if not recognized
+     # So say no if there are warnings other than the usual output.
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
+     $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+     if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+       $2=yes
+     fi
+   fi
+   $RM conftest*
+])
+
+if test yes = "[$]$2"; then
+    m4_if([$5], , :, [$5])
+else
+    m4_if([$6], , :, [$6])
+fi
+])# _LT_COMPILER_OPTION
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], [])
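The sed pipeline in _LT_COMPILER_OPTION splices the candidate flag into the compile command: after the last *FLAGS word if there is one, otherwise before the conftest source, otherwise at the end. A small illustration with a made-up command string (the macro itself keeps the flag in a variable and defers expansion to eval; here it is spliced in directly for readability):

    ac_compile='gcc -c -g -O2 $CFLAGS conftest.c'   # hypothetical compile command
    lt_compiler_flag='-fno-rtti'                    # flag under test
    echo "$ac_compile" | sed \
      -e 's:.*FLAGS}\{0,1\} :&'"$lt_compiler_flag"' :; t' \
      -e 's: [^ ]*conftest\.: '"$lt_compiler_flag"'&:; t' \
      -e 's:$: '"$lt_compiler_flag"':'
    # prints: gcc -c -g -O2 $CFLAGS -fno-rtti conftest.c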
+
+
+# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS,
+#                  [ACTION-SUCCESS], [ACTION-FAILURE])
+# ----------------------------------------------------
+# Check whether the given linker option works
+AC_DEFUN([_LT_LINKER_OPTION],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_SED])dnl
+AC_CACHE_CHECK([$1], [$2],
+  [$2=no
+   save_LDFLAGS=$LDFLAGS
+   LDFLAGS="$LDFLAGS $3"
+   echo "$lt_simple_link_test_code" > conftest.$ac_ext
+   if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+     # The linker can only warn and ignore the option if not recognized
+     # So say no if there are warnings
+     if test -s conftest.err; then
+       # Append any errors to the config.log.
+       cat conftest.err 1>&AS_MESSAGE_LOG_FD
+       $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
+       $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+       if diff conftest.exp conftest.er2 >/dev/null; then
+         $2=yes
+       fi
+     else
+       $2=yes
+     fi
+   fi
+   $RM -r conftest*
+   LDFLAGS=$save_LDFLAGS
+])
+
+if test yes = "[$]$2"; then
+    m4_if([$4], , :, [$4])
+else
+    m4_if([$5], , :, [$5])
+fi
+])# _LT_LINKER_OPTION
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], [])
+
+
+# LT_CMD_MAX_LEN
+#---------------
+AC_DEFUN([LT_CMD_MAX_LEN],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+# find the maximum length of command line arguments
+AC_MSG_CHECKING([the maximum length of command line arguments])
+AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl
+  i=0
+  teststring=ABCD
+
+  case $build_os in
+  msdosdjgpp*)
+    # On DJGPP, this test can blow up pretty badly due to problems in libc
+    # (any single argument exceeding 2000 bytes causes a buffer overrun
+    # during glob expansion).  Even if it were fixed, the result of this
+    # check would be larger than it should be.
+    lt_cv_sys_max_cmd_len=12288;    # 12K is about right
+    ;;
+
+  gnu*)
+    # Under GNU Hurd, this test is not required because there is
+    # no limit to the length of command line arguments.
+    # Libtool will interpret -1 as no limit whatsoever
+    lt_cv_sys_max_cmd_len=-1;
+    ;;
+
+  cygwin* | mingw* | cegcc*)
+    # On Win9x/ME, this test blows up -- it succeeds, but takes
+    # about 5 minutes as the teststring grows exponentially.
+    # Worse, since 9x/ME are not pre-emptively multitasking,
+    # you end up with a "frozen" computer, even though with patience
+    # the test eventually succeeds (with a max line length of 256k).
+    # Instead, let's just punt: use the minimum linelength reported by
+    # all of the supported platforms: 8192 (on NT/2K/XP).
+    lt_cv_sys_max_cmd_len=8192;
+    ;;
+
+  mint*)
+    # On MiNT this can take a long time and run out of memory.
+    lt_cv_sys_max_cmd_len=8192;
+    ;;
+
+  amigaos*)
+    # On AmigaOS with pdksh, this test takes hours, literally.
+    # So we just punt and use a minimum line length of 8192.
+    lt_cv_sys_max_cmd_len=8192;
+    ;;
+
+  bitrig* | darwin* | dragonfly* | freebsd* | netbsd* | openbsd*)
+    # This has been around since 386BSD, at least.  Likely further.
+    if test -x /sbin/sysctl; then
+      lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax`
+    elif test -x /usr/sbin/sysctl; then
+      lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax`
+    else
+      lt_cv_sys_max_cmd_len=65536	# usable default for all BSDs
+    fi
+    # And add a safety zone
+    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+    ;;
+
+  interix*)
+    # We know the value 262144 and hardcode it with a safety zone (like BSD)
+    lt_cv_sys_max_cmd_len=196608
+    ;;
+
+  os2*)
+    # The test takes a long time on OS/2.
+    lt_cv_sys_max_cmd_len=8192
+    ;;
+
+  osf*)
+    # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
+    # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
+    # nice to cause kernel panics, so let's avoid the loop below.

+    # First set a reasonable default.
+    lt_cv_sys_max_cmd_len=16384
+    #
+    if test -x /sbin/sysconfig; then
+      case `/sbin/sysconfig -q proc exec_disable_arg_limit` in
+        *1*) lt_cv_sys_max_cmd_len=-1 ;;
+      esac
+    fi
+    ;;
+  sco3.2v5*)
+    lt_cv_sys_max_cmd_len=102400
+    ;;
+  sysv5* | sco5v6* | sysv4.2uw2*)
+    kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null`
+    if test -n "$kargmax"; then
+      lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[	 ]]//'`
+    else
+      lt_cv_sys_max_cmd_len=32768
+    fi
+    ;;
+  *)
+    lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
+    if test -n "$lt_cv_sys_max_cmd_len" && \
+       test undefined != "$lt_cv_sys_max_cmd_len"; then
+      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+    else
+      # Make teststring a little bigger before we do anything with it.
+      # a 1K string should be a reasonable start.
+      for i in 1 2 3 4 5 6 7 8; do
+        teststring=$teststring$teststring
+      done
+      SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}}
+      # If test is not a shell built-in, we'll probably end up computing a
+      # maximum length that is only half of the actual maximum length, but
+      # we can't tell.
+      while { test X`env echo "$teststring$teststring" 2>/dev/null` \
+	         = "X$teststring$teststring"; } >/dev/null 2>&1 &&
+	      test 17 != "$i" # 1/2 MB should be enough
+      do
+        i=`expr $i + 1`
+        teststring=$teststring$teststring
+      done
+      # Only check the string length outside the loop.
+      lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1`
+      teststring=
+      # Add a significant safety factor because C++ compilers can tack on
+      # massive amounts of additional arguments before passing them to the
+      # linker.  It appears as though 1/2 is a usable value.
+      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
+    fi
+    ;;
+  esac
+])
+if test -n "$lt_cv_sys_max_cmd_len"; then
+  AC_MSG_RESULT($lt_cv_sys_max_cmd_len)
+else
+  AC_MSG_RESULT(none)
+fi
+max_cmd_len=$lt_cv_sys_max_cmd_len
+_LT_DECL([], [max_cmd_len], [0],
+    [What is the maximum length of a command?])
+])# LT_CMD_MAX_LEN
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], [])
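On most modern systems the default branch of the probe reduces to getconf(1) plus the same 3/4 safety margin; a hedged sketch of just that fallback, using 8192 as the catch-all floor chosen for several hosts above:

    # Rough equivalent of the generic branch of LT_CMD_MAX_LEN.
    max_cmd_len=$( (getconf ARG_MAX) 2>/dev/null )
    if test -n "$max_cmd_len" && test undefined != "$max_cmd_len"; then
      max_cmd_len=$(expr "$max_cmd_len" / 4)
      max_cmd_len=$(expr "$max_cmd_len" \* 3)
    else
      max_cmd_len=8192   # conservative value used for several hosts above
    fi
    echo "usable command-line length: $max_cmd_len bytes"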
+
+
+# _LT_HEADER_DLFCN
+# ----------------
+m4_defun([_LT_HEADER_DLFCN],
+[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl
+])# _LT_HEADER_DLFCN
+
+
+# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE,
+#                      ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING)
+# ----------------------------------------------------------------
+m4_defun([_LT_TRY_DLOPEN_SELF],
+[m4_require([_LT_HEADER_DLFCN])dnl
+if test yes = "$cross_compiling"; then :
+  [$4]
+else
+  lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+  lt_status=$lt_dlunknown
+  cat > conftest.$ac_ext <<_LT_EOF
+[#line $LINENO "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+#  define LT_DLGLOBAL		RTLD_GLOBAL
+#else
+#  ifdef DL_GLOBAL
+#    define LT_DLGLOBAL		DL_GLOBAL
+#  else
+#    define LT_DLGLOBAL		0
+#  endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+   find out it does not work on some platforms. */
+#ifndef LT_DLLAZY_OR_NOW
+#  ifdef RTLD_LAZY
+#    define LT_DLLAZY_OR_NOW		RTLD_LAZY
+#  else
+#    ifdef DL_LAZY
+#      define LT_DLLAZY_OR_NOW		DL_LAZY
+#    else
+#      ifdef RTLD_NOW
+#        define LT_DLLAZY_OR_NOW	RTLD_NOW
+#      else
+#        ifdef DL_NOW
+#          define LT_DLLAZY_OR_NOW	DL_NOW
+#        else
+#          define LT_DLLAZY_OR_NOW	0
+#        endif
+#      endif
+#    endif
+#  endif
+#endif
+
+/* When -fvisibility=hidden is used, assume the code has been annotated
+   correspondingly for the symbols needed.  */
+#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
+int main ()
+{
+  void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+  int status = $lt_dlunknown;
+
+  if (self)
+    {
+      if (dlsym (self,"fnord"))       status = $lt_dlno_uscore;
+      else
+        {
+	  if (dlsym( self,"_fnord"))  status = $lt_dlneed_uscore;
+          else puts (dlerror ());
+	}
+      /* dlclose (self); */
+    }
+  else
+    puts (dlerror ());
+
+  return status;
+}]
+_LT_EOF
+  if AC_TRY_EVAL(ac_link) && test -s "conftest$ac_exeext" 2>/dev/null; then
+    (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null
+    lt_status=$?
+    case x$lt_status in
+      x$lt_dlno_uscore) $1 ;;
+      x$lt_dlneed_uscore) $2 ;;
+      x$lt_dlunknown|x*) $3 ;;
+    esac
+  else :
+    # compilation failed
+    $3
+  fi
+fi
+rm -fr conftest*
+])# _LT_TRY_DLOPEN_SELF
+
+
+# LT_SYS_DLOPEN_SELF
+# ------------------
+AC_DEFUN([LT_SYS_DLOPEN_SELF],
+[m4_require([_LT_HEADER_DLFCN])dnl
+if test yes != "$enable_dlopen"; then
+  enable_dlopen=unknown
+  enable_dlopen_self=unknown
+  enable_dlopen_self_static=unknown
+else
+  lt_cv_dlopen=no
+  lt_cv_dlopen_libs=
+
+  case $host_os in
+  beos*)
+    lt_cv_dlopen=load_add_on
+    lt_cv_dlopen_libs=
+    lt_cv_dlopen_self=yes
+    ;;
+
+  mingw* | pw32* | cegcc*)
+    lt_cv_dlopen=LoadLibrary
+    lt_cv_dlopen_libs=
+    ;;
+
+  cygwin*)
+    lt_cv_dlopen=dlopen
+    lt_cv_dlopen_libs=
+    ;;
+
+  darwin*)
+    # if libdl is installed we need to link against it
+    AC_CHECK_LIB([dl], [dlopen],
+		[lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl],[
+    lt_cv_dlopen=dyld
+    lt_cv_dlopen_libs=
+    lt_cv_dlopen_self=yes
+    ])
+    ;;
+
+  tpf*)
+    # Don't try to run any link tests for TPF.  We know it's impossible
+    # because TPF is a cross-compiler, and we know how we open DSOs.
+    lt_cv_dlopen=dlopen
+    lt_cv_dlopen_libs=
+    lt_cv_dlopen_self=no
+    ;;
+
+  *)
+    AC_CHECK_FUNC([shl_load],
+	  [lt_cv_dlopen=shl_load],
+      [AC_CHECK_LIB([dld], [shl_load],
+	    [lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld],
+	[AC_CHECK_FUNC([dlopen],
+	      [lt_cv_dlopen=dlopen],
+	  [AC_CHECK_LIB([dl], [dlopen],
+		[lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl],
+	    [AC_CHECK_LIB([svld], [dlopen],
+		  [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld],
+	      [AC_CHECK_LIB([dld], [dld_link],
+		    [lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld])
+	      ])
+	    ])
+	  ])
+	])
+      ])
+    ;;
+  esac
+
+  if test no = "$lt_cv_dlopen"; then
+    enable_dlopen=no
+  else
+    enable_dlopen=yes
+  fi
+
+  case $lt_cv_dlopen in
+  dlopen)
+    save_CPPFLAGS=$CPPFLAGS
+    test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+
+    save_LDFLAGS=$LDFLAGS
+    wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+
+    save_LIBS=$LIBS
+    LIBS="$lt_cv_dlopen_libs $LIBS"
+
+    AC_CACHE_CHECK([whether a program can dlopen itself],
+	  lt_cv_dlopen_self, [dnl
+	  _LT_TRY_DLOPEN_SELF(
+	    lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes,
+	    lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross)
+    ])
+
+    if test yes = "$lt_cv_dlopen_self"; then
+      wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\"
+      AC_CACHE_CHECK([whether a statically linked program can dlopen itself],
+	  lt_cv_dlopen_self_static, [dnl
+	  _LT_TRY_DLOPEN_SELF(
+	    lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes,
+	    lt_cv_dlopen_self_static=no,  lt_cv_dlopen_self_static=cross)
+      ])
+    fi
+
+    CPPFLAGS=$save_CPPFLAGS
+    LDFLAGS=$save_LDFLAGS
+    LIBS=$save_LIBS
+    ;;
+  esac
+
+  case $lt_cv_dlopen_self in
+  yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
+  *) enable_dlopen_self=unknown ;;
+  esac
+
+  case $lt_cv_dlopen_self_static in
+  yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
+  *) enable_dlopen_self_static=unknown ;;
+  esac
+fi
+_LT_DECL([dlopen_support], [enable_dlopen], [0],
+	 [Whether dlopen is supported])
+_LT_DECL([dlopen_self], [enable_dlopen_self], [0],
+	 [Whether dlopen of programs is supported])
+_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0],
+	 [Whether dlopen of statically linked programs is supported])
+])# LT_SYS_DLOPEN_SELF
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], [])
+
+
+# _LT_COMPILER_C_O([TAGNAME])
+# ---------------------------
+# Check to see if options -c and -o are simultaneously supported by compiler.
+# This macro does not hard code the compiler like AC_PROG_CC_C_O.
+m4_defun([_LT_COMPILER_C_O],
+[m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_TAG_COMPILER])dnl
+AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext],
+  [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)],
+  [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no
+   $RM -r conftest 2>/dev/null
+   mkdir conftest
+   cd conftest
+   mkdir out
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+   lt_compiler_flag="-o out/conftest2.$ac_objext"
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
+   (eval "$lt_compile" 2>out/conftest.err)
+   ac_status=$?
+   cat out/conftest.err >&AS_MESSAGE_LOG_FD
+   echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
+   if (exit $ac_status) && test -s out/conftest2.$ac_objext
+   then
+     # The compiler can only warn and ignore the option if not recognized
+     # So say no if there are warnings
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
+     $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+     if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+       _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes
+     fi
+   fi
+   chmod u+w . 2>&AS_MESSAGE_LOG_FD
+   $RM conftest*
+   # SGI C++ compiler will create directory out/ii_files/ for
+   # template instantiation
+   test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+   $RM out/* && rmdir out
+   cd ..
+   $RM -r conftest
+   $RM conftest*
+])
+_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1],
+	[Does compiler simultaneously support -c and -o options?])
+])# _LT_COMPILER_C_O
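Stripped of the caching, logging, and boilerplate-warning filtering, the -c/-o check compiles into an explicitly named object in a subdirectory and treats any remaining diagnostic output as failure. A bash sketch of that core:

    # Does the compiler accept -c and -o together?
    mkdir -p out
    echo 'int i;' > conftest.c
    if ${CC:-cc} -c conftest.c -o out/conftest2.o 2>out/conftest.err &&
       test -s out/conftest2.o && test ! -s out/conftest.err; then
      echo "compiler supports -c -o"
    else
      echo "compiler does not reliably support -c -o"
    fi
    rm -rf out conftest.c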
+
+
+# _LT_COMPILER_FILE_LOCKS([TAGNAME])
+# ----------------------------------
+# Check to see if we can do hard links to lock some files if needed
+m4_defun([_LT_COMPILER_FILE_LOCKS],
+[m4_require([_LT_ENABLE_LOCK])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+_LT_COMPILER_C_O([$1])
+
+hard_links=nottested
+if test no = "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" && test no != "$need_locks"; then
+  # do not overwrite the value of need_locks provided by the user
+  AC_MSG_CHECKING([if we can lock with hard links])
+  hard_links=yes
+  $RM conftest*
+  ln conftest.a conftest.b 2>/dev/null && hard_links=no
+  touch conftest.a
+  ln conftest.a conftest.b 2>&5 || hard_links=no
+  ln conftest.a conftest.b 2>/dev/null && hard_links=no
+  AC_MSG_RESULT([$hard_links])
+  if test no = "$hard_links"; then
+    AC_MSG_WARN(['$CC' does not support '-c -o', so 'make -j' may be unsafe])
+    need_locks=warn
+  fi
+else
+  need_locks=no
+fi
+_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?])
+])# _LT_COMPILER_FILE_LOCKS
+
+
+# _LT_CHECK_OBJDIR
+# ----------------
+m4_defun([_LT_CHECK_OBJDIR],
+[AC_CACHE_CHECK([for objdir], [lt_cv_objdir],
+[rm -f .libs 2>/dev/null
+mkdir .libs 2>/dev/null
+if test -d .libs; then
+  lt_cv_objdir=.libs
+else
+  # MS-DOS does not allow filenames that begin with a dot.
+  lt_cv_objdir=_libs
+fi
+rmdir .libs 2>/dev/null])
+objdir=$lt_cv_objdir
+_LT_DECL([], [objdir], [0],
+         [The name of the directory that contains temporary libtool files])dnl
+m4_pattern_allow([LT_OBJDIR])dnl
+AC_DEFINE_UNQUOTED([LT_OBJDIR], "$lt_cv_objdir/",
+  [Define to the sub-directory where libtool stores uninstalled libraries.])
+])# _LT_CHECK_OBJDIR
+
+
+# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME])
+# --------------------------------------
+# Check hardcoding attributes.
+m4_defun([_LT_LINKER_HARDCODE_LIBPATH],
+[AC_MSG_CHECKING([how to hardcode library paths into programs])
+_LT_TAGVAR(hardcode_action, $1)=
+if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" ||
+   test -n "$_LT_TAGVAR(runpath_var, $1)" ||
+   test yes = "$_LT_TAGVAR(hardcode_automatic, $1)"; then
+
+  # We can hardcode non-existent directories.
+  if test no != "$_LT_TAGVAR(hardcode_direct, $1)" &&
+     # If the only mechanism to avoid hardcoding is shlibpath_var, we
+     # have to relink, otherwise we might link with an installed library
+     # when we should be linking with a yet-to-be-installed one
+     ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" &&
+     test no != "$_LT_TAGVAR(hardcode_minus_L, $1)"; then
+    # Linking always hardcodes the temporary library directory.
+    _LT_TAGVAR(hardcode_action, $1)=relink
+  else
+    # We can link without hardcoding, and we can hardcode nonexisting dirs.
+    _LT_TAGVAR(hardcode_action, $1)=immediate
+  fi
+else
+  # We cannot hardcode anything, or else we can only hardcode existing
+  # directories.
+  _LT_TAGVAR(hardcode_action, $1)=unsupported
+fi
+AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)])
+
+if test relink = "$_LT_TAGVAR(hardcode_action, $1)" ||
+   test yes = "$_LT_TAGVAR(inherit_rpath, $1)"; then
+  # Fast installation is not supported
+  enable_fast_install=no
+elif test yes = "$shlibpath_overrides_runpath" ||
+     test no = "$enable_shared"; then
+  # Fast installation is not necessary
+  enable_fast_install=needless
+fi
+_LT_TAGDECL([], [hardcode_action], [0],
+    [How to hardcode a shared library path into an executable])
+])# _LT_LINKER_HARDCODE_LIBPATH
+
+
+# _LT_CMD_STRIPLIB
+# ----------------
+m4_defun([_LT_CMD_STRIPLIB],
+[m4_require([_LT_DECL_EGREP])
+striplib=
+old_striplib=
+AC_MSG_CHECKING([whether stripping libraries is possible])
+if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then
+  test -z "$old_striplib" && old_striplib="$STRIP --strip-debug"
+  test -z "$striplib" && striplib="$STRIP --strip-unneeded"
+  AC_MSG_RESULT([yes])
+else
+# FIXME - insert some real tests, host_os isn't really good enough
+  case $host_os in
+  darwin*)
+    if test -n "$STRIP"; then
+      striplib="$STRIP -x"
+      old_striplib="$STRIP -S"
+      AC_MSG_RESULT([yes])
+    else
+      AC_MSG_RESULT([no])
+    fi
+    ;;
+  *)
+    AC_MSG_RESULT([no])
+    ;;
+  esac
+fi
+_LT_DECL([], [old_striplib], [1], [Commands to strip libraries])
+_LT_DECL([], [striplib], [1])
+])# _LT_CMD_STRIPLIB
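How the two resulting commands are typically applied, with GNU strip on ELF hosts and the Darwin fallback chosen above (the library names here are placeholders):

    if strip -V 2>&1 | grep "GNU strip" >/dev/null; then
      strip --strip-debug    libfoo.a          # static archive: drop debug info only
      strip --strip-unneeded libfoo.so.1.2.3   # shared library: drop all symbols not needed at run time
    else
      case $(uname -s) in
        Darwin)
          strip -S libfoo.a        # -S: remove debugging symbols only
          strip -x libfoo.dylib    # -x: remove local symbols
          ;;
      esac
    fi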
+
+
+# _LT_PREPARE_MUNGE_PATH_LIST
+# ---------------------------
+# Make sure func_munge_path_list() is defined correctly.
+m4_defun([_LT_PREPARE_MUNGE_PATH_LIST],
+[[# func_munge_path_list VARIABLE PATH
+# -----------------------------------
+# VARIABLE is name of variable containing _space_ separated list of
+# directories to be munged by the contents of PATH, which is string
+# having a format:
+# "DIR[:DIR]:"
+#       string "DIR[ DIR]" will be prepended to VARIABLE
+# ":DIR[:DIR]"
+#       string "DIR[ DIR]" will be appended to VARIABLE
+# "DIRP[:DIRP]::[DIRA:]DIRA"
+#       string "DIRP[ DIRP]" will be prepended to VARIABLE and string
+#       "DIRA[ DIRA]" will be appended to VARIABLE
+# "DIR[:DIR]"
+#       VARIABLE will be replaced by "DIR[ DIR]"
+func_munge_path_list ()
+{
+    case x@S|@2 in
+    x)
+        ;;
+    *:)
+        eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'` \@S|@@S|@1\"
+        ;;
+    x:*)
+        eval @S|@1=\"\@S|@@S|@1 `$ECHO @S|@2 | $SED 's/:/ /g'`\"
+        ;;
+    *::*)
+        eval @S|@1=\"\@S|@@S|@1\ `$ECHO @S|@2 | $SED -e 's/.*:://' -e 's/:/ /g'`\"
+        eval @S|@1=\"`$ECHO @S|@2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \@S|@@S|@1\"
+        ;;
+    *)
+        eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'`\"
+        ;;
+    esac
+}
+]])# _LT_PREPARE_MUNGE_PATH_LIST
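A plain-shell rendering of the function above (in the generated script the @S|@ quadrigraph expands to $), followed by the four documented cases; the directory names are placeholders only:

    ECHO=echo SED=sed
    func_munge_path_list () {
        case x$2 in
        x)   ;;
        *:)  eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" ;;
        x:*) eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" ;;
        *::*)
            eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\"
            eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\"
            ;;
        *)   eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" ;;
        esac
    }
    path="/lib /usr/lib"
    func_munge_path_list path "/opt/lib:"        # prepend -> "/opt/lib /lib /usr/lib"
    func_munge_path_list path ":/extra/lib"      # append  -> "... /extra/lib"
    func_munge_path_list path "/pre::/post"      # both    -> "/pre ... /post"
    func_munge_path_list path "/only1:/only2"    # replace -> "/only1 /only2"
    echo "$path"                                 # -> "/only1 /only2"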
+
+
+# _LT_SYS_DYNAMIC_LINKER([TAG])
+# -----------------------------
+# PORTME Fill in your ld.so characteristics
+m4_defun([_LT_SYS_DYNAMIC_LINKER],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_OBJDUMP])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_CHECK_SHELL_FEATURES])dnl
+m4_require([_LT_PREPARE_MUNGE_PATH_LIST])dnl
+AC_MSG_CHECKING([dynamic linker characteristics])
+m4_if([$1],
+	[], [
+if test yes = "$GCC"; then
+  case $host_os in
+    darwin*) lt_awk_arg='/^libraries:/,/LR/' ;;
+    *) lt_awk_arg='/^libraries:/' ;;
+  esac
+  case $host_os in
+    mingw* | cegcc*) lt_sed_strip_eq='s|=\([[A-Za-z]]:\)|\1|g' ;;
+    *) lt_sed_strip_eq='s|=/|/|g' ;;
+  esac
+  lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq`
+  case $lt_search_path_spec in
+  *\;*)
+    # if the path contains ";" then we assume it to be the separator
+    # otherwise default to the standard path separator (i.e. ":") - it is
+    # assumed that no part of a normal pathname contains ";" but that should
+    # be okay in the real world where ";" in dirpaths is itself problematic.
+    lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'`
+    ;;
+  *)
+    lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"`
+    ;;
+  esac
+  # Ok, now we have the path, separated by spaces, we can step through it
+  # and add multilib dir if necessary...
+  lt_tmp_lt_search_path_spec=
+  lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null`
+  # ...but if some path component already ends with the multilib dir we assume
+  # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer).
+  case "$lt_multi_os_dir; $lt_search_path_spec " in
+  "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*)
+    lt_multi_os_dir=
+    ;;
+  esac
+  for lt_sys_path in $lt_search_path_spec; do
+    if test -d "$lt_sys_path$lt_multi_os_dir"; then
+      lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir"
+    elif test -n "$lt_multi_os_dir"; then
+      test -d "$lt_sys_path" && \
+	lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path"
+    fi
+  done
+  lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk '
+BEGIN {RS = " "; FS = "/|\n";} {
+  lt_foo = "";
+  lt_count = 0;
+  for (lt_i = NF; lt_i > 0; lt_i--) {
+    if ($lt_i != "" && $lt_i != ".") {
+      if ($lt_i == "..") {
+        lt_count++;
+      } else {
+        if (lt_count == 0) {
+          lt_foo = "/" $lt_i lt_foo;
+        } else {
+          lt_count--;
+        }
+      }
+    }
+  }
+  if (lt_foo != "") { lt_freq[[lt_foo]]++; }
+  if (lt_freq[[lt_foo]] == 1) { print lt_foo; }
+}'`
+  # AWK program above erroneously prepends '/' to C:/dos/paths
+  # for these hosts.
+  case $host_os in
+    mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\
+      $SED 's|/\([[A-Za-z]]:\)|\1|g'` ;;
+  esac
+  sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP`
+else
+  sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+fi])
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+shrext_cmds=.so
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+need_lib_prefix=unknown
+hardcode_into_libs=no
+
+# when you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments
+need_version=unknown
+
+AC_ARG_VAR([LT_SYS_LIBRARY_PATH],
+[User-defined run-time library search path.])
+
+case $host_os in
+aix3*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='$libname$release$shared_ext$versuffix $libname.a'
+  shlibpath_var=LIBPATH
+
+  # AIX 3 has no versioning support, so we append a major version to the name.
+  soname_spec='$libname$release$shared_ext$major'
+  ;;
+
+aix[[4-9]]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  hardcode_into_libs=yes
+  if test ia64 = "$host_cpu"; then
+    # AIX 5 supports IA64
+    library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext'
+    shlibpath_var=LD_LIBRARY_PATH
+  else
+    # With GCC up to 2.95.x, collect2 would create an import file
+    # for dependence libraries.  The import file would start with
+    # the line '#! .'.  This would cause the generated library to
+    # depend on '.', always an invalid library.  This was fixed in
+    # development snapshots of GCC prior to 3.0.
+    case $host_os in
+      aix4 | aix4.[[01]] | aix4.[[01]].*)
+      if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+	   echo ' yes '
+	   echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then
+	:
+      else
+	can_build_shared=no
+      fi
+      ;;
+    esac
+    # Using Import Files as archive members, it is possible to support
+    # filename-based versioning of shared library archives on AIX. While
+    # this would work both with and without runtime linking, it will
+    # prevent static linking of such archives. So we do filename-based
+    # shared library versioning with .so extension only, which is used
+    # when both runtime linking and shared linking is enabled.
+    # Unfortunately, runtime linking may impact performance, so we do
+    # not want this to be the default eventually. Also, we use the
+    # versioned .so libs for executables only if there is the -brtl
+    # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only.
+    # To allow for filename-based versioning support, we need to create
+    # libNAME.so.V as an archive file, containing:
+    # *) an Import File, referring to the versioned filename of the
+    #    archive as well as the shared archive member, telling the
+    #    bitwidth (32 or 64) of that shared object, and providing the
+    #    list of exported symbols of that shared object, eventually
+    #    decorated with the 'weak' keyword
+    # *) the shared object with the F_LOADONLY flag set, to really avoid
+    #    it being seen by the linker.
+    # At run time we better use the real file rather than another symlink,
+    # but for link time we create the symlink libNAME.so -> libNAME.so.V
+
+    case $with_aix_soname,$aix_use_runtimelinking in
+    # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct
+    # soname into executable. Probably we can add versioning support to
+    # collect2, so additional links can be useful in future.
+    aix,yes) # traditional libtool
+      dynamic_linker='AIX unversionable lib.so'
+      # If using run time linking (on AIX 4.2 or later) use lib<name>.so
+      # instead of lib<name>.a to let people know that these are not
+      # typical AIX shared libraries.
+      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+      ;;
+    aix,no) # traditional AIX only
+      dynamic_linker='AIX lib.a[(]lib.so.V[)]'
+      # We preserve .a as extension for shared libraries through AIX4.2
+      # and later when we are not doing run time linking.
+      library_names_spec='$libname$release.a $libname.a'
+      soname_spec='$libname$release$shared_ext$major'
+      ;;
+    svr4,*) # full svr4 only
+      dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)]"
+      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
+      # We do not specify a path in Import Files, so LIBPATH fires.
+      shlibpath_overrides_runpath=yes
+      ;;
+    *,yes) # both, prefer svr4
+      dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)], lib.a[(]lib.so.V[)]"
+      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
+      # unpreferred sharedlib libNAME.a needs extra handling
+      postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"'
+      postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"'
+      # We do not specify a path in Import Files, so LIBPATH fires.
+      shlibpath_overrides_runpath=yes
+      ;;
+    *,no) # both, prefer aix
+      dynamic_linker="AIX lib.a[(]lib.so.V[)], lib.so.V[(]$shared_archive_member_spec.o[)]"
+      library_names_spec='$libname$release.a $libname.a'
+      soname_spec='$libname$release$shared_ext$major'
+      # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling
+      postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)'
+      postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"'
+      ;;
+    esac
+    shlibpath_var=LIBPATH
+  fi
+  ;;
+
+amigaos*)
+  case $host_cpu in
+  powerpc)
+    # Since July 2007 AmigaOS4 officially supports .so libraries.
+    # When compiling the executable, add -use-dynld -Lsobjs: to the compileline.
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+    ;;
+  m68k)
+    library_names_spec='$libname.ixlibrary $libname.a'
+    # Create ${libname}_ixlibrary.a entries in /sys/libs.
+    finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+    ;;
+  esac
+  ;;
+
+beos*)
+  library_names_spec='$libname$shared_ext'
+  dynamic_linker="$host_os ld.so"
+  shlibpath_var=LIBRARY_PATH
+  ;;
+
+bsdi[[45]]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+  sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+  # the default ld.so.conf also contains /usr/contrib/lib and
+  # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+  # libtool to hard-code these into programs
+  ;;
+
+cygwin* | mingw* | pw32* | cegcc*)
+  version_type=windows
+  shrext_cmds=.dll
+  need_version=no
+  need_lib_prefix=no
+
+  case $GCC,$cc_basename in
+  yes,*)
+    # gcc
+    library_names_spec='$libname.dll.a'
+    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+    postinstall_cmds='base_file=`basename \$file`~
+      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
+      dldir=$destdir/`dirname \$dlpath`~
+      test -d \$dldir || mkdir -p \$dldir~
+      $install_prog $dir/$dlname \$dldir/$dlname~
+      chmod a+x \$dldir/$dlname~
+      if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+        eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+      fi'
+    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+      dlpath=$dir/\$dldll~
+       $RM \$dlpath'
+    shlibpath_overrides_runpath=yes
+
+    case $host_os in
+    cygwin*)
+      # Cygwin DLLs use 'cyg' prefix rather than 'lib'
+      soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext'
+m4_if([$1], [],[
+      sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"])
+      ;;
+    mingw* | cegcc*)
+      # MinGW DLLs use traditional 'lib' prefix
+      soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext'
+      ;;
+    pw32*)
+      # pw32 DLLs use 'pw' prefix rather than 'lib'
+      library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext'
+      ;;
+    esac
+    dynamic_linker='Win32 ld.exe'
+    ;;
+
+  *,cl*)
+    # Native MSVC
+    libname_spec='$name'
+    soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext'
+    library_names_spec='$libname.dll.lib'
+
+    case $build_os in
+    mingw*)
+      sys_lib_search_path_spec=
+      lt_save_ifs=$IFS
+      IFS=';'
+      for lt_path in $LIB
+      do
+        IFS=$lt_save_ifs
+        # Let DOS variable expansion print the short 8.3 style file name.
+        lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
+        sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
+      done
+      IFS=$lt_save_ifs
+      # Convert to MSYS style.
+      sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'`
+      ;;
+    cygwin*)
+      # Convert to unix form, then to dos form, then back to unix form
+      # but this time dos style (no spaces!) so that the unix form looks
+      # like /cygdrive/c/PROGRA~1:/cygdr...
+      sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
+      sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
+      sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+      ;;
+    *)
+      sys_lib_search_path_spec=$LIB
+      if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then
+        # It is most probably a Windows format PATH.
+        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+      else
+        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+      fi
+      # FIXME: find the short name or the path components, as spaces are
+      # common. (e.g. "Program Files" -> "PROGRA~1")
+      ;;
+    esac
+
+    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+    postinstall_cmds='base_file=`basename \$file`~
+      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
+      dldir=$destdir/`dirname \$dlpath`~
+      test -d \$dldir || mkdir -p \$dldir~
+      $install_prog $dir/$dlname \$dldir/$dlname'
+    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+      dlpath=$dir/\$dldll~
+       $RM \$dlpath'
+    shlibpath_overrides_runpath=yes
+    dynamic_linker='Win32 link.exe'
+    ;;
+
+  *)
+    # Assume MSVC wrapper
+    library_names_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext $libname.lib'
+    dynamic_linker='Win32 ld.exe'
+    ;;
+  esac
+  # FIXME: first we should search . and the directory the executable is in
+  shlibpath_var=PATH
+  ;;
+
+darwin* | rhapsody*)
+  dynamic_linker="$host_os dyld"
+  version_type=darwin
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$major$shared_ext $libname$shared_ext'
+  soname_spec='$libname$release$major$shared_ext'
+  shlibpath_overrides_runpath=yes
+  shlibpath_var=DYLD_LIBRARY_PATH
+  shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
+m4_if([$1], [],[
+  sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"])
+  sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib'
+  ;;
+
+dgux*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  ;;
+
+freebsd* | dragonfly*)
+  # DragonFly does not have aout.  When/if they implement a new
+  # versioning mechanism, adjust this.
+  if test -x /usr/bin/objformat; then
+    objformat=`/usr/bin/objformat`
+  else
+    case $host_os in
+    freebsd[[23]].*) objformat=aout ;;
+    *) objformat=elf ;;
+    esac
+  fi
+  version_type=freebsd-$objformat
+  case $version_type in
+    freebsd-elf*)
+      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+      soname_spec='$libname$release$shared_ext$major'
+      need_version=no
+      need_lib_prefix=no
+      ;;
+    freebsd-*)
+      library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+      need_version=yes
+      ;;
+  esac
+  shlibpath_var=LD_LIBRARY_PATH
+  case $host_os in
+  freebsd2.*)
+    shlibpath_overrides_runpath=yes
+    ;;
+  freebsd3.[[01]]* | freebsdelf3.[[01]]*)
+    shlibpath_overrides_runpath=yes
+    hardcode_into_libs=yes
+    ;;
+  freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \
+  freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1)
+    shlibpath_overrides_runpath=no
+    hardcode_into_libs=yes
+    ;;
+  *) # from 4.6 on, and DragonFly
+    shlibpath_overrides_runpath=yes
+    hardcode_into_libs=yes
+    ;;
+  esac
+  ;;
+
+haiku*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  dynamic_linker="$host_os runtime_loader"
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
+  hardcode_into_libs=yes
+  ;;
+
+hpux9* | hpux10* | hpux11*)
+  # Give a soname corresponding to the major version so that dld.sl refuses to
+  # link against other versions.
+  version_type=sunos
+  need_lib_prefix=no
+  need_version=no
+  case $host_cpu in
+  ia64*)
+    shrext_cmds='.so'
+    hardcode_into_libs=yes
+    dynamic_linker="$host_os dld.so"
+    shlibpath_var=LD_LIBRARY_PATH
+    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+    soname_spec='$libname$release$shared_ext$major'
+    if test 32 = "$HPUX_IA64_MODE"; then
+      sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
+      sys_lib_dlsearch_path_spec=/usr/lib/hpux32
+    else
+      sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
+      sys_lib_dlsearch_path_spec=/usr/lib/hpux64
+    fi
+    ;;
+  hppa*64*)
+    shrext_cmds='.sl'
+    hardcode_into_libs=yes
+    dynamic_linker="$host_os dld.sl"
+    shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
+    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+    soname_spec='$libname$release$shared_ext$major'
+    sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
+    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+    ;;
+  *)
+    shrext_cmds='.sl'
+    dynamic_linker="$host_os dld.sl"
+    shlibpath_var=SHLIB_PATH
+    shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+    soname_spec='$libname$release$shared_ext$major'
+    ;;
+  esac
+  # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
+  postinstall_cmds='chmod 555 $lib'
+  # or fails outright, so override atomically:
+  install_override_mode=555
+  ;;
+
+interix[[3-9]]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  ;;
+
+irix5* | irix6* | nonstopux*)
+  case $host_os in
+    nonstopux*) version_type=nonstopux ;;
+    *)
+	if test yes = "$lt_cv_prog_gnu_ld"; then
+		version_type=linux # correct to gnu/linux during the next big refactor
+	else
+		version_type=irix
+	fi ;;
+  esac
+  need_lib_prefix=no
+  need_version=no
+  soname_spec='$libname$release$shared_ext$major'
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext'
+  case $host_os in
+  irix5* | nonstopux*)
+    libsuff= shlibsuff=
+    ;;
+  *)
+    case $LD in # libtool.m4 will add one of these switches to LD
+    *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ")
+      libsuff= shlibsuff= libmagic=32-bit;;
+    *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ")
+      libsuff=32 shlibsuff=N32 libmagic=N32;;
+    *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ")
+      libsuff=64 shlibsuff=64 libmagic=64-bit;;
+    *) libsuff= shlibsuff= libmagic=never-match;;
+    esac
+    ;;
+  esac
+  shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+  shlibpath_overrides_runpath=no
+  sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff"
+  sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff"
+  hardcode_into_libs=yes
+  ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux*oldld* | linux*aout* | linux*coff*)
+  dynamic_linker=no
+  ;;
+
+linux*android*)
+  version_type=none # Android doesn't support versioned libraries.
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext'
+  soname_spec='$libname$release$shared_ext'
+  finish_cmds=
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+
+  # This implies no fast_install, which is unacceptable.
+  # Some rework will be needed to allow for fast_install
+  # before this can be enabled.
+  hardcode_into_libs=yes
+
+  dynamic_linker='Android linker'
+  # Don't embed -rpath directories since the linker doesn't support them.
+  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+  ;;
+
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+
+  # Some binutils ld are patched to set DT_RUNPATH
+  AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath],
+    [lt_cv_shlibpath_overrides_runpath=no
+    save_LDFLAGS=$LDFLAGS
+    save_libdir=$libdir
+    eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \
+	 LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\""
+    AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])],
+      [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null],
+	 [lt_cv_shlibpath_overrides_runpath=yes])])
+    LDFLAGS=$save_LDFLAGS
+    libdir=$save_libdir
+    ])
+  shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
+
+  # This implies no fast_install, which is unacceptable.
+  # Some rework will be needed to allow for fast_install
+  # before this can be enabled.
+  hardcode_into_libs=yes
+
+  # Ideally, we could use ldconfig to report *all* directories which are
+  # searched for libraries, however this is still not possible.  Aside from not
+  # being certain /sbin/ldconfig is available, command
+  # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64,
+  # even though it is searched at run-time.  Try to do the best guess by
+  # appending ld.so.conf contents (and includes) to the search path.
+  if test -f /etc/ld.so.conf; then
+    lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[	 ]*hwcap[	 ]/d;s/[:,	]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
+    sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra"
+  fi
+
+  # We used to test for /lib/ld.so.1 and disable shared libraries on
+  # powerpc, because MkLinux only supported shared libraries with the
+  # GNU dynamic linker.  Since this was broken with cross compilers,
+  # most powerpc-linux boxes support dynamic linking these days and
+  # people can always --disable-shared, the test was removed, and we
+  # assume the GNU/Linux dynamic linker is in use.
+  dynamic_linker='GNU/Linux ld.so'
+  ;;
+
+netbsd*)
+  version_type=sunos
+  need_lib_prefix=no
+  need_version=no
+  if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+    finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+    dynamic_linker='NetBSD (a.out) ld.so'
+  else
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+    soname_spec='$libname$release$shared_ext$major'
+    dynamic_linker='NetBSD ld.elf_so'
+  fi
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  ;;
+
+newsos6)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  ;;
+
+*nto* | *qnx*)
+  version_type=qnx
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  dynamic_linker='ldqnx.so'
+  ;;
+
+openbsd* | bitrig*)
+  version_type=sunos
+  sys_lib_dlsearch_path_spec=/usr/lib
+  need_lib_prefix=no
+  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
+    need_version=no
+  else
+    need_version=yes
+  fi
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  ;;
+
+os2*)
+  libname_spec='$name'
+  version_type=windows
+  shrext_cmds=.dll
+  need_version=no
+  need_lib_prefix=no
+  # OS/2 can only load a DLL with a base name of 8 characters or less.
+  soname_spec='`test -n "$os2dllname" && libname="$os2dllname";
+    v=$($ECHO $release$versuffix | tr -d .-);
+    n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _);
+    $ECHO $n$v`$shared_ext'
+  library_names_spec='${libname}_dll.$libext'
+  dynamic_linker='OS/2 ld.exe'
+  shlibpath_var=BEGINLIBPATH
+  sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+  postinstall_cmds='base_file=`basename \$file`~
+    dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~
+    dldir=$destdir/`dirname \$dlpath`~
+    test -d \$dldir || mkdir -p \$dldir~
+    $install_prog $dir/$dlname \$dldir/$dlname~
+    chmod a+x \$dldir/$dlname~
+    if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+      eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+    fi'
+  postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~
+    dlpath=$dir/\$dldll~
+    $RM \$dlpath'
+  ;;
+
+osf3* | osf4* | osf5*)
+  version_type=osf
+  need_lib_prefix=no
+  need_version=no
+  soname_spec='$libname$release$shared_ext$major'
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  shlibpath_var=LD_LIBRARY_PATH
+  sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+  ;;
+
+rdos*)
+  dynamic_linker=no
+  ;;
+
+solaris*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  # ldd complains unless libraries are executable
+  postinstall_cmds='chmod +x $lib'
+  ;;
+
+sunos4*)
+  version_type=sunos
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+  finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  if test yes = "$with_gnu_ld"; then
+    need_lib_prefix=no
+  fi
+  need_version=yes
+  ;;
+
+sysv4 | sysv4.3*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  case $host_vendor in
+    sni)
+      shlibpath_overrides_runpath=no
+      need_lib_prefix=no
+      runpath_var=LD_RUN_PATH
+      ;;
+    siemens)
+      need_lib_prefix=no
+      ;;
+    motorola)
+      need_lib_prefix=no
+      need_version=no
+      shlibpath_overrides_runpath=no
+      sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+      ;;
+  esac
+  ;;
+
+sysv4*MP*)
+  if test -d /usr/nec; then
+    version_type=linux # correct to gnu/linux during the next big refactor
+    library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext'
+    soname_spec='$libname$shared_ext.$major'
+    shlibpath_var=LD_LIBRARY_PATH
+  fi
+  ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+  version_type=sco
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  if test yes = "$with_gnu_ld"; then
+    sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
+  else
+    sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
+    case $host_os in
+      sco3.2v5*)
+        sys_lib_search_path_spec="$sys_lib_search_path_spec /lib"
+	;;
+    esac
+  fi
+  sys_lib_dlsearch_path_spec='/usr/lib'
+  ;;
+
+tpf*)
+  # TPF is a cross-target only.  Preferred cross-host = GNU/Linux.
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  ;;
+
+uts4*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  ;;
+
+*)
+  dynamic_linker=no
+  ;;
+esac
+AC_MSG_RESULT([$dynamic_linker])
+test no = "$dynamic_linker" && can_build_shared=no
+
+variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+if test yes = "$GCC"; then
+  variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+fi
+
+if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then
+  sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec
+fi
+
+if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then
+  sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec
+fi
+
+# remember unaugmented sys_lib_dlsearch_path content for libtool script decls...
+configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec
+
+# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code
+func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH"
+
+# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool
+configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH
+
+_LT_DECL([], [variables_saved_for_relink], [1],
+    [Variables whose values should be saved in libtool wrapper scripts and
+    restored at link time])
+_LT_DECL([], [need_lib_prefix], [0],
+    [Do we need the "lib" prefix for modules?])
+_LT_DECL([], [need_version], [0], [Do we need a version for libraries?])
+_LT_DECL([], [version_type], [0], [Library versioning type])
+_LT_DECL([], [runpath_var], [0],  [Shared library runtime path variable])
+_LT_DECL([], [shlibpath_var], [0],[Shared library path variable])
+_LT_DECL([], [shlibpath_overrides_runpath], [0],
+    [Is shlibpath searched before the hard-coded library search path?])
+_LT_DECL([], [libname_spec], [1], [Format of library name prefix])
+_LT_DECL([], [library_names_spec], [1],
+    [[List of archive names.  First name is the real one, the rest are links.
+    The last name is the one that the linker finds with -lNAME]])
+_LT_DECL([], [soname_spec], [1],
+    [[The coded name of the library, if different from the real name]])
+_LT_DECL([], [install_override_mode], [1],
+    [Permission mode override for installation of shared libraries])
+_LT_DECL([], [postinstall_cmds], [2],
+    [Command to use after installation of a shared archive])
+_LT_DECL([], [postuninstall_cmds], [2],
+    [Command to use after uninstallation of a shared archive])
+_LT_DECL([], [finish_cmds], [2],
+    [Commands used to finish a libtool library installation in a directory])
+_LT_DECL([], [finish_eval], [1],
+    [[As "finish_cmds", except a single script fragment to be evaled but
+    not shown]])
+_LT_DECL([], [hardcode_into_libs], [0],
+    [Whether we should hardcode library paths into libraries])
+_LT_DECL([], [sys_lib_search_path_spec], [2],
+    [Compile-time system search path for libraries])
+_LT_DECL([sys_lib_dlsearch_path_spec], [configure_time_dlsearch_path], [2],
+    [Detected run-time system search path for libraries])
+_LT_DECL([], [configure_time_lt_sys_library_path], [2],
+    [Explicit LT_SYS_LIBRARY_PATH set during ./configure time])
+])# _LT_SYS_DYNAMIC_LINKER
+
+
+# _LT_PATH_TOOL_PREFIX(TOOL)
+# --------------------------
+# find a file program that can recognize a shared library
+AC_DEFUN([_LT_PATH_TOOL_PREFIX],
+[m4_require([_LT_DECL_EGREP])dnl
+AC_MSG_CHECKING([for $1])
+AC_CACHE_VAL(lt_cv_path_MAGIC_CMD,
+[case $MAGIC_CMD in
+[[\\/*] |  ?:[\\/]*])
+  lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path.
+  ;;
+*)
+  lt_save_MAGIC_CMD=$MAGIC_CMD
+  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
+dnl $ac_dummy forces splitting on constant user-supplied paths.
+dnl POSIX.2 word splitting is done only on the output of word expansions,
+dnl not every word.  This closes a longstanding sh security hole.
+  ac_dummy="m4_if([$2], , $PATH, [$2])"
+  for ac_dir in $ac_dummy; do
+    IFS=$lt_save_ifs
+    test -z "$ac_dir" && ac_dir=.
+    if test -f "$ac_dir/$1"; then
+      lt_cv_path_MAGIC_CMD=$ac_dir/"$1"
+      if test -n "$file_magic_test_file"; then
+	case $deplibs_check_method in
+	"file_magic "*)
+	  file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
+	  MAGIC_CMD=$lt_cv_path_MAGIC_CMD
+	  if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+	    $EGREP "$file_magic_regex" > /dev/null; then
+	    :
+	  else
+	    cat <<_LT_EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such.  This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem.  Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+_LT_EOF
+	  fi ;;
+	esac
+      fi
+      break
+    fi
+  done
+  IFS=$lt_save_ifs
+  MAGIC_CMD=$lt_save_MAGIC_CMD
+  ;;
+esac])
+MAGIC_CMD=$lt_cv_path_MAGIC_CMD
+if test -n "$MAGIC_CMD"; then
+  AC_MSG_RESULT($MAGIC_CMD)
+else
+  AC_MSG_RESULT(no)
+fi
+_LT_DECL([], [MAGIC_CMD], [0],
+	 [Used to examine libraries when file_magic_cmd begins with "file"])dnl
+])# _LT_PATH_TOOL_PREFIX
+
+# Old name:
+AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], [])
+
+
+# _LT_PATH_MAGIC
+# --------------
+# find a file program that can recognize a shared library
+m4_defun([_LT_PATH_MAGIC],
+[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH)
+if test -z "$lt_cv_path_MAGIC_CMD"; then
+  if test -n "$ac_tool_prefix"; then
+    _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH)
+  else
+    MAGIC_CMD=:
+  fi
+fi
+])# _LT_PATH_MAGIC
+
+
+# LT_PATH_LD
+# ----------
+# find the pathname to the GNU or non-GNU linker
+AC_DEFUN([LT_PATH_LD],
+[AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_PROG_ECHO_BACKSLASH])dnl
+
+AC_ARG_WITH([gnu-ld],
+    [AS_HELP_STRING([--with-gnu-ld],
+	[assume the C compiler uses GNU ld @<:@default=no@:>@])],
+    [test no = "$withval" || with_gnu_ld=yes],
+    [with_gnu_ld=no])dnl
+
+ac_prog=ld
+if test yes = "$GCC"; then
+  # Check if gcc -print-prog-name=ld gives a path.
+  AC_MSG_CHECKING([for ld used by $CC])
+  case $host in
+  *-*-mingw*)
+    # gcc leaves a trailing carriage return, which upsets mingw
+    ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+  *)
+    ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+  esac
+  case $ac_prog in
+    # Accept absolute paths.
+    [[\\/]]* | ?:[[\\/]]*)
+      re_direlt='/[[^/]][[^/]]*/\.\./'
+      # Canonicalize the pathname of ld
+      ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+      while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+	ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+      done
+      test -z "$LD" && LD=$ac_prog
+      ;;
+  "")
+    # If it fails, then pretend we aren't using GCC.
+    ac_prog=ld
+    ;;
+  *)
+    # If it is relative, then search for the first ld in PATH.
+    with_gnu_ld=unknown
+    ;;
+  esac
+elif test yes = "$with_gnu_ld"; then
+  AC_MSG_CHECKING([for GNU ld])
+else
+  AC_MSG_CHECKING([for non-GNU ld])
+fi
+AC_CACHE_VAL(lt_cv_path_LD,
+[if test -z "$LD"; then
+  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
+  for ac_dir in $PATH; do
+    IFS=$lt_save_ifs
+    test -z "$ac_dir" && ac_dir=.
+    if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+      lt_cv_path_LD=$ac_dir/$ac_prog
+      # Check to see if the program is GNU ld.  I'd rather use --version,
+      # but apparently some variants of GNU ld only accept -v.
+      # Break only if it was the GNU/non-GNU ld that we prefer.
+      case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+      *GNU* | *'with BFD'*)
+	test no != "$with_gnu_ld" && break
+	;;
+      *)
+	test yes != "$with_gnu_ld" && break
+	;;
+      esac
+    fi
+  done
+  IFS=$lt_save_ifs
+else
+  lt_cv_path_LD=$LD # Let the user override the test with a path.
+fi])
+LD=$lt_cv_path_LD
+if test -n "$LD"; then
+  AC_MSG_RESULT($LD)
+else
+  AC_MSG_RESULT(no)
+fi
+test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH])
+_LT_PATH_LD_GNU
+AC_SUBST([LD])
+
+_LT_TAGDECL([], [LD], [1], [The linker used to build libraries])
+])# LT_PATH_LD
+
+# Old names:
+AU_ALIAS([AM_PROG_LD], [LT_PATH_LD])
+AU_ALIAS([AC_PROG_LD], [LT_PATH_LD])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_PROG_LD], [])
+dnl AC_DEFUN([AC_PROG_LD], [])
+
+
+# _LT_PATH_LD_GNU
+# ---------------
+m4_defun([_LT_PATH_LD_GNU],
+[AC_CACHE_CHECK([if the linker ($LD) is GNU ld], lt_cv_prog_gnu_ld,
+[# I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+  lt_cv_prog_gnu_ld=yes
+  ;;
+*)
+  lt_cv_prog_gnu_ld=no
+  ;;
+esac])
+with_gnu_ld=$lt_cv_prog_gnu_ld
+])# _LT_PATH_LD_GNU
+
+
+# _LT_CMD_RELOAD
+# --------------
+# find reload flag for linker
+#   -- PORTME Some linkers may need a different reload flag.
+m4_defun([_LT_CMD_RELOAD],
+[AC_CACHE_CHECK([for $LD option to reload object files],
+  lt_cv_ld_reload_flag,
+  [lt_cv_ld_reload_flag='-r'])
+reload_flag=$lt_cv_ld_reload_flag
+case $reload_flag in
+"" | " "*) ;;
+*) reload_flag=" $reload_flag" ;;
+esac
+reload_cmds='$LD$reload_flag -o $output$reload_objs'
+case $host_os in
+  cygwin* | mingw* | pw32* | cegcc*)
+    if test yes != "$GCC"; then
+      reload_cmds=false
+    fi
+    ;;
+  darwin*)
+    if test yes = "$GCC"; then
+      reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs'
+    else
+      reload_cmds='$LD$reload_flag -o $output$reload_objs'
+    fi
+    ;;
+esac
+_LT_TAGDECL([], [reload_flag], [1], [How to create reloadable object files])dnl
+_LT_TAGDECL([], [reload_cmds], [2])dnl
+])# _LT_CMD_RELOAD
+
+
+# _LT_PATH_DD
+# -----------
+# find a working dd
+m4_defun([_LT_PATH_DD],
+[AC_CACHE_CHECK([for a working dd], [ac_cv_path_lt_DD],
+[printf 0123456789abcdef0123456789abcdef >conftest.i
+cat conftest.i conftest.i >conftest2.i
+: ${lt_DD:=$DD}
+AC_PATH_PROGS_FEATURE_CHECK([lt_DD], [dd],
+[if "$ac_path_lt_DD" bs=32 count=1 <conftest2.i >conftest.out 2>/dev/null; then
+  cmp -s conftest.i conftest.out \
+  && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=:
+fi])
+rm -f conftest.i conftest2.i conftest.out])
+])# _LT_PATH_DD
+
+
+# _LT_CMD_TRUNCATE
+# ----------------
+# find command to truncate a binary pipe
+m4_defun([_LT_CMD_TRUNCATE],
+[m4_require([_LT_PATH_DD])
+AC_CACHE_CHECK([how to truncate binary pipes], [lt_cv_truncate_bin],
+[printf 0123456789abcdef0123456789abcdef >conftest.i
+cat conftest.i conftest.i >conftest2.i
+lt_cv_truncate_bin=
+if "$ac_cv_path_lt_DD" bs=32 count=1 <conftest2.i >conftest.out 2>/dev/null; then
+  cmp -s conftest.i conftest.out \
+  && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1"
+fi
+rm -f conftest.i conftest2.i conftest.out
+test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q"])
+_LT_DECL([lt_truncate_bin], [lt_cv_truncate_bin], [1],
+  [Command to truncate a binary pipe])
+])# _LT_CMD_TRUNCATE
+
+
+# _LT_CHECK_MAGIC_METHOD
+# ----------------------
+# how to check for library dependencies
+#  -- PORTME fill in with the dynamic library characteristics
+m4_defun([_LT_CHECK_MAGIC_METHOD],
+[m4_require([_LT_DECL_EGREP])
+m4_require([_LT_DECL_OBJDUMP])
+AC_CACHE_CHECK([how to recognize dependent libraries],
+lt_cv_deplibs_check_method,
+[lt_cv_file_magic_cmd='$MAGIC_CMD'
+lt_cv_file_magic_test_file=
+lt_cv_deplibs_check_method='unknown'
+# Need to set the preceding variable on all platforms that support
+# interlibrary dependencies.
+# 'none' -- dependencies not supported.
+# 'unknown' -- same as none, but documents that we really don't know.
+# 'pass_all' -- all dependencies passed with no checks.
+# 'test_compile' -- check by making test program.
+# 'file_magic [[regex]]' -- check by looking for files in library path
+# that responds to the $file_magic_cmd with a given extended regex.
+# If you have 'file' or equivalent on your system and you're not sure
+# whether 'pass_all' will *always* work, you probably want this one.
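+#
+# Illustrative note: with 'file_magic', lt_cv_file_magic_cmd is the command
+# whose output is matched against the regex, and lt_cv_file_magic_test_file
+# names a known shared library that _LT_PATH_TOOL_PREFIX runs through that
+# command to sanity-check the pattern.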
+
+case $host_os in
+aix[[4-9]]*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+beos*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+bsdi[[45]]*)
+  lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)'
+  lt_cv_file_magic_cmd='/usr/bin/file -L'
+  lt_cv_file_magic_test_file=/shlib/libc.so
+  ;;
+
+cygwin*)
+  # func_win32_libid is a shell function defined in ltmain.sh
+  lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+  lt_cv_file_magic_cmd='func_win32_libid'
+  ;;
+
+mingw* | pw32*)
+  # Base MSYS/MinGW do not provide the 'file' command needed by
+  # func_win32_libid shell function, so use a weaker test based on 'objdump',
+  # unless we find 'file', for example because we are cross-compiling.
+  if ( file / ) >/dev/null 2>&1; then
+    lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+    lt_cv_file_magic_cmd='func_win32_libid'
+  else
+    # Keep this pattern in sync with the one in func_win32_libid.
+    lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)'
+    lt_cv_file_magic_cmd='$OBJDUMP -f'
+  fi
+  ;;
+
+cegcc*)
+  # use the weaker test based on 'objdump'. See mingw*.
+  lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?'
+  lt_cv_file_magic_cmd='$OBJDUMP -f'
+  ;;
+
+darwin* | rhapsody*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+freebsd* | dragonfly*)
+  if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+    case $host_cpu in
+    i*86 )
+      # Not sure whether the presence of OpenBSD here was a mistake.
+      # Let's accept both of them until this is cleared up.
+      lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library'
+      lt_cv_file_magic_cmd=/usr/bin/file
+      lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+      ;;
+    esac
+  else
+    lt_cv_deplibs_check_method=pass_all
+  fi
+  ;;
+
+haiku*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+hpux10.20* | hpux11*)
+  lt_cv_file_magic_cmd=/usr/bin/file
+  case $host_cpu in
+  ia64*)
+    lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64'
+    lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so
+    ;;
+  hppa*64*)
+    [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]']
+    lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl
+    ;;
+  *)
+    lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library'
+    lt_cv_file_magic_test_file=/usr/lib/libc.sl
+    ;;
+  esac
+  ;;
+
+interix[[3-9]]*)
+  # PIC code is broken on Interix 3.x, which is why we use |\.a rather than |_pic\.a here
+  lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$'
+  ;;
+
+irix5* | irix6* | nonstopux*)
+  case $LD in
+  *-32|*"-32 ") libmagic=32-bit;;
+  *-n32|*"-n32 ") libmagic=N32;;
+  *-64|*"-64 ") libmagic=64-bit;;
+  *) libmagic=never-match;;
+  esac
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+netbsd*)
+  if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+    lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$'
+  else
+    lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$'
+  fi
+  ;;
+
+newos6*)
+  lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)'
+  lt_cv_file_magic_cmd=/usr/bin/file
+  lt_cv_file_magic_test_file=/usr/lib/libnls.so
+  ;;
+
+*nto* | *qnx*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+openbsd* | bitrig*)
+  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
+    lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$'
+  else
+    lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$'
+  fi
+  ;;
+
+osf3* | osf4* | osf5*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+rdos*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+solaris*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+sysv4 | sysv4.3*)
+  case $host_vendor in
+  motorola)
+    lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]'
+    lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*`
+    ;;
+  ncr)
+    lt_cv_deplibs_check_method=pass_all
+    ;;
+  sequent)
+    lt_cv_file_magic_cmd='/bin/file'
+    lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )'
+    ;;
+  sni)
+    lt_cv_file_magic_cmd='/bin/file'
+    lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib"
+    lt_cv_file_magic_test_file=/lib/libc.so
+    ;;
+  siemens)
+    lt_cv_deplibs_check_method=pass_all
+    ;;
+  pc)
+    lt_cv_deplibs_check_method=pass_all
+    ;;
+  esac
+  ;;
+
+tpf*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+os2*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+esac
+])
+
+file_magic_glob=
+want_nocaseglob=no
+if test "$build" = "$host"; then
+  case $host_os in
+  mingw* | pw32*)
+    if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then
+      want_nocaseglob=yes
+    else
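+      # Illustrative note: the generated sed script rewrites every letter in
+      # a library-name glob into a two-character bracket expression matching
+      # both its lower- and upper-case form, giving case-insensitive matching
+      # when the shell offers no nocaseglob option.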
+      file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"`
+    fi
+    ;;
+  esac
+fi
+
+file_magic_cmd=$lt_cv_file_magic_cmd
+deplibs_check_method=$lt_cv_deplibs_check_method
+test -z "$deplibs_check_method" && deplibs_check_method=unknown
+
+_LT_DECL([], [deplibs_check_method], [1],
+    [Method to check whether dependent libraries are shared objects])
+_LT_DECL([], [file_magic_cmd], [1],
+    [Command to use when deplibs_check_method = "file_magic"])
+_LT_DECL([], [file_magic_glob], [1],
+    [How to find potential files when deplibs_check_method = "file_magic"])
+_LT_DECL([], [want_nocaseglob], [1],
+    [Find potential files using nocaseglob when deplibs_check_method = "file_magic"])
+])# _LT_CHECK_MAGIC_METHOD
+
+
+# LT_PATH_NM
+# ----------
+# find the pathname to a BSD- or MS-compatible name lister
+AC_DEFUN([LT_PATH_NM],
+[AC_REQUIRE([AC_PROG_CC])dnl
+AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM,
+[if test -n "$NM"; then
+  # Let the user override the test.
+  lt_cv_path_NM=$NM
+else
+  lt_nm_to_check=${ac_tool_prefix}nm
+  if test -n "$ac_tool_prefix" && test "$build" = "$host"; then
+    lt_nm_to_check="$lt_nm_to_check nm"
+  fi
+  for lt_tmp_nm in $lt_nm_to_check; do
+    lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
+    for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do
+      IFS=$lt_save_ifs
+      test -z "$ac_dir" && ac_dir=.
+      tmp_nm=$ac_dir/$lt_tmp_nm
+      if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then
+	# Check to see if the nm accepts a BSD-compat flag.
+	# Adding the 'sed 1q' prevents false positives on HP-UX, which says:
+	#   nm: unknown option "B" ignored
+	# Tru64's nm complains that /dev/null is an invalid object file
+	# MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty
+	case $build_os in
+	mingw*) lt_bad_file=conftest.nm/nofile ;;
+	*) lt_bad_file=/dev/null ;;
+	esac
+	case `"$tmp_nm" -B $lt_bad_file 2>&1 | sed '1q'` in
+	*$lt_bad_file* | *'Invalid file or object type'*)
+	  lt_cv_path_NM="$tmp_nm -B"
+	  break 2
+	  ;;
+	*)
+	  case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in
+	  */dev/null*)
+	    lt_cv_path_NM="$tmp_nm -p"
+	    break 2
+	    ;;
+	  *)
+	    lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but
+	    continue # so that we can try to find one that supports BSD flags
+	    ;;
+	  esac
+	  ;;
+	esac
+      fi
+    done
+    IFS=$lt_save_ifs
+  done
+  : ${lt_cv_path_NM=no}
+fi])
+if test no != "$lt_cv_path_NM"; then
+  NM=$lt_cv_path_NM
+else
+  # Didn't find any BSD compatible name lister, look for dumpbin.
+  if test -n "$DUMPBIN"; then :
+    # Let the user override the test.
+  else
+    AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :)
+    case `$DUMPBIN -symbols -headers /dev/null 2>&1 | sed '1q'` in
+    *COFF*)
+      DUMPBIN="$DUMPBIN -symbols -headers"
+      ;;
+    *)
+      DUMPBIN=:
+      ;;
+    esac
+  fi
+  AC_SUBST([DUMPBIN])
+  if test : != "$DUMPBIN"; then
+    NM=$DUMPBIN
+  fi
+fi
+test -z "$NM" && NM=nm
+AC_SUBST([NM])
+_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl
+
+AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface],
+  [lt_cv_nm_interface="BSD nm"
+  echo "int some_variable = 0;" > conftest.$ac_ext
+  (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD)
+  (eval "$ac_compile" 2>conftest.err)
+  cat conftest.err >&AS_MESSAGE_LOG_FD
+  (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD)
+  (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
+  cat conftest.err >&AS_MESSAGE_LOG_FD
+  (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD)
+  cat conftest.out >&AS_MESSAGE_LOG_FD
+  if $GREP 'External.*some_variable' conftest.out > /dev/null; then
+    lt_cv_nm_interface="MS dumpbin"
+  fi
+  rm -f conftest*])
+])# LT_PATH_NM
+
+# Old names:
+AU_ALIAS([AM_PROG_NM], [LT_PATH_NM])
+AU_ALIAS([AC_PROG_NM], [LT_PATH_NM])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_PROG_NM], [])
+dnl AC_DEFUN([AC_PROG_NM], [])
+
+# _LT_CHECK_SHAREDLIB_FROM_LINKLIB
+# --------------------------------
+# how to determine the name of the shared library
+# associated with a specific link library.
+#  -- PORTME fill in with the dynamic library characteristics
+m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB],
+[m4_require([_LT_DECL_EGREP])
+m4_require([_LT_DECL_OBJDUMP])
+m4_require([_LT_DECL_DLLTOOL])
+AC_CACHE_CHECK([how to associate runtime and link libraries],
+lt_cv_sharedlib_from_linklib_cmd,
+[lt_cv_sharedlib_from_linklib_cmd='unknown'
+
+case $host_os in
+cygwin* | mingw* | pw32* | cegcc*)
+  # two different shell functions defined in ltmain.sh;
+  # decide which one to use based on capabilities of $DLLTOOL
+  case `$DLLTOOL --help 2>&1` in
+  *--identify-strict*)
+    lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib
+    ;;
+  *)
+    lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback
+    ;;
+  esac
+  ;;
+*)
+  # fallback: assume linklib IS sharedlib
+  lt_cv_sharedlib_from_linklib_cmd=$ECHO
+  ;;
+esac
+])
+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd
+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO
+
+_LT_DECL([], [sharedlib_from_linklib_cmd], [1],
+    [Command to associate shared and link libraries])
+])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB
+
+
+# _LT_PATH_MANIFEST_TOOL
+# ----------------------
+# locate the manifest tool
+m4_defun([_LT_PATH_MANIFEST_TOOL],
+[AC_CHECK_TOOL(MANIFEST_TOOL, mt, :)
+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt
+AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool],
+  [lt_cv_path_mainfest_tool=no
+  echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD
+  $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out
+  cat conftest.err >&AS_MESSAGE_LOG_FD
+  if $GREP 'Manifest Tool' conftest.out > /dev/null; then
+    lt_cv_path_mainfest_tool=yes
+  fi
+  rm -f conftest*])
+if test yes != "$lt_cv_path_mainfest_tool"; then
+  MANIFEST_TOOL=:
+fi
+_LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl
+])# _LT_PATH_MANIFEST_TOOL
+
+
+# _LT_DLL_DEF_P([FILE])
+# ---------------------
+# True iff FILE is a Windows DLL '.def' file.
+# Keep in sync with func_dll_def_p in the libtool script
+AC_DEFUN([_LT_DLL_DEF_P],
+[dnl
+  test DEF = "`$SED -n dnl
+    -e '\''s/^[[	 ]]*//'\'' dnl Strip leading whitespace
+    -e '\''/^\(;.*\)*$/d'\'' dnl      Delete empty lines and comments
+    -e '\''s/^\(EXPORTS\|LIBRARY\)\([[	 ]].*\)*$/DEF/p'\'' dnl
+    -e q dnl                          Only consider the first "real" line
+    $1`" dnl
+])# _LT_DLL_DEF_P
+
+
+# LT_LIB_M
+# --------
+# check for math library
+AC_DEFUN([LT_LIB_M],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+LIBM=
+case $host in
+*-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*)
+  # These systems don't have libm, or don't need it
+  ;;
+*-ncr-sysv4.3*)
+  AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM=-lmw)
+  AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm")
+  ;;
+*)
+  AC_CHECK_LIB(m, cos, LIBM=-lm)
+  ;;
+esac
+AC_SUBST([LIBM])
+])# LT_LIB_M
+
+# Old name:
+AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_CHECK_LIBM], [])
+
+
+# _LT_COMPILER_NO_RTTI([TAGNAME])
+# -------------------------------
+m4_defun([_LT_COMPILER_NO_RTTI],
+[m4_require([_LT_TAG_COMPILER])dnl
+
+_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=
+
+if test yes = "$GCC"; then
+  case $cc_basename in
+  nvcc*)
+    _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;;
+  *)
+    _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;;
+  esac
+
+  _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions],
+    lt_cv_prog_compiler_rtti_exceptions,
+    [-fno-rtti -fno-exceptions], [],
+    [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"])
+fi
+_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1],
+	[Compiler flag to turn off builtin functions])
+])# _LT_COMPILER_NO_RTTI
+
+
+# _LT_CMD_GLOBAL_SYMBOLS
+# ----------------------
+m4_defun([_LT_CMD_GLOBAL_SYMBOLS],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_PROG_AWK])dnl
+AC_REQUIRE([LT_PATH_NM])dnl
+AC_REQUIRE([LT_PATH_LD])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_TAG_COMPILER])dnl
+
+# Check for command to grab the raw symbol name followed by C symbol from nm.
+AC_MSG_CHECKING([command to parse $NM output from $compiler object])
+AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe],
+[
+# These are sane defaults that work on at least a few old systems.
+# [They come from Ultrix.  What could be older than Ultrix?!! ;)]
+
+# Character class describing NM global symbol codes.
+symcode='[[BCDEGRST]]'
+
+# Regexp to match symbols that can be accessed directly from C.
+sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)'
+
+# Define system-specific variables.
+case $host_os in
+aix*)
+  symcode='[[BCDT]]'
+  ;;
+cygwin* | mingw* | pw32* | cegcc*)
+  symcode='[[ABCDGISTW]]'
+  ;;
+hpux*)
+  if test ia64 = "$host_cpu"; then
+    symcode='[[ABCDEGRST]]'
+  fi
+  ;;
+irix* | nonstopux*)
+  symcode='[[BCDEGRST]]'
+  ;;
+osf*)
+  symcode='[[BCDEGQRST]]'
+  ;;
+solaris*)
+  symcode='[[BDRT]]'
+  ;;
+sco3.2v5*)
+  symcode='[[DT]]'
+  ;;
+sysv4.2uw2*)
+  symcode='[[DT]]'
+  ;;
+sysv5* | sco5v6* | unixware* | OpenUNIX*)
+  symcode='[[ABDT]]'
+  ;;
+sysv4)
+  symcode='[[DFNSTU]]'
+  ;;
+esac
+
+# If we're using GNU nm, then use its standard symbol codes.
+case `$NM -V 2>&1` in
+*GNU* | *'with BFD'*)
+  symcode='[[ABCDGIRSTW]]' ;;
+esac
+
+if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+  # Gets list of data symbols to import.
+  lt_cv_sys_global_symbol_to_import="sed -n -e 's/^I .* \(.*\)$/\1/p'"
+  # Adjust the below global symbol transforms to fixup imported variables.
+  lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'"
+  lt_c_name_hook=" -e 's/^I .* \(.*\)$/  {\"\1\", (void *) 0},/p'"
+  lt_c_name_lib_hook="\
+  -e 's/^I .* \(lib.*\)$/  {\"\1\", (void *) 0},/p'\
+  -e 's/^I .* \(.*\)$/  {\"lib\1\", (void *) 0},/p'"
+else
+  # Disable hooks by default.
+  lt_cv_sys_global_symbol_to_import=
+  lt_cdecl_hook=
+  lt_c_name_hook=
+  lt_c_name_lib_hook=
+fi
+
+# Transform an extracted symbol line into a proper C declaration.
+# Some systems (esp. on ia64) link data and code symbols differently,
+# so use this general approach.
+lt_cv_sys_global_symbol_to_cdecl="sed -n"\
+$lt_cdecl_hook\
+" -e 's/^T .* \(.*\)$/extern int \1();/p'"\
+" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'"
+
+# Transform an extracted symbol line into symbol name and symbol address
+lt_cv_sys_global_symbol_to_c_name_address="sed -n"\
+$lt_c_name_hook\
+" -e 's/^: \(.*\) .*$/  {\"\1\", (void *) 0},/p'"\
+" -e 's/^$symcode$symcode* .* \(.*\)$/  {\"\1\", (void *) \&\1},/p'"
+
+# Transform an extracted symbol line into symbol name with lib prefix and
+# symbol address.
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n"\
+$lt_c_name_lib_hook\
+" -e 's/^: \(.*\) .*$/  {\"\1\", (void *) 0},/p'"\
+" -e 's/^$symcode$symcode* .* \(lib.*\)$/  {\"\1\", (void *) \&\1},/p'"\
+" -e 's/^$symcode$symcode* .* \(.*\)$/  {\"lib\1\", (void *) \&\1},/p'"
+
+# Handle CRLF in mingw tool chain
+opt_cr=
+case $build_os in
+mingw*)
+  opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp
+  ;;
+esac
+
+# Try without a prefix underscore, then with it.
+for ac_symprfx in "" "_"; do
+
+  # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol.
+  symxfrm="\\1 $ac_symprfx\\2 \\2"
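+  # Illustrative example (assuming typical BSD-style nm output): with
+  # ac_symprfx="_", the sed pipe below rewrites the line
+  # "0000000000000000 T _nm_test_func" as "T _nm_test_func nm_test_func",
+  # i.e. "<symbol code> <raw symbol> <C symbol>".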
+
+  # Write the raw and C identifiers.
+  if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+    # Fake it for dumpbin and say T for any non-static function,
+    # D for any global variable and I for any imported variable.
+    # Also find C++ and __fastcall symbols from MSVC++,
+    # which start with @ or ?.
+    lt_cv_sys_global_symbol_pipe="$AWK ['"\
+"     {last_section=section; section=\$ 3};"\
+"     /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\
+"     /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\
+"     /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\
+"     /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\
+"     /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\
+"     \$ 0!~/External *\|/{next};"\
+"     / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\
+"     {if(hide[section]) next};"\
+"     {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\
+"     {split(\$ 0,a,/\||\r/); split(a[2],s)};"\
+"     s[1]~/^[@?]/{print f,s[1],s[1]; next};"\
+"     s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\
+"     ' prfx=^$ac_symprfx]"
+  else
+    lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[	 ]]\($symcode$symcode*\)[[	 ]][[	 ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
+  fi
+  lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'"
+
+  # Check to see that the pipe works correctly.
+  pipe_works=no
+
+  rm -f conftest*
+  cat > conftest.$ac_ext <<_LT_EOF
+#ifdef __cplusplus
+extern "C" {
+#endif
+char nm_test_var;
+void nm_test_func(void);
+void nm_test_func(void){}
+#ifdef __cplusplus
+}
+#endif
+int main(){nm_test_var='a';nm_test_func();return(0);}
+_LT_EOF
+
+  if AC_TRY_EVAL(ac_compile); then
+    # Now try to grab the symbols.
+    nlist=conftest.nm
+    if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then
+      # Try sorting and uniquifying the output.
+      if sort "$nlist" | uniq > "$nlist"T; then
+	mv -f "$nlist"T "$nlist"
+      else
+	rm -f "$nlist"T
+      fi
+
+      # Make sure that we snagged all the symbols we need.
+      if $GREP ' nm_test_var$' "$nlist" >/dev/null; then
+	if $GREP ' nm_test_func$' "$nlist" >/dev/null; then
+	  cat <<_LT_EOF > conftest.$ac_ext
+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests.  */
+#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE
+/* DATA imports from DLLs on WIN32 can't be const, because runtime
+   relocations are performed -- see ld's documentation on pseudo-relocs.  */
+# define LT@&t@_DLSYM_CONST
+#elif defined __osf__
+/* This system does not cope well with relocations in const data.  */
+# define LT@&t@_DLSYM_CONST
+#else
+# define LT@&t@_DLSYM_CONST const
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+_LT_EOF
+	  # Now generate the symbol file.
+	  eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext'
+
+	  cat <<_LT_EOF >> conftest.$ac_ext
+
+/* The mapping between symbol names and symbols.  */
+LT@&t@_DLSYM_CONST struct {
+  const char *name;
+  void       *address;
+}
+lt__PROGRAM__LTX_preloaded_symbols[[]] =
+{
+  { "@PROGRAM@", (void *) 0 },
+_LT_EOF
+	  $SED "s/^$symcode$symcode* .* \(.*\)$/  {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext
+	  cat <<\_LT_EOF >> conftest.$ac_ext
+  {0, (void *) 0}
+};
+
+/* This works around a problem in the FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+  return lt__PROGRAM__LTX_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+_LT_EOF
+	  # Now try linking the two files.
+	  mv conftest.$ac_objext conftstm.$ac_objext
+	  lt_globsym_save_LIBS=$LIBS
+	  lt_globsym_save_CFLAGS=$CFLAGS
+	  LIBS=conftstm.$ac_objext
+	  CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)"
+	  if AC_TRY_EVAL(ac_link) && test -s conftest$ac_exeext; then
+	    pipe_works=yes
+	  fi
+	  LIBS=$lt_globsym_save_LIBS
+	  CFLAGS=$lt_globsym_save_CFLAGS
+	else
+	  echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD
+	fi
+      else
+	echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD
+      fi
+    else
+      echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD
+    fi
+  else
+    echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD
+    cat conftest.$ac_ext >&5
+  fi
+  rm -rf conftest* conftst*
+
+  # Do not use the global_symbol_pipe unless it works.
+  if test yes = "$pipe_works"; then
+    break
+  else
+    lt_cv_sys_global_symbol_pipe=
+  fi
+done
+])
+if test -z "$lt_cv_sys_global_symbol_pipe"; then
+  lt_cv_sys_global_symbol_to_cdecl=
+fi
+if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then
+  AC_MSG_RESULT(failed)
+else
+  AC_MSG_RESULT(ok)
+fi
+
+# Response file support.
+if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+  nm_file_list_spec='@'
+elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then
+  nm_file_list_spec='@'
+fi
+
+_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1],
+    [Take the output of nm and produce a listing of raw symbols and C names])
+_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1],
+    [Transform the output of nm in a proper C declaration])
+_LT_DECL([global_symbol_to_import], [lt_cv_sys_global_symbol_to_import], [1],
+    [Transform the output of nm into a list of symbols to manually relocate])
+_LT_DECL([global_symbol_to_c_name_address],
+    [lt_cv_sys_global_symbol_to_c_name_address], [1],
+    [Transform the output of nm in a C name address pair])
+_LT_DECL([global_symbol_to_c_name_address_lib_prefix],
+    [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1],
+    [Transform the output of nm in a C name address pair when lib prefix is needed])
+_LT_DECL([nm_interface], [lt_cv_nm_interface], [1],
+    [The name lister interface])
+_LT_DECL([], [nm_file_list_spec], [1],
+    [Specify filename containing input files for $NM])
+]) # _LT_CMD_GLOBAL_SYMBOLS
+
+
+# _LT_COMPILER_PIC([TAGNAME])
+# ---------------------------
+m4_defun([_LT_COMPILER_PIC],
+[m4_require([_LT_TAG_COMPILER])dnl
+_LT_TAGVAR(lt_prog_compiler_wl, $1)=
+_LT_TAGVAR(lt_prog_compiler_pic, $1)=
+_LT_TAGVAR(lt_prog_compiler_static, $1)=
+
+m4_if([$1], [CXX], [
+  # C++ specific cases for pic, static, wl, etc.
+  if test yes = "$GXX"; then
+    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+    _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+
+    case $host_os in
+    aix*)
+      # All AIX code is PIC.
+      if test ia64 = "$host_cpu"; then
+	# AIX 5 now supports IA64 processor
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      fi
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+        ;;
+      m68k)
+            # FIXME: we need at least 68020 code to build shared libraries, but
+            # adding the '-m68020' flag to GCC prevents building anything better,
+            # like '-m68040'.
+            _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4'
+        ;;
+      esac
+      ;;
+
+    beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+      # PIC is the default for these OSes.
+      ;;
+    mingw* | cygwin* | os2* | pw32* | cegcc*)
+      # This hack is so that the source file can tell whether it is being
+      # built for inclusion in a dll (and should export symbols for example).
+      # Although the cygwin gcc ignores -fPIC, still need this for old-style
+      # (--disable-auto-import) libraries
+      m4_if([$1], [GCJ], [],
+	[_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+      case $host_os in
+      os2*)
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static'
+	;;
+      esac
+      ;;
+    darwin* | rhapsody*)
+      # PIC is the default on this platform
+      # Common symbols not allowed in MH_DYLIB files
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common'
+      ;;
+    *djgpp*)
+      # DJGPP does not support shared libraries at all
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+      ;;
+    haiku*)
+      # PIC is the default for Haiku.
+      # The "-static" flag exists, but is broken.
+      _LT_TAGVAR(lt_prog_compiler_static, $1)=
+      ;;
+    interix[[3-9]]*)
+      # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+      # Instead, we relocate shared libraries at runtime.
+      ;;
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic
+      fi
+      ;;
+    hpux*)
+      # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+      # PA HP-UX.  On IA64 HP-UX, PIC is the default but the pic flag
+      # sets the default TLS model and affects inlining.
+      case $host_cpu in
+      hppa*64*)
+	;;
+      *)
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+	;;
+      esac
+      ;;
+    *qnx* | *nto*)
+      # QNX uses GNU C++, but we need to define the -shared option too,
+      # otherwise it will core dump.
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+      ;;
+    *)
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+      ;;
+    esac
+  else
+    case $host_os in
+      aix[[4-9]]*)
+	# All AIX code is PIC.
+	if test ia64 = "$host_cpu"; then
+	  # AIX 5 now supports IA64 processor
+	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	else
+	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp'
+	fi
+	;;
+      chorus*)
+	case $cc_basename in
+	cxch68*)
+	  # Green Hills C++ Compiler
+	  # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a"
+	  ;;
+	esac
+	;;
+      mingw* | cygwin* | os2* | pw32* | cegcc*)
+	# This hack is so that the source file can tell whether it is being
+	# built for inclusion in a dll (and should export symbols for example).
+	m4_if([$1], [GCJ], [],
+	  [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+	;;
+      dgux*)
+	case $cc_basename in
+	  ec++*)
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	    ;;
+	  ghcx*)
+	    # Green Hills C++ Compiler
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      freebsd* | dragonfly*)
+	# FreeBSD uses GNU C++
+	;;
+      hpux9* | hpux10* | hpux11*)
+	case $cc_basename in
+	  CC*)
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive'
+	    if test ia64 != "$host_cpu"; then
+	      _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
+	    fi
+	    ;;
+	  aCC*)
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive'
+	    case $host_cpu in
+	    hppa*64*|ia64*)
+	      # +Z the default
+	      ;;
+	    *)
+	      _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
+	      ;;
+	    esac
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      interix*)
+	# This is c89, which is MS Visual C++ (no shared libs)
+	# Anyone wants to do a port?
+	;;
+      irix5* | irix6* | nonstopux*)
+	case $cc_basename in
+	  CC*)
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+	    # CC pic flag -KPIC is the default.
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+	case $cc_basename in
+	  KCC*)
+	    # KAI C++ Compiler
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+	    ;;
+	  ecpc* )
+	    # old Intel C++ for x86_64, which still supported -KPIC.
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+	    ;;
+	  icpc* )
+	    # Intel C++, used to be incompatible with GCC.
+	    # ICC 10 doesn't accept -KPIC any more.
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+	    ;;
+	  pgCC* | pgcpp*)
+	    # Portland Group C++ compiler
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	    ;;
+	  cxx*)
+	    # Compaq C++
+	    # Make sure the PIC flag is empty.  It appears that all Alpha
+	    # Linux and Compaq Tru64 Unix objects are PIC.
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+	    ;;
+	  xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*)
+	    # IBM XL 8.0, 9.0 on PPC and BlueGene
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink'
+	    ;;
+	  *)
+	    case `$CC -V 2>&1 | sed 5q` in
+	    *Sun\ C*)
+	      # Sun C++ 5.9
+	      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+	      ;;
+	    esac
+	    ;;
+	esac
+	;;
+      lynxos*)
+	;;
+      m88k*)
+	;;
+      mvs*)
+	case $cc_basename in
+	  cxx*)
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      netbsd*)
+	;;
+      *qnx* | *nto*)
+        # QNX uses GNU C++, but we need to define the -shared option too,
+        # otherwise it will core dump.
+        _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+        ;;
+      osf3* | osf4* | osf5*)
+	case $cc_basename in
+	  KCC*)
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,'
+	    ;;
+	  RCC*)
+	    # Rational C++ 2.4.1
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+	    ;;
+	  cxx*)
+	    # Digital/Compaq C++
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    # Make sure the PIC flag is empty.  It appears that all Alpha
+	    # Linux and Compaq Tru64 Unix objects are PIC.
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      psos*)
+	;;
+      solaris*)
+	case $cc_basename in
+	  CC* | sunCC*)
+	    # Sun C++ 4.2, 5.x and Centerline C++
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+	    ;;
+	  gcx*)
+	    # Green Hills C++ Compiler
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      sunos4*)
+	case $cc_basename in
+	  CC*)
+	    # Sun C++ 4.x
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	    ;;
+	  lcc*)
+	    # Lucid
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+	case $cc_basename in
+	  CC*)
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	    ;;
+	esac
+	;;
+      tandem*)
+	case $cc_basename in
+	  NCC*)
+	    # NonStop-UX NCC 3.20
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      vxworks*)
+	;;
+      *)
+	_LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+	;;
+    esac
+  fi
+],
+[
+  if test yes = "$GCC"; then
+    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+    _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+
+    case $host_os in
+      aix*)
+      # All AIX code is PIC.
+      if test ia64 = "$host_cpu"; then
+	# AIX 5 now supports IA64 processor
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      fi
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+        ;;
+      m68k)
+            # FIXME: we need at least 68020 code to build shared libraries, but
+            # adding the '-m68020' flag to GCC prevents building anything better,
+            # like '-m68040'.
+            _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4'
+        ;;
+      esac
+      ;;
+
+    beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+      # PIC is the default for these OSes.
+      ;;
+
+    mingw* | cygwin* | pw32* | os2* | cegcc*)
+      # This hack is so that the source file can tell whether it is being
+      # built for inclusion in a dll (and should export symbols for example).
+      # Although the cygwin gcc ignores -fPIC, still need this for old-style
+      # (--disable-auto-import) libraries
+      m4_if([$1], [GCJ], [],
+	[_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+      case $host_os in
+      os2*)
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static'
+	;;
+      esac
+      ;;
+
+    darwin* | rhapsody*)
+      # PIC is the default on this platform
+      # Common symbols not allowed in MH_DYLIB files
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common'
+      ;;
+
+    haiku*)
+      # PIC is the default for Haiku.
+      # The "-static" flag exists, but is broken.
+      _LT_TAGVAR(lt_prog_compiler_static, $1)=
+      ;;
+
+    hpux*)
+      # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+      # PA HP-UX.  On IA64 HP-UX, PIC is the default but the pic flag
+      # sets the default TLS model and affects inlining.
+      case $host_cpu in
+      hppa*64*)
+	# +Z the default
+	;;
+      *)
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+	;;
+      esac
+      ;;
+
+    interix[[3-9]]*)
+      # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+      # Instead, we relocate shared libraries at runtime.
+      ;;
+
+    msdosdjgpp*)
+      # Just because we use GCC doesn't mean we suddenly get shared libraries
+      # on systems that don't support them.
+      _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+      enable_shared=no
+      ;;
+
+    *nto* | *qnx*)
+      # QNX uses GNU C++, but we need to define the -shared option too,
+      # otherwise it will core dump.
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+      ;;
+
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic
+      fi
+      ;;
+
+    *)
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+      ;;
+    esac
+
+    case $cc_basename in
+    nvcc*) # Cuda Compiler Driver 2.2
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker '
+      if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then
+        _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)"
+      fi
+      ;;
+    esac
+  else
+    # PORTME Check for flag to pass linker flags through the system compiler.
+    case $host_os in
+    aix*)
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+      if test ia64 = "$host_cpu"; then
+	# AIX 5 now supports IA64 processor
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      else
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp'
+      fi
+      ;;
+
+    darwin* | rhapsody*)
+      # PIC is the default on this platform
+      # Common symbols not allowed in MH_DYLIB files
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common'
+      case $cc_basename in
+      nagfor*)
+        # NAG Fortran compiler
+        _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,'
+        _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+        _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+        ;;
+      esac
+      ;;
+
+    mingw* | cygwin* | pw32* | os2* | cegcc*)
+      # This hack is so that the source file can tell whether it is being
+      # built for inclusion in a dll (and should export symbols for example).
+      m4_if([$1], [GCJ], [],
+	[_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+      case $host_os in
+      os2*)
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static'
+	;;
+      esac
+      ;;
+
+    hpux9* | hpux10* | hpux11*)
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+      # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but
+      # not for PA HP-UX.
+      case $host_cpu in
+      hppa*64*|ia64*)
+	# +Z the default
+	;;
+      *)
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
+	;;
+      esac
+      # Is there a better lt_prog_compiler_static that works with the bundled CC?
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive'
+      ;;
+
+    irix5* | irix6* | nonstopux*)
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+      # PIC (with -KPIC) is the default.
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+      ;;
+
+    linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+      case $cc_basename in
+      # old Intel for x86_64, which still supported -KPIC.
+      ecc*)
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+        ;;
+      # icc used to be incompatible with GCC.
+      # ICC 10 doesn't accept -KPIC any more.
+      icc* | ifort*)
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+        ;;
+      # Lahey Fortran 8.1.
+      lf95*)
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared'
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='--static'
+	;;
+      nagfor*)
+	# NAG Fortran compiler
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,'
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	;;
+      tcc*)
+	# Fabrice Bellard et al's Tiny C Compiler
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+	;;
+      pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*)
+        # Portland Group compilers (*not* the Pentium gcc compiler,
+	# which looks to be a dead project)
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+        ;;
+      ccc*)
+        _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+        # All Alpha code is PIC.
+        _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+        ;;
+      xl* | bgxl* | bgf* | mpixl*)
+	# IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic'
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink'
+	;;
+      *)
+	case `$CC -V 2>&1 | sed 5q` in
+	*Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*)
+	  # Sun Fortran 8.3 passes all unrecognized flags to the linker
+	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	  _LT_TAGVAR(lt_prog_compiler_wl, $1)=''
+	  ;;
+	*Sun\ F* | *Sun*Fortran*)
+	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	  _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+	  ;;
+	*Sun\ C*)
+	  # Sun C 5.9
+	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	  _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	  ;;
+        *Intel*\ [[CF]]*Compiler*)
+	  _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+	  ;;
+	*Portland\ Group*)
+	  _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
+	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	  ;;
+	esac
+	;;
+      esac
+      ;;
+
+    newsos6)
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      ;;
+
+    *nto* | *qnx*)
+      # QNX uses GNU C++, but we need to define the -shared option too,
+      # otherwise it will core dump.
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+      ;;
+
+    osf3* | osf4* | osf5*)
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+      # All OSF/1 code is PIC.
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+      ;;
+
+    rdos*)
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+      ;;
+
+    solaris*)
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      case $cc_basename in
+      f77* | f90* | f95* | sunf77* | sunf90* | sunf95*)
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';;
+      *)
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';;
+      esac
+      ;;
+
+    sunos4*)
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      ;;
+
+    sysv4 | sysv4.2uw2* | sysv4.3*)
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      ;;
+
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic'
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      fi
+      ;;
+
+    sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      ;;
+
+    unicos*)
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+      _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+      ;;
+
+    uts4*)
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      ;;
+
+    *)
+      _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+      ;;
+    esac
+  fi
+])
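+# Where PIC is supported, also append -DPIC (C and C++ tags only) so source code can detect PIC compilation via the preprocessor.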
+case $host_os in
+  # For platforms that do not support PIC, -DPIC is meaningless:
+  *djgpp*)
+    _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+    ;;
+  *)
+    _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])"
+    ;;
+esac
+
+AC_CACHE_CHECK([for $compiler option to produce PIC],
+  [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)],
+  [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)])
+_LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)
+
+#
+# Check to make sure the PIC flag actually works.
+#
+if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then
+  _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works],
+    [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)],
+    [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [],
+    [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in
+     "" | " "*) ;;
+     *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;;
+     esac],
+    [_LT_TAGVAR(lt_prog_compiler_pic, $1)=
+     _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no])
+fi
+_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1],
+	[Additional compiler flags for building library objects])
+
+_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1],
+	[How to pass a linker flag through the compiler])
+#
+# Check to make sure the static flag actually works.
+#
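+# (The eval below expands any $wl embedded in the static flag using the per-tag wl value.)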
+wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\"
+_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works],
+  _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1),
+  $lt_tmp_static_flag,
+  [],
+  [_LT_TAGVAR(lt_prog_compiler_static, $1)=])
+_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1],
+	[Compiler flag to prevent dynamic linking])
+])# _LT_COMPILER_PIC
+
+
+# _LT_LINKER_SHLIBS([TAGNAME])
+# ----------------------------
+# See if the linker supports building shared libraries.
+m4_defun([_LT_LINKER_SHLIBS],
+[AC_REQUIRE([LT_PATH_LD])dnl
+AC_REQUIRE([LT_PATH_NM])dnl
+m4_require([_LT_PATH_MANIFEST_TOOL])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl
+m4_require([_LT_TAG_COMPILER])dnl
+AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries])
+m4_if([$1], [CXX], [
+  _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+  _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*']
+  case $host_os in
+  aix[[4-9]]*)
+    # If we're using GNU nm, then we don't want the "-C" option.
+    # -C means demangle to GNU nm, but to AIX nm it means do not demangle.
+    # Without the "-l" option, or with the "-B" option, AIX nm treats
+    # weak defined symbols like other global defined symbols, whereas
+    # GNU nm marks them as "W".
+    # While the 'weak' keyword is ignored in the Export File, we need
+    # it in the Import File for the 'aix-soname' feature, so we have
+    # to replace the "-B" option with "-P" for AIX nm.
+    if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+      _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols'
+    else
+      _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols'
+    fi
+    ;;
+  pw32*)
+    _LT_TAGVAR(export_symbols_cmds, $1)=$ltdll_cmds
+    ;;
+  cygwin* | mingw* | cegcc*)
+    case $cc_basename in
+    cl*)
+      _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+      ;;
+    *)
+      _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols'
+      _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname']
+      ;;
+    esac
+    ;;
+  *)
+    _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+    ;;
+  esac
+], [
+  runpath_var=
+  _LT_TAGVAR(allow_undefined_flag, $1)=
+  _LT_TAGVAR(always_export_symbols, $1)=no
+  _LT_TAGVAR(archive_cmds, $1)=
+  _LT_TAGVAR(archive_expsym_cmds, $1)=
+  _LT_TAGVAR(compiler_needs_object, $1)=no
+  _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+  _LT_TAGVAR(export_dynamic_flag_spec, $1)=
+  _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+  _LT_TAGVAR(hardcode_automatic, $1)=no
+  _LT_TAGVAR(hardcode_direct, $1)=no
+  _LT_TAGVAR(hardcode_direct_absolute, $1)=no
+  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+  _LT_TAGVAR(hardcode_libdir_separator, $1)=
+  _LT_TAGVAR(hardcode_minus_L, $1)=no
+  _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
+  _LT_TAGVAR(inherit_rpath, $1)=no
+  _LT_TAGVAR(link_all_deplibs, $1)=unknown
+  _LT_TAGVAR(module_cmds, $1)=
+  _LT_TAGVAR(module_expsym_cmds, $1)=
+  _LT_TAGVAR(old_archive_from_new_cmds, $1)=
+  _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)=
+  _LT_TAGVAR(thread_safe_flag_spec, $1)=
+  _LT_TAGVAR(whole_archive_flag_spec, $1)=
+  # include_expsyms should be a list of space-separated symbols to be *always*
+  # included in the symbol list
+  _LT_TAGVAR(include_expsyms, $1)=
+  # exclude_expsyms can be an extended regexp of symbols to exclude;
+  # it will be wrapped by ' (' and ')$', so one must not match the beginning or
+  # end of line.  Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc',
+  # as well as any symbol that contains 'd'.
+  _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*']
+  # Although _GLOBAL_OFFSET_TABLE_ is a valid C symbol name, most a.out
+  # platforms (ab)use it in PIC code, but their linkers get confused if
+  # the symbol is explicitly referenced.  Since portable code cannot
+  # rely on this symbol name, it's probably fine to never include it in
+  # preloaded symbol tables.
+  # Exclude shared library initialization/finalization symbols.
+dnl Note also adjust exclude_expsyms for C++ above.
+  extract_expsyms_cmds=
+
+  case $host_os in
+  cygwin* | mingw* | pw32* | cegcc*)
+    # FIXME: the MSVC++ port hasn't been tested in a loooong time
+    # When not using gcc, we currently assume that we are using
+    # Microsoft Visual C++.
+    if test yes != "$GCC"; then
+      with_gnu_ld=no
+    fi
+    ;;
+  interix*)
+    # we just hope/assume this is gcc and not c89 (= MSVC++)
+    with_gnu_ld=yes
+    ;;
+  openbsd* | bitrig*)
+    with_gnu_ld=no
+    ;;
+  esac
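+  # (For any other system, keep the with_gnu_ld value determined by LT_PATH_LD.)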
+
+  _LT_TAGVAR(ld_shlibs, $1)=yes
+
+  # On some targets, GNU ld is compatible enough with the native linker
+  # that we're better off using the native interface for both.
+  lt_use_gnu_ld_interface=no
+  if test yes = "$with_gnu_ld"; then
+    case $host_os in
+      aix*)
+	# The AIX port of GNU ld has always aspired to compatibility
+	# with the native linker.  However, as the warning in the GNU ld
+	# block says, versions before 2.19.5* couldn't really create working
+	# shared libraries, regardless of the interface used.
+	case `$LD -v 2>&1` in
+	  *\ \(GNU\ Binutils\)\ 2.19.5*) ;;
+	  *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;;
+	  *\ \(GNU\ Binutils\)\ [[3-9]]*) ;;
+	  *)
+	    lt_use_gnu_ld_interface=yes
+	    ;;
+	esac
+	;;
+      *)
+	lt_use_gnu_ld_interface=yes
+	;;
+    esac
+  fi
+
+  if test yes = "$lt_use_gnu_ld_interface"; then
+    # If archive_cmds runs LD, not CC, wlarc should be empty
+    wlarc='$wl'
+
+    # Set some defaults for GNU ld with shared library support. These
+    # are reset later if shared libraries are not supported. Putting them
+    # here allows them to be overridden if necessary.
+    runpath_var=LD_RUN_PATH
+    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
+    # ancient GNU ld didn't support --whole-archive et al.
+    if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then
+      _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
+    else
+      _LT_TAGVAR(whole_archive_flag_spec, $1)=
+    fi
+    supports_anon_versioning=no
+    case `$LD -v | $SED -e 's/([^)]\+)\s\+//' 2>&1` in
+      *GNU\ gold*) supports_anon_versioning=yes ;;
+      *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11
+      *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
+      *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ...
+      *\ 2.11.*) ;; # other 2.11 versions
+      *) supports_anon_versioning=yes ;;
+    esac
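+    # supports_anon_versioning gates the anonymous version-script form of archive_expsym_cmds below.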
+
+    # See if GNU ld supports shared libraries.
+    case $host_os in
+    aix[[3-9]]*)
+      # On AIX/PPC, the GNU linker is very broken
+      if test ia64 != "$host_cpu"; then
+	_LT_TAGVAR(ld_shlibs, $1)=no
+	cat <<_LT_EOF 1>&2
+
+*** Warning: the GNU linker, at least up to release 2.19, is reported
+*** to be unable to reliably create shared libraries on AIX.
+*** Therefore, libtool is disabling shared libraries support.  If you
+*** really care for shared libraries, you may want to install binutils
+*** 2.20 or above, or modify your PATH so that a non-GNU linker is found.
+*** You will then need to restart the configuration process.
+
+_LT_EOF
+      fi
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+            _LT_TAGVAR(archive_expsym_cmds, $1)=''
+        ;;
+      m68k)
+            _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+            _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+            _LT_TAGVAR(hardcode_minus_L, $1)=yes
+        ;;
+      esac
+      ;;
+
+    beos*)
+      if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+	# Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+	# support --undefined.  This deserves some investigation.  FIXME
+	_LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+      else
+	_LT_TAGVAR(ld_shlibs, $1)=no
+      fi
+      ;;
+
+    cygwin* | mingw* | pw32* | cegcc*)
+      # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless,
+      # as there is no search path for DLLs.
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols'
+      _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+      _LT_TAGVAR(always_export_symbols, $1)=no
+      _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+      _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols'
+      _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname']
+
+      if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+	# If the export-symbols file already is a .def file, use it as
+	# is; otherwise, prepend EXPORTS...
+	_LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then
+          cp $export_symbols $output_objdir/$soname.def;
+        else
+          echo EXPORTS > $output_objdir/$soname.def;
+          cat $export_symbols >> $output_objdir/$soname.def;
+        fi~
+        $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+      else
+	_LT_TAGVAR(ld_shlibs, $1)=no
+      fi
+      ;;
+
+    haiku*)
+      _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+      _LT_TAGVAR(link_all_deplibs, $1)=yes
+      ;;
+
+    os2*)
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+      _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+      shrext_cmds=.dll
+      _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+	$ECHO EXPORTS >> $output_objdir/$libname.def~
+	emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
+	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+	emximp -o $lib $output_objdir/$libname.def'
+      _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+	$ECHO EXPORTS >> $output_objdir/$libname.def~
+	prefix_cmds="$SED"~
+	if test EXPORTS = "`$SED 1q $export_symbols`"; then
+	  prefix_cmds="$prefix_cmds -e 1d";
+	fi~
+	prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
+	cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
+	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+	emximp -o $lib $output_objdir/$libname.def'
+      _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
+      _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+      ;;
+
+    interix[[3-9]]*)
+      _LT_TAGVAR(hardcode_direct, $1)=no
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
+      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+      # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+      # Instead, shared libraries are loaded at an image base (0x10000000 by
+      # default) and relocated if they conflict, which is a slow, very memory-
+      # consuming and fragmenting process.  To avoid this, we pick a random,
+      # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+      # time.  Moving up from 0x10000000 also allows more sbrk(2) space.
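+      # (The expr below maps ${RANDOM-$$} % 4096 / 2, i.e. 0..2047, onto 256 KiB
+      # steps above 0x50000000 = 1342177280, giving bases up to 0x6FFC0000.)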
+      _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+      _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+      ;;
+
+    gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu)
+      tmp_diet=no
+      if test linux-dietlibc = "$host_os"; then
+	case $cc_basename in
+	  diet\ *) tmp_diet=yes;;	# linux-dietlibc with static linking (!diet-dyn)
+	esac
+      fi
+      if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \
+	 && test no = "$tmp_diet"
+      then
+	tmp_addflag=' $pic_flag'
+	tmp_sharedflag='-shared'
+	case $cc_basename,$host_cpu in
+        pgcc*)				# Portland Group C compiler
+	  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+	  tmp_addflag=' $pic_flag'
+	  ;;
+	pgf77* | pgf90* | pgf95* | pgfortran*)
+					# Portland Group f77 and f90 compilers
+	  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+	  tmp_addflag=' $pic_flag -Mnomain' ;;
+	ecc*,ia64* | icc*,ia64*)	# Intel C compiler on ia64
+	  tmp_addflag=' -i_dynamic' ;;
+	efc*,ia64* | ifort*,ia64*)	# Intel Fortran compiler on ia64
+	  tmp_addflag=' -i_dynamic -nofor_main' ;;
+	ifc* | ifort*)			# Intel Fortran compiler
+	  tmp_addflag=' -nofor_main' ;;
+	lf95*)				# Lahey Fortran 8.1
+	  _LT_TAGVAR(whole_archive_flag_spec, $1)=
+	  tmp_sharedflag='--shared' ;;
+        nagfor*)                        # NAGFOR 5.3
+          tmp_sharedflag='-Wl,-shared' ;;
+	xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below)
+	  tmp_sharedflag='-qmkshrobj'
+	  tmp_addflag= ;;
+	nvcc*)	# Cuda Compiler Driver 2.2
+	  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+	  _LT_TAGVAR(compiler_needs_object, $1)=yes
+	  ;;
+	esac
+	case `$CC -V 2>&1 | sed 5q` in
+	*Sun\ C*)			# Sun C 5.9
+	  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+	  _LT_TAGVAR(compiler_needs_object, $1)=yes
+	  tmp_sharedflag='-G' ;;
+	*Sun\ F*)			# Sun Fortran 8.3
+	  tmp_sharedflag='-G' ;;
+	esac
+	_LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+
+        if test yes = "$supports_anon_versioning"; then
+          _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
+            cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+            echo "local: *; };" >> $output_objdir/$libname.ver~
+            $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib'
+        fi
+
+	case $cc_basename in
+	tcc*)
+	  _LT_TAGVAR(export_dynamic_flag_spec, $1)='-rdynamic'
+	  ;;
+	xlf* | bgf* | bgxlf* | mpixlf*)
+	  # IBM XL Fortran 10.1 on PPC cannot create shared libs itself
+	  _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive'
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+	  _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib'
+	  if test yes = "$supports_anon_versioning"; then
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
+              cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+              echo "local: *; };" >> $output_objdir/$libname.ver~
+              $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
+	  fi
+	  ;;
+	esac
+      else
+        _LT_TAGVAR(ld_shlibs, $1)=no
+      fi
+      ;;
+
+    netbsd*)
+      if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+	_LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
+	wlarc=
+      else
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+      fi
+      ;;
+
+    solaris*)
+      if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then
+	_LT_TAGVAR(ld_shlibs, $1)=no
+	cat <<_LT_EOF 1>&2
+
+*** Warning: The releases 2.8.* of the GNU linker cannot reliably
+*** create shared libraries on Solaris systems.  Therefore, libtool
+*** is disabling shared libraries support.  We urge you to upgrade GNU
+*** binutils to release 2.9.1 or newer.  Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+      elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+      else
+	_LT_TAGVAR(ld_shlibs, $1)=no
+      fi
+      ;;
+
+    sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*)
+      case `$LD -v 2>&1` in
+        *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*)
+	_LT_TAGVAR(ld_shlibs, $1)=no
+	cat <<_LT_EOF 1>&2
+
+*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot
+*** reliably create shared libraries on SCO systems.  Therefore, libtool
+*** is disabling shared libraries support.  We urge you to upgrade GNU
+*** binutils to release 2.16.91.0.3 or newer.  Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+	;;
+	*)
+	  # For security reasons, it is highly recommended that you always
+	  # use absolute paths for naming shared libraries, and exclude the
+	  # DT_RUNPATH tag from executables and libraries.  But doing so
+	  # requires that you compile everything twice, which is a pain.
+	  if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+	  else
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	  fi
+	;;
+      esac
+      ;;
+
+    sunos4*)
+      _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+      wlarc=
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    *)
+      if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+      else
+	_LT_TAGVAR(ld_shlibs, $1)=no
+      fi
+      ;;
+    esac
+
+    if test no = "$_LT_TAGVAR(ld_shlibs, $1)"; then
+      runpath_var=
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+      _LT_TAGVAR(export_dynamic_flag_spec, $1)=
+      _LT_TAGVAR(whole_archive_flag_spec, $1)=
+    fi
+  else
+    # PORTME fill in a description of your system's linker (not GNU ld)
+    case $host_os in
+    aix3*)
+      _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+      _LT_TAGVAR(always_export_symbols, $1)=yes
+      _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
+      # Note: this linker hardcodes the directories in LIBPATH if there
+      # are no directories specified by -L.
+      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+      if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then
+	# Neither direct hardcoding nor static linking is supported with a
+	# broken collect2.
+	_LT_TAGVAR(hardcode_direct, $1)=unsupported
+      fi
+      ;;
+
+    aix[[4-9]]*)
+      if test ia64 = "$host_cpu"; then
+	# On IA64, the linker does run time linking by default, so we don't
+	# have to do anything special.
+	aix_use_runtimelinking=no
+	exp_sym_flag='-Bexport'
+	no_entry_flag=
+      else
+	# If we're using GNU nm, then we don't want the "-C" option.
+	# -C means demangle to GNU nm, but to AIX nm it means do not demangle.
+	# Without the "-l" option, or with the "-B" option, AIX nm treats
+	# weak defined symbols like other global defined symbols, whereas
+	# GNU nm marks them as "W".
+	# While the 'weak' keyword is ignored in the Export File, we need
+	# it in the Import File for the 'aix-soname' feature, so we have
+	# to replace the "-B" option with "-P" for AIX nm.
+	if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+	  _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols'
+	else
+	  _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols'
+	fi
+	aix_use_runtimelinking=no
+
+	# Test if we are trying to use run time linking or normal
+	# AIX style linking. If -brtl is somewhere in LDFLAGS, we
+	# have runtime linking enabled, and use it for executables.
+	# For shared libraries, we enable/disable runtime linking
+	# depending on the kind of the shared library created -
+	# when "with_aix_soname,aix_use_runtimelinking" is:
+	# "aix,no"   lib.a(lib.so.V) shared, rtl:no,  for executables
+	# "aix,yes"  lib.so          shared, rtl:yes, for executables
+	#            lib.a           static archive
+	# "both,no"  lib.so.V(shr.o) shared, rtl:yes
+	#            lib.a(lib.so.V) shared, rtl:no,  for executables
+	# "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables
+	#            lib.a(lib.so.V) shared, rtl:no
+	# "svr4,*"   lib.so.V(shr.o) shared, rtl:yes, for executables
+	#            lib.a           static archive
+	case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*)
+	  for ld_flag in $LDFLAGS; do
+	  if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then
+	    aix_use_runtimelinking=yes
+	    break
+	  fi
+	  done
+	  if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then
+	    # With aix-soname=svr4, we create the lib.so.V shared archives only,
+	    # so we don't have lib.a shared libs to link our executables.
+	    # We have to force runtime linking in this case.
+	    aix_use_runtimelinking=yes
+	    LDFLAGS="$LDFLAGS -Wl,-brtl"
+	  fi
+	  ;;
+	esac
+
+	exp_sym_flag='-bexport'
+	no_entry_flag='-bnoentry'
+      fi
+
+      # When large executables or shared objects are built, AIX ld can
+      # have problems creating the table of contents.  If linking a library
+      # or program results in "error TOC overflow" add -mminimal-toc to
+      # CXXFLAGS/CFLAGS for g++/gcc.  In the cases where that is not
+      # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+      _LT_TAGVAR(archive_cmds, $1)=''
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+      _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+      _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+      _LT_TAGVAR(link_all_deplibs, $1)=yes
+      _LT_TAGVAR(file_list_spec, $1)='$wl-f,'
+      case $with_aix_soname,$aix_use_runtimelinking in
+      aix,*) ;; # traditional, no import file
+      svr4,* | *,yes) # use import file
+	# The Import File defines what to hardcode.
+	_LT_TAGVAR(hardcode_direct, $1)=no
+	_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+	;;
+      esac
+
+      if test yes = "$GCC"; then
+	case $host_os in aix4.[[012]]|aix4.[[012]].*)
+	# We only want to do this on AIX 4.2 and lower, because the check
+	# below for broken collect2 doesn't work under 4.3+.
+	  collect2name=`$CC -print-prog-name=collect2`
+	  if test -f "$collect2name" &&
+	   strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+	  then
+	  # We have reworked collect2
+	  :
+	  else
+	  # We have old collect2
+	  _LT_TAGVAR(hardcode_direct, $1)=unsupported
+	  # It fails to find uninstalled libraries when the uninstalled
+	  # path is not listed in the libpath.  Setting hardcode_minus_L
+	  # to unsupported forces relinking
+	  _LT_TAGVAR(hardcode_minus_L, $1)=yes
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+	  _LT_TAGVAR(hardcode_libdir_separator, $1)=
+	  fi
+	  ;;
+	esac
+	shared_flag='-shared'
+	if test yes = "$aix_use_runtimelinking"; then
+	  shared_flag="$shared_flag "'$wl-G'
+	fi
+	# Need to ensure runtime linking is disabled for the traditional
+	# shared library, or the linker may eventually find shared libraries
+	# /with/ Import File - we do not want to mix them.
+	shared_flag_aix='-shared'
+	shared_flag_svr4='-shared $wl-G'
+      else
+	# not using gcc
+	if test ia64 = "$host_cpu"; then
+	# VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+	# chokes on -Wl,-G. The following line is correct:
+	  shared_flag='-G'
+	else
+	  if test yes = "$aix_use_runtimelinking"; then
+	    shared_flag='$wl-G'
+	  else
+	    shared_flag='$wl-bM:SRE'
+	  fi
+	  shared_flag_aix='$wl-bM:SRE'
+	  shared_flag_svr4='$wl-G'
+	fi
+      fi
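+      # (shared_flag_aix and shared_flag_svr4 are used further below when creating the traditional and svr4-style shared objects.)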
+
+      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall'
+      # It seems that -bexpall does not export symbols beginning with
+      # underscore (_), so it is better to generate a list of symbols to export.
+      _LT_TAGVAR(always_export_symbols, $1)=yes
+      if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then
+	# Warning - without using the other runtime loading flags (-brtl),
+	# -berok will link without error, but may produce a broken library.
+	_LT_TAGVAR(allow_undefined_flag, $1)='-berok'
+        # Determine the default libpath from the value encoded in an
+        # empty executable.
+        _LT_SYS_MODULE_PATH_AIX([$1])
+        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath"
+        _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag
+      else
+	if test ia64 = "$host_cpu"; then
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib'
+	  _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs"
+	  _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols"
+	else
+	 # Determine the default libpath from the value encoded in an
+	 # empty executable.
+	 _LT_SYS_MODULE_PATH_AIX([$1])
+	 _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath"
+	  # Warning - without using the other run time loading flags,
+	  # -berok will link without error, but may produce a broken library.
+	  _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok'
+	  _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok'
+	  if test yes = "$with_gnu_ld"; then
+	    # We only use this code for GNU lds that support --whole-archive.
+	    _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive'
+	  else
+	    # Exported symbols can be pulled into shared objects from archives
+	    _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience'
+	  fi
+	  _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+	  _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d'
+	  # -brtl affects multiple linker settings, -berok does not and is overridden later
+	  compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`'
+	  if test svr4 != "$with_aix_soname"; then
+	    # This is similar to how AIX traditionally builds its shared libraries.
+	    _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname'
+	  fi
+	  if test aix != "$with_aix_soname"; then
+	    _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp'
+	  else
+	    # used by -dlpreopen to get the symbols
+	    _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV  $output_objdir/$realname.d/$soname $output_objdir'
+	  fi
+	  _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d'
+	fi
+      fi
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+            _LT_TAGVAR(archive_expsym_cmds, $1)=''
+        ;;
+      m68k)
+            _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+            _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+            _LT_TAGVAR(hardcode_minus_L, $1)=yes
+        ;;
+      esac
+      ;;
+
+    bsdi[[45]]*)
+      _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic
+      ;;
+
+    cygwin* | mingw* | pw32* | cegcc*)
+      # When not using gcc, we currently assume that we are using
+      # Microsoft Visual C++.
+      # hardcode_libdir_flag_spec is actually meaningless, as there is
+      # no search path for DLLs.
+      case $cc_basename in
+      cl*)
+	# Native MSVC
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
+	_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+	_LT_TAGVAR(always_export_symbols, $1)=yes
+	_LT_TAGVAR(file_list_spec, $1)='@'
+	# Tell ltmain to make .lib files, not .a files.
+	libext=lib
+	# Tell ltmain to make .dll files, not .so files.
+	shrext_cmds=.dll
+	# FIXME: Setting linknames here is a bad hack.
+	_LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames='
+	_LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then
+            cp "$export_symbols" "$output_objdir/$soname.def";
+            echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp";
+          else
+            $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp;
+          fi~
+          $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+          linknames='
+	# The linker will not automatically build a static lib if we build a DLL.
+	# _LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
+	_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+	_LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+	_LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols'
+	# Don't use ranlib
+	_LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib'
+	_LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~
+          lt_tool_outputfile="@TOOL_OUTPUT@"~
+          case $lt_outputfile in
+            *.exe|*.EXE) ;;
+            *)
+              lt_outputfile=$lt_outputfile.exe
+              lt_tool_outputfile=$lt_tool_outputfile.exe
+              ;;
+          esac~
+          if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then
+            $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+            $RM "$lt_outputfile.manifest";
+          fi'
+	;;
+      *)
+	# Assume MSVC wrapper
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
+	_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+	# Tell ltmain to make .lib files, not .a files.
+	libext=lib
+	# Tell ltmain to make .dll files, not .so files.
+	shrext_cmds=.dll
+	# FIXME: Setting linknames here is a bad hack.
+	_LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames='
+	# The linker will automatically build a .lib file if we build a DLL.
+	_LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
+	# FIXME: Should let the user specify the lib program.
+	_LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs'
+	_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+	;;
+      esac
+      ;;
+
+    darwin* | rhapsody*)
+      _LT_DARWIN_LINKER_FEATURES($1)
+      ;;
+
+    dgux*)
+      _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
+    # support.  Future versions do this automatically, but an explicit c++rt0.o
+    # does not break anything, and helps significantly (at the cost of a little
+    # extra space).
+    freebsd2.2*)
+      _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    # Unfortunately, older versions of FreeBSD 2 do not have this feature.
+    freebsd2.*)
+      _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
+    freebsd* | dragonfly*)
+      _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    hpux9*)
+      if test yes = "$GCC"; then
+	_LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
+      else
+	_LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
+      fi
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir'
+      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+
+      # hardcode_minus_L: Not really in the search PATH,
+      # but as the default location of the library.
+      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+      ;;
+
+    hpux10*)
+      if test yes,no = "$GCC,$with_gnu_ld"; then
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
+      fi
+      if test no = "$with_gnu_ld"; then
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir'
+	_LT_TAGVAR(hardcode_libdir_separator, $1)=:
+	_LT_TAGVAR(hardcode_direct, $1)=yes
+	_LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+	_LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+	# hardcode_minus_L: Not really in the search PATH,
+	# but as the default location of the library.
+	_LT_TAGVAR(hardcode_minus_L, $1)=yes
+      fi
+      ;;
+
+    hpux11*)
+      if test yes,no = "$GCC,$with_gnu_ld"; then
+	case $host_cpu in
+	hppa*64*)
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	ia64*)
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	*)
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	esac
+      else
+	case $host_cpu in
+	hppa*64*)
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	ia64*)
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	*)
+	m4_if($1, [], [
+	  # Older versions of the 11.00 compiler do not understand -b yet
+	  # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does)
+	  _LT_LINKER_OPTION([if $CC understands -b],
+	    _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b],
+	    [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'],
+	    [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])],
+	  [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'])
+	  ;;
+	esac
+      fi
+      if test no = "$with_gnu_ld"; then
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir'
+	_LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+	case $host_cpu in
+	hppa*64*|ia64*)
+	  _LT_TAGVAR(hardcode_direct, $1)=no
+	  _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+	  ;;
+	*)
+	  _LT_TAGVAR(hardcode_direct, $1)=yes
+	  _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+	  _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+
+	  # hardcode_minus_L: Not really in the search PATH,
+	  # but as the default location of the library.
+	  _LT_TAGVAR(hardcode_minus_L, $1)=yes
+	  ;;
+	esac
+      fi
+      ;;
+
+    irix5* | irix6* | nonstopux*)
+      if test yes = "$GCC"; then
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+	# Try to use the -exported_symbol ld option; if it does not
+	# work, assume that -exports_file does not work either and
+	# implicitly export all symbols.
+	# This should be the same for all languages, so no per-tag cache variable.
+	AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol],
+	  [lt_cv_irix_exported_symbol],
+	  [save_LDFLAGS=$LDFLAGS
+	   LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null"
+	   AC_LINK_IFELSE(
+	     [AC_LANG_SOURCE(
+	        [AC_LANG_CASE([C], [[int foo (void) { return 0; }]],
+			      [C++], [[int foo (void) { return 0; }]],
+			      [Fortran 77], [[
+      subroutine foo
+      end]],
+			      [Fortran], [[
+      subroutine foo
+      end]])])],
+	      [lt_cv_irix_exported_symbol=yes],
+	      [lt_cv_irix_exported_symbol=no])
+           LDFLAGS=$save_LDFLAGS])
+	if test yes = "$lt_cv_irix_exported_symbol"; then
+          _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib'
+	fi
+      else
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib'
+      fi
+      _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+      _LT_TAGVAR(inherit_rpath, $1)=yes
+      _LT_TAGVAR(link_all_deplibs, $1)=yes
+      ;;
+
+    linux*)
+      case $cc_basename in
+      tcc*)
+	# Fabrice Bellard et al's Tiny C Compiler
+	_LT_TAGVAR(ld_shlibs, $1)=yes
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+	;;
+      esac
+      ;;
+
+    netbsd*)
+      if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+	_LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'  # a.out
+      else
+	_LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags'      # ELF
+      fi
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    newsos6)
+      _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    *nto* | *qnx*)
+      ;;
+
+    openbsd* | bitrig*)
+      if test -f /usr/libexec/ld.so; then
+	_LT_TAGVAR(hardcode_direct, $1)=yes
+	_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+	_LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+	if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+	  _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols'
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
+	  _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+	else
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
+	fi
+      else
+	_LT_TAGVAR(ld_shlibs, $1)=no
+      fi
+      ;;
+
+    os2*)
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+      _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+      shrext_cmds=.dll
+      _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+	$ECHO EXPORTS >> $output_objdir/$libname.def~
+	emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
+	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+	emximp -o $lib $output_objdir/$libname.def'
+      _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+	$ECHO EXPORTS >> $output_objdir/$libname.def~
+	prefix_cmds="$SED"~
+	if test EXPORTS = "`$SED 1q $export_symbols`"; then
+	  prefix_cmds="$prefix_cmds -e 1d";
+	fi~
+	prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
+	cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
+	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+	emximp -o $lib $output_objdir/$libname.def'
+      _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
+      _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+      ;;
+
+    osf3*)
+      if test yes = "$GCC"; then
+	_LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*'
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+      else
+	_LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+      fi
+      _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+      ;;
+
+    osf4* | osf5*)	# as osf3* with the addition of -msym flag
+      if test yes = "$GCC"; then
+	_LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*'
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+      else
+	_LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~
+          $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp'
+
+	# Both the C and C++ compilers support -rpath directly
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
+      fi
+      _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
+      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+      ;;
+
+    solaris*)
+      _LT_TAGVAR(no_undefined_flag, $1)=' -z defs'
+      if test yes = "$GCC"; then
+	wlarc='$wl'
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+          $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+      else
+	case `$CC -V 2>&1` in
+	*"Compilers 5.0"*)
+	  wlarc=''
+	  _LT_TAGVAR(archive_cmds, $1)='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	  _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+            $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp'
+	  ;;
+	*)
+	  wlarc='$wl'
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags'
+	  _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+            $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+	  ;;
+	esac
+      fi
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      case $host_os in
+      solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+      *)
+	# The compiler driver will combine and reorder linker options,
+	# but understands '-z linker_flag'.  GCC discards it without '$wl',
+	# but is careful enough not to reorder.
+	# Supported since Solaris 2.6 (maybe 2.5.1?)
+	if test yes = "$GCC"; then
+	  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract'
+	else
+	  _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract'
+	fi
+	;;
+      esac
+      _LT_TAGVAR(link_all_deplibs, $1)=yes
+      ;;
+
+    sunos4*)
+      if test sequent = "$host_vendor"; then
+	# Use $CC to link under sequent, because it throws in some extra .o
+	# files that make .init and .fini sections work.
+	_LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	_LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
+      fi
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    sysv4)
+      case $host_vendor in
+	sni)
+	  _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	  _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true???
+	;;
+	siemens)
+	  ## LD is ld; it makes a PLAMLIB.
+	  ## CC just makes a GrossModule.
+	  _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags'
+	  _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs'
+	  _LT_TAGVAR(hardcode_direct, $1)=no
+        ;;
+	motorola)
+	  _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	  _LT_TAGVAR(hardcode_direct, $1)=no # Motorola manual says yes, but my tests say they lie
+	;;
+      esac
+      runpath_var='LD_RUN_PATH'
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    sysv4.3*)
+      _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport'
+      ;;
+
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	_LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+	runpath_var=LD_RUN_PATH
+	hardcode_runpath_var=yes
+	_LT_TAGVAR(ld_shlibs, $1)=yes
+      fi
+      ;;
+
+    sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*)
+      _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text'
+      _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      runpath_var='LD_RUN_PATH'
+
+      if test yes = "$GCC"; then
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	_LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      fi
+      ;;
+
+    sysv5* | sco3.2v5* | sco5v6*)
+      # Note: We CANNOT use -z defs as we might desire, because we do not
+      # link with -lc, and that would cause any symbols used from libc to
+      # always be unresolved, which means just about no library would
+      # ever link correctly.  If we're not using GNU ld we use -z text
+      # though, which does catch some bad symbols but isn't as heavy-handed
+      # as -z defs.
+      _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text'
+      _LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs'
+      _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir'
+      _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+      _LT_TAGVAR(link_all_deplibs, $1)=yes
+      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport'
+      runpath_var='LD_RUN_PATH'
+
+      if test yes = "$GCC"; then
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	_LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      fi
+      ;;
+
+    uts4*)
+      _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    *)
+      _LT_TAGVAR(ld_shlibs, $1)=no
+      ;;
+    esac
+
+    if test sni = "$host_vendor"; then
+      case $host in
+      sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+	_LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Blargedynsym'
+	;;
+      esac
+    fi
+  fi
+])
+AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)])
+test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no
+
+_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld
+
+_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl
+_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl
+_LT_DECL([], [extract_expsyms_cmds], [2],
+    [The commands to extract the exported symbol list from a shared archive])
+
+#
+# Do we need to explicitly link libc?
+#
+case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in
+x|xyes)
+  # Assume -lc should be added
+  _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+
+  if test yes,yes = "$GCC,$enable_shared"; then
+    case $_LT_TAGVAR(archive_cmds, $1) in
+    *'~'*)
+      # FIXME: we may have to deal with multi-command sequences.
+      ;;
+    '$CC '*)
+      # Test whether the compiler implicitly links with -lc since on some
+      # systems, -lgcc has to come before -lc. If gcc already passes -lc
+      # to ld, don't add -lc before -lgcc.
+      AC_CACHE_CHECK([whether -lc should be explicitly linked in],
+	[lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1),
+	[$RM conftest*
+	echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+	if AC_TRY_EVAL(ac_compile) 2>conftest.err; then
+	  soname=conftest
+	  lib=conftest
+	  libobjs=conftest.$ac_objext
+	  deplibs=
+	  wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1)
+	  pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1)
+	  compiler_flags=-v
+	  linker_flags=-v
+	  verstring=
+	  output_objdir=.
+	  libname=conftest
+	  lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1)
+	  _LT_TAGVAR(allow_undefined_flag, $1)=
+	  if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1)
+	  then
+	    lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+	  else
+	    lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+	  fi
+	  _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag
+	else
+	  cat conftest.err 1>&5
+	fi
+	$RM conftest*
+	])
+      _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)
+      ;;
+    esac
+  fi
+  ;;
+esac
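+# A minimal sketch (comments only, not executed) of what the cache check
+# above effectively does, assuming a POSIX shell and a working $CC:
+#   echo 'int some_variable = 0;' > conftest.c
+#   $CC -c conftest.c
+#   eval "$archive_cmds" 2>&1 | $GREP ' -lc ' && need_lc=no || need_lc=yes
+# i.e. if the compiler driver already passes -lc to the linker, libtool
+# does not add -lc itself; 'need_lc' here is just an illustrative name.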
+
+_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0],
+    [Whether or not to add -lc for building shared libraries])
+_LT_TAGDECL([allow_libtool_libs_with_static_runtimes],
+    [enable_shared_with_static_runtimes], [0],
+    [Whether or not to disallow shared libs when runtime libs are static])
+_LT_TAGDECL([], [export_dynamic_flag_spec], [1],
+    [Compiler flag to allow reflexive dlopens])
+_LT_TAGDECL([], [whole_archive_flag_spec], [1],
+    [Compiler flag to generate shared objects directly from archives])
+_LT_TAGDECL([], [compiler_needs_object], [1],
+    [Whether the compiler copes with passing no objects directly])
+_LT_TAGDECL([], [old_archive_from_new_cmds], [2],
+    [Create an old-style archive from a shared archive])
+_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2],
+    [Create a temporary old-style archive to link instead of a shared archive])
+_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive])
+_LT_TAGDECL([], [archive_expsym_cmds], [2])
+_LT_TAGDECL([], [module_cmds], [2],
+    [Commands used to build a loadable module if different from building
+    a shared archive.])
+_LT_TAGDECL([], [module_expsym_cmds], [2])
+_LT_TAGDECL([], [with_gnu_ld], [1],
+    [Whether we are building with GNU ld or not])
+_LT_TAGDECL([], [allow_undefined_flag], [1],
+    [Flag that allows shared libraries with undefined symbols to be built])
+_LT_TAGDECL([], [no_undefined_flag], [1],
+    [Flag that enforces no undefined symbols])
+_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1],
+    [Flag to hardcode $libdir into a binary during linking.
+    This must work even if $libdir does not exist])
+_LT_TAGDECL([], [hardcode_libdir_separator], [1],
+    [Whether we need a single "-rpath" flag with a separated argument])
+_LT_TAGDECL([], [hardcode_direct], [0],
+    [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes
+    DIR into the resulting binary])
+_LT_TAGDECL([], [hardcode_direct_absolute], [0],
+    [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes
+    DIR into the resulting binary and the resulting library dependency is
+    "absolute", i.e. impossible to change by setting $shlibpath_var if the
+    library is relocated])
+_LT_TAGDECL([], [hardcode_minus_L], [0],
+    [Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+    into the resulting binary])
+_LT_TAGDECL([], [hardcode_shlibpath_var], [0],
+    [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+    into the resulting binary])
+_LT_TAGDECL([], [hardcode_automatic], [0],
+    [Set to "yes" if building a shared library automatically hardcodes DIR
+    into the library and all subsequent libraries and executables linked
+    against it])
+_LT_TAGDECL([], [inherit_rpath], [0],
+    [Set to yes if linker adds runtime paths of dependent libraries
+    to runtime path list])
+_LT_TAGDECL([], [link_all_deplibs], [0],
+    [Whether libtool must link a program against all its dependency libraries])
+_LT_TAGDECL([], [always_export_symbols], [0],
+    [Set to "yes" if exported symbols are required])
+_LT_TAGDECL([], [export_symbols_cmds], [2],
+    [The commands to list exported symbols])
+_LT_TAGDECL([], [exclude_expsyms], [1],
+    [Symbols that should not be listed in the preloaded symbols])
+_LT_TAGDECL([], [include_expsyms], [1],
+    [Symbols that must always be exported])
+_LT_TAGDECL([], [prelink_cmds], [2],
+    [Commands necessary for linking programs (against libraries) with templates])
+_LT_TAGDECL([], [postlink_cmds], [2],
+    [Commands necessary for finishing linking programs])
+_LT_TAGDECL([], [file_list_spec], [1],
+    [Specify filename containing input files])
+dnl FIXME: Not yet implemented
+dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1],
+dnl    [Compiler flag to generate thread safe objects])
+])# _LT_LINKER_SHLIBS
+
+
+# _LT_LANG_C_CONFIG([TAG])
+# ------------------------
+# Ensure that the configuration variables for a C compiler are suitably
+# defined.  These variables are subsequently used by _LT_CONFIG to write
+# the compiler configuration to 'libtool'.
+m4_defun([_LT_LANG_C_CONFIG],
+[m4_require([_LT_DECL_EGREP])dnl
+lt_save_CC=$CC
+AC_LANG_PUSH(C)
+
+# Source file extension for C test sources.
+ac_ext=c
+
+# Object file extension for compiled C test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="int some_variable = 0;"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='int main(){return(0);}'
+
+_LT_TAG_COMPILER
+# Save the default compiler, since it gets overwritten when the other
+# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP.
+compiler_DEFAULT=$CC
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+if test -n "$compiler"; then
+  _LT_COMPILER_NO_RTTI($1)
+  _LT_COMPILER_PIC($1)
+  _LT_COMPILER_C_O($1)
+  _LT_COMPILER_FILE_LOCKS($1)
+  _LT_LINKER_SHLIBS($1)
+  _LT_SYS_DYNAMIC_LINKER($1)
+  _LT_LINKER_HARDCODE_LIBPATH($1)
+  LT_SYS_DLOPEN_SELF
+  _LT_CMD_STRIPLIB
+
+  # Report what library types will actually be built
+  AC_MSG_CHECKING([if libtool supports shared libraries])
+  AC_MSG_RESULT([$can_build_shared])
+
+  AC_MSG_CHECKING([whether to build shared libraries])
+  test no = "$can_build_shared" && enable_shared=no
+
+  # On AIX, shared libraries and static libraries use the same namespace, and
+  # are all built from PIC.
+  case $host_os in
+  aix3*)
+    test yes = "$enable_shared" && enable_static=no
+    if test -n "$RANLIB"; then
+      archive_cmds="$archive_cmds~\$RANLIB \$lib"
+      postinstall_cmds='$RANLIB $lib'
+    fi
+    ;;
+
+  aix[[4-9]]*)
+    if test ia64 != "$host_cpu"; then
+      case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in
+      yes,aix,yes) ;;			# shared object as lib.so file only
+      yes,svr4,*) ;;			# shared object as lib.so archive member only
+      yes,*) enable_static=no ;;	# shared object in lib.a archive as well
+      esac
+    fi
+    ;;
+  esac
+  AC_MSG_RESULT([$enable_shared])
+
+  AC_MSG_CHECKING([whether to build static libraries])
+  # Make sure either enable_shared or enable_static is yes.
+  test yes = "$enable_shared" || enable_static=yes
+  AC_MSG_RESULT([$enable_static])
+
+  _LT_CONFIG($1)
+fi
+AC_LANG_POP
+CC=$lt_save_CC
+])# _LT_LANG_C_CONFIG
+
+
+# _LT_LANG_CXX_CONFIG([TAG])
+# --------------------------
+# Ensure that the configuration variables for a C++ compiler are suitably
+# defined.  These variables are subsequently used by _LT_CONFIG to write
+# the compiler configuration to 'libtool'.
+m4_defun([_LT_LANG_CXX_CONFIG],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_PATH_MANIFEST_TOOL])dnl
+if test -n "$CXX" && ( test no != "$CXX" &&
+    ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) ||
+    (test g++ != "$CXX"))); then
+  AC_PROG_CXXCPP
+else
+  _lt_caught_CXX_error=yes
+fi
+
+AC_LANG_PUSH(C++)
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+_LT_TAGVAR(allow_undefined_flag, $1)=
+_LT_TAGVAR(always_export_symbols, $1)=no
+_LT_TAGVAR(archive_expsym_cmds, $1)=
+_LT_TAGVAR(compiler_needs_object, $1)=no
+_LT_TAGVAR(export_dynamic_flag_spec, $1)=
+_LT_TAGVAR(hardcode_direct, $1)=no
+_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+_LT_TAGVAR(hardcode_libdir_separator, $1)=
+_LT_TAGVAR(hardcode_minus_L, $1)=no
+_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
+_LT_TAGVAR(hardcode_automatic, $1)=no
+_LT_TAGVAR(inherit_rpath, $1)=no
+_LT_TAGVAR(module_cmds, $1)=
+_LT_TAGVAR(module_expsym_cmds, $1)=
+_LT_TAGVAR(link_all_deplibs, $1)=unknown
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+_LT_TAGVAR(no_undefined_flag, $1)=
+_LT_TAGVAR(whole_archive_flag_spec, $1)=
+_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+
+# Source file extension for C++ test sources.
+ac_ext=cpp
+
+# Object file extension for compiled C++ test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# No sense in running all these tests if we already determined that
+# the CXX compiler isn't working.  Some variables (like enable_shared)
+# are currently assumed to apply to all compilers on this platform,
+# and will be corrupted by setting them based on a non-working compiler.
+if test yes != "$_lt_caught_CXX_error"; then
+  # Code to be used in simple compile tests
+  lt_simple_compile_test_code="int some_variable = 0;"
+
+  # Code to be used in simple link tests
+  lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }'
+
+  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+  _LT_TAG_COMPILER
+
+  # save warnings/boilerplate of simple test code
+  _LT_COMPILER_BOILERPLATE
+  _LT_LINKER_BOILERPLATE
+
+  # Allow CC to be a program name with arguments.
+  lt_save_CC=$CC
+  lt_save_CFLAGS=$CFLAGS
+  lt_save_LD=$LD
+  lt_save_GCC=$GCC
+  GCC=$GXX
+  lt_save_with_gnu_ld=$with_gnu_ld
+  lt_save_path_LD=$lt_cv_path_LD
+  if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then
+    lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx
+  else
+    $as_unset lt_cv_prog_gnu_ld
+  fi
+  if test -n "${lt_cv_path_LDCXX+set}"; then
+    lt_cv_path_LD=$lt_cv_path_LDCXX
+  else
+    $as_unset lt_cv_path_LD
+  fi
+  test -z "${LDCXX+set}" || LD=$LDCXX
+  CC=${CXX-"c++"}
+  CFLAGS=$CXXFLAGS
+  compiler=$CC
+  _LT_TAGVAR(compiler, $1)=$CC
+  _LT_CC_BASENAME([$compiler])
+
+  if test -n "$compiler"; then
+    # We don't want -fno-exception when compiling C++ code, so set the
+    # no_builtin_flag separately
+    if test yes = "$GXX"; then
+      _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin'
+    else
+      _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=
+    fi
+
+    if test yes = "$GXX"; then
+      # Set up default GNU C++ configuration
+
+      LT_PATH_LD
+
+      # Check if GNU C++ uses GNU ld as the underlying linker, since the
+      # archiving commands below assume that GNU ld is being used.
+      if test yes = "$with_gnu_ld"; then
+        _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+        _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+
+        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+        _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
+
+        # If archive_cmds runs LD, not CC, wlarc should be empty
+        # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to
+        #     investigate it a little bit more. (MM)
+        wlarc='$wl'
+
+        # ancient GNU ld didn't support --whole-archive et al.
+        if eval "`$CC -print-prog-name=ld` --help 2>&1" |
+	  $GREP 'no-whole-archive' > /dev/null; then
+          _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
+        else
+          _LT_TAGVAR(whole_archive_flag_spec, $1)=
+        fi
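+        # Illustrative note: `$CC -print-prog-name=ld` reports the linker the
+        # compiler driver will invoke; its --help output is scanned for
+        # 'no-whole-archive' to decide whether the flag pair above is usable.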
+      else
+        with_gnu_ld=no
+        wlarc=
+
+        # A generic and very simple default shared library creation
+        # command for GNU C++ for the case where it uses the native
+        # linker, instead of GNU ld.  If possible, this setting should
+        # be overridden to take advantage of the native linker features on
+        # the platform it is being used on.
+        _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+      fi
+
+      # Commands to make compiler produce verbose output that lists
+      # what "hidden" libraries, object files and flags are used when
+      # linking a shared library.
+      output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+
+    else
+      GXX=no
+      with_gnu_ld=no
+      wlarc=
+    fi
+
+    # PORTME: fill in a description of your system's C++ link characteristics
+    AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries])
+    _LT_TAGVAR(ld_shlibs, $1)=yes
+    case $host_os in
+      aix3*)
+        # FIXME: insert proper C++ library support
+        _LT_TAGVAR(ld_shlibs, $1)=no
+        ;;
+      aix[[4-9]]*)
+        if test ia64 = "$host_cpu"; then
+          # On IA64, the linker does run time linking by default, so we don't
+          # have to do anything special.
+          aix_use_runtimelinking=no
+          exp_sym_flag='-Bexport'
+          no_entry_flag=
+        else
+          aix_use_runtimelinking=no
+
+          # Test if we are trying to use run time linking or normal
+          # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+          # have runtime linking enabled, and use it for executables.
+          # For shared libraries, we enable/disable runtime linking
+          # depending on the kind of the shared library created -
+          # when "with_aix_soname,aix_use_runtimelinking" is:
+          # "aix,no"   lib.a(lib.so.V) shared, rtl:no,  for executables
+          # "aix,yes"  lib.so          shared, rtl:yes, for executables
+          #            lib.a           static archive
+          # "both,no"  lib.so.V(shr.o) shared, rtl:yes
+          #            lib.a(lib.so.V) shared, rtl:no,  for executables
+          # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables
+          #            lib.a(lib.so.V) shared, rtl:no
+          # "svr4,*"   lib.so.V(shr.o) shared, rtl:yes, for executables
+          #            lib.a           static archive
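+          # Illustrative example: configuring with LDFLAGS="-Wl,-brtl" makes
+          # the scan over $LDFLAGS below enable runtime linking here.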
+          case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*)
+	    for ld_flag in $LDFLAGS; do
+	      case $ld_flag in
+	      *-brtl*)
+	        aix_use_runtimelinking=yes
+	        break
+	        ;;
+	      esac
+	    done
+	    if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then
+	      # With aix-soname=svr4, we create the lib.so.V shared archives only,
+	      # so we don't have lib.a shared libs to link our executables.
+	      # We have to force runtime linking in this case.
+	      aix_use_runtimelinking=yes
+	      LDFLAGS="$LDFLAGS -Wl,-brtl"
+	    fi
+	    ;;
+          esac
+
+          exp_sym_flag='-bexport'
+          no_entry_flag='-bnoentry'
+        fi
+
+        # When large executables or shared objects are built, AIX ld can
+        # have problems creating the table of contents.  If linking a library
+        # or program results in "error TOC overflow" add -mminimal-toc to
+        # CXXFLAGS/CFLAGS for g++/gcc.  In the cases where that is not
+        # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
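+        # For example (illustrative only), one might rerun configure with
+        #   CXXFLAGS="-mminimal-toc" LDFLAGS="-Wl,-bbigtoc"
+        # if such TOC overflows appear; neither flag is added automatically.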
+
+        _LT_TAGVAR(archive_cmds, $1)=''
+        _LT_TAGVAR(hardcode_direct, $1)=yes
+        _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+        _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+        _LT_TAGVAR(link_all_deplibs, $1)=yes
+        _LT_TAGVAR(file_list_spec, $1)='$wl-f,'
+        case $with_aix_soname,$aix_use_runtimelinking in
+        aix,*) ;;	# no import file
+        svr4,* | *,yes) # use import file
+          # The Import File defines what to hardcode.
+          _LT_TAGVAR(hardcode_direct, $1)=no
+          _LT_TAGVAR(hardcode_direct_absolute, $1)=no
+          ;;
+        esac
+
+        if test yes = "$GXX"; then
+          case $host_os in aix4.[[012]]|aix4.[[012]].*)
+          # We only want to do this on AIX 4.2 and lower, the check
+          # below for broken collect2 doesn't work under 4.3+
+	  collect2name=`$CC -print-prog-name=collect2`
+	  if test -f "$collect2name" &&
+	     strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+	  then
+	    # We have reworked collect2
+	    :
+	  else
+	    # We have old collect2
+	    _LT_TAGVAR(hardcode_direct, $1)=unsupported
+	    # It fails to find uninstalled libraries when the uninstalled
+	    # path is not listed in the libpath.  Setting hardcode_minus_L
+	    # to unsupported forces relinking
+	    _LT_TAGVAR(hardcode_minus_L, $1)=yes
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+	    _LT_TAGVAR(hardcode_libdir_separator, $1)=
+	  fi
+          esac
+          shared_flag='-shared'
+	  if test yes = "$aix_use_runtimelinking"; then
+	    shared_flag=$shared_flag' $wl-G'
+	  fi
+	  # Need to ensure runtime linking is disabled for the traditional
+	  # shared library, or the linker may eventually find shared libraries
+	  # /with/ Import File - we do not want to mix them.
+	  shared_flag_aix='-shared'
+	  shared_flag_svr4='-shared $wl-G'
+        else
+          # not using gcc
+          if test ia64 = "$host_cpu"; then
+	  # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+	  # chokes on -Wl,-G. The following line is correct:
+	  shared_flag='-G'
+          else
+	    if test yes = "$aix_use_runtimelinking"; then
+	      shared_flag='$wl-G'
+	    else
+	      shared_flag='$wl-bM:SRE'
+	    fi
+	    shared_flag_aix='$wl-bM:SRE'
+	    shared_flag_svr4='$wl-G'
+          fi
+        fi
+
+        _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall'
+        # It seems that -bexpall does not export symbols beginning with
+        # underscore (_), so it is better to generate a list of symbols to
+	# export.
+        _LT_TAGVAR(always_export_symbols, $1)=yes
+	if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then
+          # Warning - without using the other runtime loading flags (-brtl),
+          # -berok will link without error, but may produce a broken library.
+          # The "-G" linker flag allows undefined symbols.
+          _LT_TAGVAR(no_undefined_flag, $1)='-bernotok'
+          # Determine the default libpath from the value encoded in an empty
+          # executable.
+          _LT_SYS_MODULE_PATH_AIX([$1])
+          _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath"
+
+          _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag
+        else
+          if test ia64 = "$host_cpu"; then
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib'
+	    _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs"
+	    _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols"
+          else
+	    # Determine the default libpath from the value encoded in an
+	    # empty executable.
+	    _LT_SYS_MODULE_PATH_AIX([$1])
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath"
+	    # Warning - without using the other run time loading flags,
+	    # -berok will link without error, but may produce a broken library.
+	    _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok'
+	    _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok'
+	    if test yes = "$with_gnu_ld"; then
+	      # We only use this code for GNU lds that support --whole-archive.
+	      _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive'
+	    else
+	      # Exported symbols can be pulled into shared objects from archives
+	      _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience'
+	    fi
+	    _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d'
+	    # -brtl affects multiple linker settings, -berok does not and is overridden later
+	    compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`'
+	    if test svr4 != "$with_aix_soname"; then
+	      # This is similar to how AIX traditionally builds its shared
+	      # libraries. Need -bnortl late, we may have -brtl in LDFLAGS.
+	      _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname'
+	    fi
+	    if test aix != "$with_aix_soname"; then
+	      _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp'
+	    else
+	      # used by -dlpreopen to get the symbols
+	      _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV  $output_objdir/$realname.d/$soname $output_objdir'
+	    fi
+	    _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d'
+          fi
+        fi
+        ;;
+
+      beos*)
+	if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	  _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+	  # Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+	  # support --undefined.  This deserves some investigation.  FIXME
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+	else
+	  _LT_TAGVAR(ld_shlibs, $1)=no
+	fi
+	;;
+
+      chorus*)
+        case $cc_basename in
+          *)
+	  # FIXME: insert proper C++ library support
+	  _LT_TAGVAR(ld_shlibs, $1)=no
+	  ;;
+        esac
+        ;;
+
+      cygwin* | mingw* | pw32* | cegcc*)
+	case $GXX,$cc_basename in
+	,cl* | no,cl*)
+	  # Native MSVC
+	  # hardcode_libdir_flag_spec is actually meaningless, as there is
+	  # no search path for DLLs.
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
+	  _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+	  _LT_TAGVAR(always_export_symbols, $1)=yes
+	  _LT_TAGVAR(file_list_spec, $1)='@'
+	  # Tell ltmain to make .lib files, not .a files.
+	  libext=lib
+	  # Tell ltmain to make .dll files, not .so files.
+	  shrext_cmds=.dll
+	  # FIXME: Setting linknames here is a bad hack.
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames='
+	  _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then
+              cp "$export_symbols" "$output_objdir/$soname.def";
+              echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp";
+            else
+              $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp;
+            fi~
+            $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+            linknames='
+	  # The linker will not automatically build a static lib if we build a DLL.
+	  # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
+	  _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+	  # Don't use ranlib
+	  _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib'
+	  _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~
+            lt_tool_outputfile="@TOOL_OUTPUT@"~
+            case $lt_outputfile in
+              *.exe|*.EXE) ;;
+              *)
+                lt_outputfile=$lt_outputfile.exe
+                lt_tool_outputfile=$lt_tool_outputfile.exe
+                ;;
+            esac~
+            func_to_tool_file "$lt_outputfile"~
+            if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then
+              $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+              $RM "$lt_outputfile.manifest";
+            fi'
+	  ;;
+	*)
+	  # g++
+	  # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless,
+	  # as there is no search path for DLLs.
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+	  _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols'
+	  _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+	  _LT_TAGVAR(always_export_symbols, $1)=no
+	  _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+
+	  if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+	    # If the export-symbols file already is a .def file, use it as
+	    # is; otherwise, prepend EXPORTS...
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then
+              cp $export_symbols $output_objdir/$soname.def;
+            else
+              echo EXPORTS > $output_objdir/$soname.def;
+              cat $export_symbols >> $output_objdir/$soname.def;
+            fi~
+            $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+	  else
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	  fi
+	  ;;
+	esac
+	;;
+      darwin* | rhapsody*)
+        _LT_DARWIN_LINKER_FEATURES($1)
+	;;
+
+      os2*)
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+	_LT_TAGVAR(hardcode_minus_L, $1)=yes
+	_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+	shrext_cmds=.dll
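+	# Sketch (illustrative placeholder names): the commands below emit a
+	# module definition file roughly of the form
+	#   LIBRARY foo INITINSTANCE TERMINSTANCE
+	#   DESCRIPTION "libfoo"
+	#   DATA MULTIPLE NONSHARED
+	#   EXPORTS
+	#   ...exported symbols...
+	# which the emximp step below then consumes.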
+	_LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+	  $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+	  $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+	  $ECHO EXPORTS >> $output_objdir/$libname.def~
+	  emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
+	  $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+	  emximp -o $lib $output_objdir/$libname.def'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+	  $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+	  $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+	  $ECHO EXPORTS >> $output_objdir/$libname.def~
+	  prefix_cmds="$SED"~
+	  if test EXPORTS = "`$SED 1q $export_symbols`"; then
+	    prefix_cmds="$prefix_cmds -e 1d";
+	  fi~
+	  prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
+	  cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
+	  $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+	  emximp -o $lib $output_objdir/$libname.def'
+	_LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
+	_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+	;;
+
+      dgux*)
+        case $cc_basename in
+          ec++*)
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+          ghcx*)
+	    # Green Hills C++ Compiler
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+          *)
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+        esac
+        ;;
+
+      freebsd2.*)
+        # C++ shared libraries were reported to be fairly broken before
+        # the switch to ELF.
+        _LT_TAGVAR(ld_shlibs, $1)=no
+        ;;
+
+      freebsd-elf*)
+        _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+        ;;
+
+      freebsd* | dragonfly*)
+        # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF
+        # conventions
+        _LT_TAGVAR(ld_shlibs, $1)=yes
+        ;;
+
+      haiku*)
+        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+        _LT_TAGVAR(link_all_deplibs, $1)=yes
+        ;;
+
+      hpux9*)
+        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir'
+        _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+        _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+        _LT_TAGVAR(hardcode_direct, $1)=yes
+        _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH,
+				             # but as the default
+				             # location of the library.
+
+        case $cc_basename in
+          CC*)
+            # FIXME: insert proper C++ library support
+            _LT_TAGVAR(ld_shlibs, $1)=no
+            ;;
+          aCC*)
+            _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
+            # Commands to make compiler produce verbose output that lists
+            # what "hidden" libraries, object files and flags are used when
+            # linking a shared library.
+            #
+            # There doesn't appear to be a way to prevent this compiler from
+            # explicitly linking system object files so we need to strip them
+            # from the output so that they don't get included in the library
+            # dependencies.
+            output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+            ;;
+          *)
+            if test yes = "$GXX"; then
+              _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
+            else
+              # FIXME: insert proper C++ library support
+              _LT_TAGVAR(ld_shlibs, $1)=no
+            fi
+            ;;
+        esac
+        ;;
+
+      hpux10*|hpux11*)
+        if test no = "$with_gnu_ld"; then
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir'
+	  _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+          case $host_cpu in
+            hppa*64*|ia64*)
+              ;;
+            *)
+	      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+              ;;
+          esac
+        fi
+        case $host_cpu in
+          hppa*64*|ia64*)
+            _LT_TAGVAR(hardcode_direct, $1)=no
+            _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+            ;;
+          *)
+            _LT_TAGVAR(hardcode_direct, $1)=yes
+            _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+            _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH,
+					         # but as the default
+					         # location of the library.
+            ;;
+        esac
+
+        case $cc_basename in
+          CC*)
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+          aCC*)
+	    case $host_cpu in
+	      hppa*64*)
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	        ;;
+	      ia64*)
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	        ;;
+	      *)
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	        ;;
+	    esac
+	    # Commands to make compiler produce verbose output that lists
+	    # what "hidden" libraries, object files and flags are used when
+	    # linking a shared library.
+	    #
+	    # There doesn't appear to be a way to prevent this compiler from
+	    # explicitly linking system object files so we need to strip them
+	    # from the output so that they don't get included in the library
+	    # dependencies.
+	    output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+	    ;;
+          *)
+	    if test yes = "$GXX"; then
+	      if test no = "$with_gnu_ld"; then
+	        case $host_cpu in
+	          hppa*64*)
+	            _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	            ;;
+	          ia64*)
+	            _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	            ;;
+	          *)
+	            _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	            ;;
+	        esac
+	      fi
+	    else
+	      # FIXME: insert proper C++ library support
+	      _LT_TAGVAR(ld_shlibs, $1)=no
+	    fi
+	    ;;
+        esac
+        ;;
+
+      interix[[3-9]]*)
+	_LT_TAGVAR(hardcode_direct, $1)=no
+	_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
+	_LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+	# Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+	# Instead, shared libraries are loaded at an image base (0x10000000 by
+	# default) and relocated if they conflict, which is a slow, very
+	# memory-consuming and fragmenting process.  To avoid this, we pick a random,
+	# 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+	# time.  Moving up from 0x10000000 also allows more sbrk(2) space.
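+	# Worked example of the expression below: 262144 is 256 KiB, 1342177280
+	# is 0x50000000, and ${RANDOM-$$} % 4096 / 2 yields a slot in the range
+	# 0..2047, so the chosen base lies in 0x50000000..0x6FFC0000 as stated.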
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+	;;
+      irix5* | irix6*)
+        case $cc_basename in
+          CC*)
+	    # SGI C++
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+
+	    # Archives containing C++ object files must be created using
+	    # "CC -ar", where "CC" is the IRIX C++ compiler.  This is
+	    # necessary to make sure instantiated templates are included
+	    # in the archive.
+	    _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs'
+	    ;;
+          *)
+	    if test yes = "$GXX"; then
+	      if test no = "$with_gnu_ld"; then
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+	      else
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib'
+	      fi
+	    fi
+	    _LT_TAGVAR(link_all_deplibs, $1)=yes
+	    ;;
+        esac
+        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+        _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+        _LT_TAGVAR(inherit_rpath, $1)=yes
+        ;;
+
+      linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+        case $cc_basename in
+          KCC*)
+	    # Kuck and Associates, Inc. (KAI) C++ Compiler
+
+	    # KCC will only create a shared library if the output file
+	    # ends with ".so" (or ".sl" for HP-UX), so rename the library
+	    # to its proper name (with version) after linking.
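+	    # For instance (illustrative): a library destined to be
+	    # libfoo.so.1.2.3 is first linked as a temporary name ending in
+	    # ".so" and then renamed with mv, as the commands below do.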
+	    _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib'
+	    # Commands to make compiler produce verbose output that lists
+	    # what "hidden" libraries, object files and flags are used when
+	    # linking a shared library.
+	    #
+	    # There doesn't appear to be a way to prevent this compiler from
+	    # explicitly linking system object files so we need to strip them
+	    # from the output so that they don't get included in the library
+	    # dependencies.
+	    output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
+	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
+
+	    # Archives containing C++ object files must be created using
+	    # "CC -Bstatic", where "CC" is the KAI C++ compiler.
+	    _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs'
+	    ;;
+	  icpc* | ecpc* )
+	    # Intel C++
+	    with_gnu_ld=yes
+	    # Versions 8.0 and above of icpc choke on multiply defined symbols
+	    # if we add $predep_objects and $postdep_objects; versions 7.1 and
+	    # earlier do not add the objects themselves.
+	    case `$CC -V 2>&1` in
+	      *"Version 7."*)
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+		_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+		;;
+	      *)  # Version 8.0 or newer
+	        tmp_idyn=
+	        case $host_cpu in
+		  ia64*) tmp_idyn=' -i_dynamic';;
+		esac
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+		_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+		;;
+	    esac
+	    _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
+	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
+	    _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive'
+	    ;;
+          pgCC* | pgcpp*)
+            # Portland Group C++ compiler
+	    case `$CC -V` in
+	    *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*)
+	      _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~
+               rm -rf $tpldir~
+               $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~
+               compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"'
+	      _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~
+                rm -rf $tpldir~
+                $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~
+                $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~
+                $RANLIB $oldlib'
+	      _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~
+                rm -rf $tpldir~
+                $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+                $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+	      _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~
+                rm -rf $tpldir~
+                $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+                $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+	      ;;
+	    *) # Version 6 and above use weak symbols
+	      _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+	      _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+	      ;;
+	    esac
+
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl--rpath $wl$libdir'
+	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
+	    _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+            ;;
+	  cxx*)
+	    # Compaq C++
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname  -o $lib $wl-retain-symbols-file $wl$export_symbols'
+
+	    runpath_var=LD_RUN_PATH
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
+	    _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+	    # Commands to make compiler produce verbose output that lists
+	    # what "hidden" libraries, object files and flags are used when
+	    # linking a shared library.
+	    #
+	    # There doesn't appear to be a way to prevent this compiler from
+	    # explicitly linking system object files so we need to strip them
+	    # from the output so that they don't get included in the library
+	    # dependencies.
+	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed'
+	    ;;
+	  xl* | mpixl* | bgxl*)
+	    # IBM XL 8.0 on PPC, with GNU ld
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+	    if test yes = "$supports_anon_versioning"; then
+	      _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
+                cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+                echo "local: *; };" >> $output_objdir/$libname.ver~
+                $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib'
+	    fi
+	    ;;
+	  *)
+	    case `$CC -V 2>&1 | sed 5q` in
+	    *Sun\ C*)
+	      # Sun C++ 5.9
+	      _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs'
+	      _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	      _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols'
+	      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+	      _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+	      _LT_TAGVAR(compiler_needs_object, $1)=yes
+
+	      # Not sure whether something based on
+	      # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1
+	      # would be better.
+	      output_verbose_link_cmd='func_echo_all'
+
+	      # Archives containing C++ object files must be created using
+	      # "CC -xar", where "CC" is the Sun C++ compiler.  This is
+	      # necessary to make sure instantiated templates are included
+	      # in the archive.
+	      _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs'
+	      ;;
+	    esac
+	    ;;
+	esac
+	;;
+
+      lynxos*)
+        # FIXME: insert proper C++ library support
+	_LT_TAGVAR(ld_shlibs, $1)=no
+	;;
+
+      m88k*)
+        # FIXME: insert proper C++ library support
+        _LT_TAGVAR(ld_shlibs, $1)=no
+	;;
+
+      mvs*)
+        case $cc_basename in
+          cxx*)
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+	  *)
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+	esac
+	;;
+
+      netbsd*)
+        if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+	  _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable  -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags'
+	  wlarc=
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+	  _LT_TAGVAR(hardcode_direct, $1)=yes
+	  _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+	fi
+	# Workaround some broken pre-1.5 toolchains
+	output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"'
+	;;
+
+      *nto* | *qnx*)
+        _LT_TAGVAR(ld_shlibs, $1)=yes
+	;;
+
+      openbsd* | bitrig*)
+	if test -f /usr/libexec/ld.so; then
+	  _LT_TAGVAR(hardcode_direct, $1)=yes
+	  _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+	  _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
+	  if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib'
+	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+	    _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
+	  fi
+	  output_verbose_link_cmd=func_echo_all
+	else
+	  _LT_TAGVAR(ld_shlibs, $1)=no
+	fi
+	;;
+
+      osf3* | osf4* | osf5*)
+        case $cc_basename in
+          KCC*)
+	    # Kuck and Associates, Inc. (KAI) C++ Compiler
+
+	    # KCC will only create a shared library if the output file
+	    # ends with ".so" (or ".sl" for HP-UX), so rename the library
+	    # to its proper name (with version) after linking.
+	    _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
+	    _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+	    # Archives containing C++ object files must be created using
+	    # the KAI C++ compiler.
+	    case $host in
+	      osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;;
+	      *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;;
+	    esac
+	    ;;
+          RCC*)
+	    # Rational C++ 2.4.1
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+          cxx*)
+	    case $host in
+	      osf3*)
+	        _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*'
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+	        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+		;;
+	      *)
+	        _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+	        _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~
+                  echo "-hidden">> $lib.exp~
+                  $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp  `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~
+                  $RM $lib.exp'
+	        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
+		;;
+	    esac
+
+	    _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+	    # Commands to make compiler produce verbose output that lists
+	    # what "hidden" libraries, object files and flags are used when
+	    # linking a shared library.
+	    #
+	    # There doesn't appear to be a way to prevent this compiler from
+	    # explicitly linking system object files so we need to strip them
+	    # from the output so that they don't get included in the library
+	    # dependencies.
+	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+	    ;;
+	  *)
+	    if test yes,no = "$GXX,$with_gnu_ld"; then
+	      _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*'
+	      case $host in
+	        osf3*)
+	          _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+		  ;;
+	        *)
+	          _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+		  ;;
+	      esac
+
+	      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+	      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+	      # Commands to make compiler produce verbose output that lists
+	      # what "hidden" libraries, object files and flags are used when
+	      # linking a shared library.
+	      output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+
+	    else
+	      # FIXME: insert proper C++ library support
+	      _LT_TAGVAR(ld_shlibs, $1)=no
+	    fi
+	    ;;
+        esac
+        ;;
+
+      psos*)
+        # FIXME: insert proper C++ library support
+        _LT_TAGVAR(ld_shlibs, $1)=no
+        ;;
+
+      sunos4*)
+        case $cc_basename in
+          CC*)
+	    # Sun C++ 4.x
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+          lcc*)
+	    # Lucid
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+          *)
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+        esac
+        ;;
+
+      solaris*)
+        case $cc_basename in
+          CC* | sunCC*)
+	    # Sun C++ 4.2, 5.x and Centerline C++
+            _LT_TAGVAR(archive_cmds_need_lc,$1)=yes
+	    _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs'
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+              $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+	    _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+	    case $host_os in
+	      solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+	      *)
+		# The compiler driver will combine and reorder linker options,
+		# but understands '-z linker_flag'.
+	        # Supported since Solaris 2.6 (maybe 2.5.1?)
+		_LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract'
+	        ;;
+	    esac
+	    _LT_TAGVAR(link_all_deplibs, $1)=yes
+
+	    output_verbose_link_cmd='func_echo_all'
+
+	    # Archives containing C++ object files must be created using
+	    # "CC -xar", where "CC" is the Sun C++ compiler.  This is
+	    # necessary to make sure instantiated templates are included
+	    # in the archive.
+	    _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs'
+	    ;;
+          gcx*)
+	    # Green Hills C++ Compiler
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
+
+	    # The C++ compiler must be used to create the archive.
+	    _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs'
+	    ;;
+          *)
+	    # GNU C++ compiler with Solaris linker
+	    if test yes,no = "$GXX,$with_gnu_ld"; then
+	      _LT_TAGVAR(no_undefined_flag, $1)=' $wl-z ${wl}defs'
+	      if $CC --version | $GREP -v '^2\.7' > /dev/null; then
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
+	        _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+                  $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+	        # Commands to make compiler produce verbose output that lists
+	        # what "hidden" libraries, object files and flags are used when
+	        # linking a shared library.
+	        output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+	      else
+	        # g++ 2.7 appears to require '-G' NOT '-shared' on this
+	        # platform.
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
+	        _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+                  $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+	        # Commands to make compiler produce verbose output that lists
+	        # what "hidden" libraries, object files and flags are used when
+	        # linking a shared library.
+	        output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+	      fi
+
+	      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $wl$libdir'
+	      case $host_os in
+		solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+		*)
+		  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract'
+		  ;;
+	      esac
+	    fi
+	    ;;
+        esac
+        ;;
+
+    sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*)
+      _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text'
+      _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      runpath_var='LD_RUN_PATH'
+
+      case $cc_basename in
+        CC*)
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	*)
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+      esac
+      ;;
+
+      sysv5* | sco3.2v5* | sco5v6*)
+	# Note: We CANNOT use -z defs as we might desire, because we do not
+	# link with -lc, and that would cause any symbols used from libc to
+	# always be unresolved, which means just about no library would
+	# ever link correctly.  If we're not using GNU ld we use -z text
+	# though, which does catch some bad symbols but isn't as heavy-handed
+	# as -z defs.
+	_LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text'
+	_LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs'
+	_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+	_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir'
+	_LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+	_LT_TAGVAR(link_all_deplibs, $1)=yes
+	_LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport'
+	runpath_var='LD_RUN_PATH'
+
+	case $cc_basename in
+          CC*)
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~
+              '"$_LT_TAGVAR(old_archive_cmds, $1)"
+	    _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~
+              '"$_LT_TAGVAR(reload_cmds, $1)"
+	    ;;
+	  *)
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    ;;
+	esac
+      ;;
+
+      tandem*)
+        case $cc_basename in
+          NCC*)
+	    # NonStop-UX NCC 3.20
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+          *)
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+        esac
+        ;;
+
+      vxworks*)
+        # FIXME: insert proper C++ library support
+        _LT_TAGVAR(ld_shlibs, $1)=no
+        ;;
+
+      *)
+        # FIXME: insert proper C++ library support
+        _LT_TAGVAR(ld_shlibs, $1)=no
+        ;;
+    esac
+
+    AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)])
+    test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no
+
+    _LT_TAGVAR(GCC, $1)=$GXX
+    _LT_TAGVAR(LD, $1)=$LD
+
+    ## CAVEAT EMPTOR:
+    ## There is no encapsulation within the following macros, do not change
+    ## the running order or otherwise move them around unless you know exactly
+    ## what you are doing...
+    _LT_SYS_HIDDEN_LIBDEPS($1)
+    _LT_COMPILER_PIC($1)
+    _LT_COMPILER_C_O($1)
+    _LT_COMPILER_FILE_LOCKS($1)
+    _LT_LINKER_SHLIBS($1)
+    _LT_SYS_DYNAMIC_LINKER($1)
+    _LT_LINKER_HARDCODE_LIBPATH($1)
+
+    _LT_CONFIG($1)
+  fi # test -n "$compiler"
+
+  CC=$lt_save_CC
+  CFLAGS=$lt_save_CFLAGS
+  LDCXX=$LD
+  LD=$lt_save_LD
+  GCC=$lt_save_GCC
+  with_gnu_ld=$lt_save_with_gnu_ld
+  lt_cv_path_LDCXX=$lt_cv_path_LD
+  lt_cv_path_LD=$lt_save_path_LD
+  lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld
+  lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld
+fi # test yes != "$_lt_caught_CXX_error"
+
+AC_LANG_POP
+])# _LT_LANG_CXX_CONFIG
+
+
+# _LT_FUNC_STRIPNAME_CNF
+# ----------------------
+# func_stripname_cnf prefix suffix name
+# strip PREFIX and SUFFIX off of NAME.
+# PREFIX and SUFFIX must not contain globbing or regex special
+# characters, hashes, or percent signs, but SUFFIX may contain a leading
+# dot (in which case that matches only a dot).
+#
+# This function is identical to the (non-XSI) version of func_stripname,
+# except this one can be used by m4 code that may be executed by configure,
+# rather than the libtool script.
+m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl
+AC_REQUIRE([_LT_DECL_SED])
+AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])
+func_stripname_cnf ()
+{
+  case @S|@2 in
+  .*) func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%\\\\@S|@2\$%%"`;;
+  *)  func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%@S|@2\$%%"`;;
+  esac
+} # func_stripname_cnf
+])# _LT_FUNC_STRIPNAME_CNF
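+
+# Illustrative only (not part of upstream libtool; hypothetical arguments):
+#   func_stripname_cnf 'lib' '.la' 'libfoo.la'    # func_stripname_result=foo
+#   func_stripname_cnf '-L'  ''    '-L/usr/lib'   # func_stripname_result=/usr/lib
+# i.e. the leading PREFIX and trailing SUFFIX are stripped from NAME.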
+
+
+# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME])
+# ---------------------------------
+# Figure out "hidden" library dependencies from verbose
+# compiler output when linking a shared library.
+# Parse the compiler output and extract the necessary
+# objects, libraries and library flags.
+m4_defun([_LT_SYS_HIDDEN_LIBDEPS],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl
+# Dependencies to place before and after the object being linked:
+_LT_TAGVAR(predep_objects, $1)=
+_LT_TAGVAR(postdep_objects, $1)=
+_LT_TAGVAR(predeps, $1)=
+_LT_TAGVAR(postdeps, $1)=
+_LT_TAGVAR(compiler_lib_search_path, $1)=
+
+dnl we can't use the lt_simple_compile_test_code here,
+dnl because it contains code intended for an executable,
+dnl not a library.  It's possible we should let each
+dnl tag define a new lt_????_link_test_code variable,
+dnl but it's only used here...
+m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF
+int a;
+void foo (void) { a = 0; }
+_LT_EOF
+], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF
+class Foo
+{
+public:
+  Foo (void) { a = 0; }
+private:
+  int a;
+};
+_LT_EOF
+], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF
+      subroutine foo
+      implicit none
+      integer*4 a
+      a=0
+      return
+      end
+_LT_EOF
+], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF
+      subroutine foo
+      implicit none
+      integer a
+      a=0
+      return
+      end
+_LT_EOF
+], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF
+public class foo {
+  private int a;
+  public void bar (void) {
+    a = 0;
+  }
+};
+_LT_EOF
+], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF
+package foo
+func foo() {
+}
+_LT_EOF
+])
+
+_lt_libdeps_save_CFLAGS=$CFLAGS
+case "$CC $CFLAGS " in #(
+*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;;
+*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;;
+*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;;
+esac
+
+dnl Parse the compiler output and extract the necessary
+dnl objects, libraries and library flags.
+if AC_TRY_EVAL(ac_compile); then
+  # Parse the compiler output and extract the necessary
+  # objects, libraries and library flags.
+
+  # Sentinel used to keep track of whether or not we are before
+  # the conftest object file.
+  pre_test_object_deps_done=no
+
+  for p in `eval "$output_verbose_link_cmd"`; do
+    case $prev$p in
+
+    -L* | -R* | -l*)
+       # Some compilers place a space between "-{L,R}" and the path.
+       # Remove the space.
+       if test x-L = "$p" ||
+          test x-R = "$p"; then
+	 prev=$p
+	 continue
+       fi
+
+       # Expand the sysroot to ease extracting the directories later.
+       if test -z "$prev"; then
+         case $p in
+         -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;;
+         -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;;
+         -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;;
+         esac
+       fi
+       case $p in
+       =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;;
+       esac
+       if test no = "$pre_test_object_deps_done"; then
+	 case $prev in
+	 -L | -R)
+	   # Internal compiler library paths should come after those
+	   # provided by the user.  The postdeps already come after the
+	   # user supplied libs so there is no need to process them.
+	   if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then
+	     _LT_TAGVAR(compiler_lib_search_path, $1)=$prev$p
+	   else
+	     _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} $prev$p"
+	   fi
+	   ;;
+	 # The "-l" case would never come before the object being
+	 # linked, so don't bother handling this case.
+	 esac
+       else
+	 if test -z "$_LT_TAGVAR(postdeps, $1)"; then
+	   _LT_TAGVAR(postdeps, $1)=$prev$p
+	 else
+	   _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} $prev$p"
+	 fi
+       fi
+       prev=
+       ;;
+
+    *.lto.$objext) ;; # Ignore GCC LTO objects
+    *.$objext)
+       # This assumes that the test object file only shows up
+       # once in the compiler output.
+       if test "$p" = "conftest.$objext"; then
+	 pre_test_object_deps_done=yes
+	 continue
+       fi
+
+       if test no = "$pre_test_object_deps_done"; then
+	 if test -z "$_LT_TAGVAR(predep_objects, $1)"; then
+	   _LT_TAGVAR(predep_objects, $1)=$p
+	 else
+	   _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p"
+	 fi
+       else
+	 if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then
+	   _LT_TAGVAR(postdep_objects, $1)=$p
+	 else
+	   _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p"
+	 fi
+       fi
+       ;;
+
+    *) ;; # Ignore the rest.
+
+    esac
+  done
+
+  # Clean up.
+  rm -f a.out a.exe
+else
+  echo "libtool.m4: error: problem compiling $1 test program"
+fi
+
+$RM -f conftest.$objext
+CFLAGS=$_lt_libdeps_save_CFLAGS
+
+# PORTME: override above test on systems where it is broken
+m4_if([$1], [CXX],
+[case $host_os in
+interix[[3-9]]*)
+  # Interix 3.5 installs completely hosed .la files for C++, so rather than
+  # hack all around it, let's just trust "g++" to DTRT.
+  _LT_TAGVAR(predep_objects,$1)=
+  _LT_TAGVAR(postdep_objects,$1)=
+  _LT_TAGVAR(postdeps,$1)=
+  ;;
+esac
+])
+
+case " $_LT_TAGVAR(postdeps, $1) " in
+*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;;
+esac
+ _LT_TAGVAR(compiler_lib_search_dirs, $1)=
+if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then
+ _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | $SED -e 's! -L! !g' -e 's!^ !!'`
+fi
+_LT_TAGDECL([], [compiler_lib_search_dirs], [1],
+    [The directories searched by this compiler when creating a shared library])
+_LT_TAGDECL([], [predep_objects], [1],
+    [Dependencies to place before and after the objects being linked to
+    create a shared library])
+_LT_TAGDECL([], [postdep_objects], [1])
+_LT_TAGDECL([], [predeps], [1])
+_LT_TAGDECL([], [postdeps], [1])
+_LT_TAGDECL([], [compiler_lib_search_path], [1],
+    [The library search path used internally by the compiler when linking
+    a shared library])
+])# _LT_SYS_HIDDEN_LIBDEPS
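+
+# Illustrative only: a hypothetical verbose C++ link line such as
+#   .../collect2 ... crtbegin.o conftest.o -L/usr/lib/gcc -lstdc++ -lm crtend.o
+# would be parsed by the loop above as: crtbegin.o -> predep_objects,
+# conftest.o flips the sentinel, then -L/usr/lib/gcc, -lstdc++ and -lm
+# land in postdeps and crtend.o in postdep_objects.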
+
+
+# _LT_LANG_F77_CONFIG([TAG])
+# --------------------------
+# Ensure that the configuration variables for a Fortran 77 compiler are
+# suitably defined.  These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to 'libtool'.
+m4_defun([_LT_LANG_F77_CONFIG],
+[AC_LANG_PUSH(Fortran 77)
+if test -z "$F77" || test no = "$F77"; then
+  _lt_disable_F77=yes
+fi
+
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+_LT_TAGVAR(allow_undefined_flag, $1)=
+_LT_TAGVAR(always_export_symbols, $1)=no
+_LT_TAGVAR(archive_expsym_cmds, $1)=
+_LT_TAGVAR(export_dynamic_flag_spec, $1)=
+_LT_TAGVAR(hardcode_direct, $1)=no
+_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+_LT_TAGVAR(hardcode_libdir_separator, $1)=
+_LT_TAGVAR(hardcode_minus_L, $1)=no
+_LT_TAGVAR(hardcode_automatic, $1)=no
+_LT_TAGVAR(inherit_rpath, $1)=no
+_LT_TAGVAR(module_cmds, $1)=
+_LT_TAGVAR(module_expsym_cmds, $1)=
+_LT_TAGVAR(link_all_deplibs, $1)=unknown
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+_LT_TAGVAR(no_undefined_flag, $1)=
+_LT_TAGVAR(whole_archive_flag_spec, $1)=
+_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+
+# Source file extension for f77 test sources.
+ac_ext=f
+
+# Object file extension for compiled f77 test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# No sense in running all these tests if we already determined that
+# the F77 compiler isn't working.  Some variables (like enable_shared)
+# are currently assumed to apply to all compilers on this platform,
+# and will be corrupted by setting them based on a non-working compiler.
+if test yes != "$_lt_disable_F77"; then
+  # Code to be used in simple compile tests
+  lt_simple_compile_test_code="\
+      subroutine t
+      return
+      end
+"
+
+  # Code to be used in simple link tests
+  lt_simple_link_test_code="\
+      program t
+      end
+"
+
+  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+  _LT_TAG_COMPILER
+
+  # save warnings/boilerplate of simple test code
+  _LT_COMPILER_BOILERPLATE
+  _LT_LINKER_BOILERPLATE
+
+  # Allow CC to be a program name with arguments.
+  lt_save_CC=$CC
+  lt_save_GCC=$GCC
+  lt_save_CFLAGS=$CFLAGS
+  CC=${F77-"f77"}
+  CFLAGS=$FFLAGS
+  compiler=$CC
+  _LT_TAGVAR(compiler, $1)=$CC
+  _LT_CC_BASENAME([$compiler])
+  GCC=$G77
+  if test -n "$compiler"; then
+    AC_MSG_CHECKING([if libtool supports shared libraries])
+    AC_MSG_RESULT([$can_build_shared])
+
+    AC_MSG_CHECKING([whether to build shared libraries])
+    test no = "$can_build_shared" && enable_shared=no
+
+    # On AIX, shared libraries and static libraries use the same namespace, and
+    # are all built from PIC.
+    case $host_os in
+      aix3*)
+        test yes = "$enable_shared" && enable_static=no
+        if test -n "$RANLIB"; then
+          archive_cmds="$archive_cmds~\$RANLIB \$lib"
+          postinstall_cmds='$RANLIB $lib'
+        fi
+        ;;
+      aix[[4-9]]*)
+	if test ia64 != "$host_cpu"; then
+	  case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in
+	  yes,aix,yes) ;;		# shared object as lib.so file only
+	  yes,svr4,*) ;;		# shared object as lib.so archive member only
+	  yes,*) enable_static=no ;;	# shared object in lib.a archive as well
+	  esac
+	fi
+        ;;
+    esac
+    AC_MSG_RESULT([$enable_shared])
+
+    AC_MSG_CHECKING([whether to build static libraries])
+    # Make sure either enable_shared or enable_static is yes.
+    test yes = "$enable_shared" || enable_static=yes
+    AC_MSG_RESULT([$enable_static])
+
+    _LT_TAGVAR(GCC, $1)=$G77
+    _LT_TAGVAR(LD, $1)=$LD
+
+    ## CAVEAT EMPTOR:
+    ## There is no encapsulation within the following macros, do not change
+    ## the running order or otherwise move them around unless you know exactly
+    ## what you are doing...
+    _LT_COMPILER_PIC($1)
+    _LT_COMPILER_C_O($1)
+    _LT_COMPILER_FILE_LOCKS($1)
+    _LT_LINKER_SHLIBS($1)
+    _LT_SYS_DYNAMIC_LINKER($1)
+    _LT_LINKER_HARDCODE_LIBPATH($1)
+
+    _LT_CONFIG($1)
+  fi # test -n "$compiler"
+
+  GCC=$lt_save_GCC
+  CC=$lt_save_CC
+  CFLAGS=$lt_save_CFLAGS
+fi # test yes != "$_lt_disable_F77"
+
+AC_LANG_POP
+])# _LT_LANG_F77_CONFIG
+
+
+# _LT_LANG_FC_CONFIG([TAG])
+# -------------------------
+# Ensure that the configuration variables for a Fortran compiler are
+# suitably defined.  These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to 'libtool'.
+m4_defun([_LT_LANG_FC_CONFIG],
+[AC_LANG_PUSH(Fortran)
+
+if test -z "$FC" || test no = "$FC"; then
+  _lt_disable_FC=yes
+fi
+
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+_LT_TAGVAR(allow_undefined_flag, $1)=
+_LT_TAGVAR(always_export_symbols, $1)=no
+_LT_TAGVAR(archive_expsym_cmds, $1)=
+_LT_TAGVAR(export_dynamic_flag_spec, $1)=
+_LT_TAGVAR(hardcode_direct, $1)=no
+_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+_LT_TAGVAR(hardcode_libdir_separator, $1)=
+_LT_TAGVAR(hardcode_minus_L, $1)=no
+_LT_TAGVAR(hardcode_automatic, $1)=no
+_LT_TAGVAR(inherit_rpath, $1)=no
+_LT_TAGVAR(module_cmds, $1)=
+_LT_TAGVAR(module_expsym_cmds, $1)=
+_LT_TAGVAR(link_all_deplibs, $1)=unknown
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+_LT_TAGVAR(no_undefined_flag, $1)=
+_LT_TAGVAR(whole_archive_flag_spec, $1)=
+_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+
+# Source file extension for fc test sources.
+ac_ext=${ac_fc_srcext-f}
+
+# Object file extension for compiled fc test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# No sense in running all these tests if we already determined that
+# the FC compiler isn't working.  Some variables (like enable_shared)
+# are currently assumed to apply to all compilers on this platform,
+# and will be corrupted by setting them based on a non-working compiler.
+if test yes != "$_lt_disable_FC"; then
+  # Code to be used in simple compile tests
+  lt_simple_compile_test_code="\
+      subroutine t
+      return
+      end
+"
+
+  # Code to be used in simple link tests
+  lt_simple_link_test_code="\
+      program t
+      end
+"
+
+  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+  _LT_TAG_COMPILER
+
+  # save warnings/boilerplate of simple test code
+  _LT_COMPILER_BOILERPLATE
+  _LT_LINKER_BOILERPLATE
+
+  # Allow CC to be a program name with arguments.
+  lt_save_CC=$CC
+  lt_save_GCC=$GCC
+  lt_save_CFLAGS=$CFLAGS
+  CC=${FC-"f95"}
+  CFLAGS=$FCFLAGS
+  compiler=$CC
+  GCC=$ac_cv_fc_compiler_gnu
+
+  _LT_TAGVAR(compiler, $1)=$CC
+  _LT_CC_BASENAME([$compiler])
+
+  if test -n "$compiler"; then
+    AC_MSG_CHECKING([if libtool supports shared libraries])
+    AC_MSG_RESULT([$can_build_shared])
+
+    AC_MSG_CHECKING([whether to build shared libraries])
+    test no = "$can_build_shared" && enable_shared=no
+
+    # On AIX, shared libraries and static libraries use the same namespace, and
+    # are all built from PIC.
+    case $host_os in
+      aix3*)
+        test yes = "$enable_shared" && enable_static=no
+        if test -n "$RANLIB"; then
+          archive_cmds="$archive_cmds~\$RANLIB \$lib"
+          postinstall_cmds='$RANLIB $lib'
+        fi
+        ;;
+      aix[[4-9]]*)
+	if test ia64 != "$host_cpu"; then
+	  case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in
+	  yes,aix,yes) ;;		# shared object as lib.so file only
+	  yes,svr4,*) ;;		# shared object as lib.so archive member only
+	  yes,*) enable_static=no ;;	# shared object in lib.a archive as well
+	  esac
+	fi
+        ;;
+    esac
+    AC_MSG_RESULT([$enable_shared])
+
+    AC_MSG_CHECKING([whether to build static libraries])
+    # Make sure either enable_shared or enable_static is yes.
+    test yes = "$enable_shared" || enable_static=yes
+    AC_MSG_RESULT([$enable_static])
+
+    _LT_TAGVAR(GCC, $1)=$ac_cv_fc_compiler_gnu
+    _LT_TAGVAR(LD, $1)=$LD
+
+    ## CAVEAT EMPTOR:
+    ## There is no encapsulation within the following macros, do not change
+    ## the running order or otherwise move them around unless you know exactly
+    ## what you are doing...
+    _LT_SYS_HIDDEN_LIBDEPS($1)
+    _LT_COMPILER_PIC($1)
+    _LT_COMPILER_C_O($1)
+    _LT_COMPILER_FILE_LOCKS($1)
+    _LT_LINKER_SHLIBS($1)
+    _LT_SYS_DYNAMIC_LINKER($1)
+    _LT_LINKER_HARDCODE_LIBPATH($1)
+
+    _LT_CONFIG($1)
+  fi # test -n "$compiler"
+
+  GCC=$lt_save_GCC
+  CC=$lt_save_CC
+  CFLAGS=$lt_save_CFLAGS
+fi # test yes != "$_lt_disable_FC"
+
+AC_LANG_POP
+])# _LT_LANG_FC_CONFIG
+
+
+# _LT_LANG_GCJ_CONFIG([TAG])
+# --------------------------
+# Ensure that the configuration variables for the GNU Java Compiler
+# are suitably defined.  These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to 'libtool'.
+m4_defun([_LT_LANG_GCJ_CONFIG],
+[AC_REQUIRE([LT_PROG_GCJ])dnl
+AC_LANG_SAVE
+
+# Source file extension for Java test sources.
+ac_ext=java
+
+# Object file extension for compiled Java test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="class foo {}"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }'
+
+# ltmain only uses $CC for tagged configurations so make sure $CC is set.
+_LT_TAG_COMPILER
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+# Allow CC to be a program name with arguments.
+lt_save_CC=$CC
+lt_save_CFLAGS=$CFLAGS
+lt_save_GCC=$GCC
+GCC=yes
+CC=${GCJ-"gcj"}
+CFLAGS=$GCJFLAGS
+compiler=$CC
+_LT_TAGVAR(compiler, $1)=$CC
+_LT_TAGVAR(LD, $1)=$LD
+_LT_CC_BASENAME([$compiler])
+
+# GCJ postdates the GCC releases that did not implicitly link in libc.
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+
+if test -n "$compiler"; then
+  _LT_COMPILER_NO_RTTI($1)
+  _LT_COMPILER_PIC($1)
+  _LT_COMPILER_C_O($1)
+  _LT_COMPILER_FILE_LOCKS($1)
+  _LT_LINKER_SHLIBS($1)
+  _LT_LINKER_HARDCODE_LIBPATH($1)
+
+  _LT_CONFIG($1)
+fi
+
+AC_LANG_RESTORE
+
+GCC=$lt_save_GCC
+CC=$lt_save_CC
+CFLAGS=$lt_save_CFLAGS
+])# _LT_LANG_GCJ_CONFIG
+
+
+# _LT_LANG_GO_CONFIG([TAG])
+# --------------------------
+# Ensure that the configuration variables for the GNU Go compiler
+# are suitably defined.  These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to 'libtool'.
+m4_defun([_LT_LANG_GO_CONFIG],
+[AC_REQUIRE([LT_PROG_GO])dnl
+AC_LANG_SAVE
+
+# Source file extension for Go test sources.
+ac_ext=go
+
+# Object file extension for compiled Go test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="package main; func main() { }"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='package main; func main() { }'
+
+# ltmain only uses $CC for tagged configurations so make sure $CC is set.
+_LT_TAG_COMPILER
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+# Allow CC to be a program name with arguments.
+lt_save_CC=$CC
+lt_save_CFLAGS=$CFLAGS
+lt_save_GCC=$GCC
+GCC=yes
+CC=${GOC-"gccgo"}
+CFLAGS=$GOFLAGS
+compiler=$CC
+_LT_TAGVAR(compiler, $1)=$CC
+_LT_TAGVAR(LD, $1)=$LD
+_LT_CC_BASENAME([$compiler])
+
+# Go support postdates the GCC releases that did not implicitly link in libc.
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+
+if test -n "$compiler"; then
+  _LT_COMPILER_NO_RTTI($1)
+  _LT_COMPILER_PIC($1)
+  _LT_COMPILER_C_O($1)
+  _LT_COMPILER_FILE_LOCKS($1)
+  _LT_LINKER_SHLIBS($1)
+  _LT_LINKER_HARDCODE_LIBPATH($1)
+
+  _LT_CONFIG($1)
+fi
+
+AC_LANG_RESTORE
+
+GCC=$lt_save_GCC
+CC=$lt_save_CC
+CFLAGS=$lt_save_CFLAGS
+])# _LT_LANG_GO_CONFIG
+
+
+# _LT_LANG_RC_CONFIG([TAG])
+# -------------------------
+# Ensure that the configuration variables for the Windows resource compiler
+# are suitably defined.  These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to 'libtool'.
+m4_defun([_LT_LANG_RC_CONFIG],
+[AC_REQUIRE([LT_PROG_RC])dnl
+AC_LANG_SAVE
+
+# Source file extension for RC test sources.
+ac_ext=rc
+
+# Object file extension for compiled RC test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }'
+
+# Code to be used in simple link tests
+lt_simple_link_test_code=$lt_simple_compile_test_code
+
+# ltmain only uses $CC for tagged configurations so make sure $CC is set.
+_LT_TAG_COMPILER
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+# Allow CC to be a program name with arguments.
+lt_save_CC=$CC
+lt_save_CFLAGS=$CFLAGS
+lt_save_GCC=$GCC
+GCC=
+CC=${RC-"windres"}
+CFLAGS=
+compiler=$CC
+_LT_TAGVAR(compiler, $1)=$CC
+_LT_CC_BASENAME([$compiler])
+_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes
+
+if test -n "$compiler"; then
+  :
+  _LT_CONFIG($1)
+fi
+
+GCC=$lt_save_GCC
+AC_LANG_RESTORE
+CC=$lt_save_CC
+CFLAGS=$lt_save_CFLAGS
+])# _LT_LANG_RC_CONFIG
+
+
+# LT_PROG_GCJ
+# -----------
+AC_DEFUN([LT_PROG_GCJ],
+[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ],
+  [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ],
+    [AC_CHECK_TOOL(GCJ, gcj,)
+      test set = "${GCJFLAGS+set}" || GCJFLAGS="-g -O2"
+      AC_SUBST(GCJFLAGS)])])[]dnl
+])
+
+# Old name:
+AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([LT_AC_PROG_GCJ], [])
+
+
+# LT_PROG_GO
+# ----------
+AC_DEFUN([LT_PROG_GO],
+[AC_CHECK_TOOL(GOC, gccgo,)
+])
+
+
+# LT_PROG_RC
+# ----------
+AC_DEFUN([LT_PROG_RC],
+[AC_CHECK_TOOL(RC, windres,)
+])
+
+# Old name:
+AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([LT_AC_PROG_RC], [])
+
+
+# _LT_DECL_EGREP
+# --------------
+# If we don't have a new enough Autoconf to choose the best grep
+# available, choose the first one in the user's PATH.
+m4_defun([_LT_DECL_EGREP],
+[AC_REQUIRE([AC_PROG_EGREP])dnl
+AC_REQUIRE([AC_PROG_FGREP])dnl
+test -z "$GREP" && GREP=grep
+_LT_DECL([], [GREP], [1], [A grep program that handles long lines])
+_LT_DECL([], [EGREP], [1], [An ERE matcher])
+_LT_DECL([], [FGREP], [1], [A literal string matcher])
+dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too
+AC_SUBST([GREP])
+])
+
+
+# _LT_DECL_OBJDUMP
+# ----------------
+# If we don't have a new enough Autoconf to choose the best objdump
+# available, choose the first one in the user's PATH.
+m4_defun([_LT_DECL_OBJDUMP],
+[AC_CHECK_TOOL(OBJDUMP, objdump, false)
+test -z "$OBJDUMP" && OBJDUMP=objdump
+_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper])
+AC_SUBST([OBJDUMP])
+])
+
+# _LT_DECL_DLLTOOL
+# ----------------
+# Ensure DLLTOOL variable is set.
+m4_defun([_LT_DECL_DLLTOOL],
+[AC_CHECK_TOOL(DLLTOOL, dlltool, false)
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+_LT_DECL([], [DLLTOOL], [1], [DLL creation program])
+AC_SUBST([DLLTOOL])
+])
+
+# _LT_DECL_SED
+# ------------
+# Check for a fully-functional sed program, that truncates
+# as few characters as possible.  Prefer GNU sed if found.
+m4_defun([_LT_DECL_SED],
+[AC_PROG_SED
+test -z "$SED" && SED=sed
+Xsed="$SED -e 1s/^X//"
+_LT_DECL([], [SED], [1], [A sed program that does not truncate output])
+_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"],
+    [Sed that helps us avoid accidentally triggering echo(1) options like -n])
+])# _LT_DECL_SED
+
+m4_ifndef([AC_PROG_SED], [
+# NOTE: This macro has been submitted for inclusion into   #
+#  GNU Autoconf as AC_PROG_SED.  When it is available in   #
+#  a released version of Autoconf we should remove this    #
+#  macro and use it instead.                               #
+
+m4_defun([AC_PROG_SED],
+[AC_MSG_CHECKING([for a sed that does not truncate output])
+AC_CACHE_VAL(lt_cv_path_SED,
+[# Loop through the user's path and test for sed and gsed.
+# Then use that list of sed's as ones to test for truncation.
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+  for lt_ac_prog in sed gsed; do
+    for ac_exec_ext in '' $ac_executable_extensions; do
+      if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then
+        lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext"
+      fi
+    done
+  done
+done
+IFS=$as_save_IFS
+lt_ac_max=0
+lt_ac_count=0
+# Add /usr/xpg4/bin/sed as it is typically found on Solaris
+# along with /bin/sed that truncates output.
+for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do
+  test ! -f "$lt_ac_sed" && continue
+  cat /dev/null > conftest.in
+  lt_ac_count=0
+  echo $ECHO_N "0123456789$ECHO_C" >conftest.in
+  # Check for GNU sed and select it if it is found.
+  if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then
+    lt_cv_path_SED=$lt_ac_sed
+    break
+  fi
+  while true; do
+    cat conftest.in conftest.in >conftest.tmp
+    mv conftest.tmp conftest.in
+    cp conftest.in conftest.nl
+    echo >>conftest.nl
+    $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break
+    cmp -s conftest.out conftest.nl || break
+    # 10000 chars as input seems more than enough
+    test 10 -lt "$lt_ac_count" && break
+    lt_ac_count=`expr $lt_ac_count + 1`
+    if test "$lt_ac_count" -gt "$lt_ac_max"; then
+      lt_ac_max=$lt_ac_count
+      lt_cv_path_SED=$lt_ac_sed
+    fi
+  done
+done
+])
+SED=$lt_cv_path_SED
+AC_SUBST([SED])
+AC_MSG_RESULT([$SED])
+])#AC_PROG_SED
+])#m4_ifndef
+
+# Old name:
+AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([LT_AC_PROG_SED], [])
+
+
+# _LT_CHECK_SHELL_FEATURES
+# ------------------------
+# Find out whether the shell is Bourne or XSI compatible,
+# or has some other useful features.
+m4_defun([_LT_CHECK_SHELL_FEATURES],
+[if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+  lt_unset=unset
+else
+  lt_unset=false
+fi
+_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl
+
+# test EBCDIC or ASCII
+case `echo X|tr X '\101'` in
+ A) # ASCII based system
+    # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr
+  lt_SP2NL='tr \040 \012'
+  lt_NL2SP='tr \015\012 \040\040'
+  ;;
+ *) # EBCDIC based system
+  lt_SP2NL='tr \100 \n'
+  lt_NL2SP='tr \r\n \100\100'
+  ;;
+esac
+_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl
+_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl
+])# _LT_CHECK_SHELL_FEATURES
+
+
+# _LT_PATH_CONVERSION_FUNCTIONS
+# -----------------------------
+# Determine what file name conversion functions should be used by
+# func_to_host_file (and, implicitly, by func_to_host_path).  These are needed
+# for certain cross-compile configurations and native mingw.
+m4_defun([_LT_PATH_CONVERSION_FUNCTIONS],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+AC_MSG_CHECKING([how to convert $build file names to $host format])
+AC_CACHE_VAL(lt_cv_to_host_file_cmd,
+[case $host in
+  *-*-mingw* )
+    case $build in
+      *-*-mingw* ) # actually msys
+        lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32
+        ;;
+      *-*-cygwin* )
+        lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32
+        ;;
+      * ) # otherwise, assume *nix
+        lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32
+        ;;
+    esac
+    ;;
+  *-*-cygwin* )
+    case $build in
+      *-*-mingw* ) # actually msys
+        lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin
+        ;;
+      *-*-cygwin* )
+        lt_cv_to_host_file_cmd=func_convert_file_noop
+        ;;
+      * ) # otherwise, assume *nix
+        lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin
+        ;;
+    esac
+    ;;
+  * ) # unhandled hosts (and "normal" native builds)
+    lt_cv_to_host_file_cmd=func_convert_file_noop
+    ;;
+esac
+])
+to_host_file_cmd=$lt_cv_to_host_file_cmd
+AC_MSG_RESULT([$lt_cv_to_host_file_cmd])
+_LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd],
+         [0], [convert $build file names to $host format])dnl
+
+AC_MSG_CHECKING([how to convert $build file names to toolchain format])
+AC_CACHE_VAL(lt_cv_to_tool_file_cmd,
+[#assume ordinary cross tools, or native build.
+lt_cv_to_tool_file_cmd=func_convert_file_noop
+case $host in
+  *-*-mingw* )
+    case $build in
+      *-*-mingw* ) # actually msys
+        lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32
+        ;;
+    esac
+    ;;
+esac
+])
+to_tool_file_cmd=$lt_cv_to_tool_file_cmd
+AC_MSG_RESULT([$lt_cv_to_tool_file_cmd])
+_LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd],
+         [0], [convert $build files to toolchain format])dnl
+])# _LT_PATH_CONVERSION_FUNCTIONS
+
+# Helper functions for option handling.                    -*- Autoconf -*-
+#
+#   Copyright (C) 2004-2005, 2007-2009, 2011-2015 Free Software
+#   Foundation, Inc.
+#   Written by Gary V. Vaughan, 2004
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# serial 8 ltoptions.m4
+
+# This is to help aclocal find these macros, as it can't see m4_define.
+AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])])
+
+
+# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME)
+# ------------------------------------------
+m4_define([_LT_MANGLE_OPTION],
+[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])])
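+
+# For example (illustrative only), _LT_MANGLE_OPTION([LT_INIT], [win32-dll])
+# expands to the flag name _LT_OPTION_LT_INIT__win32_dll.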
+
+
+# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME)
+# ---------------------------------------
+# Set option OPTION-NAME for macro MACRO-NAME, and if there is a
+# matching handler defined, dispatch to it.  Other OPTION-NAMEs are
+# saved as a flag.
+m4_define([_LT_SET_OPTION],
+[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl
+m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]),
+        _LT_MANGLE_DEFUN([$1], [$2]),
+    [m4_warning([Unknown $1 option '$2'])])[]dnl
+])
+
+
+# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET])
+# ------------------------------------------------------------
+# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise.
+m4_define([_LT_IF_OPTION],
+[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])])
+
+
+# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET)
+# -------------------------------------------------------
+# Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME
+# are set.
+m4_define([_LT_UNLESS_OPTIONS],
+[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])),
+	    [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option),
+		      [m4_define([$0_found])])])[]dnl
+m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3
+])[]dnl
+])
+
+
+# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST)
+# ----------------------------------------
+# OPTION-LIST is a space-separated list of Libtool options associated
+# with MACRO-NAME.  If any OPTION has a matching handler declared with
+# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about
+# the unknown option and exit.
+m4_defun([_LT_SET_OPTIONS],
+[# Set options
+m4_foreach([_LT_Option], m4_split(m4_normalize([$2])),
+    [_LT_SET_OPTION([$1], _LT_Option)])
+
+m4_if([$1],[LT_INIT],[
+  dnl
+  dnl Simply set some default values (i.e off) if boolean options were not
+  dnl specified:
+  _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no
+  ])
+  _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no
+  ])
+  dnl
+  dnl If no reference was made to various pairs of opposing options, then
+  dnl we run the default mode handler for the pair.  For example, if neither
+  dnl 'shared' nor 'disable-shared' was passed, we enable building of shared
+  dnl archives by default:
+  _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED])
+  _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC])
+  _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC])
+  _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install],
+		   [_LT_ENABLE_FAST_INSTALL])
+  _LT_UNLESS_OPTIONS([LT_INIT], [aix-soname=aix aix-soname=both aix-soname=svr4],
+		   [_LT_WITH_AIX_SONAME([aix])])
+  ])
+])# _LT_SET_OPTIONS
+
+
+
+# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME)
+# -----------------------------------------
+m4_define([_LT_MANGLE_DEFUN],
+[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])])
+
+
+# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE)
+# -----------------------------------------------
+m4_define([LT_OPTION_DEFINE],
+[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl
+])# LT_OPTION_DEFINE
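+
+# Typical use from a package's configure.ac (illustrative only):
+#   LT_INIT([dlopen win32-dll])
+# Each listed option is dispatched through _LT_SET_OPTION to a handler
+# registered with LT_OPTION_DEFINE, such as the 'dlopen' handler below.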
+
+
+# dlopen
+# ------
+LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes
+])
+
+AU_DEFUN([AC_LIBTOOL_DLOPEN],
+[_LT_SET_OPTION([LT_INIT], [dlopen])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you
+put the 'dlopen' option into LT_INIT's first parameter.])
+])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], [])
+
+
+# win32-dll
+# ---------
+# Declare package support for building win32 dll's.
+LT_OPTION_DEFINE([LT_INIT], [win32-dll],
+[enable_win32_dll=yes
+
+case $host in
+*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*)
+  AC_CHECK_TOOL(AS, as, false)
+  AC_CHECK_TOOL(DLLTOOL, dlltool, false)
+  AC_CHECK_TOOL(OBJDUMP, objdump, false)
+  ;;
+esac
+
+test -z "$AS" && AS=as
+_LT_DECL([], [AS],      [1], [Assembler program])dnl
+
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+_LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl
+
+test -z "$OBJDUMP" && OBJDUMP=objdump
+_LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl
+])# win32-dll
+
+AU_DEFUN([AC_LIBTOOL_WIN32_DLL],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+_LT_SET_OPTION([LT_INIT], [win32-dll])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you
+put the 'win32-dll' option into LT_INIT's first parameter.])
+])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], [])
+
+
+# _LT_ENABLE_SHARED([DEFAULT])
+# ----------------------------
+# implement the --enable-shared flag, and support the 'shared' and
+# 'disable-shared' LT_INIT options.
+# DEFAULT is either 'yes' or 'no'.  If omitted, it defaults to 'yes'.
+m4_define([_LT_ENABLE_SHARED],
+[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl
+AC_ARG_ENABLE([shared],
+    [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@],
+	[build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])],
+    [p=${PACKAGE-default}
+    case $enableval in
+    yes) enable_shared=yes ;;
+    no) enable_shared=no ;;
+    *)
+      enable_shared=no
+      # Look at the argument we got.  We use all the common list separators.
+      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
+      for pkg in $enableval; do
+	IFS=$lt_save_ifs
+	if test "X$pkg" = "X$p"; then
+	  enable_shared=yes
+	fi
+      done
+      IFS=$lt_save_ifs
+      ;;
+    esac],
+    [enable_shared=]_LT_ENABLE_SHARED_DEFAULT)
+
+    _LT_DECL([build_libtool_libs], [enable_shared], [0],
+	[Whether or not to build shared libraries])
+])# _LT_ENABLE_SHARED
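+
+# Illustrative only (hypothetical package names):
+#   ./configure --disable-shared          # enable_shared=no
+#   ./configure --enable-shared=foo,bar   # shared libraries only when
+#                                         # $PACKAGE is "foo" or "bar"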
+
+LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])])
+LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])])
+
+# Old names:
+AC_DEFUN([AC_ENABLE_SHARED],
+[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared])
+])
+
+AC_DEFUN([AC_DISABLE_SHARED],
+[_LT_SET_OPTION([LT_INIT], [disable-shared])
+])
+
+AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)])
+AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_ENABLE_SHARED], [])
+dnl AC_DEFUN([AM_DISABLE_SHARED], [])
+
+
+
+# _LT_ENABLE_STATIC([DEFAULT])
+# ----------------------------
+# implement the --enable-static flag, and support the 'static' and
+# 'disable-static' LT_INIT options.
+# DEFAULT is either 'yes' or 'no'.  If omitted, it defaults to 'yes'.
+m4_define([_LT_ENABLE_STATIC],
+[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl
+AC_ARG_ENABLE([static],
+    [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@],
+	[build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])],
+    [p=${PACKAGE-default}
+    case $enableval in
+    yes) enable_static=yes ;;
+    no) enable_static=no ;;
+    *)
+     enable_static=no
+      # Look at the argument we got.  We use all the common list separators.
+      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
+      for pkg in $enableval; do
+	IFS=$lt_save_ifs
+	if test "X$pkg" = "X$p"; then
+	  enable_static=yes
+	fi
+      done
+      IFS=$lt_save_ifs
+      ;;
+    esac],
+    [enable_static=]_LT_ENABLE_STATIC_DEFAULT)
+
+    _LT_DECL([build_old_libs], [enable_static], [0],
+	[Whether or not to build static libraries])
+])# _LT_ENABLE_STATIC
+
+LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])])
+LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])])
+
+# Old names:
+AC_DEFUN([AC_ENABLE_STATIC],
+[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static])
+])
+
+AC_DEFUN([AC_DISABLE_STATIC],
+[_LT_SET_OPTION([LT_INIT], [disable-static])
+])
+
+AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)])
+AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_ENABLE_STATIC], [])
+dnl AC_DEFUN([AM_DISABLE_STATIC], [])
+
+
+
+# _LT_ENABLE_FAST_INSTALL([DEFAULT])
+# ----------------------------------
+# implement the --enable-fast-install flag, and support the 'fast-install'
+# and 'disable-fast-install' LT_INIT options.
+# DEFAULT is either 'yes' or 'no'.  If omitted, it defaults to 'yes'.
+m4_define([_LT_ENABLE_FAST_INSTALL],
+[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl
+AC_ARG_ENABLE([fast-install],
+    [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@],
+    [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])],
+    [p=${PACKAGE-default}
+    case $enableval in
+    yes) enable_fast_install=yes ;;
+    no) enable_fast_install=no ;;
+    *)
+      enable_fast_install=no
+      # Look at the argument we got.  We use all the common list separators.
+      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
+      for pkg in $enableval; do
+	IFS=$lt_save_ifs
+	if test "X$pkg" = "X$p"; then
+	  enable_fast_install=yes
+	fi
+      done
+      IFS=$lt_save_ifs
+      ;;
+    esac],
+    [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT)
+
+_LT_DECL([fast_install], [enable_fast_install], [0],
+	 [Whether or not to optimize for fast installation])dnl
+])# _LT_ENABLE_FAST_INSTALL
+
+LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])])
+LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])])
+
+# Old names:
+AU_DEFUN([AC_ENABLE_FAST_INSTALL],
+[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you put
+the 'fast-install' option into LT_INIT's first parameter.])
+])
+
+AU_DEFUN([AC_DISABLE_FAST_INSTALL],
+[_LT_SET_OPTION([LT_INIT], [disable-fast-install])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you put
+the 'disable-fast-install' option into LT_INIT's first parameter.])
+])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], [])
+dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], [])
+
+
+# _LT_WITH_AIX_SONAME([DEFAULT])
+# ----------------------------------
+# implement the --with-aix-soname flag, and support the 'aix-soname=aix',
+# 'aix-soname=both' and 'aix-soname=svr4' LT_INIT options. DEFAULT
+# is either 'aix', 'both' or 'svr4'.  If omitted, it defaults to 'aix'.
+m4_define([_LT_WITH_AIX_SONAME],
+[m4_define([_LT_WITH_AIX_SONAME_DEFAULT], [m4_if($1, svr4, svr4, m4_if($1, both, both, aix))])dnl
+shared_archive_member_spec=
+case $host,$enable_shared in
+power*-*-aix[[5-9]]*,yes)
+  AC_MSG_CHECKING([which variant of shared library versioning to provide])
+  AC_ARG_WITH([aix-soname],
+    [AS_HELP_STRING([--with-aix-soname=aix|svr4|both],
+      [shared library versioning (aka "SONAME") variant to provide on AIX, @<:@default=]_LT_WITH_AIX_SONAME_DEFAULT[@:>@.])],
+    [case $withval in
+    aix|svr4|both)
+      ;;
+    *)
+      AC_MSG_ERROR([Unknown argument to --with-aix-soname])
+      ;;
+    esac
+    lt_cv_with_aix_soname=$with_aix_soname],
+    [AC_CACHE_VAL([lt_cv_with_aix_soname],
+      [lt_cv_with_aix_soname=]_LT_WITH_AIX_SONAME_DEFAULT)
+    with_aix_soname=$lt_cv_with_aix_soname])
+  AC_MSG_RESULT([$with_aix_soname])
+  if test aix != "$with_aix_soname"; then
+    # For the AIX way of multilib, we name the shared archive member
+    # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o',
+    # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File.
+    # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag,
+    # the AIX toolchain works better with OBJECT_MODE set (default 32).
+    if test 64 = "${OBJECT_MODE-32}"; then
+      shared_archive_member_spec=shr_64
+    else
+      shared_archive_member_spec=shr
+    fi
+  fi
+  ;;
+*)
+  with_aix_soname=aix
+  ;;
+esac
+
+_LT_DECL([], [shared_archive_member_spec], [0],
+    [Shared archive member basename, for filename based shared library versioning on AIX])dnl
+])# _LT_WITH_AIX_SONAME
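+
+# Illustrative only: on a power*-*-aix7* host configured with
+#   ./configure --with-aix-soname=svr4
+# the svr4 variant is selected and shared_archive_member_spec is set to
+# shr (or shr_64 when OBJECT_MODE=64).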
+
+LT_OPTION_DEFINE([LT_INIT], [aix-soname=aix], [_LT_WITH_AIX_SONAME([aix])])
+LT_OPTION_DEFINE([LT_INIT], [aix-soname=both], [_LT_WITH_AIX_SONAME([both])])
+LT_OPTION_DEFINE([LT_INIT], [aix-soname=svr4], [_LT_WITH_AIX_SONAME([svr4])])
+
+
+# _LT_WITH_PIC([MODE])
+# --------------------
+# implement the --with-pic flag, and support the 'pic-only' and 'no-pic'
+# LT_INIT options.
+# MODE is either 'yes' or 'no'.  If omitted, it defaults to 'both'.
+m4_define([_LT_WITH_PIC],
+[AC_ARG_WITH([pic],
+    [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@],
+	[try to use only PIC/non-PIC objects @<:@default=use both@:>@])],
+    [lt_p=${PACKAGE-default}
+    case $withval in
+    yes|no) pic_mode=$withval ;;
+    *)
+      pic_mode=default
+      # Look at the argument we got.  We use all the common list separators.
+      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
+      for lt_pkg in $withval; do
+	IFS=$lt_save_ifs
+	if test "X$lt_pkg" = "X$lt_p"; then
+	  pic_mode=yes
+	fi
+      done
+      IFS=$lt_save_ifs
+      ;;
+    esac],
+    [pic_mode=m4_default([$1], [default])])
+
+_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl
+])# _LT_WITH_PIC
+
+LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])])
+LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])])
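+
+# Illustrative only (hypothetical package name):
+#   ./configure --with-pic           # pic_mode=yes (PIC objects only)
+#   ./configure --without-pic        # pic_mode=no
+#   ./configure --with-pic=mypkg     # pic-only just when $PACKAGE is "mypkg"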
+
+# Old name:
+AU_DEFUN([AC_LIBTOOL_PICMODE],
+[_LT_SET_OPTION([LT_INIT], [pic-only])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you
+put the 'pic-only' option into LT_INIT's first parameter.])
+])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_PICMODE], [])
+
+
+m4_define([_LTDL_MODE], [])
+LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive],
+		 [m4_define([_LTDL_MODE], [nonrecursive])])
+LT_OPTION_DEFINE([LTDL_INIT], [recursive],
+		 [m4_define([_LTDL_MODE], [recursive])])
+LT_OPTION_DEFINE([LTDL_INIT], [subproject],
+		 [m4_define([_LTDL_MODE], [subproject])])
+
+m4_define([_LTDL_TYPE], [])
+LT_OPTION_DEFINE([LTDL_INIT], [installable],
+		 [m4_define([_LTDL_TYPE], [installable])])
+LT_OPTION_DEFINE([LTDL_INIT], [convenience],
+		 [m4_define([_LTDL_TYPE], [convenience])])
+
+# ltsugar.m4 -- libtool m4 base layer.                         -*-Autoconf-*-
+#
+# Copyright (C) 2004-2005, 2007-2008, 2011-2015 Free Software
+# Foundation, Inc.
+# Written by Gary V. Vaughan, 2004
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# serial 6 ltsugar.m4
+
+# This is to help aclocal find these macros, as it can't see m4_define.
+AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])])
+
+
+# lt_join(SEP, ARG1, [ARG2...])
+# -----------------------------
+# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their
+# associated separator.
+# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier
+# versions in m4sugar had bugs.
+m4_define([lt_join],
+[m4_if([$#], [1], [],
+       [$#], [2], [[$2]],
+       [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])])
+m4_define([_lt_join],
+[m4_if([$#$2], [2], [],
+       [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])])
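+
+# Illustrative only: lt_join([,], [a], [], [b]) expands to a,b; the empty
+# argument and its separator are dropped.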
+
+
+# lt_car(LIST)
+# lt_cdr(LIST)
+# ------------
+# Manipulate m4 lists.
+# These macros are necessary as long as we still need to support
+# Autoconf-2.59, which quotes differently.
+m4_define([lt_car], [[$1]])
+m4_define([lt_cdr],
+[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])],
+       [$#], 1, [],
+       [m4_dquote(m4_shift($@))])])
+m4_define([lt_unquote], $1)
+
+
+# lt_append(MACRO-NAME, STRING, [SEPARATOR])
+# ------------------------------------------
+# Redefine MACRO-NAME to hold its former content plus 'SEPARATOR''STRING'.
+# Note that neither SEPARATOR nor STRING are expanded; they are appended
+# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked).
+# No SEPARATOR is output if MACRO-NAME was previously undefined (as
+# opposed to defined but empty).
+#
+# This macro is needed until we can rely on Autoconf 2.62, since earlier
+# versions of m4sugar mistakenly expanded SEPARATOR but not STRING.
+m4_define([lt_append],
+[m4_define([$1],
+	   m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])])
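+
+# Illustrative only (hypothetical macro name): after
+#   lt_append([my_list], [foo])
+#   lt_append([my_list], [bar], [, ])
+# the macro my_list expands to "foo, bar".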
+
+
+
+# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...])
+# ----------------------------------------------------------
+# Produce a SEP delimited list of all paired combinations of elements of
+# PREFIX-LIST with SUFFIX1 through SUFFIXn.  Each element of the list
+# has the form PREFIXmINFIXSUFFIXn.
+# Needed until we can rely on m4_combine added in Autoconf 2.62.
+m4_define([lt_combine],
+[m4_if(m4_eval([$# > 3]), [1],
+       [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl
+[[m4_foreach([_Lt_prefix], [$2],
+	     [m4_foreach([_Lt_suffix],
+		]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[,
+	[_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])])
+
+
+# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ])
+# -----------------------------------------------------------------------
+# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited
+# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ.
+m4_define([lt_if_append_uniq],
+[m4_ifdef([$1],
+	  [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1],
+		 [lt_append([$1], [$2], [$3])$4],
+		 [$5])],
+	  [lt_append([$1], [$2], [$3])$4])])
+
+
+# lt_dict_add(DICT, KEY, VALUE)
+# -----------------------------
+m4_define([lt_dict_add],
+[m4_define([$1($2)], [$3])])
+
+
+# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE)
+# --------------------------------------------
+m4_define([lt_dict_add_subkey],
+[m4_define([$1($2:$3)], [$4])])
+
+
+# lt_dict_fetch(DICT, KEY, [SUBKEY])
+# ----------------------------------
+m4_define([lt_dict_fetch],
+[m4_ifval([$3],
+	m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]),
+    m4_ifdef([$1($2)], [m4_defn([$1($2)])]))])
+
+
+# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE])
+# -----------------------------------------------------------------
+m4_define([lt_if_dict_fetch],
+[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4],
+	[$5],
+    [$6])])
+
+
+# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...])
+# --------------------------------------------------------------
+m4_define([lt_dict_filter],
+[m4_if([$5], [], [],
+  [lt_join(m4_quote(m4_default([$4], [[, ]])),
+           lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]),
+		      [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl
+])
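+# For illustration (hypothetical dictionary, keys and values):
+#   lt_dict_add([my_dict], [color], [red])
+#   lt_dict_add_subkey([my_dict], [color], [dark], [maroon])
+#   lt_dict_fetch([my_dict], [color])           expands to  red
+#   lt_dict_fetch([my_dict], [color], [dark])   expands to  maroon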
+
+# ltversion.m4 -- version numbers			-*- Autoconf -*-
+#
+#   Copyright (C) 2004, 2011-2015 Free Software Foundation, Inc.
+#   Written by Scott James Remnant, 2004
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# @configure_input@
+
+# serial 4179 ltversion.m4
+# This file is part of GNU Libtool
+
+m4_define([LT_PACKAGE_VERSION], [2.4.6])
+m4_define([LT_PACKAGE_REVISION], [2.4.6])
+
+AC_DEFUN([LTVERSION_VERSION],
+[macro_version='2.4.6'
+macro_revision='2.4.6'
+_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?])
+_LT_DECL(, macro_revision, 0)
+])
+
+# lt~obsolete.m4 -- aclocal satisfying obsolete definitions.    -*-Autoconf-*-
+#
+#   Copyright (C) 2004-2005, 2007, 2009, 2011-2015 Free Software
+#   Foundation, Inc.
+#   Written by Scott James Remnant, 2004.
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# serial 5 lt~obsolete.m4
+
+# These exist entirely to fool aclocal when bootstrapping libtool.
+#
+# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN),
+# which have later been changed to m4_define as they aren't part of the
+# exported API, or moved to Autoconf or Automake where they belong.
+#
+# The trouble is, aclocal is a bit thick.  It'll see the old AC_DEFUN
+# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us
+# using a macro with the same name in our local m4/libtool.m4 it'll
+# pull the old libtool.m4 in (it doesn't see our shiny new m4_define
+# and doesn't know about Autoconf macros at all.)
+#
+# So we provide this file, which has a silly filename so it's always
+# included after everything else.  This provides aclocal with the
+# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything
+# because those macros already exist, or will be overwritten later.
+# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6.
+#
+# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here.
+# Yes, that means every name once taken will need to remain here until
+# we give up compatibility with versions before 1.7, at which point
+# we need to keep only those names which we still refer to.
+
+# This is to help aclocal find these macros, as it can't see m4_define.
+AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])])
+
+m4_ifndef([AC_LIBTOOL_LINKER_OPTION],	[AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])])
+m4_ifndef([AC_PROG_EGREP],		[AC_DEFUN([AC_PROG_EGREP])])
+m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH],	[AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])])
+m4_ifndef([_LT_AC_SHELL_INIT],		[AC_DEFUN([_LT_AC_SHELL_INIT])])
+m4_ifndef([_LT_AC_SYS_LIBPATH_AIX],	[AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])])
+m4_ifndef([_LT_PROG_LTMAIN],		[AC_DEFUN([_LT_PROG_LTMAIN])])
+m4_ifndef([_LT_AC_TAGVAR],		[AC_DEFUN([_LT_AC_TAGVAR])])
+m4_ifndef([AC_LTDL_ENABLE_INSTALL],	[AC_DEFUN([AC_LTDL_ENABLE_INSTALL])])
+m4_ifndef([AC_LTDL_PREOPEN],		[AC_DEFUN([AC_LTDL_PREOPEN])])
+m4_ifndef([_LT_AC_SYS_COMPILER],	[AC_DEFUN([_LT_AC_SYS_COMPILER])])
+m4_ifndef([_LT_AC_LOCK],		[AC_DEFUN([_LT_AC_LOCK])])
+m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE],	[AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])])
+m4_ifndef([_LT_AC_TRY_DLOPEN_SELF],	[AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])])
+m4_ifndef([AC_LIBTOOL_PROG_CC_C_O],	[AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])])
+m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])])
+m4_ifndef([AC_LIBTOOL_OBJDIR],		[AC_DEFUN([AC_LIBTOOL_OBJDIR])])
+m4_ifndef([AC_LTDL_OBJDIR],		[AC_DEFUN([AC_LTDL_OBJDIR])])
+m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])])
+m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP],	[AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])])
+m4_ifndef([AC_PATH_MAGIC],		[AC_DEFUN([AC_PATH_MAGIC])])
+m4_ifndef([AC_PROG_LD_GNU],		[AC_DEFUN([AC_PROG_LD_GNU])])
+m4_ifndef([AC_PROG_LD_RELOAD_FLAG],	[AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])])
+m4_ifndef([AC_DEPLIBS_CHECK_METHOD],	[AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])])
+m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])])
+m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])])
+m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])])
+m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS],	[AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])])
+m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP],	[AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])])
+m4_ifndef([LT_AC_PROG_EGREP],		[AC_DEFUN([LT_AC_PROG_EGREP])])
+m4_ifndef([LT_AC_PROG_SED],		[AC_DEFUN([LT_AC_PROG_SED])])
+m4_ifndef([_LT_CC_BASENAME],		[AC_DEFUN([_LT_CC_BASENAME])])
+m4_ifndef([_LT_COMPILER_BOILERPLATE],	[AC_DEFUN([_LT_COMPILER_BOILERPLATE])])
+m4_ifndef([_LT_LINKER_BOILERPLATE],	[AC_DEFUN([_LT_LINKER_BOILERPLATE])])
+m4_ifndef([_AC_PROG_LIBTOOL],		[AC_DEFUN([_AC_PROG_LIBTOOL])])
+m4_ifndef([AC_LIBTOOL_SETUP],		[AC_DEFUN([AC_LIBTOOL_SETUP])])
+m4_ifndef([_LT_AC_CHECK_DLFCN],		[AC_DEFUN([_LT_AC_CHECK_DLFCN])])
+m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER],	[AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])])
+m4_ifndef([_LT_AC_TAGCONFIG],		[AC_DEFUN([_LT_AC_TAGCONFIG])])
+m4_ifndef([AC_DISABLE_FAST_INSTALL],	[AC_DEFUN([AC_DISABLE_FAST_INSTALL])])
+m4_ifndef([_LT_AC_LANG_CXX],		[AC_DEFUN([_LT_AC_LANG_CXX])])
+m4_ifndef([_LT_AC_LANG_F77],		[AC_DEFUN([_LT_AC_LANG_F77])])
+m4_ifndef([_LT_AC_LANG_GCJ],		[AC_DEFUN([_LT_AC_LANG_GCJ])])
+m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])])
+m4_ifndef([_LT_AC_LANG_C_CONFIG],	[AC_DEFUN([_LT_AC_LANG_C_CONFIG])])
+m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])])
+m4_ifndef([_LT_AC_LANG_CXX_CONFIG],	[AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])])
+m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])])
+m4_ifndef([_LT_AC_LANG_F77_CONFIG],	[AC_DEFUN([_LT_AC_LANG_F77_CONFIG])])
+m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])])
+m4_ifndef([_LT_AC_LANG_GCJ_CONFIG],	[AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])])
+m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])])
+m4_ifndef([_LT_AC_LANG_RC_CONFIG],	[AC_DEFUN([_LT_AC_LANG_RC_CONFIG])])
+m4_ifndef([AC_LIBTOOL_CONFIG],		[AC_DEFUN([AC_LIBTOOL_CONFIG])])
+m4_ifndef([_LT_AC_FILE_LTDLL_C],	[AC_DEFUN([_LT_AC_FILE_LTDLL_C])])
+m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS],	[AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])])
+m4_ifndef([_LT_AC_PROG_CXXCPP],		[AC_DEFUN([_LT_AC_PROG_CXXCPP])])
+m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS],	[AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])])
+m4_ifndef([_LT_PROG_ECHO_BACKSLASH],	[AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])])
+m4_ifndef([_LT_PROG_F77],		[AC_DEFUN([_LT_PROG_F77])])
+m4_ifndef([_LT_PROG_FC],		[AC_DEFUN([_LT_PROG_FC])])
+m4_ifndef([_LT_PROG_CXX],		[AC_DEFUN([_LT_PROG_CXX])])
+
+# Copyright (C) 2002-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_AUTOMAKE_VERSION(VERSION)
+# ----------------------------
+# Automake X.Y traces this macro to ensure aclocal.m4 has been
+# generated from the m4 files accompanying Automake X.Y.
+# (This private macro should not be called outside this file.)
+AC_DEFUN([AM_AUTOMAKE_VERSION],
+[am__api_version='1.16'
+dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
+dnl require some minimum version.  Point them to the right macro.
+m4_if([$1], [1.16.5], [],
+      [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
+])
+
+# _AM_AUTOCONF_VERSION(VERSION)
+# -----------------------------
+# aclocal traces this macro to find the Autoconf version.
+# This is a private macro too.  Using m4_define simplifies
+# the logic in aclocal, which can simply ignore this definition.
+m4_define([_AM_AUTOCONF_VERSION], [])
+
+# AM_SET_CURRENT_AUTOMAKE_VERSION
+# -------------------------------
+# Call AM_AUTOMAKE_VERSION and _AM_AUTOCONF_VERSION so they can be traced.
+# This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
+AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
+[AM_AUTOMAKE_VERSION([1.16.5])dnl
+m4_ifndef([AC_AUTOCONF_VERSION],
+  [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
+_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
+
+# AM_AUX_DIR_EXPAND                                         -*- Autoconf -*-
+
+# Copyright (C) 2001-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets
+# $ac_aux_dir to '$srcdir/foo'.  In other projects, it is set to
+# '$srcdir', '$srcdir/..', or '$srcdir/../..'.
+#
+# Of course, Automake must honor this variable whenever it calls a
+# tool from the auxiliary directory.  The problem is that $srcdir (and
+# therefore $ac_aux_dir as well) can be either absolute or relative,
+# depending on how configure is run.  This is pretty annoying, since
+# it makes $ac_aux_dir quite unusable in subdirectories: in the top
+# source directory, any form will work fine, but in subdirectories a
+# relative path needs to be adjusted first.
+#
+# $ac_aux_dir/missing
+#    fails when called from a subdirectory if $ac_aux_dir is relative
+# $top_srcdir/$ac_aux_dir/missing
+#    fails if $ac_aux_dir is absolute,
+#    fails when called from a subdirectory in a VPATH build with
+#          a relative $ac_aux_dir
+#
+# The reason for the latter failure is that $top_srcdir and $ac_aux_dir
+# are both prefixed by $srcdir.  In an in-source build this is usually
+# harmless because $srcdir is '.', but things will break when you
+# start a VPATH build or use an absolute $srcdir.
+#
+# So we could use something similar to $top_srcdir/$ac_aux_dir/missing,
+# iff we strip the leading $srcdir from $ac_aux_dir.  That would be:
+#   am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"`
+# and then we would define $MISSING as
+#   MISSING="\${SHELL} $am_aux_dir/missing"
+# This will work as long as MISSING is not called from configure, because
+# unfortunately $(top_srcdir) has no meaning in configure.
+# However there are other variables, like CC, which are often used in
+# configure, and could therefore not use this "fixed" $ac_aux_dir.
+#
+# Another solution, used here, is to always expand $ac_aux_dir to an
+# absolute PATH.  The drawback is that using absolute paths prevents a
+# configured tree from being moved without reconfiguration.
+
+AC_DEFUN([AM_AUX_DIR_EXPAND],
+[AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl
+# Expand $ac_aux_dir to an absolute path.
+am_aux_dir=`cd "$ac_aux_dir" && pwd`
+])
+
+# AM_CONDITIONAL                                            -*- Autoconf -*-
+
+# Copyright (C) 1997-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_CONDITIONAL(NAME, SHELL-CONDITION)
+# -------------------------------------
+# Define a conditional.
+AC_DEFUN([AM_CONDITIONAL],
+[AC_PREREQ([2.52])dnl
+ m4_if([$1], [TRUE],  [AC_FATAL([$0: invalid condition: $1])],
+       [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl
+AC_SUBST([$1_TRUE])dnl
+AC_SUBST([$1_FALSE])dnl
+_AM_SUBST_NOTMAKE([$1_TRUE])dnl
+_AM_SUBST_NOTMAKE([$1_FALSE])dnl
+m4_define([_AM_COND_VALUE_$1], [$2])dnl
+if $2; then
+  $1_TRUE=
+  $1_FALSE='#'
+else
+  $1_TRUE='#'
+  $1_FALSE=
+fi
+AC_CONFIG_COMMANDS_PRE(
+[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then
+  AC_MSG_ERROR([[conditional "$1" was never defined.
+Usually this means the macro was only invoked conditionally.]])
+fi])])
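+# For illustration, a typical (hypothetical) use:
+#   configure.ac:  AM_CONDITIONAL([DEBUG], [test "x$enable_debug" = xyes])
+#   Makefile.am:   if DEBUG
+#                    AM_CXXFLAGS = -g -O0
+#                  endif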
+
+# Copyright (C) 1999-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+
+# There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be
+# written in clear, in which case automake, when reading aclocal.m4,
+# will think it sees a *use*, and therefore will trigger all its
+# C support machinery.  Also note that it means that autoscan, seeing
+# CC etc. in the Makefile, will ask for an AC_PROG_CC use...
+
+
+# _AM_DEPENDENCIES(NAME)
+# ----------------------
+# See how the compiler implements dependency checking.
+# NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GJC".
+# We try a few techniques and use that to set a single cache variable.
+#
+# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was
+# modified to invoke _AM_DEPENDENCIES(CC); we would have a circular
+# dependency, and given that the user is not expected to run this macro,
+# just rely on AC_PROG_CC.
+AC_DEFUN([_AM_DEPENDENCIES],
+[AC_REQUIRE([AM_SET_DEPDIR])dnl
+AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl
+AC_REQUIRE([AM_MAKE_INCLUDE])dnl
+AC_REQUIRE([AM_DEP_TRACK])dnl
+
+m4_if([$1], [CC],   [depcc="$CC"   am_compiler_list=],
+      [$1], [CXX],  [depcc="$CXX"  am_compiler_list=],
+      [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'],
+      [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'],
+      [$1], [UPC],  [depcc="$UPC"  am_compiler_list=],
+      [$1], [GCJ],  [depcc="$GCJ"  am_compiler_list='gcc3 gcc'],
+                    [depcc="$$1"   am_compiler_list=])
+
+AC_CACHE_CHECK([dependency style of $depcc],
+               [am_cv_$1_dependencies_compiler_type],
+[if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+  # We make a subdir and do the tests there.  Otherwise we can end up
+  # making bogus files that we don't know about and never remove.  For
+  # instance it was reported that on HP-UX the gcc test will end up
+  # making a dummy file named 'D' -- because '-MD' means "put the output
+  # in D".
+  rm -rf conftest.dir
+  mkdir conftest.dir
+  # Copy depcomp to subdir because otherwise we won't find it if we're
+  # using a relative directory.
+  cp "$am_depcomp" conftest.dir
+  cd conftest.dir
+  # We will build objects and dependencies in a subdirectory because
+  # it helps to detect inapplicable dependency modes.  For instance
+  # both Tru64's cc and ICC support -MD to output dependencies as a
+  # side effect of compilation, but ICC will put the dependencies in
+  # the current directory while Tru64 will put them in the object
+  # directory.
+  mkdir sub
+
+  am_cv_$1_dependencies_compiler_type=none
+  if test "$am_compiler_list" = ""; then
+     am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp`
+  fi
+  am__universal=false
+  m4_case([$1], [CC],
+    [case " $depcc " in #(
+     *\ -arch\ *\ -arch\ *) am__universal=true ;;
+     esac],
+    [CXX],
+    [case " $depcc " in #(
+     *\ -arch\ *\ -arch\ *) am__universal=true ;;
+     esac])
+
+  for depmode in $am_compiler_list; do
+    # Setup a source with many dependencies, because some compilers
+    # like to wrap large dependency lists on column 80 (with \), and
+    # we should not choose a depcomp mode which is confused by this.
+    #
+    # We need to recreate these files for each test, as the compiler may
+    # overwrite some of them when testing with obscure command lines.
+    # This happens at least with the AIX C compiler.
+    : > sub/conftest.c
+    for i in 1 2 3 4 5 6; do
+      echo '#include "conftst'$i'.h"' >> sub/conftest.c
+      # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
+      # Solaris 10 /bin/sh.
+      echo '/* dummy */' > sub/conftst$i.h
+    done
+    echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+    # We check with '-c' and '-o' for the sake of the "dashmstdout"
+    # mode.  It turns out that the SunPro C++ compiler does not properly
+    # handle '-M -o', and we need to detect this.  Also, some Intel
+    # versions had trouble with output in subdirs.
+    am__obj=sub/conftest.${OBJEXT-o}
+    am__minus_obj="-o $am__obj"
+    case $depmode in
+    gcc)
+      # This depmode causes a compiler race in universal mode.
+      test "$am__universal" = false || continue
+      ;;
+    nosideeffect)
+      # After this tag, mechanisms are not by side-effect, so they'll
+      # only be used when explicitly requested.
+      if test "x$enable_dependency_tracking" = xyes; then
+	continue
+      else
+	break
+      fi
+      ;;
+    msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+      # This compiler won't grok '-c -o', but also, the minus-o test has
+      # not run yet.  These depmodes are late enough in the game, and
+      # so weak that their functioning should not be impacted.
+      am__obj=conftest.${OBJEXT-o}
+      am__minus_obj=
+      ;;
+    none) break ;;
+    esac
+    if depmode=$depmode \
+       source=sub/conftest.c object=$am__obj \
+       depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+       $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+         >/dev/null 2>conftest.err &&
+       grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+       ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+      # icc doesn't choke on unknown options, it will just issue warnings
+      # or remarks (even with -Werror).  So we grep stderr for any message
+      # that says an option was ignored or not supported.
+      # When given -MP, icc 7.0 and 7.1 complain thusly:
+      #   icc: Command line warning: ignoring option '-M'; no argument required
+      # The diagnosis changed in icc 8.0:
+      #   icc: Command line remark: option '-MP' not supported
+      if (grep 'ignoring option' conftest.err ||
+          grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+        am_cv_$1_dependencies_compiler_type=$depmode
+        break
+      fi
+    fi
+  done
+
+  cd ..
+  rm -rf conftest.dir
+else
+  am_cv_$1_dependencies_compiler_type=none
+fi
+])
+AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type])
+AM_CONDITIONAL([am__fastdep$1], [
+  test "x$enable_dependency_tracking" != xno \
+  && test "$am_cv_$1_dependencies_compiler_type" = gcc3])
+])
+
+
+# AM_SET_DEPDIR
+# -------------
+# Choose a directory name for dependency files.
+# This macro is AC_REQUIREd in _AM_DEPENDENCIES.
+AC_DEFUN([AM_SET_DEPDIR],
+[AC_REQUIRE([AM_SET_LEADING_DOT])dnl
+AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl
+])
+
+
+# AM_DEP_TRACK
+# ------------
+AC_DEFUN([AM_DEP_TRACK],
+[AC_ARG_ENABLE([dependency-tracking], [dnl
+AS_HELP_STRING(
+  [--enable-dependency-tracking],
+  [do not reject slow dependency extractors])
+AS_HELP_STRING(
+  [--disable-dependency-tracking],
+  [speeds up one-time build])])
+if test "x$enable_dependency_tracking" != xno; then
+  am_depcomp="$ac_aux_dir/depcomp"
+  AMDEPBACKSLASH='\'
+  am__nodep='_no'
+fi
+AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno])
+AC_SUBST([AMDEPBACKSLASH])dnl
+_AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl
+AC_SUBST([am__nodep])dnl
+_AM_SUBST_NOTMAKE([am__nodep])dnl
+])
+
+# Generate code to set up dependency tracking.              -*- Autoconf -*-
+
+# Copyright (C) 1999-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# _AM_OUTPUT_DEPENDENCY_COMMANDS
+# ------------------------------
+AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
+[{
+  # Older Autoconf quotes --file arguments for eval, but not when files
+  # are listed without --file.  Let's play safe and only enable the eval
+  # if we detect the quoting.
+  # TODO: see whether this extra hack can be removed once we start
+  # requiring Autoconf 2.70 or later.
+  AS_CASE([$CONFIG_FILES],
+          [*\'*], [eval set x "$CONFIG_FILES"],
+          [*], [set x $CONFIG_FILES])
+  shift
+  # Used to flag and report bootstrapping failures.
+  am_rc=0
+  for am_mf
+  do
+    # Strip MF so we end up with the name of the file.
+    am_mf=`AS_ECHO(["$am_mf"]) | sed -e 's/:.*$//'`
+    # Check whether this is an Automake generated Makefile which includes
+    # dependency-tracking related rules and includes.
+    # Grep'ing the whole file directly is not great: AIX grep has a line
+    # limit of 2048, but all seds we know of understand at least 4000.
+    sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \
+      || continue
+    am_dirpart=`AS_DIRNAME(["$am_mf"])`
+    am_filepart=`AS_BASENAME(["$am_mf"])`
+    AM_RUN_LOG([cd "$am_dirpart" \
+      && sed -e '/# am--include-marker/d' "$am_filepart" \
+        | $MAKE -f - am--depfiles]) || am_rc=$?
+  done
+  if test $am_rc -ne 0; then
+    AC_MSG_FAILURE([Something went wrong bootstrapping makefile fragments
+    for automatic dependency tracking.  If GNU make was not used, consider
+    re-running the configure script with MAKE="gmake" (or whatever is
+    necessary).  You can also try re-running configure with the
+    '--disable-dependency-tracking' option to at least be able to build
+    the package (albeit without support for automatic dependency tracking).])
+  fi
+  AS_UNSET([am_dirpart])
+  AS_UNSET([am_filepart])
+  AS_UNSET([am_mf])
+  AS_UNSET([am_rc])
+  rm -f conftest-deps.mk
+}
+])# _AM_OUTPUT_DEPENDENCY_COMMANDS
+
+
+# AM_OUTPUT_DEPENDENCY_COMMANDS
+# -----------------------------
+# This macro should only be invoked once -- use via AC_REQUIRE.
+#
+# This code is only required when automatic dependency tracking is enabled.
+# This creates each '.Po' and '.Plo' makefile fragment that we'll need in
+# order to bootstrap the dependency handling code.
+AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
+[AC_CONFIG_COMMANDS([depfiles],
+     [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS],
+     [AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}"])])
+
+# Do all the work for Automake.                             -*- Autoconf -*-
+
+# Copyright (C) 1996-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This macro actually does too much.  Some checks are only needed if
+# your package does certain things.  But this isn't really a big deal.
+
+dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O.
+m4_define([AC_PROG_CC],
+m4_defn([AC_PROG_CC])
+[_AM_PROG_CC_C_O
+])
+
+# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE])
+# AM_INIT_AUTOMAKE([OPTIONS])
+# -----------------------------------------------
+# The call with PACKAGE and VERSION arguments is the old style
+# call (pre autoconf-2.50), which is being phased out.  PACKAGE
+# and VERSION should now be passed to AC_INIT and removed from
+# the call to AM_INIT_AUTOMAKE.
+# We support both call styles for the transition.  After
+# the next Automake release, Autoconf can make the AC_INIT
+# arguments mandatory, and then we can depend on a new Autoconf
+# release and drop the old call support.
+AC_DEFUN([AM_INIT_AUTOMAKE],
+[AC_PREREQ([2.65])dnl
+m4_ifdef([_$0_ALREADY_INIT],
+  [m4_fatal([$0 expanded multiple times
+]m4_defn([_$0_ALREADY_INIT]))],
+  [m4_define([_$0_ALREADY_INIT], m4_expansion_stack)])dnl
+dnl Autoconf wants to disallow AM_ names.  We explicitly allow
+dnl the ones we care about.
+m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl
+AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl
+AC_REQUIRE([AC_PROG_INSTALL])dnl
+if test "`cd $srcdir && pwd`" != "`pwd`"; then
+  # Use -I$(srcdir) only when $(srcdir) != ., so that make's output
+  # is not polluted with repeated "-I."
+  AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl
+  # test to see if srcdir already configured
+  if test -f $srcdir/config.status; then
+    AC_MSG_ERROR([source directory already configured; run "make distclean" there first])
+  fi
+fi
+
+# test whether we have cygpath
+if test -z "$CYGPATH_W"; then
+  if (cygpath --version) >/dev/null 2>/dev/null; then
+    CYGPATH_W='cygpath -w'
+  else
+    CYGPATH_W=echo
+  fi
+fi
+AC_SUBST([CYGPATH_W])
+
+# Define the identity of the package.
+dnl Distinguish between old-style and new-style calls.
+m4_ifval([$2],
+[AC_DIAGNOSE([obsolete],
+             [$0: two- and three-arguments forms are deprecated.])
+m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl
+ AC_SUBST([PACKAGE], [$1])dnl
+ AC_SUBST([VERSION], [$2])],
+[_AM_SET_OPTIONS([$1])dnl
+dnl Diagnose old-style AC_INIT with new-style AM_INIT_AUTOMAKE.
+m4_if(
+  m4_ifset([AC_PACKAGE_NAME], [ok]):m4_ifset([AC_PACKAGE_VERSION], [ok]),
+  [ok:ok],,
+  [m4_fatal([AC_INIT should be called with package and version arguments])])dnl
+ AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl
+ AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl
+
+_AM_IF_OPTION([no-define],,
+[AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package])
+ AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl
+
+# Some tools Automake needs.
+AC_REQUIRE([AM_SANITY_CHECK])dnl
+AC_REQUIRE([AC_ARG_PROGRAM])dnl
+AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}])
+AM_MISSING_PROG([AUTOCONF], [autoconf])
+AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}])
+AM_MISSING_PROG([AUTOHEADER], [autoheader])
+AM_MISSING_PROG([MAKEINFO], [makeinfo])
+AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
+AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl
+AC_REQUIRE([AC_PROG_MKDIR_P])dnl
+# For better backward compatibility.  To be removed once Automake 1.9.x
+# dies out for good.  For more background, see:
+# <https://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
+# <https://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
+AC_SUBST([mkdir_p], ['$(MKDIR_P)'])
+# We need awk for the "check" target (and possibly the TAP driver).  The
+# system "awk" is bad on some platforms.
+AC_REQUIRE([AC_PROG_AWK])dnl
+AC_REQUIRE([AC_PROG_MAKE_SET])dnl
+AC_REQUIRE([AM_SET_LEADING_DOT])dnl
+_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])],
+	      [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])],
+			     [_AM_PROG_TAR([v7])])])
+_AM_IF_OPTION([no-dependencies],,
+[AC_PROVIDE_IFELSE([AC_PROG_CC],
+		  [_AM_DEPENDENCIES([CC])],
+		  [m4_define([AC_PROG_CC],
+			     m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl
+AC_PROVIDE_IFELSE([AC_PROG_CXX],
+		  [_AM_DEPENDENCIES([CXX])],
+		  [m4_define([AC_PROG_CXX],
+			     m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl
+AC_PROVIDE_IFELSE([AC_PROG_OBJC],
+		  [_AM_DEPENDENCIES([OBJC])],
+		  [m4_define([AC_PROG_OBJC],
+			     m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl
+AC_PROVIDE_IFELSE([AC_PROG_OBJCXX],
+		  [_AM_DEPENDENCIES([OBJCXX])],
+		  [m4_define([AC_PROG_OBJCXX],
+			     m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl
+])
+# Variables for tags utilities; see am/tags.am
+if test -z "$CTAGS"; then
+  CTAGS=ctags
+fi
+AC_SUBST([CTAGS])
+if test -z "$ETAGS"; then
+  ETAGS=etags
+fi
+AC_SUBST([ETAGS])
+if test -z "$CSCOPE"; then
+  CSCOPE=cscope
+fi
+AC_SUBST([CSCOPE])
+
+AC_REQUIRE([AM_SILENT_RULES])dnl
+dnl The testsuite driver may need to know about EXEEXT, so add the
+dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen.  This
+dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below.
+AC_CONFIG_COMMANDS_PRE(dnl
+[m4_provide_if([_AM_COMPILER_EXEEXT],
+  [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl
+
+# POSIX will say in a future version that running "rm -f" with no argument
+# is OK; and we want to be able to make that assumption in our Makefile
+# recipes.  So use an aggressive probe to check that the usage we want is
+# actually supported "in the wild" to an acceptable degree.
+# See automake bug#10828.
+# To make any issue more visible, cause the running configure to be aborted
+# by default if the 'rm' program in use doesn't match our expectations; the
+# user can still override this though.
+if rm -f && rm -fr && rm -rf; then : OK; else
+  cat >&2 <<'END'
+Oops!
+
+Your 'rm' program seems unable to run without file operands specified
+on the command line, even when the '-f' option is present.  This is contrary
+to the behaviour of most rm programs out there, and not conforming with
+the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542>
+
+Please tell bug-automake@gnu.org about your system, including the value
+of your $PATH and any error possibly output before this message.  This
+can help us improve future automake versions.
+
+END
+  if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then
+    echo 'Configuration will proceed anyway, since you have set the' >&2
+    echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2
+    echo >&2
+  else
+    cat >&2 <<'END'
+Aborting the configuration process, to ensure you take notice of the issue.
+
+You can download and install GNU coreutils to get an 'rm' implementation
+that behaves properly: <https://www.gnu.org/software/coreutils/>.
+
+If you want to complete the configuration process using your problematic
+'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
+to "yes", and re-run configure.
+
+END
+    AC_MSG_ERROR([Your 'rm' program is bad, sorry.])
+  fi
+fi
+dnl The trailing newline in this macro's definition is deliberate, for
+dnl backward compatibility and to allow trailing 'dnl'-style comments
+dnl after the AM_INIT_AUTOMAKE invocation. See automake bug#16841.
+])
+
+dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion.  Do not
+dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further
+dnl mangled by Autoconf and run in a shell conditional statement.
+m4_define([_AC_COMPILER_EXEEXT],
+m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])])
+
+# When config.status generates a header, we must update the stamp-h file.
+# This file resides in the same directory as the config header
+# that is generated.  The stamp files are numbered to have different names.
+
+# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the
+# loop where config.status creates the headers, so we can generate
+# our stamp files there.
+AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK],
+[# Compute $1's index in $config_headers.
+_am_arg=$1
+_am_stamp_count=1
+for _am_header in $config_headers :; do
+  case $_am_header in
+    $_am_arg | $_am_arg:* )
+      break ;;
+    * )
+      _am_stamp_count=`expr $_am_stamp_count + 1` ;;
+  esac
+done
+echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count])
+
+# Copyright (C) 2001-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_PROG_INSTALL_SH
+# ------------------
+# Define $install_sh.
+AC_DEFUN([AM_PROG_INSTALL_SH],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+if test x"${install_sh+set}" != xset; then
+  case $am_aux_dir in
+  *\ * | *\	*)
+    install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
+  *)
+    install_sh="\${SHELL} $am_aux_dir/install-sh"
+  esac
+fi
+AC_SUBST([install_sh])])
+
+# Copyright (C) 2003-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# Check whether the underlying file-system supports filenames
+# with a leading dot.  For instance MS-DOS doesn't.
+AC_DEFUN([AM_SET_LEADING_DOT],
+[rm -rf .tst 2>/dev/null
+mkdir .tst 2>/dev/null
+if test -d .tst; then
+  am__leading_dot=.
+else
+  am__leading_dot=_
+fi
+rmdir .tst 2>/dev/null
+AC_SUBST([am__leading_dot])])
+
+# Check to see how 'make' treats includes.	            -*- Autoconf -*-
+
+# Copyright (C) 2001-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_MAKE_INCLUDE()
+# -----------------
+# Check whether make has an 'include' directive that can support all
+# the idioms we need for our automatic dependency tracking code.
+AC_DEFUN([AM_MAKE_INCLUDE],
+[AC_MSG_CHECKING([whether ${MAKE-make} supports the include directive])
+cat > confinc.mk << 'END'
+am__doit:
+	@echo this is the am__doit target >confinc.out
+.PHONY: am__doit
+END
+am__include="#"
+am__quote=
+# BSD make does it like this.
+echo '.include "confinc.mk" # ignored' > confmf.BSD
+# Other make implementations (GNU, Solaris 10, AIX) do it like this.
+echo 'include confinc.mk # ignored' > confmf.GNU
+_am_result=no
+for s in GNU BSD; do
+  AM_RUN_LOG([${MAKE-make} -f confmf.$s && cat confinc.out])
+  AS_CASE([$?:`cat confinc.out 2>/dev/null`],
+      ['0:this is the am__doit target'],
+      [AS_CASE([$s],
+          [BSD], [am__include='.include' am__quote='"'],
+          [am__include='include' am__quote=''])])
+  if test "$am__include" != "#"; then
+    _am_result="yes ($s style)"
+    break
+  fi
+done
+rm -f confinc.* confmf.*
+AC_MSG_RESULT([${_am_result}])
+AC_SUBST([am__include])
+AC_SUBST([am__quote])])
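+# For illustration (sketch, hypothetical file names): a generated Makefile.in
+# can carry a dependency include written as
+#   @am__include@ @am__quote@./$(DEPDIR)/foo.Po@am__quote@
+# which config.status rewrites to 'include ./$(DEPDIR)/foo.Po' for GNU-style
+# make, or '.include "./$(DEPDIR)/foo.Po"' for BSD make.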
+
+# Fake the existence of programs that GNU maintainers use.  -*- Autoconf -*-
+
+# Copyright (C) 1997-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_MISSING_PROG(NAME, PROGRAM)
+# ------------------------------
+AC_DEFUN([AM_MISSING_PROG],
+[AC_REQUIRE([AM_MISSING_HAS_RUN])
+$1=${$1-"${am_missing_run}$2"}
+AC_SUBST($1)])
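+# For illustration (hypothetical tool):
+#   AM_MISSING_PROG([HELP2MAN], [help2man])
+# substitutes HELP2MAN either as the bare program name or, via the 'missing'
+# wrapper below, as "${SHELL} .../missing help2man", so an absent maintainer
+# tool warns instead of breaking the build.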
+
+# AM_MISSING_HAS_RUN
+# ------------------
+# Define MISSING if not defined so far and test if it is modern enough.
+# If it is, set am_missing_run to use it, otherwise, to nothing.
+AC_DEFUN([AM_MISSING_HAS_RUN],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+AC_REQUIRE_AUX_FILE([missing])dnl
+if test x"${MISSING+set}" != xset; then
+  MISSING="\${SHELL} '$am_aux_dir/missing'"
+fi
+# Use eval to expand $SHELL
+if eval "$MISSING --is-lightweight"; then
+  am_missing_run="$MISSING "
+else
+  am_missing_run=
+  AC_MSG_WARN(['missing' script is too old or missing])
+fi
+])
+
+#  -*- Autoconf -*-
+# Obsolete and "removed" macros, that must however still report explicit
+# error messages when used, to smooth transition.
+#
+# Copyright (C) 1996-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+AC_DEFUN([AM_CONFIG_HEADER],
+[AC_DIAGNOSE([obsolete],
+['$0': this macro is obsolete.
+You should use the 'AC][_CONFIG_HEADERS' macro instead.])dnl
+AC_CONFIG_HEADERS($@)])
+
+AC_DEFUN([AM_PROG_CC_STDC],
+[AC_PROG_CC
+am_cv_prog_cc_stdc=$ac_cv_prog_cc_stdc
+AC_DIAGNOSE([obsolete],
+['$0': this macro is obsolete.
+You should simply use the 'AC][_PROG_CC' macro instead.
+Also, your code should no longer depend upon 'am_cv_prog_cc_stdc',
+but upon 'ac_cv_prog_cc_stdc'.])])
+
+AC_DEFUN([AM_C_PROTOTYPES],
+         [AC_FATAL([automatic de-ANSI-fication support has been removed])])
+AU_DEFUN([fp_C_PROTOTYPES], [AM_C_PROTOTYPES])
+
+# Helper functions for option handling.                     -*- Autoconf -*-
+
+# Copyright (C) 2001-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# _AM_MANGLE_OPTION(NAME)
+# -----------------------
+AC_DEFUN([_AM_MANGLE_OPTION],
+[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])])
+
+# _AM_SET_OPTION(NAME)
+# --------------------
+# Set option NAME.  Presently that only means defining a flag for this option.
+AC_DEFUN([_AM_SET_OPTION],
+[m4_define(_AM_MANGLE_OPTION([$1]), [1])])
+
+# _AM_SET_OPTIONS(OPTIONS)
+# ------------------------
+# OPTIONS is a space-separated list of Automake options.
+AC_DEFUN([_AM_SET_OPTIONS],
+[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])])
+
+# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET])
+# -------------------------------------------
+# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise.
+AC_DEFUN([_AM_IF_OPTION],
+[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])])
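+# For illustration (hypothetical options):
+#   _AM_SET_OPTIONS([foreign no-dependencies])
+#   _AM_IF_OPTION([foreign], [IF-SET], [IF-NOT-SET])
+# expands IF-SET, because 'foreign' was registered by the first call.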
+
+# Copyright (C) 1999-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# _AM_PROG_CC_C_O
+# ---------------
+# Like AC_PROG_CC_C_O, but changed for automake.  We rewrite AC_PROG_CC
+# to automatically call this.
+AC_DEFUN([_AM_PROG_CC_C_O],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+AC_REQUIRE_AUX_FILE([compile])dnl
+AC_LANG_PUSH([C])dnl
+AC_CACHE_CHECK(
+  [whether $CC understands -c and -o together],
+  [am_cv_prog_cc_c_o],
+  [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])])
+  # Make sure it works both with $CC and with simple cc.
+  # Following AC_PROG_CC_C_O, we do the test twice because some
+  # compilers refuse to overwrite an existing .o file with -o,
+  # though they will create one.
+  am_cv_prog_cc_c_o=yes
+  for am_i in 1 2; do
+    if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \
+         && test -f conftest2.$ac_objext; then
+      : OK
+    else
+      am_cv_prog_cc_c_o=no
+      break
+    fi
+  done
+  rm -f core conftest*
+  unset am_i])
+if test "$am_cv_prog_cc_c_o" != yes; then
+   # Losing compiler, so override with the script.
+   # FIXME: It is wrong to rewrite CC.
+   # But if we don't then we get into trouble of one sort or another.
+   # A longer-term fix would be to have automake use am__CC in this case,
+   # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
+   CC="$am_aux_dir/compile $CC"
+fi
+AC_LANG_POP([C])])
+
+# For backward compatibility.
+AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])])
+
+# Copyright (C) 2001-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_RUN_LOG(COMMAND)
+# -------------------
+# Run COMMAND, save the exit status in ac_status, and log it.
+# (This has been adapted from Autoconf's _AC_RUN_LOG macro.)
+AC_DEFUN([AM_RUN_LOG],
+[{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD
+   ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD
+   ac_status=$?
+   echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
+   (exit $ac_status); }])
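+# For illustration (hypothetical command):
+#   AM_RUN_LOG([${MAKE-make} --version])
+# runs the command, copies its output to the config.log file descriptor, and
+# leaves the command's exit status in $ac_status for the caller to test.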
+
+# Check to make sure that the build environment is sane.    -*- Autoconf -*-
+
+# Copyright (C) 1996-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_SANITY_CHECK
+# ---------------
+AC_DEFUN([AM_SANITY_CHECK],
+[AC_MSG_CHECKING([whether build environment is sane])
+# Reject unsafe characters in $srcdir or the absolute working directory
+# name.  Accept space and tab only in the latter.
+am_lf='
+'
+case `pwd` in
+  *[[\\\"\#\$\&\'\`$am_lf]]*)
+    AC_MSG_ERROR([unsafe absolute working directory name]);;
+esac
+case $srcdir in
+  *[[\\\"\#\$\&\'\`$am_lf\ \	]]*)
+    AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);;
+esac
+
+# Do 'set' in a subshell so we don't clobber the current shell's
+# arguments.  Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (eg FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+   am_has_slept=no
+   for am_try in 1 2; do
+     echo "timestamp, slept: $am_has_slept" > conftest.file
+     set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+     if test "$[*]" = "X"; then
+	# -L didn't work.
+	set X `ls -t "$srcdir/configure" conftest.file`
+     fi
+     if test "$[*]" != "X $srcdir/configure conftest.file" \
+	&& test "$[*]" != "X conftest.file $srcdir/configure"; then
+
+	# If neither matched, then we have a broken ls.  This can happen
+	# if, for instance, CONFIG_SHELL is bash and it inherits a
+	# broken ls alias from the environment.  This has actually
+	# happened.  Such a system could not be considered "sane".
+	AC_MSG_ERROR([ls -t appears to fail.  Make sure there is not a broken
+  alias in your environment])
+     fi
+     if test "$[2]" = conftest.file || test $am_try -eq 2; then
+       break
+     fi
+     # Just in case.
+     sleep 1
+     am_has_slept=yes
+   done
+   test "$[2]" = conftest.file
+   )
+then
+   # Ok.
+   :
+else
+   AC_MSG_ERROR([newly created file is older than distributed files!
+Check your system clock])
+fi
+AC_MSG_RESULT([yes])
+# If we didn't sleep, we still need to ensure time stamps of config.status and
+# generated files are strictly newer.
+am_sleep_pid=
+if grep 'slept: no' conftest.file >/dev/null 2>&1; then
+  ( sleep 1 ) &
+  am_sleep_pid=$!
+fi
+AC_CONFIG_COMMANDS_PRE(
+  [AC_MSG_CHECKING([that generated files are newer than configure])
+   if test -n "$am_sleep_pid"; then
+     # Hide warnings about reused PIDs.
+     wait $am_sleep_pid 2>/dev/null
+   fi
+   AC_MSG_RESULT([done])])
+rm -f conftest.file
+])
+
+# Copyright (C) 2009-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_SILENT_RULES([DEFAULT])
+# --------------------------
+# Enable less verbose build rules; with the default set to DEFAULT
+# ("yes" being less verbose, "no" or empty being verbose).
+AC_DEFUN([AM_SILENT_RULES],
+[AC_ARG_ENABLE([silent-rules], [dnl
+AS_HELP_STRING(
+  [--enable-silent-rules],
+  [less verbose build output (undo: "make V=1")])
+AS_HELP_STRING(
+  [--disable-silent-rules],
+  [verbose build output (undo: "make V=0")])dnl
+])
+case $enable_silent_rules in @%:@ (((
+  yes) AM_DEFAULT_VERBOSITY=0;;
+   no) AM_DEFAULT_VERBOSITY=1;;
+    *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);;
+esac
+dnl
+dnl A few 'make' implementations (e.g., NonStop OS and NextStep)
+dnl do not support nested variable expansions.
+dnl See automake bug#9928 and bug#10237.
+am_make=${MAKE-make}
+AC_CACHE_CHECK([whether $am_make supports nested variables],
+   [am_cv_make_support_nested_variables],
+   [if AS_ECHO([['TRUE=$(BAR$(V))
+BAR0=false
+BAR1=true
+V=1
+am__doit:
+	@$(TRUE)
+.PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then
+  am_cv_make_support_nested_variables=yes
+else
+  am_cv_make_support_nested_variables=no
+fi])
+if test $am_cv_make_support_nested_variables = yes; then
+  dnl Using '$V' instead of '$(V)' breaks IRIX make.
+  AM_V='$(V)'
+  AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
+else
+  AM_V=$AM_DEFAULT_VERBOSITY
+  AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY
+fi
+AC_SUBST([AM_V])dnl
+AM_SUBST_NOTMAKE([AM_V])dnl
+AC_SUBST([AM_DEFAULT_V])dnl
+AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl
+AC_SUBST([AM_DEFAULT_VERBOSITY])dnl
+AM_BACKSLASH='\'
+AC_SUBST([AM_BACKSLASH])dnl
+_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl
+])
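+# For illustration: a configure.ac calling AM_SILENT_RULES([yes]) gets terse
+# build output by default; users can override it per run with 'make V=1' or
+# at configure time with --disable-silent-rules.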
+
+# Copyright (C) 2001-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_PROG_INSTALL_STRIP
+# ---------------------
+# One issue with vendor 'install' (even GNU) is that you can't
+# specify the program used to strip binaries.  This is especially
+# annoying in cross-compiling environments, where the build's strip
+# is unlikely to handle the host's binaries.
+# Fortunately install-sh will honor a STRIPPROG variable, so we
+# always use install-sh in "make install-strip", and initialize
+# STRIPPROG with the value of the STRIP variable (set by the user).
+AC_DEFUN([AM_PROG_INSTALL_STRIP],
+[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
+# Installed binaries are usually stripped using 'strip' when the user
+# run "make install-strip".  However 'strip' might not be the right
+# tool to use in cross-compilation environments, therefore Automake
+# will honor the 'STRIP' environment variable to overrule this program.
+dnl Don't test for $cross_compiling = yes, because it might be 'maybe'.
+if test "$cross_compiling" != no; then
+  AC_CHECK_TOOL([STRIP], [strip], :)
+fi
+INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
+AC_SUBST([INSTALL_STRIP_PROGRAM])])
+
+# Copyright (C) 2006-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# _AM_SUBST_NOTMAKE(VARIABLE)
+# ---------------------------
+# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in.
+# This macro is traced by Automake.
+AC_DEFUN([_AM_SUBST_NOTMAKE])
+
+# AM_SUBST_NOTMAKE(VARIABLE)
+# --------------------------
+# Public sister of _AM_SUBST_NOTMAKE.
+AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
+
+# Check how to create a tarball.                            -*- Autoconf -*-
+
+# Copyright (C) 2004-2021 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# _AM_PROG_TAR(FORMAT)
+# --------------------
+# Check how to create a tarball in format FORMAT.
+# FORMAT should be one of 'v7', 'ustar', or 'pax'.
+#
+# Substitute a variable $(am__tar) that is a command
+# writing to stdout a FORMAT-tarball containing the directory
+# $tardir.
+#     tardir=directory && $(am__tar) > result.tar
+#
+# Substitute a variable $(am__untar) that extracts such
+# a tarball read from stdin.
+#     $(am__untar) < result.tar
+#
+AC_DEFUN([_AM_PROG_TAR],
+[# Always define AMTAR for backward compatibility.  Yes, it's still used
+# in the wild :-(  We should find a proper way to deprecate it ...
+AC_SUBST([AMTAR], ['$${TAR-tar}'])
+
+# We'll loop over all known methods to create a tar archive until one works.
+_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none'
+
+m4_if([$1], [v7],
+  [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'],
+
+  [m4_case([$1],
+    [ustar],
+     [# The POSIX 1988 'ustar' format is defined with fixed-size fields.
+      # There is notably a 21-bit limit for the UID and the GID.  In fact,
+      # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343
+      # and bug#13588).
+      am_max_uid=2097151 # 2^21 - 1
+      am_max_gid=$am_max_uid
+      # The $UID and $GID variables are not portable, so we need to resort
+      # to the POSIX-mandated id(1) utility.  Errors in the 'id' calls
+      # below are definitely unexpected, so allow the users to see them
+      # (that is, avoid stderr redirection).
+      am_uid=`id -u || echo unknown`
+      am_gid=`id -g || echo unknown`
+      AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format])
+      if test $am_uid -le $am_max_uid; then
+         AC_MSG_RESULT([yes])
+      else
+         AC_MSG_RESULT([no])
+         _am_tools=none
+      fi
+      AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format])
+      if test $am_gid -le $am_max_gid; then
+         AC_MSG_RESULT([yes])
+      else
+        AC_MSG_RESULT([no])
+        _am_tools=none
+      fi],
+
+  [pax],
+    [],
+
+  [m4_fatal([Unknown tar format])])
+
+  AC_MSG_CHECKING([how to create a $1 tar archive])
+
+  # Go ahead even if we have the value already cached.  We do so because we
+  # need to set the values for the 'am__tar' and 'am__untar' variables.
+  _am_tools=${am_cv_prog_tar_$1-$_am_tools}
+
+  for _am_tool in $_am_tools; do
+    case $_am_tool in
+    gnutar)
+      for _am_tar in tar gnutar gtar; do
+        AM_RUN_LOG([$_am_tar --version]) && break
+      done
+      am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"'
+      am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"'
+      am__untar="$_am_tar -xf -"
+      ;;
+    plaintar)
+      # Must skip GNU tar: if it does not support --format= it doesn't create
+      # ustar tarball either.
+      (tar --version) >/dev/null 2>&1 && continue
+      am__tar='tar chf - "$$tardir"'
+      am__tar_='tar chf - "$tardir"'
+      am__untar='tar xf -'
+      ;;
+    pax)
+      am__tar='pax -L -x $1 -w "$$tardir"'
+      am__tar_='pax -L -x $1 -w "$tardir"'
+      am__untar='pax -r'
+      ;;
+    cpio)
+      am__tar='find "$$tardir" -print | cpio -o -H $1 -L'
+      am__tar_='find "$tardir" -print | cpio -o -H $1 -L'
+      am__untar='cpio -i -H $1 -d'
+      ;;
+    none)
+      am__tar=false
+      am__tar_=false
+      am__untar=false
+      ;;
+    esac
+
+    # If the value was cached, stop now.  We just wanted to have am__tar
+    # and am__untar set.
+    test -n "${am_cv_prog_tar_$1}" && break
+
+    # tar/untar a dummy directory, and stop if the command works.
+    rm -rf conftest.dir
+    mkdir conftest.dir
+    echo GrepMe > conftest.dir/file
+    AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
+    rm -rf conftest.dir
+    if test -s conftest.tar; then
+      AM_RUN_LOG([$am__untar <conftest.tar])
+      AM_RUN_LOG([cat conftest.dir/file])
+      grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
+    fi
+  done
+  rm -rf conftest.dir
+
+  AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
+  AC_MSG_RESULT([$am_cv_prog_tar_$1])])
+
+AC_SUBST([am__tar])
+AC_SUBST([am__untar])
+]) # _AM_PROG_TAR
+
Index: applgrid/tags/applgrid-01-06-36/install-sh
===================================================================
--- applgrid/tags/applgrid-01-06-36/install-sh	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/install-sh	(revision 2010)
@@ -0,0 +1,501 @@
+#!/bin/sh
+# install - install a program, script, or datafile
+
+scriptversion=2013-12-25.23; # UTC
+
+# This originates from X11R5 (mit/util/scripts/install.sh), which was
+# later released in X11R6 (xc/config/util/install.sh) with the
+# following copyright and license.
+#
+# Copyright (C) 1994 X Consortium
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC-
+# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+# Except as contained in this notice, the name of the X Consortium shall not
+# be used in advertising or otherwise to promote the sale, use or other deal-
+# ings in this Software without prior written authorization from the X Consor-
+# tium.
+#
+#
+# FSF changes to this file are in the public domain.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# 'make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch.
+
+tab='	'
+nl='
+'
+IFS=" $tab$nl"
+
+# Set DOITPROG to "echo" to test this script.
+
+doit=${DOITPROG-}
+doit_exec=${doit:-exec}
+
+# Put in absolute file names if you don't have them in your path;
+# or use environment vars.
+
+chgrpprog=${CHGRPPROG-chgrp}
+chmodprog=${CHMODPROG-chmod}
+chownprog=${CHOWNPROG-chown}
+cmpprog=${CMPPROG-cmp}
+cpprog=${CPPROG-cp}
+mkdirprog=${MKDIRPROG-mkdir}
+mvprog=${MVPROG-mv}
+rmprog=${RMPROG-rm}
+stripprog=${STRIPPROG-strip}
+
+posix_mkdir=
+
+# Desired mode of installed file.
+mode=0755
+
+chgrpcmd=
+chmodcmd=$chmodprog
+chowncmd=
+mvcmd=$mvprog
+rmcmd="$rmprog -f"
+stripcmd=
+
+src=
+dst=
+dir_arg=
+dst_arg=
+
+copy_on_change=false
+is_target_a_directory=possibly
+
+usage="\
+Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE
+   or: $0 [OPTION]... SRCFILES... DIRECTORY
+   or: $0 [OPTION]... -t DIRECTORY SRCFILES...
+   or: $0 [OPTION]... -d DIRECTORIES...
+
+In the 1st form, copy SRCFILE to DSTFILE.
+In the 2nd and 3rd, copy all SRCFILES to DIRECTORY.
+In the 4th, create DIRECTORIES.
+
+Options:
+     --help     display this help and exit.
+     --version  display version info and exit.
+
+  -c            (ignored)
+  -C            install only if different (preserve the last data modification time)
+  -d            create directories instead of installing files.
+  -g GROUP      $chgrpprog installed files to GROUP.
+  -m MODE       $chmodprog installed files to MODE.
+  -o USER       $chownprog installed files to USER.
+  -s            $stripprog installed files.
+  -t DIRECTORY  install into DIRECTORY.
+  -T            report an error if DSTFILE is a directory.
+
+Environment variables override the default commands:
+  CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG
+  RMPROG STRIPPROG
+"
+
+while test $# -ne 0; do
+  case $1 in
+    -c) ;;
+
+    -C) copy_on_change=true;;
+
+    -d) dir_arg=true;;
+
+    -g) chgrpcmd="$chgrpprog $2"
+        shift;;
+
+    --help) echo "$usage"; exit $?;;
+
+    -m) mode=$2
+        case $mode in
+          *' '* | *"$tab"* | *"$nl"* | *'*'* | *'?'* | *'['*)
+            echo "$0: invalid mode: $mode" >&2
+            exit 1;;
+        esac
+        shift;;
+
+    -o) chowncmd="$chownprog $2"
+        shift;;
+
+    -s) stripcmd=$stripprog;;
+
+    -t)
+        is_target_a_directory=always
+        dst_arg=$2
+        # Protect names problematic for 'test' and other utilities.
+        case $dst_arg in
+          -* | [=\(\)!]) dst_arg=./$dst_arg;;
+        esac
+        shift;;
+
+    -T) is_target_a_directory=never;;
+
+    --version) echo "$0 $scriptversion"; exit $?;;
+
+    --) shift
+        break;;
+
+    -*) echo "$0: invalid option: $1" >&2
+        exit 1;;
+
+    *)  break;;
+  esac
+  shift
+done
+
+# We allow the use of options -d and -T together, by making -d
+# take precedence; this is for compatibility with GNU install.
+
+if test -n "$dir_arg"; then
+  if test -n "$dst_arg"; then
+    echo "$0: target directory not allowed when installing a directory." >&2
+    exit 1
+  fi
+fi
+
+if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then
+  # When -d is used, all remaining arguments are directories to create.
+  # When -t is used, the destination is already specified.
+  # Otherwise, the last argument is the destination.  Remove it from $@.
+  for arg
+  do
+    if test -n "$dst_arg"; then
+      # $@ is not empty: it contains at least $arg.
+      set fnord "$@" "$dst_arg"
+      shift # fnord
+    fi
+    shift # arg
+    dst_arg=$arg
+    # Protect names problematic for 'test' and other utilities.
+    case $dst_arg in
+      -* | [=\(\)!]) dst_arg=./$dst_arg;;
+    esac
+  done
+fi
+
+if test $# -eq 0; then
+  if test -z "$dir_arg"; then
+    echo "$0: no input file specified." >&2
+    exit 1
+  fi
+  # It's OK to call 'install-sh -d' without argument.
+  # This can happen when creating conditional directories.
+  exit 0
+fi
+
+if test -z "$dir_arg"; then
+  if test $# -gt 1 || test "$is_target_a_directory" = always; then
+    if test ! -d "$dst_arg"; then
+      echo "$0: $dst_arg: Is not a directory." >&2
+      exit 1
+    fi
+  fi
+fi
+
+if test -z "$dir_arg"; then
+  do_exit='(exit $ret); exit $ret'
+  trap "ret=129; $do_exit" 1
+  trap "ret=130; $do_exit" 2
+  trap "ret=141; $do_exit" 13
+  trap "ret=143; $do_exit" 15
+
+  # Set umask so as not to create temps with too-generous modes.
+  # However, 'strip' requires both read and write access to temps.
+  case $mode in
+    # Optimize common cases.
+    *644) cp_umask=133;;
+    *755) cp_umask=22;;
+
+    *[0-7])
+      if test -z "$stripcmd"; then
+        u_plus_rw=
+      else
+        u_plus_rw='% 200'
+      fi
+      cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;;
+    *)
+      if test -z "$stripcmd"; then
+        u_plus_rw=
+      else
+        u_plus_rw=,u+rw
+      fi
+      cp_umask=$mode$u_plus_rw;;
+  esac
+fi
+
+for src
+do
+  # Protect names problematic for 'test' and other utilities.
+  case $src in
+    -* | [=\(\)!]) src=./$src;;
+  esac
+
+  if test -n "$dir_arg"; then
+    dst=$src
+    dstdir=$dst
+    test -d "$dstdir"
+    dstdir_status=$?
+  else
+
+    # Waiting for this to be detected by the "$cpprog $src $dsttmp" command
+    # might cause directories to be created, which would be especially bad
+    # if $src (and thus $dsttmp) contains '*'.
+    if test ! -f "$src" && test ! -d "$src"; then
+      echo "$0: $src does not exist." >&2
+      exit 1
+    fi
+
+    if test -z "$dst_arg"; then
+      echo "$0: no destination specified." >&2
+      exit 1
+    fi
+    dst=$dst_arg
+
+    # If destination is a directory, append the input filename; won't work
+    # if double slashes aren't ignored.
+    if test -d "$dst"; then
+      if test "$is_target_a_directory" = never; then
+        echo "$0: $dst_arg: Is a directory" >&2
+        exit 1
+      fi
+      dstdir=$dst
+      dst=$dstdir/`basename "$src"`
+      dstdir_status=0
+    else
+      dstdir=`dirname "$dst"`
+      test -d "$dstdir"
+      dstdir_status=$?
+    fi
+  fi
+
+  obsolete_mkdir_used=false
+
+  if test $dstdir_status != 0; then
+    case $posix_mkdir in
+      '')
+        # Create intermediate dirs using mode 755 as modified by the umask.
+        # This is like FreeBSD 'install' as of 1997-10-28.
+        umask=`umask`
+        case $stripcmd.$umask in
+          # Optimize common cases.
+          *[2367][2367]) mkdir_umask=$umask;;
+          .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;;
+
+          *[0-7])
+            mkdir_umask=`expr $umask + 22 \
+              - $umask % 100 % 40 + $umask % 20 \
+              - $umask % 10 % 4 + $umask % 2
+            `;;
+          *) mkdir_umask=$umask,go-w;;
+        esac
+
+        # With -d, create the new directory with the user-specified mode.
+        # Otherwise, rely on $mkdir_umask.
+        if test -n "$dir_arg"; then
+          mkdir_mode=-m$mode
+        else
+          mkdir_mode=
+        fi
+
+        posix_mkdir=false
+        case $umask in
+          *[123567][0-7][0-7])
+            # POSIX mkdir -p sets u+wx bits regardless of umask, which
+            # is incompatible with FreeBSD 'install' when (umask & 300) != 0.
+            ;;
+          *)
+            tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$
+            trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0
+
+            if (umask $mkdir_umask &&
+                exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1
+            then
+              if test -z "$dir_arg" || {
+                   # Check for POSIX incompatibilities with -m.
+                   # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or
+                   # other-writable bit of parent directory when it shouldn't.
+                   # FreeBSD 6.1 mkdir -m -p sets mode of existing directory.
+                   ls_ld_tmpdir=`ls -ld "$tmpdir"`
+                   case $ls_ld_tmpdir in
+                     d????-?r-*) different_mode=700;;
+                     d????-?--*) different_mode=755;;
+                     *) false;;
+                   esac &&
+                   $mkdirprog -m$different_mode -p -- "$tmpdir" && {
+                     ls_ld_tmpdir_1=`ls -ld "$tmpdir"`
+                     test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1"
+                   }
+                 }
+              then posix_mkdir=:
+              fi
+              rmdir "$tmpdir/d" "$tmpdir"
+            else
+              # Remove any dirs left behind by ancient mkdir implementations.
+              rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null
+            fi
+            trap '' 0;;
+        esac;;
+    esac
+
+    if
+      $posix_mkdir && (
+        umask $mkdir_umask &&
+        $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir"
+      )
+    then :
+    else
+
+      # The umask is ridiculous, or mkdir does not conform to POSIX,
+      # or it failed possibly due to a race condition.  Create the
+      # directory the slow way, step by step, checking for races as we go.
+
+      case $dstdir in
+        /*) prefix='/';;
+        [-=\(\)!]*) prefix='./';;
+        *)  prefix='';;
+      esac
+
+      oIFS=$IFS
+      IFS=/
+      set -f
+      set fnord $dstdir
+      shift
+      set +f
+      IFS=$oIFS
+
+      prefixes=
+
+      for d
+      do
+        test X"$d" = X && continue
+
+        prefix=$prefix$d
+        if test -d "$prefix"; then
+          prefixes=
+        else
+          if $posix_mkdir; then
+            (umask=$mkdir_umask &&
+             $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break
+            # Don't fail if two instances are running concurrently.
+            test -d "$prefix" || exit 1
+          else
+            case $prefix in
+              *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;;
+              *) qprefix=$prefix;;
+            esac
+            prefixes="$prefixes '$qprefix'"
+          fi
+        fi
+        prefix=$prefix/
+      done
+
+      if test -n "$prefixes"; then
+        # Don't fail if two instances are running concurrently.
+        (umask $mkdir_umask &&
+         eval "\$doit_exec \$mkdirprog $prefixes") ||
+          test -d "$dstdir" || exit 1
+        obsolete_mkdir_used=true
+      fi
+    fi
+  fi
+
+  if test -n "$dir_arg"; then
+    { test -z "$chowncmd" || $doit $chowncmd "$dst"; } &&
+    { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } &&
+    { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false ||
+      test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1
+  else
+
+    # Make a couple of temp file names in the proper directory.
+    dsttmp=$dstdir/_inst.$$_
+    rmtmp=$dstdir/_rm.$$_
+
+    # Trap to clean up those temp files at exit.
+    trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0
+
+    # Copy the file name to the temp name.
+    (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") &&
+
+    # and set any options; do chmod last to preserve setuid bits.
+    #
+    # If any of these fail, we abort the whole thing.  If we want to
+    # ignore errors from any of these, just make sure not to ignore
+    # errors from the above "$doit $cpprog $src $dsttmp" command.
+    #
+    { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } &&
+    { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } &&
+    { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } &&
+    { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } &&
+
+    # If -C, don't bother to copy if it wouldn't change the file.
+    if $copy_on_change &&
+       old=`LC_ALL=C ls -dlL "$dst"     2>/dev/null` &&
+       new=`LC_ALL=C ls -dlL "$dsttmp"  2>/dev/null` &&
+       set -f &&
+       set X $old && old=:$2:$4:$5:$6 &&
+       set X $new && new=:$2:$4:$5:$6 &&
+       set +f &&
+       test "$old" = "$new" &&
+       $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1
+    then
+      rm -f "$dsttmp"
+    else
+      # Rename the file to the real destination.
+      $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null ||
+
+      # The rename failed, perhaps because mv can't rename something else
+      # to itself, or perhaps because mv is so ancient that it does not
+      # support -f.
+      {
+        # Now remove or move aside any old file at destination location.
+        # We try this two ways since rm can't unlink itself on some
+        # systems and the destination file might be busy for other
+        # reasons.  In this case, the final cleanup might fail but the new
+        # file should still install successfully.
+        {
+          test ! -f "$dst" ||
+          $doit $rmcmd -f "$dst" 2>/dev/null ||
+          { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null &&
+            { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; }
+          } ||
+          { echo "$0: cannot unlink or rename $dst" >&2
+            (exit 1); exit 1
+          }
+        } &&
+
+        # Now rename the file to the real destination.
+        $doit $mvcmd "$dsttmp" "$dst"
+      }
+    fi || exit 1
+
+    trap '' 0
+  fi
+done
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:

Property changes on: applgrid/tags/applgrid-01-06-36/install-sh
___________________________________________________________________
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Index: applgrid/tags/applgrid-01-06-36/configure
===================================================================
--- applgrid/tags/applgrid-01-06-36/configure	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/configure	(revision 2010)
@@ -0,0 +1,19877 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.71 for APPLgrid 1.6.36.
+#
+# Report bugs to <sutt@cern.ch>.
+#
+#
+# Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation,
+# Inc.
+#
+#
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+as_nop=:
+if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1
+then :
+  emulate sh
+  NULLCMD=:
+  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+  # is contrary to our usage.  Disable this feature.
+  alias -g '${1+"$@"}'='"$@"'
+  setopt NO_GLOB_SUBST
+else $as_nop
+  case `(set -o) 2>/dev/null` in #(
+  *posix*) :
+    set -o posix ;; #(
+  *) :
+     ;;
+esac
+fi
+
+
+
+# Reset variables that may have inherited troublesome values from
+# the environment.
+
+# IFS needs to be set, to space, tab, and newline, in precisely that order.
+# (If _AS_PATH_WALK were called with IFS unset, it would have the
+# side effect of setting IFS to empty, thus disabling word splitting.)
+# Quoting is to prevent editors from complaining about space-tab.
+as_nl='
+'
+export as_nl
+IFS=" ""	$as_nl"
+
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# Ensure predictable behavior from utilities with locale-dependent output.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# We cannot yet rely on "unset" to work, but we need these variables
+# to be unset--not just set to an empty or harmless value--now, to
+# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh).  This construct
+# also avoids known problems related to "unset" and subshell syntax
+# in other old shells (e.g. bash 2.01 and pdksh 5.2.14).
+for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH
+do eval test \${$as_var+y} \
+  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+
+# Ensure that fds 0, 1, and 2 are open.
+if (exec 3>&0) 2>/dev/null; then :; else exec 0</dev/null; fi
+if (exec 3>&1) 2>/dev/null; then :; else exec 1>/dev/null; fi
+if (exec 3>&2)            ; then :; else exec 2>/dev/null; fi
+
+# The user is always right.
+if ${PATH_SEPARATOR+false} :; then
+  PATH_SEPARATOR=:
+  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+      PATH_SEPARATOR=';'
+  }
+fi
+
+
+# Find who we are.  Look in the path if we contain no directory separator.
+as_myself=
+case $0 in #((
+  *[\\/]* ) as_myself=$0 ;;
+  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    test -r "$as_dir$0" && as_myself=$as_dir$0 && break
+  done
+IFS=$as_save_IFS
+
+     ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+  as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+  printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+  exit 1
+fi
+
+
+# Use a proper internal environment variable to ensure we don't fall
+  # into an infinite loop, continuously re-executing ourselves.
+  if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then
+    _as_can_reexec=no; export _as_can_reexec;
+    # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+  *v*x* | *x*v* ) as_opts=-vx ;;
+  *v* ) as_opts=-v ;;
+  *x* ) as_opts=-x ;;
+  * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2
+exit 255
+  fi
+  # We don't want this to propagate to other subprocesses.
+          { _as_can_reexec=; unset _as_can_reexec;}
+if test "x$CONFIG_SHELL" = x; then
+  as_bourne_compatible="as_nop=:
+if test \${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1
+then :
+  emulate sh
+  NULLCMD=:
+  # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
+  # is contrary to our usage.  Disable this feature.
+  alias -g '\${1+\"\$@\"}'='\"\$@\"'
+  setopt NO_GLOB_SUBST
+else \$as_nop
+  case \`(set -o) 2>/dev/null\` in #(
+  *posix*) :
+    set -o posix ;; #(
+  *) :
+     ;;
+esac
+fi
+"
+  as_required="as_fn_return () { (exit \$1); }
+as_fn_success () { as_fn_return 0; }
+as_fn_failure () { as_fn_return 1; }
+as_fn_ret_success () { return 0; }
+as_fn_ret_failure () { return 1; }
+
+exitcode=0
+as_fn_success || { exitcode=1; echo as_fn_success failed.; }
+as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
+as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
+as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
+if ( set x; as_fn_ret_success y && test x = \"\$1\" )
+then :
+
+else \$as_nop
+  exitcode=1; echo positional parameters were not saved.
+fi
+test x\$exitcode = x0 || exit 1
+blah=\$(echo \$(echo blah))
+test x\"\$blah\" = xblah || exit 1
+test -x / || exit 1"
+  as_suggested="  as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
+  as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
+  eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
+  test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1
+
+  test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || (
+    ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+    ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO
+    ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO
+    PATH=/empty FPATH=/empty; export PATH FPATH
+    test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\
+      || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1
+test \$(( 1 + 1 )) = 2 || exit 1"
+  if (eval "$as_required") 2>/dev/null
+then :
+  as_have_required=yes
+else $as_nop
+  as_have_required=no
+fi
+  if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null
+then :
+
+else $as_nop
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_found=false
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+  as_found=:
+  case $as_dir in #(
+	 /*)
+	   for as_base in sh bash ksh sh5; do
+	     # Try only shells that exist, to save several forks.
+	     as_shell=$as_dir$as_base
+	     if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
+		    as_run=a "$as_shell" -c "$as_bourne_compatible""$as_required" 2>/dev/null
+then :
+  CONFIG_SHELL=$as_shell as_have_required=yes
+		   if as_run=a "$as_shell" -c "$as_bourne_compatible""$as_suggested" 2>/dev/null
+then :
+  break 2
+fi
+fi
+	   done;;
+       esac
+  as_found=false
+done
+IFS=$as_save_IFS
+if $as_found
+then :
+
+else $as_nop
+  if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
+	      as_run=a "$SHELL" -c "$as_bourne_compatible""$as_required" 2>/dev/null
+then :
+  CONFIG_SHELL=$SHELL as_have_required=yes
+fi
+fi
+
+
+      if test "x$CONFIG_SHELL" != x
+then :
+  export CONFIG_SHELL
+             # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+  *v*x* | *x*v* ) as_opts=-vx ;;
+  *v* ) as_opts=-v ;;
+  *x* ) as_opts=-x ;;
+  * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2
+exit 255
+fi
+
+    if test x$as_have_required = xno
+then :
+  printf "%s\n" "$0: This script requires a shell more modern than all"
+  printf "%s\n" "$0: the shells that I found on your system."
+  if test ${ZSH_VERSION+y} ; then
+    printf "%s\n" "$0: In particular, zsh $ZSH_VERSION has bugs and should"
+    printf "%s\n" "$0: be upgraded to zsh 4.3.4 or later."
+  else
+    printf "%s\n" "$0: Please tell bug-autoconf@gnu.org and sutt@cern.ch about
+$0: your system, including any error possibly output before
+$0: this message. Then install a modern shell, or manually
+$0: run the script under such a shell if you do have one."
+  fi
+  exit 1
+fi
+fi
+fi
+SHELL=${CONFIG_SHELL-/bin/sh}
+export SHELL
+# Unset more variables known to interfere with behavior of common tools.
+CLICOLOR_FORCE= GREP_OPTIONS=
+unset CLICOLOR_FORCE GREP_OPTIONS
+
+## --------------------- ##
+## M4sh Shell Functions. ##
+## --------------------- ##
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+  { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+  return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+  set +e
+  as_fn_set_status $1
+  exit $1
+} # as_fn_exit
+# as_fn_nop
+# ---------
+# Do nothing but, unlike ":", preserve the value of $?.
+as_fn_nop ()
+{
+  return $?
+}
+as_nop=as_fn_nop
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+  case $as_dir in #(
+  -*) as_dir=./$as_dir;;
+  esac
+  test -d "$as_dir" || eval $as_mkdir_p || {
+    as_dirs=
+    while :; do
+      case $as_dir in #(
+      *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+      *) as_qdir=$as_dir;;
+      esac
+      as_dirs="'$as_qdir' $as_dirs"
+      as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$as_dir" : 'X\(//\)[^/]' \| \
+	 X"$as_dir" : 'X\(//\)$' \| \
+	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+printf "%s\n" X"$as_dir" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+      test -d "$as_dir" && break
+    done
+    test -z "$as_dirs" || eval "mkdir $as_dirs"
+  } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+  test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null
+then :
+  eval 'as_fn_append ()
+  {
+    eval $1+=\$2
+  }'
+else $as_nop
+  as_fn_append ()
+  {
+    eval $1=\$$1\$2
+  }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null
+then :
+  eval 'as_fn_arith ()
+  {
+    as_val=$(( $* ))
+  }'
+else $as_nop
+  as_fn_arith ()
+  {
+    as_val=`expr "$@" || test $? -eq 1`
+  }
+fi # as_fn_arith
+
+# as_fn_nop
+# ---------
+# Do nothing but, unlike ":", preserve the value of $?.
+as_fn_nop ()
+{
+  return $?
+}
+as_nop=as_fn_nop
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+  as_status=$1; test $as_status -eq 0 && as_status=1
+  if test "$4"; then
+    as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+  fi
+  printf "%s\n" "$as_me: error: $2" >&2
+  as_fn_exit $as_status
+} # as_fn_error
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+   test "X`expr 00001 : '.*\(...\)'`" = X001; then
+  as_expr=expr
+else
+  as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+  as_basename=basename
+else
+  as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+  as_dirname=dirname
+else
+  as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+	 X"$0" : 'X\(//\)$' \| \
+	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+printf "%s\n" X/"$0" |
+    sed '/^.*\/\([^/][^/]*\)\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+
+  as_lineno_1=$LINENO as_lineno_1a=$LINENO
+  as_lineno_2=$LINENO as_lineno_2a=$LINENO
+  eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
+  test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
+  # Blame Lee E. McMahon (1931-1989) for sed's syntax.  :-)
+  sed -n '
+    p
+    /[$]LINENO/=
+  ' <$as_myself |
+    sed '
+      s/[$]LINENO.*/&-/
+      t lineno
+      b
+      :lineno
+      N
+      :loop
+      s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+      t loop
+      s/-\n.*//
+    ' >$as_me.lineno &&
+  chmod +x "$as_me.lineno" ||
+    { printf "%s\n" "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
+
+  # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
+  # already done that, so ensure we don't try to do so again and fall
+  # in an infinite loop.  This has already happened in practice.
+  _as_can_reexec=no; export _as_can_reexec
+  # Don't try to exec as it changes $[0], causing all sort of problems
+  # (the dirname of $[0] is not the place where we might find the
+  # original and so on.  Autoconf is especially sensitive to this).
+  . "./$as_me.lineno"
+  # Exit status is that of the last command.
+  exit
+}
+
+
+# Determine whether it's possible to make 'echo' print without a newline.
+# These variables are no longer used directly by Autoconf, but are AC_SUBSTed
+# for compatibility with existing Makefiles.
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+  case `echo 'xy\c'` in
+  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
+  xy)  ECHO_C='\c';;
+  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
+       ECHO_T='	';;
+  esac;;
+*)
+  ECHO_N='-n';;
+esac
+
+# For backward compatibility with old third-party macros, we provide
+# the shell variables $as_echo and $as_echo_n.  New code should use
+# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively.
+as_echo='printf %s\n'
+as_echo_n='printf %s'
+
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+  rm -f conf$$.dir/conf$$.file
+else
+  rm -f conf$$.dir
+  mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+  if ln -s conf$$.file conf$$ 2>/dev/null; then
+    as_ln_s='ln -s'
+    # ... but there are two gotchas:
+    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+    # In both cases, we have to default to `cp -pR'.
+    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+      as_ln_s='cp -pR'
+  elif ln conf$$.file conf$$ 2>/dev/null; then
+    as_ln_s=ln
+  else
+    as_ln_s='cp -pR'
+  fi
+else
+  as_ln_s='cp -pR'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+  as_mkdir_p='mkdir -p "$as_dir"'
+else
+  test -d ./-p && rmdir ./-p
+  as_mkdir_p=false
+fi
+
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+
+test -n "$DJDIR" || exec 7<&0 </dev/null
+exec 6>&1
+
+# Name of the host.
+# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_clean_files=
+ac_config_libobj_dir=.
+LIBOBJS=
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+
+# Identity of this package.
+PACKAGE_NAME='APPLgrid'
+PACKAGE_TARNAME='applgrid'
+PACKAGE_VERSION='1.6.36'
+PACKAGE_STRING='APPLgrid 1.6.36'
+PACKAGE_BUGREPORT='sutt@cern.ch'
+PACKAGE_URL='https://applgrid.hepforge.org'
+
+# Factoring default headers for most tests.
+ac_includes_default="\
+#include <stddef.h>
+#ifdef HAVE_STDIO_H
+# include <stdio.h>
+#endif
+#ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+#endif
+#ifdef HAVE_STRING_H
+# include <string.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#ifdef HAVE_STDINT_H
+# include <stdint.h>
+#endif
+#ifdef HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#ifdef HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+ac_header_c_list=
+ac_subst_vars='am__EXEEXT_FALSE
+am__EXEEXT_TRUE
+LTLIBOBJS
+LIBOBJS
+USE_LHAPDF_FALSE
+USE_LHAPDF_TRUE
+LHAPDF_CXXFLAGS
+LHAPDFPATH
+FRTLLIB
+USE_FRTLSO_FALSE
+USE_FRTLSO_TRUE
+USE_HOPPET_FALSE
+USE_HOPPET_TRUE
+HOPPET_CXXFLAGS
+HOPPET_LDFLAGS
+HOPPETPATH
+USE_ROOT_FALSE
+USE_ROOT_TRUE
+ROOT_CXXFLAGS
+ROOT_LDFLAGS
+ROOTPATH
+USEROOT
+CXXCPP
+LT_SYS_LIBRARY_PATH
+OTOOL64
+OTOOL
+LIPO
+NMEDIT
+DSYMUTIL
+MANIFEST_TOOL
+RANLIB
+ac_ct_AR
+AR
+DLLTOOL
+OBJDUMP
+LN_S
+NM
+ac_ct_DUMPBIN
+DUMPBIN
+LD
+FGREP
+EGREP
+GREP
+SED
+host_os
+host_vendor
+host_cpu
+host
+build_os
+build_vendor
+build_cpu
+build
+LIBTOOL
+am__fastdepCXX_FALSE
+am__fastdepCXX_TRUE
+CXXDEPMODE
+ac_ct_CXX
+CXXFLAGS
+CXX
+am__fastdepCC_FALSE
+am__fastdepCC_TRUE
+CCDEPMODE
+am__nodep
+AMDEPBACKSLASH
+AMDEP_FALSE
+AMDEP_TRUE
+am__include
+DEPDIR
+OBJEXT
+EXEEXT
+ac_ct_CC
+CPPFLAGS
+LDFLAGS
+CFLAGS
+CC
+AM_BACKSLASH
+AM_DEFAULT_VERBOSITY
+AM_DEFAULT_V
+AM_V
+CSCOPE
+ETAGS
+CTAGS
+am__untar
+am__tar
+AMTAR
+am__leading_dot
+SET_MAKE
+AWK
+mkdir_p
+MKDIR_P
+INSTALL_STRIP_PROGRAM
+STRIP
+install_sh
+MAKEINFO
+AUTOHEADER
+AUTOMAKE
+AUTOCONF
+ACLOCAL
+VERSION
+PACKAGE
+CYGPATH_W
+am__isrc
+INSTALL_DATA
+INSTALL_SCRIPT
+INSTALL_PROGRAM
+target_alias
+host_alias
+build_alias
+LIBS
+ECHO_T
+ECHO_N
+ECHO_C
+DEFS
+mandir
+localedir
+libdir
+psdir
+pdfdir
+dvidir
+htmldir
+infodir
+docdir
+oldincludedir
+includedir
+runstatedir
+localstatedir
+sharedstatedir
+sysconfdir
+datadir
+datarootdir
+libexecdir
+sbindir
+bindir
+program_transform_name
+prefix
+exec_prefix
+PACKAGE_URL
+PACKAGE_BUGREPORT
+PACKAGE_STRING
+PACKAGE_VERSION
+PACKAGE_TARNAME
+PACKAGE_NAME
+PATH_SEPARATOR
+SHELL
+am__quote'
+ac_subst_files=''
+ac_user_opts='
+enable_option_checking
+enable_silent_rules
+enable_dependency_tracking
+enable_shared
+enable_static
+with_pic
+enable_fast_install
+with_aix_soname
+with_gnu_ld
+with_sysroot
+enable_libtool_lock
+with_root
+'
+      ac_precious_vars='build_alias
+host_alias
+target_alias
+CC
+CFLAGS
+LDFLAGS
+LIBS
+CPPFLAGS
+CXX
+CXXFLAGS
+CCC
+LT_SYS_LIBRARY_PATH
+CXXCPP'
+
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+ac_unrecognized_opts=
+ac_unrecognized_sep=
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datarootdir='${prefix}/share'
+datadir='${datarootdir}'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+runstatedir='${localstatedir}/run'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
+infodir='${datarootdir}/info'
+htmldir='${docdir}'
+dvidir='${docdir}'
+pdfdir='${docdir}'
+psdir='${docdir}'
+libdir='${exec_prefix}/lib'
+localedir='${datarootdir}/locale'
+mandir='${datarootdir}/man'
+
+ac_prev=
+ac_dashdash=
+for ac_option
+do
+  # If the previous option needs an argument, assign it.
+  if test -n "$ac_prev"; then
+    eval $ac_prev=\$ac_option
+    ac_prev=
+    continue
+  fi
+
+  case $ac_option in
+  *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+  *=)   ac_optarg= ;;
+  *)    ac_optarg=yes ;;
+  esac
+
+  case $ac_dashdash$ac_option in
+  --)
+    ac_dashdash=yes ;;
+
+  -bindir | --bindir | --bindi | --bind | --bin | --bi)
+    ac_prev=bindir ;;
+  -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+    bindir=$ac_optarg ;;
+
+  -build | --build | --buil | --bui | --bu)
+    ac_prev=build_alias ;;
+  -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+    build_alias=$ac_optarg ;;
+
+  -cache-file | --cache-file | --cache-fil | --cache-fi \
+  | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+    ac_prev=cache_file ;;
+  -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+  | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+    cache_file=$ac_optarg ;;
+
+  --config-cache | -C)
+    cache_file=config.cache ;;
+
+  -datadir | --datadir | --datadi | --datad)
+    ac_prev=datadir ;;
+  -datadir=* | --datadir=* | --datadi=* | --datad=*)
+    datadir=$ac_optarg ;;
+
+  -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
+  | --dataroo | --dataro | --datar)
+    ac_prev=datarootdir ;;
+  -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
+  | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
+    datarootdir=$ac_optarg ;;
+
+  -disable-* | --disable-*)
+    ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+    # Reject names that are not valid shell variable names.
+    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+      as_fn_error $? "invalid feature name: \`$ac_useropt'"
+    ac_useropt_orig=$ac_useropt
+    ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'`
+    case $ac_user_opts in
+      *"
+"enable_$ac_useropt"
+"*) ;;
+      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
+	 ac_unrecognized_sep=', ';;
+    esac
+    eval enable_$ac_useropt=no ;;
+
+  -docdir | --docdir | --docdi | --doc | --do)
+    ac_prev=docdir ;;
+  -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
+    docdir=$ac_optarg ;;
+
+  -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
+    ac_prev=dvidir ;;
+  -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
+    dvidir=$ac_optarg ;;
+
+  -enable-* | --enable-*)
+    ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+    # Reject names that are not valid shell variable names.
+    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+      as_fn_error $? "invalid feature name: \`$ac_useropt'"
+    ac_useropt_orig=$ac_useropt
+    ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'`
+    case $ac_user_opts in
+      *"
+"enable_$ac_useropt"
+"*) ;;
+      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
+	 ac_unrecognized_sep=', ';;
+    esac
+    eval enable_$ac_useropt=\$ac_optarg ;;
+
+  -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+  | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+  | --exec | --exe | --ex)
+    ac_prev=exec_prefix ;;
+  -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+  | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+  | --exec=* | --exe=* | --ex=*)
+    exec_prefix=$ac_optarg ;;
+
+  -gas | --gas | --ga | --g)
+    # Obsolete; use --with-gas.
+    with_gas=yes ;;
+
+  -help | --help | --hel | --he | -h)
+    ac_init_help=long ;;
+  -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
+    ac_init_help=recursive ;;
+  -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
+    ac_init_help=short ;;
+
+  -host | --host | --hos | --ho)
+    ac_prev=host_alias ;;
+  -host=* | --host=* | --hos=* | --ho=*)
+    host_alias=$ac_optarg ;;
+
+  -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
+    ac_prev=htmldir ;;
+  -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
+  | --ht=*)
+    htmldir=$ac_optarg ;;
+
+  -includedir | --includedir | --includedi | --included | --include \
+  | --includ | --inclu | --incl | --inc)
+    ac_prev=includedir ;;
+  -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+  | --includ=* | --inclu=* | --incl=* | --inc=*)
+    includedir=$ac_optarg ;;
+
+  -infodir | --infodir | --infodi | --infod | --info | --inf)
+    ac_prev=infodir ;;
+  -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+    infodir=$ac_optarg ;;
+
+  -libdir | --libdir | --libdi | --libd)
+    ac_prev=libdir ;;
+  -libdir=* | --libdir=* | --libdi=* | --libd=*)
+    libdir=$ac_optarg ;;
+
+  -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+  | --libexe | --libex | --libe)
+    ac_prev=libexecdir ;;
+  -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+  | --libexe=* | --libex=* | --libe=*)
+    libexecdir=$ac_optarg ;;
+
+  -localedir | --localedir | --localedi | --localed | --locale)
+    ac_prev=localedir ;;
+  -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
+    localedir=$ac_optarg ;;
+
+  -localstatedir | --localstatedir | --localstatedi | --localstated \
+  | --localstate | --localstat | --localsta | --localst | --locals)
+    ac_prev=localstatedir ;;
+  -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+  | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
+    localstatedir=$ac_optarg ;;
+
+  -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+    ac_prev=mandir ;;
+  -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+    mandir=$ac_optarg ;;
+
+  -nfp | --nfp | --nf)
+    # Obsolete; use --without-fp.
+    with_fp=no ;;
+
+  -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+  | --no-cr | --no-c | -n)
+    no_create=yes ;;
+
+  -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+  | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+    no_recursion=yes ;;
+
+  -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+  | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+  | --oldin | --oldi | --old | --ol | --o)
+    ac_prev=oldincludedir ;;
+  -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+  | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+  | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+    oldincludedir=$ac_optarg ;;
+
+  -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+    ac_prev=prefix ;;
+  -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+    prefix=$ac_optarg ;;
+
+  -program-prefix | --program-prefix | --program-prefi | --program-pref \
+  | --program-pre | --program-pr | --program-p)
+    ac_prev=program_prefix ;;
+  -program-prefix=* | --program-prefix=* | --program-prefi=* \
+  | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+    program_prefix=$ac_optarg ;;
+
+  -program-suffix | --program-suffix | --program-suffi | --program-suff \
+  | --program-suf | --program-su | --program-s)
+    ac_prev=program_suffix ;;
+  -program-suffix=* | --program-suffix=* | --program-suffi=* \
+  | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+    program_suffix=$ac_optarg ;;
+
+  -program-transform-name | --program-transform-name \
+  | --program-transform-nam | --program-transform-na \
+  | --program-transform-n | --program-transform- \
+  | --program-transform | --program-transfor \
+  | --program-transfo | --program-transf \
+  | --program-trans | --program-tran \
+  | --progr-tra | --program-tr | --program-t)
+    ac_prev=program_transform_name ;;
+  -program-transform-name=* | --program-transform-name=* \
+  | --program-transform-nam=* | --program-transform-na=* \
+  | --program-transform-n=* | --program-transform-=* \
+  | --program-transform=* | --program-transfor=* \
+  | --program-transfo=* | --program-transf=* \
+  | --program-trans=* | --program-tran=* \
+  | --progr-tra=* | --program-tr=* | --program-t=*)
+    program_transform_name=$ac_optarg ;;
+
+  -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
+    ac_prev=pdfdir ;;
+  -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
+    pdfdir=$ac_optarg ;;
+
+  -psdir | --psdir | --psdi | --psd | --ps)
+    ac_prev=psdir ;;
+  -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
+    psdir=$ac_optarg ;;
+
+  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+  | -silent | --silent | --silen | --sile | --sil)
+    silent=yes ;;
+
+  -runstatedir | --runstatedir | --runstatedi | --runstated \
+  | --runstate | --runstat | --runsta | --runst | --runs \
+  | --run | --ru | --r)
+    ac_prev=runstatedir ;;
+  -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
+  | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
+  | --run=* | --ru=* | --r=*)
+    runstatedir=$ac_optarg ;;
+
+  -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+    ac_prev=sbindir ;;
+  -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+  | --sbi=* | --sb=*)
+    sbindir=$ac_optarg ;;
+
+  -sharedstatedir | --sharedstatedir | --sharedstatedi \
+  | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+  | --sharedst | --shareds | --shared | --share | --shar \
+  | --sha | --sh)
+    ac_prev=sharedstatedir ;;
+  -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+  | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+  | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+  | --sha=* | --sh=*)
+    sharedstatedir=$ac_optarg ;;
+
+  -site | --site | --sit)
+    ac_prev=site ;;
+  -site=* | --site=* | --sit=*)
+    site=$ac_optarg ;;
+
+  -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+    ac_prev=srcdir ;;
+  -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+    srcdir=$ac_optarg ;;
+
+  -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+  | --syscon | --sysco | --sysc | --sys | --sy)
+    ac_prev=sysconfdir ;;
+  -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+  | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+    sysconfdir=$ac_optarg ;;
+
+  -target | --target | --targe | --targ | --tar | --ta | --t)
+    ac_prev=target_alias ;;
+  -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+    target_alias=$ac_optarg ;;
+
+  -v | -verbose | --verbose | --verbos | --verbo | --verb)
+    verbose=yes ;;
+
+  -version | --version | --versio | --versi | --vers | -V)
+    ac_init_version=: ;;
+
+  -with-* | --with-*)
+    ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+    # Reject names that are not valid shell variable names.
+    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+      as_fn_error $? "invalid package name: \`$ac_useropt'"
+    ac_useropt_orig=$ac_useropt
+    ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'`
+    case $ac_user_opts in
+      *"
+"with_$ac_useropt"
+"*) ;;
+      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
+	 ac_unrecognized_sep=', ';;
+    esac
+    eval with_$ac_useropt=\$ac_optarg ;;
+
+  -without-* | --without-*)
+    ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+    # Reject names that are not valid shell variable names.
+    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+      as_fn_error $? "invalid package name: \`$ac_useropt'"
+    ac_useropt_orig=$ac_useropt
+    ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'`
+    case $ac_user_opts in
+      *"
+"with_$ac_useropt"
+"*) ;;
+      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
+	 ac_unrecognized_sep=', ';;
+    esac
+    eval with_$ac_useropt=no ;;
+
+  --x)
+    # Obsolete; use --with-x.
+    with_x=yes ;;
+
+  -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+  | --x-incl | --x-inc | --x-in | --x-i)
+    ac_prev=x_includes ;;
+  -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+  | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+    x_includes=$ac_optarg ;;
+
+  -x-libraries | --x-libraries | --x-librarie | --x-librari \
+  | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+    ac_prev=x_libraries ;;
+  -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+  | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+    x_libraries=$ac_optarg ;;
+
+  -*) as_fn_error $? "unrecognized option: \`$ac_option'
+Try \`$0 --help' for more information"
+    ;;
+
+  *=*)
+    ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
+    # Reject names that are not valid shell variable names.
+    case $ac_envvar in #(
+      '' | [0-9]* | *[!_$as_cr_alnum]* )
+      as_fn_error $? "invalid variable name: \`$ac_envvar'" ;;
+    esac
+    eval $ac_envvar=\$ac_optarg
+    export $ac_envvar ;;
+
+  *)
+    # FIXME: should be removed in autoconf 3.0.
+    printf "%s\n" "$as_me: WARNING: you should use --build, --host, --target" >&2
+    expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+      printf "%s\n" "$as_me: WARNING: invalid host type: $ac_option" >&2
+    : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}"
+    ;;
+
+  esac
+done
+
+if test -n "$ac_prev"; then
+  ac_option=--`echo $ac_prev | sed 's/_/-/g'`
+  as_fn_error $? "missing argument to $ac_option"
+fi
+
+if test -n "$ac_unrecognized_opts"; then
+  case $enable_option_checking in
+    no) ;;
+    fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;;
+    *)     printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
+  esac
+fi
+
+# Check all directory arguments for consistency.
+for ac_var in	exec_prefix prefix bindir sbindir libexecdir datarootdir \
+		datadir sysconfdir sharedstatedir localstatedir includedir \
+		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
+		libdir localedir mandir runstatedir
+do
+  eval ac_val=\$$ac_var
+  # Remove trailing slashes.
+  case $ac_val in
+    */ )
+      ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
+      eval $ac_var=\$ac_val;;
+  esac
+  # Be sure to have absolute directory names.
+  case $ac_val in
+    [\\/$]* | ?:[\\/]* )  continue;;
+    NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
+  esac
+  as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val"
+done
+
+# There might be people who depend on the old broken behavior: `$host'
+# used to hold the argument of --host etc.
+# FIXME: To remove some day.
+build=$build_alias
+host=$host_alias
+target=$target_alias
+
+# FIXME: To remove some day.
+if test "x$host_alias" != x; then
+  if test "x$build_alias" = x; then
+    cross_compiling=maybe
+  elif test "x$build_alias" != "x$host_alias"; then
+    cross_compiling=yes
+  fi
+fi
+
+ac_tool_prefix=
+test -n "$host_alias" && ac_tool_prefix=$host_alias-
+
+test "$silent" = yes && exec 6>/dev/null
+
+
+ac_pwd=`pwd` && test -n "$ac_pwd" &&
+ac_ls_di=`ls -di .` &&
+ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
+  as_fn_error $? "working directory cannot be determined"
+test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
+  as_fn_error $? "pwd does not report name of working directory"
+
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+  ac_srcdir_defaulted=yes
+  # Try the directory containing this script, then the parent directory.
+  ac_confdir=`$as_dirname -- "$as_myself" ||
+$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$as_myself" : 'X\(//\)[^/]' \| \
+	 X"$as_myself" : 'X\(//\)$' \| \
+	 X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
+printf "%s\n" X"$as_myself" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+  srcdir=$ac_confdir
+  if test ! -r "$srcdir/$ac_unique_file"; then
+    srcdir=..
+  fi
+else
+  ac_srcdir_defaulted=no
+fi
+if test ! -r "$srcdir/$ac_unique_file"; then
+  test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
+  as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir"
+fi
+ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
+ac_abs_confdir=`(
+	cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg"
+	pwd)`
+# When building in place, set srcdir=.
+if test "$ac_abs_confdir" = "$ac_pwd"; then
+  srcdir=.
+fi
+# Remove unnecessary trailing slashes from srcdir.
+# Double slashes in file names in object file debugging info
+# mess up M-x gdb in Emacs.
+case $srcdir in
+*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
+esac
+for ac_var in $ac_precious_vars; do
+  eval ac_env_${ac_var}_set=\${${ac_var}+set}
+  eval ac_env_${ac_var}_value=\$${ac_var}
+  eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
+  eval ac_cv_env_${ac_var}_value=\$${ac_var}
+done
+
+#
+# Report the --help message.
+#
+if test "$ac_init_help" = "long"; then
+  # Omit some internal or obsolete options to make the list less imposing.
+  # This message is too long to be a string in the A/UX 3.1 sh.
+  cat <<_ACEOF
+\`configure' configures APPLgrid 1.6.36 to adapt to many kinds of systems.
+
+Usage: $0 [OPTION]... [VAR=VALUE]...
+
+To assign environment variables (e.g., CC, CFLAGS...), specify them as
+VAR=VALUE.  See below for descriptions of some of the useful variables.
+
+Defaults for the options are specified in brackets.
+
+Configuration:
+  -h, --help              display this help and exit
+      --help=short        display options specific to this package
+      --help=recursive    display the short help of all the included packages
+  -V, --version           display version information and exit
+  -q, --quiet, --silent   do not print \`checking ...' messages
+      --cache-file=FILE   cache test results in FILE [disabled]
+  -C, --config-cache      alias for \`--cache-file=config.cache'
+  -n, --no-create         do not create output files
+      --srcdir=DIR        find the sources in DIR [configure dir or \`..']
+
+Installation directories:
+  --prefix=PREFIX         install architecture-independent files in PREFIX
+                          [$ac_default_prefix]
+  --exec-prefix=EPREFIX   install architecture-dependent files in EPREFIX
+                          [PREFIX]
+
+By default, \`make install' will install all the files in
+\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc.  You can specify
+an installation prefix other than \`$ac_default_prefix' using \`--prefix',
+for instance \`--prefix=\$HOME'.
+
+For better control, use the options below.
+
+Fine tuning of the installation directories:
+  --bindir=DIR            user executables [EPREFIX/bin]
+  --sbindir=DIR           system admin executables [EPREFIX/sbin]
+  --libexecdir=DIR        program executables [EPREFIX/libexec]
+  --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
+  --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
+  --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
+  --runstatedir=DIR       modifiable per-process data [LOCALSTATEDIR/run]
+  --libdir=DIR            object code libraries [EPREFIX/lib]
+  --includedir=DIR        C header files [PREFIX/include]
+  --oldincludedir=DIR     C header files for non-gcc [/usr/include]
+  --datarootdir=DIR       read-only arch.-independent data root [PREFIX/share]
+  --datadir=DIR           read-only architecture-independent data [DATAROOTDIR]
+  --infodir=DIR           info documentation [DATAROOTDIR/info]
+  --localedir=DIR         locale-dependent data [DATAROOTDIR/locale]
+  --mandir=DIR            man documentation [DATAROOTDIR/man]
+  --docdir=DIR            documentation root [DATAROOTDIR/doc/applgrid]
+  --htmldir=DIR           html documentation [DOCDIR]
+  --dvidir=DIR            dvi documentation [DOCDIR]
+  --pdfdir=DIR            pdf documentation [DOCDIR]
+  --psdir=DIR             ps documentation [DOCDIR]
+_ACEOF
+
+  cat <<\_ACEOF
+
+Program names:
+  --program-prefix=PREFIX            prepend PREFIX to installed program names
+  --program-suffix=SUFFIX            append SUFFIX to installed program names
+  --program-transform-name=PROGRAM   run sed PROGRAM on installed program names
+
+System types:
+  --build=BUILD     configure for building on BUILD [guessed]
+  --host=HOST       cross-compile to build programs to run on HOST [BUILD]
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+  case $ac_init_help in
+     short | recursive ) echo "Configuration of APPLgrid 1.6.36:";;
+   esac
+  cat <<\_ACEOF
+
+Optional Features:
+  --disable-option-checking  ignore unrecognized --enable/--with options
+  --disable-FEATURE       do not include FEATURE (same as --enable-FEATURE=no)
+  --enable-FEATURE[=ARG]  include FEATURE [ARG=yes]
+  --enable-silent-rules   less verbose build output (undo: "make V=1")
+  --disable-silent-rules  verbose build output (undo: "make V=0")
+  --enable-dependency-tracking
+                          do not reject slow dependency extractors
+  --disable-dependency-tracking
+                          speeds up one-time build
+  --enable-shared[=PKGS]  build shared libraries [default=yes]
+  --enable-static[=PKGS]  build static libraries [default=yes]
+  --enable-fast-install[=PKGS]
+                          optimize for fast installation [default=yes]
+  --disable-libtool-lock  avoid locking (might break parallel builds)
+
+Optional Packages:
+  --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
+  --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
+  --with-pic[=PKGS]       try to use only PIC/non-PIC objects [default=use
+                          both]
+  --with-aix-soname=aix|svr4|both
+                          shared library versioning (aka "SONAME") variant to
+                          provide on AIX, [default=aix].
+  --with-gnu-ld           assume the C compiler uses GNU ld [default=no]
+  --with-sysroot[=DIR]    Search for dependent libraries within DIR (or the
+                          compiler's sysroot if not specified).
+  --without-root          disable support for root
+
+Some influential environment variables:
+  CC          C compiler command
+  CFLAGS      C compiler flags
+  LDFLAGS     linker flags, e.g. -L<lib dir> if you have libraries in a
+              nonstandard directory <lib dir>
+  LIBS        libraries to pass to the linker, e.g. -l<library>
+  CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
+              you have headers in a nonstandard directory <include dir>
+  CXX         C++ compiler command
+  CXXFLAGS    C++ compiler flags
+  LT_SYS_LIBRARY_PATH
+              User-defined run-time library search path.
+  CXXCPP      C++ preprocessor
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+Report bugs to <sutt@cern.ch>.
+APPLgrid home page: <https://applgrid.hepforge.org>.
+_ACEOF
+ac_status=$?
+fi
+
+if test "$ac_init_help" = "recursive"; then
+  # If there are subdirs, report their specific --help.
+  for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+    test -d "$ac_dir" ||
+      { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
+      continue
+    ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+  ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'`
+  # A ".." for each directory in $ac_dir_suffix.
+  ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+  case $ac_top_builddir_sub in
+  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+  esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+  .)  # We are building in place.
+    ac_srcdir=.
+    ac_top_srcdir=$ac_top_builddir_sub
+    ac_abs_top_srcdir=$ac_pwd ;;
+  [\\/]* | ?:[\\/]* )  # Absolute name.
+    ac_srcdir=$srcdir$ac_dir_suffix;
+    ac_top_srcdir=$srcdir
+    ac_abs_top_srcdir=$srcdir ;;
+  *) # Relative name.
+    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+    ac_top_srcdir=$ac_top_build_prefix$srcdir
+    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+    cd "$ac_dir" || { ac_status=$?; continue; }
+    # Check for configure.gnu first; this name is used for a wrapper for
+    # Metaconfig's "Configure" on case-insensitive file systems.
+    if test -f "$ac_srcdir/configure.gnu"; then
+      echo &&
+      $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+    elif test -f "$ac_srcdir/configure"; then
+      echo &&
+      $SHELL "$ac_srcdir/configure" --help=recursive
+    else
+      printf "%s\n" "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+    fi || ac_status=$?
+    cd "$ac_pwd" || { ac_status=$?; break; }
+  done
+fi
+
+test -n "$ac_init_help" && exit $ac_status
+if $ac_init_version; then
+  cat <<\_ACEOF
+APPLgrid configure 1.6.36
+generated by GNU Autoconf 2.71
+
+Copyright (C) 2021 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+  exit
+fi
+
+## ------------------------ ##
+## Autoconf initialization. ##
+## ------------------------ ##
+
+# ac_fn_c_try_compile LINENO
+# --------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_compile ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext conftest.beam
+  if { { ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+printf "%s\n" "$ac_try_echo"; } >&5
+  (eval "$ac_compile") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest.$ac_objext
+then :
+  ac_retval=0
+else $as_nop
+  printf "%s\n" "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_compile
+
+# ac_fn_cxx_try_compile LINENO
+# ----------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_compile ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext conftest.beam
+  if { { ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+printf "%s\n" "$ac_try_echo"; } >&5
+  (eval "$ac_compile") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_cxx_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest.$ac_objext
+then :
+  ac_retval=0
+else $as_nop
+  printf "%s\n" "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_compile
+
+# ac_fn_c_try_link LINENO
+# -----------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_link ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext conftest.beam conftest$ac_exeext
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+printf "%s\n" "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest$ac_exeext && {
+	 test "$cross_compiling" = yes ||
+	 test -x conftest$ac_exeext
+       }
+then :
+  ac_retval=0
+else $as_nop
+  printf "%s\n" "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+  # interfere with the next link command; also delete a directory that is
+  # left behind by Apple's compiler.  We do this before executing the actions.
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_link
+
+# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES
+# -------------------------------------------------------
+# Tests whether HEADER exists and can be compiled using the include files in
+# INCLUDES, setting the cache variable VAR accordingly.
+ac_fn_c_check_header_compile ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+printf %s "checking for $2... " >&6; }
+if eval test \${$3+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"
+then :
+  eval "$3=yes"
+else $as_nop
+  eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+eval ac_res=\$$3
+	       { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+printf "%s\n" "$ac_res" >&6; }
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_header_compile
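+# Illustrative usage of the helper above (a hypothetical sketch, not one of
+# the checks actually emitted by this generated script):
+#   ac_fn_c_check_header_compile "$LINENO" "unistd.h" "ac_cv_header_unistd_h" "$ac_includes_default"
+#   if test "x$ac_cv_header_unistd_h" = xyes; then
+#     printf "%s\n" "#define HAVE_UNISTD_H 1" >>confdefs.h
+#   fi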
+
+# ac_fn_c_check_func LINENO FUNC VAR
+# ----------------------------------
+# Tests whether FUNC exists, setting the cache variable VAR accordingly
+ac_fn_c_check_func ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+printf %s "checking for $2... " >&6; }
+if eval test \${$3+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
+   For example, HP-UX 11i <limits.h> declares gettimeofday.  */
+#define $2 innocuous_$2
+
+/* System header to define __stub macros and hopefully few prototypes,
+   which can conflict with char $2 (); below.  */
+
+#include <limits.h>
+#undef $2
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $2 ();
+/* The GNU C library defines this for functions which it implements
+    to always fail with ENOSYS.  Some functions are actually named
+    something starting with __ and the normal name is an alias.  */
+#if defined __stub_$2 || defined __stub___$2
+choke me
+#endif
+
+int
+main (void)
+{
+return $2 ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"
+then :
+  eval "$3=yes"
+else $as_nop
+  eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+	       { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+printf "%s\n" "$ac_res" >&6; }
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_func
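+# Illustrative usage of the helper above (hypothetical sketch only): the VAR
+# argument names the cache variable, so a later define can key off it.
+#   ac_fn_c_check_func "$LINENO" "strdup" "ac_cv_func_strdup"
+#   if test "x$ac_cv_func_strdup" = xyes; then
+#     printf "%s\n" "#define HAVE_STRDUP 1" >>confdefs.h
+#   fi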
+
+# ac_fn_cxx_try_cpp LINENO
+# ------------------------
+# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_cpp ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  if { { ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+printf "%s\n" "$ac_try_echo"; } >&5
+  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } > conftest.i && {
+	 test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
+	 test ! -s conftest.err
+       }
+then :
+  ac_retval=0
+else $as_nop
+  printf "%s\n" "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+    ac_retval=1
+fi
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_cpp
+
+# ac_fn_cxx_try_link LINENO
+# -------------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_link ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext conftest.beam conftest$ac_exeext
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+printf "%s\n" "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_cxx_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest$ac_exeext && {
+	 test "$cross_compiling" = yes ||
+	 test -x conftest$ac_exeext
+       }
+then :
+  ac_retval=0
+else $as_nop
+  printf "%s\n" "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+  # interfere with the next link command; also delete a directory that is
+  # left behind by Apple's compiler.  We do this before executing the actions.
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_link
+
+# ac_fn_c_try_run LINENO
+# ----------------------
+# Try to run conftest.$ac_ext, and return whether this succeeded. Assumes that
+# executables *can* be run.
+ac_fn_c_try_run ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+printf "%s\n" "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && { ac_try='./conftest$ac_exeext'
+  { { case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+printf "%s\n" "$ac_try_echo"; } >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; }
+then :
+  ac_retval=0
+else $as_nop
+  printf "%s\n" "$as_me: program exited with status $ac_status" >&5
+       printf "%s\n" "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+       ac_retval=$ac_status
+fi
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_run
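+# Illustrative usage of the helper above (hypothetical sketch only): a caller
+# first writes a test program to conftest.$ac_ext and then does something like
+#   if ac_fn_c_try_run "$LINENO"; then
+#     ac_cv_some_feature=yes
+#   else
+#     ac_cv_some_feature=no
+#   fi
+# As its own comment notes, the helper assumes the produced executable can
+# actually be run, so it is only useful when not cross-compiling.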
+ac_configure_args_raw=
+for ac_arg
+do
+  case $ac_arg in
+  *\'*)
+    ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+  esac
+  as_fn_append ac_configure_args_raw " '$ac_arg'"
+done
+
+case $ac_configure_args_raw in
+  *$as_nl*)
+    ac_safe_unquote= ;;
+  *)
+    ac_unsafe_z='|&;<>()$`\\"*?[ ''	' # This string ends in space, tab.
+    ac_unsafe_a="$ac_unsafe_z#~"
+    ac_safe_unquote="s/ '\\([^$ac_unsafe_a][^$ac_unsafe_z]*\\)'/ \\1/g"
+    ac_configure_args_raw=`      printf "%s\n" "$ac_configure_args_raw" | sed "$ac_safe_unquote"`;;
+esac
+
+cat >config.log <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by APPLgrid $as_me 1.6.36, which was
+generated by GNU Autoconf 2.71.  Invocation command line was
+
+  $ $0$ac_configure_args_raw
+
+_ACEOF
+exec 5>>config.log
+{
+cat <<_ASUNAME
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
+/bin/uname -X     = `(/bin/uname -X) 2>/dev/null     || echo unknown`
+
+/bin/arch              = `(/bin/arch) 2>/dev/null              || echo unknown`
+/usr/bin/arch -k       = `(/usr/bin/arch -k) 2>/dev/null       || echo unknown`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
+/usr/bin/hostinfo      = `(/usr/bin/hostinfo) 2>/dev/null      || echo unknown`
+/bin/machine           = `(/bin/machine) 2>/dev/null           || echo unknown`
+/usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null       || echo unknown`
+/bin/universe          = `(/bin/universe) 2>/dev/null          || echo unknown`
+
+_ASUNAME
+
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    printf "%s\n" "PATH: $as_dir"
+  done
+IFS=$as_save_IFS
+
+} >&5
+
+cat >&5 <<_ACEOF
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+_ACEOF
+
+
+# Keep a trace of the command line.
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Strip out --silent because we don't want to record it for future runs.
+# Also quote any args containing shell meta-characters.
+# Make two passes to allow for proper duplicate-argument suppression.
+ac_configure_args=
+ac_configure_args0=
+ac_configure_args1=
+ac_must_keep_next=false
+for ac_pass in 1 2
+do
+  for ac_arg
+  do
+    case $ac_arg in
+    -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
+    -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+    | -silent | --silent | --silen | --sile | --sil)
+      continue ;;
+    *\'*)
+      ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+    esac
+    case $ac_pass in
+    1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
+    2)
+      as_fn_append ac_configure_args1 " '$ac_arg'"
+      if test $ac_must_keep_next = true; then
+	ac_must_keep_next=false # Got value, back to normal.
+      else
+	case $ac_arg in
+	  *=* | --config-cache | -C | -disable-* | --disable-* \
+	  | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
+	  | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
+	  | -with-* | --with-* | -without-* | --without-* | --x)
+	    case "$ac_configure_args0 " in
+	      "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
+	    esac
+	    ;;
+	  -* ) ac_must_keep_next=true ;;
+	esac
+      fi
+      as_fn_append ac_configure_args " '$ac_arg'"
+      ;;
+    esac
+  done
+done
+{ ac_configure_args0=; unset ac_configure_args0;}
+{ ac_configure_args1=; unset ac_configure_args1;}
+
+# When interrupted or exit'd, cleanup temporary files, and complete
+# config.log.  We remove comments because anyway the quotes in there
+# would cause problems or look ugly.
+# WARNING: Use '\'' to represent an apostrophe within the trap.
+# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
+trap 'exit_status=$?
+  # Sanitize IFS.
+  IFS=" ""	$as_nl"
+  # Save into config.log some information that might help in debugging.
+  {
+    echo
+
+    printf "%s\n" "## ---------------- ##
+## Cache variables. ##
+## ---------------- ##"
+    echo
+    # The following way of writing the cache mishandles newlines in values,
+(
+  for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
+    eval ac_val=\$$ac_var
+    case $ac_val in #(
+    *${as_nl}*)
+      case $ac_var in #(
+      *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+      esac
+      case $ac_var in #(
+      _ | IFS | as_nl) ;; #(
+      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+      *) { eval $ac_var=; unset $ac_var;} ;;
+      esac ;;
+    esac
+  done
+  (set) 2>&1 |
+    case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
+    *${as_nl}ac_space=\ *)
+      sed -n \
+	"s/'\''/'\''\\\\'\'''\''/g;
+	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
+      ;; #(
+    *)
+      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+      ;;
+    esac |
+    sort
+)
+    echo
+
+    printf "%s\n" "## ----------------- ##
+## Output variables. ##
+## ----------------- ##"
+    echo
+    for ac_var in $ac_subst_vars
+    do
+      eval ac_val=\$$ac_var
+      case $ac_val in
+      *\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+      esac
+      printf "%s\n" "$ac_var='\''$ac_val'\''"
+    done | sort
+    echo
+
+    if test -n "$ac_subst_files"; then
+      printf "%s\n" "## ------------------- ##
+## File substitutions. ##
+## ------------------- ##"
+      echo
+      for ac_var in $ac_subst_files
+      do
+	eval ac_val=\$$ac_var
+	case $ac_val in
+	*\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+	esac
+	printf "%s\n" "$ac_var='\''$ac_val'\''"
+      done | sort
+      echo
+    fi
+
+    if test -s confdefs.h; then
+      printf "%s\n" "## ----------- ##
+## confdefs.h. ##
+## ----------- ##"
+      echo
+      cat confdefs.h
+      echo
+    fi
+    test "$ac_signal" != 0 &&
+      printf "%s\n" "$as_me: caught signal $ac_signal"
+    printf "%s\n" "$as_me: exit $exit_status"
+  } >&5
+  rm -f core *.core core.conftest.* &&
+    rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
+    exit $exit_status
+' 0
+for ac_signal in 1 2 13 15; do
+  trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
+done
+ac_signal=0
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -f -r conftest* confdefs.h
+
+printf "%s\n" "/* confdefs.h */" > confdefs.h
+
+# Predefined preprocessor variables.
+
+printf "%s\n" "#define PACKAGE_NAME \"$PACKAGE_NAME\"" >>confdefs.h
+
+printf "%s\n" "#define PACKAGE_TARNAME \"$PACKAGE_TARNAME\"" >>confdefs.h
+
+printf "%s\n" "#define PACKAGE_VERSION \"$PACKAGE_VERSION\"" >>confdefs.h
+
+printf "%s\n" "#define PACKAGE_STRING \"$PACKAGE_STRING\"" >>confdefs.h
+
+printf "%s\n" "#define PACKAGE_BUGREPORT \"$PACKAGE_BUGREPORT\"" >>confdefs.h
+
+printf "%s\n" "#define PACKAGE_URL \"$PACKAGE_URL\"" >>confdefs.h
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer an explicitly selected file to automatically selected ones.
+if test -n "$CONFIG_SITE"; then
+  ac_site_files="$CONFIG_SITE"
+elif test "x$prefix" != xNONE; then
+  ac_site_files="$prefix/share/config.site $prefix/etc/config.site"
+else
+  ac_site_files="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site"
+fi
+
+for ac_site_file in $ac_site_files
+do
+  case $ac_site_file in #(
+  */*) :
+     ;; #(
+  *) :
+    ac_site_file=./$ac_site_file ;;
+esac
+  if test -f "$ac_site_file" && test -r "$ac_site_file"; then
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
+printf "%s\n" "$as_me: loading site script $ac_site_file" >&6;}
+    sed 's/^/| /' "$ac_site_file" >&5
+    . "$ac_site_file" \
+      || { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "failed to load site script $ac_site_file
+See \`config.log' for more details" "$LINENO" 5; }
+  fi
+done
+
+if test -r "$cache_file"; then
+  # Some versions of bash will fail to source /dev/null (special files
+  # actually), so we avoid doing that.  DJGPP emulates it as a regular file.
+  if test /dev/null != "$cache_file" && test -f "$cache_file"; then
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
+printf "%s\n" "$as_me: loading cache $cache_file" >&6;}
+    case $cache_file in
+      [\\/]* | ?:[\\/]* ) . "$cache_file";;
+      *)                      . "./$cache_file";;
+    esac
+  fi
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
+printf "%s\n" "$as_me: creating cache $cache_file" >&6;}
+  >$cache_file
+fi
+
+# Test code for whether the C compiler supports C89 (global declarations)
+ac_c_conftest_c89_globals='
+/* Does the compiler advertise C89 conformance?
+   Do not test the value of __STDC__, because some compilers set it to 0
+   while being otherwise adequately conformant. */
+#if !defined __STDC__
+# error "Compiler does not advertise C89 conformance"
+#endif
+
+#include <stddef.h>
+#include <stdarg.h>
+struct stat;
+/* Most of the following tests are stolen from RCS 5.7 src/conf.sh.  */
+struct buf { int x; };
+struct buf * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+     char **p;
+     int i;
+{
+  return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+  char *s;
+  va_list v;
+  va_start (v,p);
+  s = g (p, va_arg (v,int));
+  va_end (v);
+  return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
+   function prototypes and stuff, but not \xHH hex character constants.
+   These do not provoke an error unfortunately, instead are silently treated
+   as an "x".  The following induces an error, until -std is added to get
+   proper ANSI mode.  Curiously \x00 != x always comes out true, for an
+   array size at least.  It is necessary to write \x00 == 0 to get something
+   that is true only with -std.  */
+int osf4_cc_array ['\''\x00'\'' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+   inside strings and character constants.  */
+#define FOO(x) '\''x'\''
+int xlc6_cc_array[FOO(a) == '\''x'\'' ? 1 : -1];
+
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, int *(*)(struct buf *, struct stat *, int),
+               int, int);'
+
+# Test code for whether the C compiler supports C89 (body of main).
+ac_c_conftest_c89_main='
+ok |= (argc == 0 || f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]);
+'
+
+# Test code for whether the C compiler supports C99 (global declarations)
+ac_c_conftest_c99_globals='
+// Does the compiler advertise C99 conformance?
+#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 199901L
+# error "Compiler does not advertise C99 conformance"
+#endif
+
+#include <stdbool.h>
+extern int puts (const char *);
+extern int printf (const char *, ...);
+extern int dprintf (int, const char *, ...);
+extern void *malloc (size_t);
+
+// Check varargs macros.  These examples are taken from C99 6.10.3.5.
+// dprintf is used instead of fprintf to avoid needing to declare
+// FILE and stderr.
+#define debug(...) dprintf (2, __VA_ARGS__)
+#define showlist(...) puts (#__VA_ARGS__)
+#define report(test,...) ((test) ? puts (#test) : printf (__VA_ARGS__))
+static void
+test_varargs_macros (void)
+{
+  int x = 1234;
+  int y = 5678;
+  debug ("Flag");
+  debug ("X = %d\n", x);
+  showlist (The first, second, and third items.);
+  report (x>y, "x is %d but y is %d", x, y);
+}
+
+// Check long long types.
+#define BIG64 18446744073709551615ull
+#define BIG32 4294967295ul
+#define BIG_OK (BIG64 / BIG32 == 4294967297ull && BIG64 % BIG32 == 0)
+#if !BIG_OK
+  #error "your preprocessor is broken"
+#endif
+#if BIG_OK
+#else
+  #error "your preprocessor is broken"
+#endif
+static long long int bignum = -9223372036854775807LL;
+static unsigned long long int ubignum = BIG64;
+
+struct incomplete_array
+{
+  int datasize;
+  double data[];
+};
+
+struct named_init {
+  int number;
+  const wchar_t *name;
+  double average;
+};
+
+typedef const char *ccp;
+
+static inline int
+test_restrict (ccp restrict text)
+{
+  // See if C++-style comments work.
+  // Iterate through items via the restricted pointer.
+  // Also check for declarations in for loops.
+  for (unsigned int i = 0; *(text+i) != '\''\0'\''; ++i)
+    continue;
+  return 0;
+}
+
+// Check varargs and va_copy.
+static bool
+test_varargs (const char *format, ...)
+{
+  va_list args;
+  va_start (args, format);
+  va_list args_copy;
+  va_copy (args_copy, args);
+
+  const char *str = "";
+  int number = 0;
+  float fnumber = 0;
+
+  while (*format)
+    {
+      switch (*format++)
+	{
+	case '\''s'\'': // string
+	  str = va_arg (args_copy, const char *);
+	  break;
+	case '\''d'\'': // int
+	  number = va_arg (args_copy, int);
+	  break;
+	case '\''f'\'': // float
+	  fnumber = va_arg (args_copy, double);
+	  break;
+	default:
+	  break;
+	}
+    }
+  va_end (args_copy);
+  va_end (args);
+
+  return *str && number && fnumber;
+}
+'
+
+# Test code for whether the C compiler supports C99 (body of main).
+ac_c_conftest_c99_main='
+  // Check bool.
+  _Bool success = false;
+  success |= (argc != 0);
+
+  // Check restrict.
+  if (test_restrict ("String literal") == 0)
+    success = true;
+  char *restrict newvar = "Another string";
+
+  // Check varargs.
+  success &= test_varargs ("s, d'\'' f .", "string", 65, 34.234);
+  test_varargs_macros ();
+
+  // Check flexible array members.
+  struct incomplete_array *ia =
+    malloc (sizeof (struct incomplete_array) + (sizeof (double) * 10));
+  ia->datasize = 10;
+  for (int i = 0; i < ia->datasize; ++i)
+    ia->data[i] = i * 1.234;
+
+  // Check named initializers.
+  struct named_init ni = {
+    .number = 34,
+    .name = L"Test wide string",
+    .average = 543.34343,
+  };
+
+  ni.number = 58;
+
+  int dynamic_array[ni.number];
+  dynamic_array[0] = argv[0][0];
+  dynamic_array[ni.number - 1] = 543;
+
+  // work around unused variable warnings
+  ok |= (!success || bignum == 0LL || ubignum == 0uLL || newvar[0] == '\''x'\''
+	 || dynamic_array[ni.number - 1] != 543);
+'
+
+# Test code for whether the C compiler supports C11 (global declarations)
+ac_c_conftest_c11_globals='
+// Does the compiler advertise C11 conformance?
+#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 201112L
+# error "Compiler does not advertise C11 conformance"
+#endif
+
+// Check _Alignas.
+char _Alignas (double) aligned_as_double;
+char _Alignas (0) no_special_alignment;
+extern char aligned_as_int;
+char _Alignas (0) _Alignas (int) aligned_as_int;
+
+// Check _Alignof.
+enum
+{
+  int_alignment = _Alignof (int),
+  int_array_alignment = _Alignof (int[100]),
+  char_alignment = _Alignof (char)
+};
+_Static_assert (0 < -_Alignof (int), "_Alignof is signed");
+
+// Check _Noreturn.
+int _Noreturn does_not_return (void) { for (;;) continue; }
+
+// Check _Static_assert.
+struct test_static_assert
+{
+  int x;
+  _Static_assert (sizeof (int) <= sizeof (long int),
+                  "_Static_assert does not work in struct");
+  long int y;
+};
+
+// Check UTF-8 literals.
+#define u8 syntax error!
+char const utf8_literal[] = u8"happens to be ASCII" "another string";
+
+// Check duplicate typedefs.
+typedef long *long_ptr;
+typedef long int *long_ptr;
+typedef long_ptr long_ptr;
+
+// Anonymous structures and unions -- taken from C11 6.7.2.1 Example 1.
+struct anonymous
+{
+  union {
+    struct { int i; int j; };
+    struct { int k; long int l; } w;
+  };
+  int m;
+} v1;
+'
+
+# Test code for whether the C compiler supports C11 (body of main).
+ac_c_conftest_c11_main='
+  _Static_assert ((offsetof (struct anonymous, i)
+		   == offsetof (struct anonymous, w.k)),
+		  "Anonymous union alignment botch");
+  v1.i = 2;
+  v1.w.k = 5;
+  ok |= v1.i != 5;
+'
+
+# Test code for whether the C compiler supports C11 (complete).
+ac_c_conftest_c11_program="${ac_c_conftest_c89_globals}
+${ac_c_conftest_c99_globals}
+${ac_c_conftest_c11_globals}
+
+int
+main (int argc, char **argv)
+{
+  int ok = 0;
+  ${ac_c_conftest_c89_main}
+  ${ac_c_conftest_c99_main}
+  ${ac_c_conftest_c11_main}
+  return ok;
+}
+"
+
+# Test code for whether the C compiler supports C99 (complete).
+ac_c_conftest_c99_program="${ac_c_conftest_c89_globals}
+${ac_c_conftest_c99_globals}
+
+int
+main (int argc, char **argv)
+{
+  int ok = 0;
+  ${ac_c_conftest_c89_main}
+  ${ac_c_conftest_c99_main}
+  return ok;
+}
+"
+
+# Test code for whether the C compiler supports C89 (complete).
+ac_c_conftest_c89_program="${ac_c_conftest_c89_globals}
+
+int
+main (int argc, char **argv)
+{
+  int ok = 0;
+  ${ac_c_conftest_c89_main}
+  return ok;
+}
+"
+
+# Test code for whether the C++ compiler supports C++98 (global declarations)
+ac_cxx_conftest_cxx98_globals='
+// Does the compiler advertise C++98 conformance?
+#if !defined __cplusplus || __cplusplus < 199711L
+# error "Compiler does not advertise C++98 conformance"
+#endif
+
+// These inclusions are to reject old compilers that
+// lack the unsuffixed header files.
+#include <cstdlib>
+#include <exception>
+
+// <cassert> and <cstring> are *not* freestanding headers in C++98.
+extern void assert (int);
+namespace std {
+  extern int strcmp (const char *, const char *);
+}
+
+// Namespaces, exceptions, and templates were all added after "C++ 2.0".
+using std::exception;
+using std::strcmp;
+
+namespace {
+
+void test_exception_syntax()
+{
+  try {
+    throw "test";
+  } catch (const char *s) {
+    // Extra parentheses suppress a warning when building autoconf itself,
+    // due to lint rules shared with more typical C programs.
+    assert (!(strcmp) (s, "test"));
+  }
+}
+
+template <typename T> struct test_template
+{
+  T const val;
+  explicit test_template(T t) : val(t) {}
+  template <typename U> T add(U u) { return static_cast<T>(u) + val; }
+};
+
+} // anonymous namespace
+'
+
+# Test code for whether the C++ compiler supports C++98 (body of main)
+ac_cxx_conftest_cxx98_main='
+  assert (argc);
+  assert (! argv[0]);
+{
+  test_exception_syntax ();
+  test_template<double> tt (2.0);
+  assert (tt.add (4) == 6.0);
+  assert (true && !false);
+}
+'
+
+# Test code for whether the C++ compiler supports C++11 (global declarations)
+ac_cxx_conftest_cxx11_globals='
+// Does the compiler advertise C++ 2011 conformance?
+#if !defined __cplusplus || __cplusplus < 201103L
+# error "Compiler does not advertise C++11 conformance"
+#endif
+
+namespace cxx11test
+{
+  constexpr int get_val() { return 20; }
+
+  struct testinit
+  {
+    int i;
+    double d;
+  };
+
+  class delegate
+  {
+  public:
+    delegate(int n) : n(n) {}
+    delegate(): delegate(2354) {}
+
+    virtual int getval() { return this->n; };
+  protected:
+    int n;
+  };
+
+  class overridden : public delegate
+  {
+  public:
+    overridden(int n): delegate(n) {}
+    virtual int getval() override final { return this->n * 2; }
+  };
+
+  class nocopy
+  {
+  public:
+    nocopy(int i): i(i) {}
+    nocopy() = default;
+    nocopy(const nocopy&) = delete;
+    nocopy & operator=(const nocopy&) = delete;
+  private:
+    int i;
+  };
+
+  // for testing lambda expressions
+  template <typename Ret, typename Fn> Ret eval(Fn f, Ret v)
+  {
+    return f(v);
+  }
+
+  // for testing variadic templates and trailing return types
+  template <typename V> auto sum(V first) -> V
+  {
+    return first;
+  }
+  template <typename V, typename... Args> auto sum(V first, Args... rest) -> V
+  {
+    return first + sum(rest...);
+  }
+}
+'
+
+# Test code for whether the C++ compiler supports C++11 (body of main)
+ac_cxx_conftest_cxx11_main='
+{
+  // Test auto and decltype
+  auto a1 = 6538;
+  auto a2 = 48573953.4;
+  auto a3 = "String literal";
+
+  int total = 0;
+  for (auto i = a3; *i; ++i) { total += *i; }
+
+  decltype(a2) a4 = 34895.034;
+}
+{
+  // Test constexpr
+  short sa[cxx11test::get_val()] = { 0 };
+}
+{
+  // Test initializer lists
+  cxx11test::testinit il = { 4323, 435234.23544 };
+}
+{
+  // Test range-based for
+  int array[] = {9, 7, 13, 15, 4, 18, 12, 10, 5, 3,
+                 14, 19, 17, 8, 6, 20, 16, 2, 11, 1};
+  for (auto &x : array) { x += 23; }
+}
+{
+  // Test lambda expressions
+  using cxx11test::eval;
+  assert (eval ([](int x) { return x*2; }, 21) == 42);
+  double d = 2.0;
+  assert (eval ([&](double x) { return d += x; }, 3.0) == 5.0);
+  assert (d == 5.0);
+  assert (eval ([=](double x) mutable { return d += x; }, 4.0) == 9.0);
+  assert (d == 5.0);
+}
+{
+  // Test use of variadic templates
+  using cxx11test::sum;
+  auto a = sum(1);
+  auto b = sum(1, 2);
+  auto c = sum(1.0, 2.0, 3.0);
+}
+{
+  // Test constructor delegation
+  cxx11test::delegate d1;
+  cxx11test::delegate d2();
+  cxx11test::delegate d3(45);
+}
+{
+  // Test override and final
+  cxx11test::overridden o1(55464);
+}
+{
+  // Test nullptr
+  char *c = nullptr;
+}
+{
+  // Test template brackets
+  test_template<::test_template<int>> v(test_template<int>(12));
+}
+{
+  // Unicode literals
+  char const *utf8 = u8"UTF-8 string \u2500";
+  char16_t const *utf16 = u"UTF-8 string \u2500";
+  char32_t const *utf32 = U"UTF-32 string \u2500";
+}
+'
+
+# Test code for whether the C++ compiler supports C++11 (complete).
+ac_cxx_conftest_cxx11_program="${ac_cxx_conftest_cxx98_globals}
+${ac_cxx_conftest_cxx11_globals}
+
+int
+main (int argc, char **argv)
+{
+  int ok = 0;
+  ${ac_cxx_conftest_cxx98_main}
+  ${ac_cxx_conftest_cxx11_main}
+  return ok;
+}
+"
+
+# Test code for whether the C++ compiler supports C++98 (complete).
+ac_cxx_conftest_cxx98_program="${ac_cxx_conftest_cxx98_globals}
+int
+main (int argc, char **argv)
+{
+  int ok = 0;
+  ${ac_cxx_conftest_cxx98_main}
+  return ok;
+}
+"
+
+as_fn_append ac_header_c_list " stdio.h stdio_h HAVE_STDIO_H"
+as_fn_append ac_header_c_list " stdlib.h stdlib_h HAVE_STDLIB_H"
+as_fn_append ac_header_c_list " string.h string_h HAVE_STRING_H"
+as_fn_append ac_header_c_list " inttypes.h inttypes_h HAVE_INTTYPES_H"
+as_fn_append ac_header_c_list " stdint.h stdint_h HAVE_STDINT_H"
+as_fn_append ac_header_c_list " strings.h strings_h HAVE_STRINGS_H"
+as_fn_append ac_header_c_list " sys/stat.h sys_stat_h HAVE_SYS_STAT_H"
+as_fn_append ac_header_c_list " sys/types.h sys_types_h HAVE_SYS_TYPES_H"
+as_fn_append ac_header_c_list " unistd.h unistd_h HAVE_UNISTD_H"
+as_fn_append ac_header_c_list " utime.h utime_h HAVE_UTIME_H"
+
+# Auxiliary files required by this configure script.
+ac_aux_files="config.guess config.sub ltmain.sh compile missing install-sh"
+
+# Locations in which to look for auxiliary files.
+ac_aux_dir_candidates="${srcdir}${PATH_SEPARATOR}${srcdir}/..${PATH_SEPARATOR}${srcdir}/../.."
+
+# Search for a directory containing all of the required auxiliary files,
+# $ac_aux_files, from the $PATH-style list $ac_aux_dir_candidates.
+# If we don't find one directory that contains all the files we need,
+# we report the set of missing files from the *first* directory in
+# $ac_aux_dir_candidates and give up.
+ac_missing_aux_files=""
+ac_first_candidate=:
+printf "%s\n" "$as_me:${as_lineno-$LINENO}: looking for aux files: $ac_aux_files" >&5
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_found=false
+for as_dir in $ac_aux_dir_candidates
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+  as_found=:
+
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}:  trying $as_dir" >&5
+  ac_aux_dir_found=yes
+  ac_install_sh=
+  for ac_aux in $ac_aux_files
+  do
+    # As a special case, if "install-sh" is required, that requirement
+    # can be satisfied by any of "install-sh", "install.sh", or "shtool",
+    # and $ac_install_sh is set appropriately for whichever one is found.
+    if test x"$ac_aux" = x"install-sh"
+    then
+      if test -f "${as_dir}install-sh"; then
+        printf "%s\n" "$as_me:${as_lineno-$LINENO}:   ${as_dir}install-sh found" >&5
+        ac_install_sh="${as_dir}install-sh -c"
+      elif test -f "${as_dir}install.sh"; then
+        printf "%s\n" "$as_me:${as_lineno-$LINENO}:   ${as_dir}install.sh found" >&5
+        ac_install_sh="${as_dir}install.sh -c"
+      elif test -f "${as_dir}shtool"; then
+        printf "%s\n" "$as_me:${as_lineno-$LINENO}:   ${as_dir}shtool found" >&5
+        ac_install_sh="${as_dir}shtool install -c"
+      else
+        ac_aux_dir_found=no
+        if $ac_first_candidate; then
+          ac_missing_aux_files="${ac_missing_aux_files} install-sh"
+        else
+          break
+        fi
+      fi
+    else
+      if test -f "${as_dir}${ac_aux}"; then
+        printf "%s\n" "$as_me:${as_lineno-$LINENO}:   ${as_dir}${ac_aux} found" >&5
+      else
+        ac_aux_dir_found=no
+        if $ac_first_candidate; then
+          ac_missing_aux_files="${ac_missing_aux_files} ${ac_aux}"
+        else
+          break
+        fi
+      fi
+    fi
+  done
+  if test "$ac_aux_dir_found" = yes; then
+    ac_aux_dir="$as_dir"
+    break
+  fi
+  ac_first_candidate=false
+
+  as_found=false
+done
+IFS=$as_save_IFS
+if $as_found
+then :
+
+else $as_nop
+  as_fn_error $? "cannot find required auxiliary files:$ac_missing_aux_files" "$LINENO" 5
+fi
+
+
+# These three variables are undocumented and unsupported,
+# and are intended to be withdrawn in a future Autoconf release.
+# They can cause serious problems if a builder's source tree is in a directory
+# whose full name contains unusual characters.
+if test -f "${ac_aux_dir}config.guess"; then
+  ac_config_guess="$SHELL ${ac_aux_dir}config.guess"
+fi
+if test -f "${ac_aux_dir}config.sub"; then
+  ac_config_sub="$SHELL ${ac_aux_dir}config.sub"
+fi
+if test -f "$ac_aux_dir/configure"; then
+  ac_configure="$SHELL ${ac_aux_dir}configure"
+fi
+
+# Check that the precious variables saved in the cache have kept the same
+# value.
+ac_cache_corrupted=false
+for ac_var in $ac_precious_vars; do
+  eval ac_old_set=\$ac_cv_env_${ac_var}_set
+  eval ac_new_set=\$ac_env_${ac_var}_set
+  eval ac_old_val=\$ac_cv_env_${ac_var}_value
+  eval ac_new_val=\$ac_env_${ac_var}_value
+  case $ac_old_set,$ac_new_set in
+    set,)
+      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+printf "%s\n" "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+      ac_cache_corrupted=: ;;
+    ,set)
+      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
+printf "%s\n" "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+      ac_cache_corrupted=: ;;
+    ,);;
+    *)
+      if test "x$ac_old_val" != "x$ac_new_val"; then
+	# differences in whitespace do not lead to failure.
+	ac_old_val_w=`echo x $ac_old_val`
+	ac_new_val_w=`echo x $ac_new_val`
+	if test "$ac_old_val_w" != "$ac_new_val_w"; then
+	  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
+printf "%s\n" "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+	  ac_cache_corrupted=:
+	else
+	  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
+printf "%s\n" "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
+	  eval $ac_var=\$ac_old_val
+	fi
+	{ printf "%s\n" "$as_me:${as_lineno-$LINENO}:   former value:  \`$ac_old_val'" >&5
+printf "%s\n" "$as_me:   former value:  \`$ac_old_val'" >&2;}
+	{ printf "%s\n" "$as_me:${as_lineno-$LINENO}:   current value: \`$ac_new_val'" >&5
+printf "%s\n" "$as_me:   current value: \`$ac_new_val'" >&2;}
+      fi;;
+  esac
+  # Pass precious variables to config.status.
+  if test "$ac_new_set" = set; then
+    case $ac_new_val in
+    *\'*) ac_arg=$ac_var=`printf "%s\n" "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+    *) ac_arg=$ac_var=$ac_new_val ;;
+    esac
+    case " $ac_configure_args " in
+      *" '$ac_arg' "*) ;; # Avoid dups.  Use of quotes ensures accuracy.
+      *) as_fn_append ac_configure_args " '$ac_arg'" ;;
+    esac
+  fi
+done
+if $ac_cache_corrupted; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
+printf "%s\n" "$as_me: error: changes in the environment can compromise the build" >&2;}
+  as_fn_error $? "run \`${MAKE-make} distclean' and/or \`rm $cache_file'
+	    and start over" "$LINENO" 5
+fi
+## -------------------- ##
+## Main body of script. ##
+## -------------------- ##
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+am__api_version='1.16'
+
+
+
+  # Find a good install program.  We prefer a C program (faster),
+# so one script is as good as another.  But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AmigaOS /C/install, which installs bootblocks on floppy discs
+# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# OS/2's system install, which has a completely different semantic
+# ./install, which can be erroneously created by make from ./install.sh.
+# Reject install programs that cannot install multiple files.
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5
+printf %s "checking for a BSD-compatible install... " >&6; }
+if test -z "$INSTALL"; then
+if test ${ac_cv_path_install+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    # Account for the fact that we put trailing slashes in our PATH walk.
+case $as_dir in #((
+  ./ | /[cC]/* | \
+  /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
+  ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \
+  /usr/ucb/* ) ;;
+  *)
+    # OSF1 and SCO ODT 3.0 have their own names for install.
+    # Don't use installbsd from OSF since it installs stuff as root
+    # by default.
+    for ac_prog in ginstall scoinst install; do
+      for ac_exec_ext in '' $ac_executable_extensions; do
+	if as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext"; then
+	  if test $ac_prog = install &&
+	    grep dspmsg "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+	    # AIX install.  It has an incompatible calling convention.
+	    :
+	  elif test $ac_prog = install &&
+	    grep pwplus "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+	    # program-specific install script used by HP pwplus--don't use.
+	    :
+	  else
+	    rm -rf conftest.one conftest.two conftest.dir
+	    echo one > conftest.one
+	    echo two > conftest.two
+	    mkdir conftest.dir
+	    if "$as_dir$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir/" &&
+	      test -s conftest.one && test -s conftest.two &&
+	      test -s conftest.dir/conftest.one &&
+	      test -s conftest.dir/conftest.two
+	    then
+	      ac_cv_path_install="$as_dir$ac_prog$ac_exec_ext -c"
+	      break 3
+	    fi
+	  fi
+	fi
+      done
+    done
+    ;;
+esac
+
+  done
+IFS=$as_save_IFS
+
+rm -rf conftest.one conftest.two conftest.dir
+
+fi
+  if test ${ac_cv_path_install+y}; then
+    INSTALL=$ac_cv_path_install
+  else
+    # As a last resort, use the slow shell script.  Don't cache a
+    # value for INSTALL within a source directory, because that will
+    # break other packages using the cache if that directory is
+    # removed, or if the value is a relative name.
+    INSTALL=$ac_install_sh
+  fi
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5
+printf "%s\n" "$INSTALL" >&6; }
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5
+printf %s "checking whether build environment is sane... " >&6; }
+# Reject unsafe characters in $srcdir or the absolute working directory
+# name.  Accept space and tab only in the latter.
+am_lf='
+'
+case `pwd` in
+  *[\\\"\#\$\&\'\`$am_lf]*)
+    as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;;
+esac
+case $srcdir in
+  *[\\\"\#\$\&\'\`$am_lf\ \	]*)
+    as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;;
+esac
+
+# Do 'set' in a subshell so we don't clobber the current shell's
+# arguments.  Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (eg FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+   am_has_slept=no
+   for am_try in 1 2; do
+     echo "timestamp, slept: $am_has_slept" > conftest.file
+     set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+     if test "$*" = "X"; then
+	# -L didn't work.
+	set X `ls -t "$srcdir/configure" conftest.file`
+     fi
+     if test "$*" != "X $srcdir/configure conftest.file" \
+	&& test "$*" != "X conftest.file $srcdir/configure"; then
+
+	# If neither matched, then we have a broken ls.  This can happen
+	# if, for instance, CONFIG_SHELL is bash and it inherits a
+	# broken ls alias from the environment.  This has actually
+	# happened.  Such a system could not be considered "sane".
+	as_fn_error $? "ls -t appears to fail.  Make sure there is not a broken
+  alias in your environment" "$LINENO" 5
+     fi
+     if test "$2" = conftest.file || test $am_try -eq 2; then
+       break
+     fi
+     # Just in case.
+     sleep 1
+     am_has_slept=yes
+   done
+   test "$2" = conftest.file
+   )
+then
+   # Ok.
+   :
+else
+   as_fn_error $? "newly created file is older than distributed files!
+Check your system clock" "$LINENO" 5
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+printf "%s\n" "yes" >&6; }
+# If we didn't sleep, we still need to ensure time stamps of config.status and
+# generated files are strictly newer.
+am_sleep_pid=
+if grep 'slept: no' conftest.file >/dev/null 2>&1; then
+  ( sleep 1 ) &
+  am_sleep_pid=$!
+fi
+
+rm -f conftest.file
+
+test "$program_prefix" != NONE &&
+  program_transform_name="s&^&$program_prefix&;$program_transform_name"
+# Use a double $ so make ignores it.
+test "$program_suffix" != NONE &&
+  program_transform_name="s&\$&$program_suffix&;$program_transform_name"
+# Double any \ or $.
+# By default was `s,x,x', remove it if useless.
+ac_script='s/[\\$]/&&/g;s/;s,x,x,$//'
+program_transform_name=`printf "%s\n" "$program_transform_name" | sed "$ac_script"`
+
+
+# Expand $ac_aux_dir to an absolute path.
+am_aux_dir=`cd "$ac_aux_dir" && pwd`
+
+
+  if test x"${MISSING+set}" != xset; then
+  MISSING="\${SHELL} '$am_aux_dir/missing'"
+fi
+# Use eval to expand $SHELL
+if eval "$MISSING --is-lightweight"; then
+  am_missing_run="$MISSING "
+else
+  am_missing_run=
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5
+printf "%s\n" "$as_me: WARNING: 'missing' script is too old or missing" >&2;}
+fi
+
+if test x"${install_sh+set}" != xset; then
+  case $am_aux_dir in
+  *\ * | *\	*)
+    install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
+  *)
+    install_sh="\${SHELL} $am_aux_dir/install-sh"
+  esac
+fi
+
+# Installed binaries are usually stripped using 'strip' when the user
+# runs "make install-strip".  However 'strip' might not be the right
+# tool to use in cross-compilation environments, therefore Automake
+# will honor the 'STRIP' environment variable to override this program.
+if test "$cross_compiling" != no; then
+  if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
+set dummy ${ac_tool_prefix}strip; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_STRIP+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$STRIP"; then
+  ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_STRIP="${ac_tool_prefix}strip"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+STRIP=$ac_cv_prog_STRIP
+if test -n "$STRIP"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5
+printf "%s\n" "$STRIP" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_STRIP"; then
+  ac_ct_STRIP=$STRIP
+  # Extract the first word of "strip", so it can be a program name with args.
+set dummy strip; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_STRIP+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_STRIP"; then
+  ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_STRIP="strip"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
+if test -n "$ac_ct_STRIP"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5
+printf "%s\n" "$ac_ct_STRIP" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+  if test "x$ac_ct_STRIP" = x; then
+    STRIP=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    STRIP=$ac_ct_STRIP
+  fi
+else
+  STRIP="$ac_cv_prog_STRIP"
+fi
+
+fi
+INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
+
+
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a race-free mkdir -p" >&5
+printf %s "checking for a race-free mkdir -p... " >&6; }
+if test -z "$MKDIR_P"; then
+  if test ${ac_cv_path_mkdir+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_prog in mkdir gmkdir; do
+	 for ac_exec_ext in '' $ac_executable_extensions; do
+	   as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext" || continue
+	   case `"$as_dir$ac_prog$ac_exec_ext" --version 2>&1` in #(
+	     'mkdir ('*'coreutils) '* | \
+	     'BusyBox '* | \
+	     'mkdir (fileutils) '4.1*)
+	       ac_cv_path_mkdir=$as_dir$ac_prog$ac_exec_ext
+	       break 3;;
+	   esac
+	 done
+       done
+  done
+IFS=$as_save_IFS
+
+fi
+
+  test -d ./--version && rmdir ./--version
+  if test ${ac_cv_path_mkdir+y}; then
+    MKDIR_P="$ac_cv_path_mkdir -p"
+  else
+    # As a last resort, use the slow shell script.  Don't cache a
+    # value for MKDIR_P within a source directory, because that will
+    # break other packages using the cache if that directory is
+    # removed, or if the value is a relative name.
+    MKDIR_P="$ac_install_sh -d"
+  fi
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5
+printf "%s\n" "$MKDIR_P" >&6; }
+
+for ac_prog in gawk mawk nawk awk
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_AWK+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$AWK"; then
+  ac_cv_prog_AWK="$AWK" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_AWK="$ac_prog"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+AWK=$ac_cv_prog_AWK
+if test -n "$AWK"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5
+printf "%s\n" "$AWK" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+  test -n "$AWK" && break
+done
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5
+printf %s "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
+set x ${MAKE-make}
+ac_make=`printf "%s\n" "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
+if eval test \${ac_cv_prog_make_${ac_make}_set+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  cat >conftest.make <<\_ACEOF
+SHELL = /bin/sh
+all:
+	@echo '@@@%%%=$(MAKE)=@@@%%%'
+_ACEOF
+# GNU make sometimes prints "make[1]: Entering ...", which would confuse us.
+case `${MAKE-make} -f conftest.make 2>/dev/null` in
+  *@@@%%%=?*=@@@%%%*)
+    eval ac_cv_prog_make_${ac_make}_set=yes;;
+  *)
+    eval ac_cv_prog_make_${ac_make}_set=no;;
+esac
+rm -f conftest.make
+fi
+if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+printf "%s\n" "yes" >&6; }
+  SET_MAKE=
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+  SET_MAKE="MAKE=${MAKE-make}"
+fi
+
+rm -rf .tst 2>/dev/null
+mkdir .tst 2>/dev/null
+if test -d .tst; then
+  am__leading_dot=.
+else
+  am__leading_dot=_
+fi
+rmdir .tst 2>/dev/null
+
+# Check whether --enable-silent-rules was given.
+if test ${enable_silent_rules+y}
+then :
+  enableval=$enable_silent_rules;
+fi
+
+case $enable_silent_rules in # (((
+  yes) AM_DEFAULT_VERBOSITY=0;;
+   no) AM_DEFAULT_VERBOSITY=1;;
+    *) AM_DEFAULT_VERBOSITY=1;;
+esac
+am_make=${MAKE-make}
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5
+printf %s "checking whether $am_make supports nested variables... " >&6; }
+if test ${am_cv_make_support_nested_variables+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if printf "%s\n" 'TRUE=$(BAR$(V))
+BAR0=false
+BAR1=true
+V=1
+am__doit:
+	@$(TRUE)
+.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then
+  am_cv_make_support_nested_variables=yes
+else
+  am_cv_make_support_nested_variables=no
+fi
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5
+printf "%s\n" "$am_cv_make_support_nested_variables" >&6; }
+if test $am_cv_make_support_nested_variables = yes; then
+    AM_V='$(V)'
+  AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
+else
+  AM_V=$AM_DEFAULT_VERBOSITY
+  AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY
+fi
+AM_BACKSLASH='\'
+
+if test "`cd $srcdir && pwd`" != "`pwd`"; then
+  # Use -I$(srcdir) only when $(srcdir) != ., so that make's output
+  # is not polluted with repeated "-I."
+  am__isrc=' -I$(srcdir)'
+  # test to see if srcdir already configured
+  if test -f $srcdir/config.status; then
+    as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5
+  fi
+fi
+
+# test whether we have cygpath
+if test -z "$CYGPATH_W"; then
+  if (cygpath --version) >/dev/null 2>/dev/null; then
+    CYGPATH_W='cygpath -w'
+  else
+    CYGPATH_W=echo
+  fi
+fi
+
+
+# Define the identity of the package.
+ PACKAGE='applgrid'
+ VERSION='1.6.36'
+
+
+printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h
+
+
+printf "%s\n" "#define VERSION \"$VERSION\"" >>confdefs.h
+
+# Some tools Automake needs.
+
+ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"}
+
+
+AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"}
+
+
+AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"}
+
+
+AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"}
+
+
+MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"}
+
+# For better backward compatibility.  To be removed once Automake 1.9.x
+# dies out for good.  For more background, see:
+# <https://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
+# <https://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
+mkdir_p='$(MKDIR_P)'
+
+# We need awk for the "check" target (and possibly the TAP driver).  The
+# system "awk" is bad on some platforms.
+# Always define AMTAR for backward compatibility.  Yes, it's still used
+# in the wild :-(  We should find a proper way to deprecate it ...
+AMTAR='$${TAR-tar}'
+
+
+# We'll loop over all known methods to create a tar archive until one works.
+_am_tools='gnutar  pax cpio none'
+
+am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'
+
+
+
+
+
+# Variables for tags utilities; see am/tags.am
+if test -z "$CTAGS"; then
+  CTAGS=ctags
+fi
+
+if test -z "$ETAGS"; then
+  ETAGS=etags
+fi
+
+if test -z "$CSCOPE"; then
+  CSCOPE=cscope
+fi
+
+
+
+# POSIX will say in a future version that running "rm -f" with no argument
+# is OK; and we want to be able to make that assumption in our Makefile
+# recipes.  So use an aggressive probe to check that the usage we want is
+# actually supported "in the wild" to an acceptable degree.
+# See automake bug#10828.
+# To make any issue more visible, cause the running configure to be aborted
+# by default if the 'rm' program in use doesn't match our expectations; the
+# user can still override this though.
+if rm -f && rm -fr && rm -rf; then : OK; else
+  cat >&2 <<'END'
+Oops!
+
+Your 'rm' program seems unable to run without file operands specified
+on the command line, even when the '-f' option is present.  This is contrary
+to the behaviour of most rm programs out there, and not conforming with
+the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542>
+
+Please tell bug-automake@gnu.org about your system, including the value
+of your $PATH and any error possibly output before this message.  This
+can help us improve future automake versions.
+
+END
+  if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then
+    echo 'Configuration will proceed anyway, since you have set the' >&2
+    echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2
+    echo >&2
+  else
+    cat >&2 <<'END'
+Aborting the configuration process, to ensure you take notice of the issue.
+
+You can download and install GNU coreutils to get an 'rm' implementation
+that behaves properly: <https://www.gnu.org/software/coreutils/>.
+
+If you want to complete the configuration process using your problematic
+'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
+to "yes", and re-run configure.
+
+END
+    as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5
+  fi
+fi
+
+ac_config_headers="$ac_config_headers amconfig.h:amconfig.in"
+
+#AC_PROG_FC
+#AC_CHECK_PROGS(FC, [gfortran g77 f77 fort77 f90], no)
+#AC_PROG_FC
+#AC_LANG_PUSH(Fortran)
+#AC_FC_SRCEXT(f90)
+#AC_FC_LIBRARY_LDFLAGS
+#AC_FC_WRAPPERS
+#AC_FC_FREEFORM
+#AC_FC_DUMMY_MAIN
+#AC_LANG_POP(Fortran)
+
+
+
+
+
+
+
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}gcc; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_CC+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_CC="${ac_tool_prefix}gcc"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+printf "%s\n" "$CC" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_CC"; then
+  ac_ct_CC=$CC
+  # Extract the first word of "gcc", so it can be a program name with args.
+set dummy gcc; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_CC+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_CC"; then
+  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_CC="gcc"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+printf "%s\n" "$ac_ct_CC" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+  if test "x$ac_ct_CC" = x; then
+    CC=""
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    CC=$ac_ct_CC
+  fi
+else
+  CC="$ac_cv_prog_CC"
+fi
+
+if test -z "$CC"; then
+          if test -n "$ac_tool_prefix"; then
+    # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cc; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_CC+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_CC="${ac_tool_prefix}cc"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+printf "%s\n" "$CC" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+  fi
+fi
+if test -z "$CC"; then
+  # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_CC+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+  ac_prog_rejected=no
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    if test "$as_dir$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+       ac_prog_rejected=yes
+       continue
+     fi
+    ac_cv_prog_CC="cc"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+if test $ac_prog_rejected = yes; then
+  # We found a bogon in the path, so make sure we never use it.
+  set dummy $ac_cv_prog_CC
+  shift
+  if test $# != 0; then
+    # We chose a different compiler from the bogus one.
+    # However, it has the same basename, so the bogon will be chosen
+    # first if we set CC to just the basename; use the full file name.
+    shift
+    ac_cv_prog_CC="$as_dir$ac_word${1+' '}$@"
+  fi
+fi
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+printf "%s\n" "$CC" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+fi
+if test -z "$CC"; then
+  if test -n "$ac_tool_prefix"; then
+  for ac_prog in cl.exe
+  do
+    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_CC+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+printf "%s\n" "$CC" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+    test -n "$CC" && break
+  done
+fi
+if test -z "$CC"; then
+  ac_ct_CC=$CC
+  for ac_prog in cl.exe
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_CC+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_CC"; then
+  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_CC="$ac_prog"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+printf "%s\n" "$ac_ct_CC" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+  test -n "$ac_ct_CC" && break
+done
+
+  if test "x$ac_ct_CC" = x; then
+    CC=""
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    CC=$ac_ct_CC
+  fi
+fi
+
+fi
+if test -z "$CC"; then
+  if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}clang", so it can be a program name with args.
+set dummy ${ac_tool_prefix}clang; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_CC+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_CC="${ac_tool_prefix}clang"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+printf "%s\n" "$CC" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_CC"; then
+  ac_ct_CC=$CC
+  # Extract the first word of "clang", so it can be a program name with args.
+set dummy clang; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_CC+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_CC"; then
+  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_CC="clang"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+printf "%s\n" "$ac_ct_CC" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+  if test "x$ac_ct_CC" = x; then
+    CC=""
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    CC=$ac_ct_CC
+  fi
+else
+  CC="$ac_cv_prog_CC"
+fi
+
+fi
+
+
+test -z "$CC" && { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "no acceptable C compiler found in \$PATH
+See \`config.log' for more details" "$LINENO" 5; }
+
+# Provide some information about the compiler.
+printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion -version; do
+  { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+printf "%s\n" "$ac_try_echo"; } >&5
+  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    sed '10a\
+... rest of stderr output deleted ...
+         10q' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+  fi
+  rm -f conftest.er1 conftest.err
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+done
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
+# Try to create an executable without -o first, disregard a.out.
+# It helps us diagnose broken compilers and gives us an idea of
+# the executable suffix (exeext).
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5
+printf %s "checking whether the C compiler works... " >&6; }
+ac_link_default=`printf "%s\n" "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+
+# The possible output files:
+ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
+
+ac_rmfiles=
+for ac_file in $ac_files
+do
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+    * ) ac_rmfiles="$ac_rmfiles $ac_file";;
+  esac
+done
+rm -f $ac_rmfiles
+
+if { { ac_try="$ac_link_default"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+printf "%s\n" "$ac_try_echo"; } >&5
+  (eval "$ac_link_default") 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+then :
+  # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
+# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
+# in a Makefile.  We should not override ac_cv_exeext if it was cached,
+# so that the user can short-circuit this test for compilers unknown to
+# Autoconf.
+for ac_file in $ac_files ''
+do
+  test -f "$ac_file" || continue
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
+	;;
+    [ab].out )
+	# We found the default executable, but exeext='' is most
+	# certainly right.
+	break;;
+    *.* )
+	if test ${ac_cv_exeext+y} && test "$ac_cv_exeext" != no;
+	then :; else
+	   ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+	fi
+	# We set ac_cv_exeext here because the later test for it is not
+	# safe: cross compilers may not add the suffix if given an `-o'
+	# argument, so we may need to know it at that point already.
+	# Even if this section looks crufty: it has the advantage of
+	# actually working.
+	break;;
+    * )
+	break;;
+  esac
+done
+test "$ac_cv_exeext" = no && ac_cv_exeext=
+
+else $as_nop
+  ac_file=''
+fi
+if test -z "$ac_file"
+then :
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+printf "%s\n" "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "C compiler cannot create executables
+See \`config.log' for more details" "$LINENO" 5; }
+else $as_nop
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+printf "%s\n" "yes" >&6; }
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
+printf %s "checking for C compiler default output file name... " >&6; }
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
+printf "%s\n" "$ac_file" >&6; }
+ac_exeext=$ac_cv_exeext
+
+rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
+ac_clean_files=$ac_clean_files_save
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
+printf %s "checking for suffix of executables... " >&6; }
+if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+printf "%s\n" "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+then :
+  # If both `conftest.exe' and `conftest' are `present' (well, observable)
+# catch `conftest.exe'.  For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+  test -f "$ac_file" || continue
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+    *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+	  break;;
+    * ) break;;
+  esac
+done
+else $as_nop
+  { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest conftest$ac_cv_exeext
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
+printf "%s\n" "$ac_cv_exeext" >&6; }
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdio.h>
+int
+main (void)
+{
+FILE *f = fopen ("conftest.out", "w");
+ return ferror (f) || fclose (f) != 0;
+
+  ;
+  return 0;
+}
+_ACEOF
+ac_clean_files="$ac_clean_files conftest.out"
+# Check that the compiler produces executables we can run.  If not, either
+# the compiler is broken, or we cross compile.
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
+printf %s "checking whether we are cross compiling... " >&6; }
+if test "$cross_compiling" != yes; then
+  { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+printf "%s\n" "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+  if { ac_try='./conftest$ac_cv_exeext'
+  { { case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+printf "%s\n" "$ac_try_echo"; } >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; }; then
+    cross_compiling=no
+  else
+    if test "$cross_compiling" = maybe; then
+	cross_compiling=yes
+    else
+	{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details" "$LINENO" 5; }
+    fi
+  fi
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
+printf "%s\n" "$cross_compiling" >&6; }
+
+rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
+ac_clean_files=$ac_clean_files_save
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
+printf %s "checking for suffix of object files... " >&6; }
+if test ${ac_cv_objext+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+rm -f conftest.o conftest.obj
+if { { ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+printf "%s\n" "$ac_try_echo"; } >&5
+  (eval "$ac_compile") 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+then :
+  for ac_file in conftest.o conftest.obj conftest.*; do
+  test -f "$ac_file" || continue;
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
+    *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
+       break;;
+  esac
+done
+else $as_nop
+  printf "%s\n" "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of object files: cannot compile
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest.$ac_cv_objext conftest.$ac_ext
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
+printf "%s\n" "$ac_cv_objext" >&6; }
+OBJEXT=$ac_cv_objext
+ac_objext=$OBJEXT
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C" >&5
+printf %s "checking whether the compiler supports GNU C... " >&6; }
+if test ${ac_cv_c_compiler_gnu+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+#ifndef __GNUC__
+       choke me
+#endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"
+then :
+  ac_compiler_gnu=yes
+else $as_nop
+  ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
+printf "%s\n" "$ac_cv_c_compiler_gnu" >&6; }
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+if test $ac_compiler_gnu = yes; then
+  GCC=yes
+else
+  GCC=
+fi
+ac_test_CFLAGS=${CFLAGS+y}
+ac_save_CFLAGS=$CFLAGS
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
+printf %s "checking whether $CC accepts -g... " >&6; }
+if test ${ac_cv_prog_cc_g+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  ac_save_c_werror_flag=$ac_c_werror_flag
+   ac_c_werror_flag=yes
+   ac_cv_prog_cc_g=no
+   CFLAGS="-g"
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"
+then :
+  ac_cv_prog_cc_g=yes
+else $as_nop
+  CFLAGS=""
+      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"
+then :
+
+else $as_nop
+  ac_c_werror_flag=$ac_save_c_werror_flag
+	 CFLAGS="-g"
+	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"
+then :
+  ac_cv_prog_cc_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+   ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+printf "%s\n" "$ac_cv_prog_cc_g" >&6; }
+if test $ac_test_CFLAGS; then
+  CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+  if test "$GCC" = yes; then
+    CFLAGS="-g -O2"
+  else
+    CFLAGS="-g"
+  fi
+else
+  if test "$GCC" = yes; then
+    CFLAGS="-O2"
+  else
+    CFLAGS=
+  fi
+fi
+ac_prog_cc_stdc=no
+if test x$ac_prog_cc_stdc = xno
+then :
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C11 features" >&5
+printf %s "checking for $CC option to enable C11 features... " >&6; }
+if test ${ac_cv_prog_cc_c11+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  ac_cv_prog_cc_c11=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$ac_c_conftest_c11_program
+_ACEOF
+for ac_arg in '' -std=gnu11
+do
+  CC="$ac_save_CC $ac_arg"
+  if ac_fn_c_try_compile "$LINENO"
+then :
+  ac_cv_prog_cc_c11=$ac_arg
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam
+  test "x$ac_cv_prog_cc_c11" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+fi
+
+if test "x$ac_cv_prog_cc_c11" = xno
+then :
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+printf "%s\n" "unsupported" >&6; }
+else $as_nop
+  if test "x$ac_cv_prog_cc_c11" = x
+then :
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+printf "%s\n" "none needed" >&6; }
+else $as_nop
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c11" >&5
+printf "%s\n" "$ac_cv_prog_cc_c11" >&6; }
+     CC="$CC $ac_cv_prog_cc_c11"
+fi
+  ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c11
+  ac_prog_cc_stdc=c11
+fi
+fi
+if test x$ac_prog_cc_stdc = xno
+then :
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C99 features" >&5
+printf %s "checking for $CC option to enable C99 features... " >&6; }
+if test ${ac_cv_prog_cc_c99+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  ac_cv_prog_cc_c99=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$ac_c_conftest_c99_program
+_ACEOF
+for ac_arg in '' -std=gnu99 -std=c99 -c99 -qlanglvl=extc1x -qlanglvl=extc99 -AC99 -D_STDC_C99=
+do
+  CC="$ac_save_CC $ac_arg"
+  if ac_fn_c_try_compile "$LINENO"
+then :
+  ac_cv_prog_cc_c99=$ac_arg
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam
+  test "x$ac_cv_prog_cc_c99" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+fi
+
+if test "x$ac_cv_prog_cc_c99" = xno
+then :
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+printf "%s\n" "unsupported" >&6; }
+else $as_nop
+  if test "x$ac_cv_prog_cc_c99" = x
+then :
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+printf "%s\n" "none needed" >&6; }
+else $as_nop
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5
+printf "%s\n" "$ac_cv_prog_cc_c99" >&6; }
+     CC="$CC $ac_cv_prog_cc_c99"
+fi
+  ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c99
+  ac_prog_cc_stdc=c99
+fi
+fi
+if test x$ac_prog_cc_stdc = xno
+then :
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C89 features" >&5
+printf %s "checking for $CC option to enable C89 features... " >&6; }
+if test ${ac_cv_prog_cc_c89+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$ac_c_conftest_c89_program
+_ACEOF
+for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+  CC="$ac_save_CC $ac_arg"
+  if ac_fn_c_try_compile "$LINENO"
+then :
+  ac_cv_prog_cc_c89=$ac_arg
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam
+  test "x$ac_cv_prog_cc_c89" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+fi
+
+if test "x$ac_cv_prog_cc_c89" = xno
+then :
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+printf "%s\n" "unsupported" >&6; }
+else $as_nop
+  if test "x$ac_cv_prog_cc_c89" = x
+then :
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+printf "%s\n" "none needed" >&6; }
+else $as_nop
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
+printf "%s\n" "$ac_cv_prog_cc_c89" >&6; }
+     CC="$CC $ac_cv_prog_cc_c89"
+fi
+  ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c89
+  ac_prog_cc_stdc=c89
+fi
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+  ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5
+printf %s "checking whether $CC understands -c and -o together... " >&6; }
+if test ${am_cv_prog_cc_c_o+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+  # Make sure it works both with $CC and with simple cc.
+  # Following AC_PROG_CC_C_O, we do the test twice because some
+  # compilers refuse to overwrite an existing .o file with -o,
+  # though they will create one.
+  am_cv_prog_cc_c_o=yes
+  for am_i in 1 2; do
+    if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5
+   ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5
+   ac_status=$?
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   (exit $ac_status); } \
+         && test -f conftest2.$ac_objext; then
+      : OK
+    else
+      am_cv_prog_cc_c_o=no
+      break
+    fi
+  done
+  rm -f core conftest*
+  unset am_i
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5
+printf "%s\n" "$am_cv_prog_cc_c_o" >&6; }
+if test "$am_cv_prog_cc_c_o" != yes; then
+   # Losing compiler, so override with the script.
+   # FIXME: It is wrong to rewrite CC.
+   # But if we don't then we get into trouble of one sort or another.
+   # A longer-term fix would be to have automake use am__CC in this case,
+   # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
+   CC="$am_aux_dir/compile $CC"
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+DEPDIR="${am__leading_dot}deps"
+
+ac_config_commands="$ac_config_commands depfiles"
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} supports the include directive" >&5
+printf %s "checking whether ${MAKE-make} supports the include directive... " >&6; }
+cat > confinc.mk << 'END'
+am__doit:
+	@echo this is the am__doit target >confinc.out
+.PHONY: am__doit
+END
+am__include="#"
+am__quote=
+# BSD make does it like this.
+echo '.include "confinc.mk" # ignored' > confmf.BSD
+# Other make implementations (GNU, Solaris 10, AIX) do it like this.
+echo 'include confinc.mk # ignored' > confmf.GNU
+_am_result=no
+for s in GNU BSD; do
+  { echo "$as_me:$LINENO: ${MAKE-make} -f confmf.$s && cat confinc.out" >&5
+   (${MAKE-make} -f confmf.$s && cat confinc.out) >&5 2>&5
+   ac_status=$?
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   (exit $ac_status); }
+  case $?:`cat confinc.out 2>/dev/null` in #(
+  '0:this is the am__doit target') :
+    case $s in #(
+  BSD) :
+    am__include='.include' am__quote='"' ;; #(
+  *) :
+    am__include='include' am__quote='' ;;
+esac ;; #(
+  *) :
+     ;;
+esac
+  if test "$am__include" != "#"; then
+    _am_result="yes ($s style)"
+    break
+  fi
+done
+rm -f confinc.* confmf.*
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ${_am_result}" >&5
+printf "%s\n" "${_am_result}" >&6; }
+
+# Check whether --enable-dependency-tracking was given.
+if test ${enable_dependency_tracking+y}
+then :
+  enableval=$enable_dependency_tracking;
+fi
+
+if test "x$enable_dependency_tracking" != xno; then
+  am_depcomp="$ac_aux_dir/depcomp"
+  AMDEPBACKSLASH='\'
+  am__nodep='_no'
+fi
+ if test "x$enable_dependency_tracking" != xno; then
+  AMDEP_TRUE=
+  AMDEP_FALSE='#'
+else
+  AMDEP_TRUE='#'
+  AMDEP_FALSE=
+fi
+
+
+
+depcc="$CC"   am_compiler_list=
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+printf %s "checking dependency style of $depcc... " >&6; }
+if test ${am_cv_CC_dependencies_compiler_type+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+  # We make a subdir and do the tests there.  Otherwise we can end up
+  # making bogus files that we don't know about and never remove.  For
+  # instance it was reported that on HP-UX the gcc test will end up
+  # making a dummy file named 'D' -- because '-MD' means "put the output
+  # in D".
+  rm -rf conftest.dir
+  mkdir conftest.dir
+  # Copy depcomp to subdir because otherwise we won't find it if we're
+  # using a relative directory.
+  cp "$am_depcomp" conftest.dir
+  cd conftest.dir
+  # We will build objects and dependencies in a subdirectory because
+  # it helps to detect inapplicable dependency modes.  For instance
+  # both Tru64's cc and ICC support -MD to output dependencies as a
+  # side effect of compilation, but ICC will put the dependencies in
+  # the current directory while Tru64 will put them in the object
+  # directory.
+  mkdir sub
+
+  am_cv_CC_dependencies_compiler_type=none
+  if test "$am_compiler_list" = ""; then
+     am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+  fi
+  am__universal=false
+  case " $depcc " in #(
+     *\ -arch\ *\ -arch\ *) am__universal=true ;;
+     esac
+
+  for depmode in $am_compiler_list; do
+    # Set up a source with many dependencies, because some compilers
+    # like to wrap large dependency lists on column 80 (with \), and
+    # we should not choose a depcomp mode which is confused by this.
+    #
+    # We need to recreate these files for each test, as the compiler may
+    # overwrite some of them when testing with obscure command lines.
+    # This happens at least with the AIX C compiler.
+    : > sub/conftest.c
+    for i in 1 2 3 4 5 6; do
+      echo '#include "conftst'$i'.h"' >> sub/conftest.c
+      # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
+      # Solaris 10 /bin/sh.
+      echo '/* dummy */' > sub/conftst$i.h
+    done
+    echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+    # We check with '-c' and '-o' for the sake of the "dashmstdout"
+    # mode.  It turns out that the SunPro C++ compiler does not properly
+    # handle '-M -o', and we need to detect this.  Also, some Intel
+    # versions had trouble with output in subdirs.
+    am__obj=sub/conftest.${OBJEXT-o}
+    am__minus_obj="-o $am__obj"
+    case $depmode in
+    gcc)
+      # This depmode causes a compiler race in universal mode.
+      test "$am__universal" = false || continue
+      ;;
+    nosideeffect)
+      # After this tag, mechanisms are not by side-effect, so they'll
+      # only be used when explicitly requested.
+      if test "x$enable_dependency_tracking" = xyes; then
+	continue
+      else
+	break
+      fi
+      ;;
+    msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+      # This compiler won't grok '-c -o', but also, the minuso test has
+      # not run yet.  These depmodes are late enough in the game, and
+      # so weak that their functioning should not be impacted.
+      am__obj=conftest.${OBJEXT-o}
+      am__minus_obj=
+      ;;
+    none) break ;;
+    esac
+    if depmode=$depmode \
+       source=sub/conftest.c object=$am__obj \
+       depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+       $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+         >/dev/null 2>conftest.err &&
+       grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+       ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+      # icc doesn't choke on unknown options, it will just issue warnings
+      # or remarks (even with -Werror).  So we grep stderr for any message
+      # that says an option was ignored or not supported.
+      # When given -MP, icc 7.0 and 7.1 complain thusly:
+      #   icc: Command line warning: ignoring option '-M'; no argument required
+      # The diagnosis changed in icc 8.0:
+      #   icc: Command line remark: option '-MP' not supported
+      if (grep 'ignoring option' conftest.err ||
+          grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+        am_cv_CC_dependencies_compiler_type=$depmode
+        break
+      fi
+    fi
+  done
+
+  cd ..
+  rm -rf conftest.dir
+else
+  am_cv_CC_dependencies_compiler_type=none
+fi
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5
+printf "%s\n" "$am_cv_CC_dependencies_compiler_type" >&6; }
+CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type
+
+ if
+  test "x$enable_dependency_tracking" != xno \
+  && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then
+  am__fastdepCC_TRUE=
+  am__fastdepCC_FALSE='#'
+else
+  am__fastdepCC_TRUE='#'
+  am__fastdepCC_FALSE=
+fi
+
+
+
+
+
+
+
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+if test -z "$CXX"; then
+  if test -n "$CCC"; then
+    CXX=$CCC
+  else
+    if test -n "$ac_tool_prefix"; then
+  for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC clang++
+  do
+    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_CXX+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$CXX"; then
+  ac_cv_prog_CXX="$CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_CXX="$ac_tool_prefix$ac_prog"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CXX=$ac_cv_prog_CXX
+if test -n "$CXX"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5
+printf "%s\n" "$CXX" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+    test -n "$CXX" && break
+  done
+fi
+if test -z "$CXX"; then
+  ac_ct_CXX=$CXX
+  for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC clang++
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_CXX+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_CXX"; then
+  ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_CXX="$ac_prog"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CXX=$ac_cv_prog_ac_ct_CXX
+if test -n "$ac_ct_CXX"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5
+printf "%s\n" "$ac_ct_CXX" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+  test -n "$ac_ct_CXX" && break
+done
+
+  if test "x$ac_ct_CXX" = x; then
+    CXX="g++"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    CXX=$ac_ct_CXX
+  fi
+fi
+
+  fi
+fi
+# Provide some information about the compiler.
+printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+  { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+printf "%s\n" "$ac_try_echo"; } >&5
+  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    sed '10a\
+... rest of stderr output deleted ...
+         10q' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+  fi
+  rm -f conftest.er1 conftest.err
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+done
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C++" >&5
+printf %s "checking whether the compiler supports GNU C++... " >&6; }
+if test ${ac_cv_cxx_compiler_gnu+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+#ifndef __GNUC__
+       choke me
+#endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"
+then :
+  ac_compiler_gnu=yes
+else $as_nop
+  ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+ac_cv_cxx_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5
+printf "%s\n" "$ac_cv_cxx_compiler_gnu" >&6; }
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+if test $ac_compiler_gnu = yes; then
+  GXX=yes
+else
+  GXX=
+fi
+ac_test_CXXFLAGS=${CXXFLAGS+y}
+ac_save_CXXFLAGS=$CXXFLAGS
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5
+printf %s "checking whether $CXX accepts -g... " >&6; }
+if test ${ac_cv_prog_cxx_g+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  ac_save_cxx_werror_flag=$ac_cxx_werror_flag
+   ac_cxx_werror_flag=yes
+   ac_cv_prog_cxx_g=no
+   CXXFLAGS="-g"
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"
+then :
+  ac_cv_prog_cxx_g=yes
+else $as_nop
+  CXXFLAGS=""
+      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"
+then :
+
+else $as_nop
+  ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+	 CXXFLAGS="-g"
+	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"
+then :
+  ac_cv_prog_cxx_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+   ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5
+printf "%s\n" "$ac_cv_prog_cxx_g" >&6; }
+if test $ac_test_CXXFLAGS; then
+  CXXFLAGS=$ac_save_CXXFLAGS
+elif test $ac_cv_prog_cxx_g = yes; then
+  if test "$GXX" = yes; then
+    CXXFLAGS="-g -O2"
+  else
+    CXXFLAGS="-g"
+  fi
+else
+  if test "$GXX" = yes; then
+    CXXFLAGS="-O2"
+  else
+    CXXFLAGS=
+  fi
+fi
+ac_prog_cxx_stdcxx=no
+if test x$ac_prog_cxx_stdcxx = xno
+then :
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CXX option to enable C++11 features" >&5
+printf %s "checking for $CXX option to enable C++11 features... " >&6; }
+if test ${ac_cv_prog_cxx_11+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  ac_cv_prog_cxx_11=no
+ac_save_CXX=$CXX
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$ac_cxx_conftest_cxx11_program
+_ACEOF
+for ac_arg in '' -std=gnu++11 -std=gnu++0x -std=c++11 -std=c++0x -qlanglvl=extended0x -AA
+do
+  CXX="$ac_save_CXX $ac_arg"
+  if ac_fn_cxx_try_compile "$LINENO"
+then :
+  ac_cv_prog_cxx_cxx11=$ac_arg
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam
+  test "x$ac_cv_prog_cxx_cxx11" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CXX=$ac_save_CXX
+fi
+
+if test "x$ac_cv_prog_cxx_cxx11" = xno
+then :
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+printf "%s\n" "unsupported" >&6; }
+else $as_nop
+  if test "x$ac_cv_prog_cxx_cxx11" = x
+then :
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+printf "%s\n" "none needed" >&6; }
+else $as_nop
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_cxx11" >&5
+printf "%s\n" "$ac_cv_prog_cxx_cxx11" >&6; }
+     CXX="$CXX $ac_cv_prog_cxx_cxx11"
+fi
+  ac_cv_prog_cxx_stdcxx=$ac_cv_prog_cxx_cxx11
+  ac_prog_cxx_stdcxx=cxx11
+fi
+fi
+if test x$ac_prog_cxx_stdcxx = xno
+then :
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CXX option to enable C++98 features" >&5
+printf %s "checking for $CXX option to enable C++98 features... " >&6; }
+if test ${ac_cv_prog_cxx_98+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  ac_cv_prog_cxx_98=no
+ac_save_CXX=$CXX
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$ac_cxx_conftest_cxx98_program
+_ACEOF
+for ac_arg in '' -std=gnu++98 -std=c++98 -qlanglvl=extended -AA
+do
+  CXX="$ac_save_CXX $ac_arg"
+  if ac_fn_cxx_try_compile "$LINENO"
+then :
+  ac_cv_prog_cxx_cxx98=$ac_arg
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam
+  test "x$ac_cv_prog_cxx_cxx98" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CXX=$ac_save_CXX
+fi
+
+if test "x$ac_cv_prog_cxx_cxx98" = xno
+then :
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+printf "%s\n" "unsupported" >&6; }
+else $as_nop
+  if test "x$ac_cv_prog_cxx_cxx98" = x
+then :
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+printf "%s\n" "none needed" >&6; }
+else $as_nop
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_cxx98" >&5
+printf "%s\n" "$ac_cv_prog_cxx_cxx98" >&6; }
+     CXX="$CXX $ac_cv_prog_cxx_cxx98"
+fi
+  ac_cv_prog_cxx_stdcxx=$ac_cv_prog_cxx_cxx98
+  ac_prog_cxx_stdcxx=cxx98
+fi
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+depcc="$CXX"  am_compiler_list=
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+printf %s "checking dependency style of $depcc... " >&6; }
+if test ${am_cv_CXX_dependencies_compiler_type+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+  # We make a subdir and do the tests there.  Otherwise we can end up
+  # making bogus files that we don't know about and never remove.  For
+  # instance it was reported that on HP-UX the gcc test will end up
+  # making a dummy file named 'D' -- because '-MD' means "put the output
+  # in D".
+  rm -rf conftest.dir
+  mkdir conftest.dir
+  # Copy depcomp to subdir because otherwise we won't find it if we're
+  # using a relative directory.
+  cp "$am_depcomp" conftest.dir
+  cd conftest.dir
+  # We will build objects and dependencies in a subdirectory because
+  # it helps to detect inapplicable dependency modes.  For instance
+  # both Tru64's cc and ICC support -MD to output dependencies as a
+  # side effect of compilation, but ICC will put the dependencies in
+  # the current directory while Tru64 will put them in the object
+  # directory.
+  mkdir sub
+
+  am_cv_CXX_dependencies_compiler_type=none
+  if test "$am_compiler_list" = ""; then
+     am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+  fi
+  am__universal=false
+  case " $depcc " in #(
+     *\ -arch\ *\ -arch\ *) am__universal=true ;;
+     esac
+
+  for depmode in $am_compiler_list; do
+    # Set up a source with many dependencies, because some compilers
+    # like to wrap large dependency lists on column 80 (with \), and
+    # we should not choose a depcomp mode which is confused by this.
+    #
+    # We need to recreate these files for each test, as the compiler may
+    # overwrite some of them when testing with obscure command lines.
+    # This happens at least with the AIX C compiler.
+    : > sub/conftest.c
+    for i in 1 2 3 4 5 6; do
+      echo '#include "conftst'$i'.h"' >> sub/conftest.c
+      # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
+      # Solaris 10 /bin/sh.
+      echo '/* dummy */' > sub/conftst$i.h
+    done
+    echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+    # We check with '-c' and '-o' for the sake of the "dashmstdout"
+    # mode.  It turns out that the SunPro C++ compiler does not properly
+    # handle '-M -o', and we need to detect this.  Also, some Intel
+    # versions had trouble with output in subdirs.
+    am__obj=sub/conftest.${OBJEXT-o}
+    am__minus_obj="-o $am__obj"
+    case $depmode in
+    gcc)
+      # This depmode causes a compiler race in universal mode.
+      test "$am__universal" = false || continue
+      ;;
+    nosideeffect)
+      # After this tag, mechanisms are not by side-effect, so they'll
+      # only be used when explicitly requested.
+      if test "x$enable_dependency_tracking" = xyes; then
+	continue
+      else
+	break
+      fi
+      ;;
+    msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+      # This compiler won't grok '-c -o', but also, the minuso test has
+      # not run yet.  These depmodes are late enough in the game, and
+      # so weak that their functioning should not be impacted.
+      am__obj=conftest.${OBJEXT-o}
+      am__minus_obj=
+      ;;
+    none) break ;;
+    esac
+    if depmode=$depmode \
+       source=sub/conftest.c object=$am__obj \
+       depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+       $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+         >/dev/null 2>conftest.err &&
+       grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+       ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+      # icc doesn't choke on unknown options, it will just issue warnings
+      # or remarks (even with -Werror).  So we grep stderr for any message
+      # that says an option was ignored or not supported.
+      # When given -MP, icc 7.0 and 7.1 complain thusly:
+      #   icc: Command line warning: ignoring option '-M'; no argument required
+      # The diagnosis changed in icc 8.0:
+      #   icc: Command line remark: option '-MP' not supported
+      if (grep 'ignoring option' conftest.err ||
+          grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+        am_cv_CXX_dependencies_compiler_type=$depmode
+        break
+      fi
+    fi
+  done
+
+  cd ..
+  rm -rf conftest.dir
+else
+  am_cv_CXX_dependencies_compiler_type=none
+fi
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5
+printf "%s\n" "$am_cv_CXX_dependencies_compiler_type" >&6; }
+CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type
+
+ if
+  test "x$enable_dependency_tracking" != xno \
+  && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then
+  am__fastdepCXX_TRUE=
+  am__fastdepCXX_FALSE='#'
+else
+  am__fastdepCXX_TRUE='#'
+  am__fastdepCXX_FALSE=
+fi
+
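For orientation, the depmode probe above accepts the first mode whose ./depcomp run leaves a sub/conftest.Po listing every included header. A minimal stand-alone sketch of the same side-effect idea, assuming a GCC-compatible compiler is available as gcc (file names are illustrative, not part of the generated script):

    # Sketch only: show dependency output produced as a side effect of
    # compilation, which is what depmodes such as 'gcc3' rely on.
    mkdir -p deptest && cd deptest
    echo '/* dummy */' > header1.h
    printf '#include "header1.h"\nint main(void) { return 0; }\n' > main.c
    gcc -MD -MF main.d -c main.c       # -MD/-MF write main.d while compiling
    grep header1.h main.d && echo "side-effect dependency tracking works"
    cd .. && rm -rf deptest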
+
+case `pwd` in
+  *\ * | *\	*)
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5
+printf "%s\n" "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;;
+esac
+
+
+
+macro_version='2.4.6'
+macro_revision='2.4.6'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ltmain=$ac_aux_dir/ltmain.sh
+
+
+
+  # Make sure we can run config.sub.
+$SHELL "${ac_aux_dir}config.sub" sun4 >/dev/null 2>&1 ||
+  as_fn_error $? "cannot run $SHELL ${ac_aux_dir}config.sub" "$LINENO" 5
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking build system type" >&5
+printf %s "checking build system type... " >&6; }
+if test ${ac_cv_build+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  ac_build_alias=$build_alias
+test "x$ac_build_alias" = x &&
+  ac_build_alias=`$SHELL "${ac_aux_dir}config.guess"`
+test "x$ac_build_alias" = x &&
+  as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5
+ac_cv_build=`$SHELL "${ac_aux_dir}config.sub" $ac_build_alias` ||
+  as_fn_error $? "$SHELL ${ac_aux_dir}config.sub $ac_build_alias failed" "$LINENO" 5
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5
+printf "%s\n" "$ac_cv_build" >&6; }
+case $ac_cv_build in
+*-*-*) ;;
+*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;;
+esac
+build=$ac_cv_build
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_build
+shift
+build_cpu=$1
+build_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+build_os=$*
+IFS=$ac_save_IFS
+case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking host system type" >&5
+printf %s "checking host system type... " >&6; }
+if test ${ac_cv_host+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test "x$host_alias" = x; then
+  ac_cv_host=$ac_cv_build
+else
+  ac_cv_host=`$SHELL "${ac_aux_dir}config.sub" $host_alias` ||
+    as_fn_error $? "$SHELL ${ac_aux_dir}config.sub $host_alias failed" "$LINENO" 5
+fi
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5
+printf "%s\n" "$ac_cv_host" >&6; }
+case $ac_cv_host in
+*-*-*) ;;
+*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;;
+esac
+host=$ac_cv_host
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_host
+shift
+host_cpu=$1
+host_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+host_os=$*
+IFS=$ac_save_IFS
+case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
+
+
+# Backslashify metacharacters that are still active within
+# double-quoted strings.
+sed_quote_subst='s/\(["`$\\]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\(["`\\]\)/\\\1/g'
+
+# Sed substitution to delay expansion of an escaped shell variable in a
+# double_quote_subst'ed string.
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
+
+# Sed substitution to delay expansion of an escaped single quote.
+delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g'
+
+# Sed substitution to avoid accidental globbing in evaled expressions
+no_glob_subst='s/\*/\\\*/g'
+
+ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
+ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5
+printf %s "checking how to print strings... " >&6; }
+# Test print first, because it will be a builtin if present.
+if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \
+   test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then
+  ECHO='print -r --'
+elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then
+  ECHO='printf %s\n'
+else
+  # Use this function as a fallback that always works.
+  func_fallback_echo ()
+  {
+    eval 'cat <<_LTECHO_EOF
+$1
+_LTECHO_EOF'
+  }
+  ECHO='func_fallback_echo'
+fi
+
+# func_echo_all arg...
+# Invoke $ECHO with all args, space-separated.
+func_echo_all ()
+{
+    $ECHO ""
+}
+
+case $ECHO in
+  printf*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: printf" >&5
+printf "%s\n" "printf" >&6; } ;;
+  print*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: print -r" >&5
+printf "%s\n" "print -r" >&6; } ;;
+  *) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: cat" >&5
+printf "%s\n" "cat" >&6; } ;;
+esac
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5
+printf %s "checking for a sed that does not truncate output... " >&6; }
+if test ${ac_cv_path_SED+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+            ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/
+     for ac_i in 1 2 3 4 5 6 7; do
+       ac_script="$ac_script$as_nl$ac_script"
+     done
+     echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed
+     { ac_script=; unset ac_script;}
+     if test -z "$SED"; then
+  ac_path_SED_found=false
+  # Loop through the user's path and test for each of PROGNAME-LIST
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_prog in sed gsed
+   do
+    for ac_exec_ext in '' $ac_executable_extensions; do
+      ac_path_SED="$as_dir$ac_prog$ac_exec_ext"
+      as_fn_executable_p "$ac_path_SED" || continue
+# Check for GNU ac_path_SED and select it if it is found.
+  # Check for GNU $ac_path_SED
+case `"$ac_path_SED" --version 2>&1` in
+*GNU*)
+  ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;;
+*)
+  ac_count=0
+  printf %s 0123456789 >"conftest.in"
+  while :
+  do
+    cat "conftest.in" "conftest.in" >"conftest.tmp"
+    mv "conftest.tmp" "conftest.in"
+    cp "conftest.in" "conftest.nl"
+    printf "%s\n" '' >> "conftest.nl"
+    "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break
+    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+    as_fn_arith $ac_count + 1 && ac_count=$as_val
+    if test $ac_count -gt ${ac_path_SED_max-0}; then
+      # Best one so far, save it but keep looking for a better one
+      ac_cv_path_SED="$ac_path_SED"
+      ac_path_SED_max=$ac_count
+    fi
+    # 10*(2^10) chars as input seems more than enough
+    test $ac_count -gt 10 && break
+  done
+  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+      $ac_path_SED_found && break 3
+    done
+  done
+  done
+IFS=$as_save_IFS
+  if test -z "$ac_cv_path_SED"; then
+    as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5
+  fi
+else
+  ac_cv_path_SED=$SED
+fi
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5
+printf "%s\n" "$ac_cv_path_SED" >&6; }
+ SED="$ac_cv_path_SED"
+  rm -f conftest.sed
+
+test -z "$SED" && SED=sed
+Xsed="$SED -e 1s/^X//"
+
+
+
+
+
+
+
+
+
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
+printf %s "checking for grep that handles long lines and -e... " >&6; }
+if test ${ac_cv_path_GREP+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -z "$GREP"; then
+  ac_path_GREP_found=false
+  # Loop through the user's path and test for each of PROGNAME-LIST
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_prog in grep ggrep
+   do
+    for ac_exec_ext in '' $ac_executable_extensions; do
+      ac_path_GREP="$as_dir$ac_prog$ac_exec_ext"
+      as_fn_executable_p "$ac_path_GREP" || continue
+# Check for GNU ac_path_GREP and select it if it is found.
+  # Check for GNU $ac_path_GREP
+case `"$ac_path_GREP" --version 2>&1` in
+*GNU*)
+  ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
+*)
+  ac_count=0
+  printf %s 0123456789 >"conftest.in"
+  while :
+  do
+    cat "conftest.in" "conftest.in" >"conftest.tmp"
+    mv "conftest.tmp" "conftest.in"
+    cp "conftest.in" "conftest.nl"
+    printf "%s\n" 'GREP' >> "conftest.nl"
+    "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+    as_fn_arith $ac_count + 1 && ac_count=$as_val
+    if test $ac_count -gt ${ac_path_GREP_max-0}; then
+      # Best one so far, save it but keep looking for a better one
+      ac_cv_path_GREP="$ac_path_GREP"
+      ac_path_GREP_max=$ac_count
+    fi
+    # 10*(2^10) chars as input seems more than enough
+    test $ac_count -gt 10 && break
+  done
+  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+      $ac_path_GREP_found && break 3
+    done
+  done
+  done
+IFS=$as_save_IFS
+  if test -z "$ac_cv_path_GREP"; then
+    as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+  fi
+else
+  ac_cv_path_GREP=$GREP
+fi
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
+printf "%s\n" "$ac_cv_path_GREP" >&6; }
+ GREP="$ac_cv_path_GREP"
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
+printf %s "checking for egrep... " >&6; }
+if test ${ac_cv_path_EGREP+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
+   then ac_cv_path_EGREP="$GREP -E"
+   else
+     if test -z "$EGREP"; then
+  ac_path_EGREP_found=false
+  # Loop through the user's path and test for each of PROGNAME-LIST
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_prog in egrep
+   do
+    for ac_exec_ext in '' $ac_executable_extensions; do
+      ac_path_EGREP="$as_dir$ac_prog$ac_exec_ext"
+      as_fn_executable_p "$ac_path_EGREP" || continue
+# Check for GNU ac_path_EGREP and select it if it is found.
+  # Check for GNU $ac_path_EGREP
+case `"$ac_path_EGREP" --version 2>&1` in
+*GNU*)
+  ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
+*)
+  ac_count=0
+  printf %s 0123456789 >"conftest.in"
+  while :
+  do
+    cat "conftest.in" "conftest.in" >"conftest.tmp"
+    mv "conftest.tmp" "conftest.in"
+    cp "conftest.in" "conftest.nl"
+    printf "%s\n" 'EGREP' >> "conftest.nl"
+    "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+    as_fn_arith $ac_count + 1 && ac_count=$as_val
+    if test $ac_count -gt ${ac_path_EGREP_max-0}; then
+      # Best one so far, save it but keep looking for a better one
+      ac_cv_path_EGREP="$ac_path_EGREP"
+      ac_path_EGREP_max=$ac_count
+    fi
+    # 10*(2^10) chars as input seems more than enough
+    test $ac_count -gt 10 && break
+  done
+  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+      $ac_path_EGREP_found && break 3
+    done
+  done
+  done
+IFS=$as_save_IFS
+  if test -z "$ac_cv_path_EGREP"; then
+    as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+  fi
+else
+  ac_cv_path_EGREP=$EGREP
+fi
+
+   fi
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
+printf "%s\n" "$ac_cv_path_EGREP" >&6; }
+ EGREP="$ac_cv_path_EGREP"
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5
+printf %s "checking for fgrep... " >&6; }
+if test ${ac_cv_path_FGREP+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1
+   then ac_cv_path_FGREP="$GREP -F"
+   else
+     if test -z "$FGREP"; then
+  ac_path_FGREP_found=false
+  # Loop through the user's path and test for each of PROGNAME-LIST
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_prog in fgrep
+   do
+    for ac_exec_ext in '' $ac_executable_extensions; do
+      ac_path_FGREP="$as_dir$ac_prog$ac_exec_ext"
+      as_fn_executable_p "$ac_path_FGREP" || continue
+# Check for GNU ac_path_FGREP and select it if it is found.
+  # Check for GNU $ac_path_FGREP
+case `"$ac_path_FGREP" --version 2>&1` in
+*GNU*)
+  ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;;
+*)
+  ac_count=0
+  printf %s 0123456789 >"conftest.in"
+  while :
+  do
+    cat "conftest.in" "conftest.in" >"conftest.tmp"
+    mv "conftest.tmp" "conftest.in"
+    cp "conftest.in" "conftest.nl"
+    printf "%s\n" 'FGREP' >> "conftest.nl"
+    "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break
+    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+    as_fn_arith $ac_count + 1 && ac_count=$as_val
+    if test $ac_count -gt ${ac_path_FGREP_max-0}; then
+      # Best one so far, save it but keep looking for a better one
+      ac_cv_path_FGREP="$ac_path_FGREP"
+      ac_path_FGREP_max=$ac_count
+    fi
+    # 10*(2^10) chars as input seems more than enough
+    test $ac_count -gt 10 && break
+  done
+  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+      $ac_path_FGREP_found && break 3
+    done
+  done
+  done
+IFS=$as_save_IFS
+  if test -z "$ac_cv_path_FGREP"; then
+    as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+  fi
+else
+  ac_cv_path_FGREP=$FGREP
+fi
+
+   fi
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5
+printf "%s\n" "$ac_cv_path_FGREP" >&6; }
+ FGREP="$ac_cv_path_FGREP"
+
+
+test -z "$GREP" && GREP=grep
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# Check whether --with-gnu-ld was given.
+if test ${with_gnu_ld+y}
+then :
+  withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes
+else $as_nop
+  with_gnu_ld=no
+fi
+
+ac_prog=ld
+if test yes = "$GCC"; then
+  # Check if gcc -print-prog-name=ld gives a path.
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
+printf %s "checking for ld used by $CC... " >&6; }
+  case $host in
+  *-*-mingw*)
+    # gcc leaves a trailing carriage return, which upsets mingw
+    ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+  *)
+    ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+  esac
+  case $ac_prog in
+    # Accept absolute paths.
+    [\\/]* | ?:[\\/]*)
+      re_direlt='/[^/][^/]*/\.\./'
+      # Canonicalize the pathname of ld
+      ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+      while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+	ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+      done
+      test -z "$LD" && LD=$ac_prog
+      ;;
+  "")
+    # If it fails, then pretend we aren't using GCC.
+    ac_prog=ld
+    ;;
+  *)
+    # If it is relative, then search for the first ld in PATH.
+    with_gnu_ld=unknown
+    ;;
+  esac
+elif test yes = "$with_gnu_ld"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
+printf %s "checking for GNU ld... " >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
+printf %s "checking for non-GNU ld... " >&6; }
+fi
+if test ${lt_cv_path_LD+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -z "$LD"; then
+  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
+  for ac_dir in $PATH; do
+    IFS=$lt_save_ifs
+    test -z "$ac_dir" && ac_dir=.
+    if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+      lt_cv_path_LD=$ac_dir/$ac_prog
+      # Check to see if the program is GNU ld.  I'd rather use --version,
+      # but apparently some variants of GNU ld only accept -v.
+      # Break only if it was the GNU/non-GNU ld that we prefer.
+      case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+      *GNU* | *'with BFD'*)
+	test no != "$with_gnu_ld" && break
+	;;
+      *)
+	test yes != "$with_gnu_ld" && break
+	;;
+      esac
+    fi
+  done
+  IFS=$lt_save_ifs
+else
+  lt_cv_path_LD=$LD # Let the user override the test with a path.
+fi
+fi
+
+LD=$lt_cv_path_LD
+if test -n "$LD"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
+printf "%s\n" "$LD" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
+printf %s "checking if the linker ($LD) is GNU ld... " >&6; }
+if test ${lt_cv_prog_gnu_ld+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  # I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+  lt_cv_prog_gnu_ld=yes
+  ;;
+*)
+  lt_cv_prog_gnu_ld=no
+  ;;
+esac
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5
+printf "%s\n" "$lt_cv_prog_gnu_ld" >&6; }
+with_gnu_ld=$lt_cv_prog_gnu_ld
+
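The classification above can be reproduced by hand, since GNU ld (and other BFD-based linkers) identify themselves in their -v banner; a sketch, assuming a gcc driver is installed:

    # Sketch only: locate the linker the compiler driver would use and
    # classify it from its version banner, as the cached test above does.
    ld_path=`gcc -print-prog-name=ld`
    case `"$ld_path" -v 2>&1 </dev/null` in
      *GNU* | *'with BFD'*) echo "GNU ld: $ld_path" ;;
      *)                    echo "non-GNU ld: $ld_path" ;;
    esac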
+
+
+
+
+
+
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5
+printf %s "checking for BSD- or MS-compatible name lister (nm)... " >&6; }
+if test ${lt_cv_path_NM+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$NM"; then
+  # Let the user override the test.
+  lt_cv_path_NM=$NM
+else
+  lt_nm_to_check=${ac_tool_prefix}nm
+  if test -n "$ac_tool_prefix" && test "$build" = "$host"; then
+    lt_nm_to_check="$lt_nm_to_check nm"
+  fi
+  for lt_tmp_nm in $lt_nm_to_check; do
+    lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
+    for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do
+      IFS=$lt_save_ifs
+      test -z "$ac_dir" && ac_dir=.
+      tmp_nm=$ac_dir/$lt_tmp_nm
+      if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then
+	# Check to see if the nm accepts a BSD-compat flag.
+	# Adding the 'sed 1q' prevents false positives on HP-UX, which says:
+	#   nm: unknown option "B" ignored
+	# Tru64's nm complains that /dev/null is an invalid object file
+	# MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty
+	case $build_os in
+	mingw*) lt_bad_file=conftest.nm/nofile ;;
+	*) lt_bad_file=/dev/null ;;
+	esac
+	case `"$tmp_nm" -B $lt_bad_file 2>&1 | sed '1q'` in
+	*$lt_bad_file* | *'Invalid file or object type'*)
+	  lt_cv_path_NM="$tmp_nm -B"
+	  break 2
+	  ;;
+	*)
+	  case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in
+	  */dev/null*)
+	    lt_cv_path_NM="$tmp_nm -p"
+	    break 2
+	    ;;
+	  *)
+	    lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but
+	    continue # so that we can try to find one that supports BSD flags
+	    ;;
+	  esac
+	  ;;
+	esac
+      fi
+    done
+    IFS=$lt_save_ifs
+  done
+  : ${lt_cv_path_NM=no}
+fi
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5
+printf "%s\n" "$lt_cv_path_NM" >&6; }
+if test no != "$lt_cv_path_NM"; then
+  NM=$lt_cv_path_NM
+else
+  # Didn't find any BSD compatible name lister, look for dumpbin.
+  if test -n "$DUMPBIN"; then :
+    # Let the user override the test.
+  else
+    if test -n "$ac_tool_prefix"; then
+  for ac_prog in dumpbin "link -dump"
+  do
+    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_DUMPBIN+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$DUMPBIN"; then
+  ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+DUMPBIN=$ac_cv_prog_DUMPBIN
+if test -n "$DUMPBIN"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5
+printf "%s\n" "$DUMPBIN" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+    test -n "$DUMPBIN" && break
+  done
+fi
+if test -z "$DUMPBIN"; then
+  ac_ct_DUMPBIN=$DUMPBIN
+  for ac_prog in dumpbin "link -dump"
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_DUMPBIN+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_DUMPBIN"; then
+  ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_DUMPBIN="$ac_prog"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN
+if test -n "$ac_ct_DUMPBIN"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5
+printf "%s\n" "$ac_ct_DUMPBIN" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+  test -n "$ac_ct_DUMPBIN" && break
+done
+
+  if test "x$ac_ct_DUMPBIN" = x; then
+    DUMPBIN=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    DUMPBIN=$ac_ct_DUMPBIN
+  fi
+fi
+
+    case `$DUMPBIN -symbols -headers /dev/null 2>&1 | sed '1q'` in
+    *COFF*)
+      DUMPBIN="$DUMPBIN -symbols -headers"
+      ;;
+    *)
+      DUMPBIN=:
+      ;;
+    esac
+  fi
+
+  if test : != "$DUMPBIN"; then
+    NM=$DUMPBIN
+  fi
+fi
+test -z "$NM" && NM=nm
+
+
+
+
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5
+printf %s "checking the name lister ($NM) interface... " >&6; }
+if test ${lt_cv_nm_interface+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_nm_interface="BSD nm"
+  echo "int some_variable = 0;" > conftest.$ac_ext
+  (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5)
+  (eval "$ac_compile" 2>conftest.err)
+  cat conftest.err >&5
+  (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
+  (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
+  cat conftest.err >&5
+  (eval echo "\"\$as_me:$LINENO: output\"" >&5)
+  cat conftest.out >&5
+  if $GREP 'External.*some_variable' conftest.out > /dev/null; then
+    lt_cv_nm_interface="MS dumpbin"
+  fi
+  rm -f conftest*
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5
+printf "%s\n" "$lt_cv_nm_interface" >&6; }
+
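The two interfaces differ in their listing format: a BSD-style nm prints a type letter followed by the symbol name, while an MS dumpbin-style lister prints records containing the word 'External'. A sketch of the same classification on a single object file, assuming gcc and nm are on PATH (file names are illustrative):

    # Sketch only: compile one global symbol and see which listing style
    # the name lister produces for it.
    echo 'int some_variable = 0;' > nmtest.c
    gcc -c nmtest.c
    if nm nmtest.o | grep 'External.*some_variable' >/dev/null; then
      echo "MS dumpbin-style interface"
    else
      echo "BSD nm-style interface"
    fi
    rm -f nmtest.c nmtest.o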
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5
+printf %s "checking whether ln -s works... " >&6; }
+LN_S=$as_ln_s
+if test "$LN_S" = "ln -s"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+printf "%s\n" "yes" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5
+printf "%s\n" "no, using $LN_S" >&6; }
+fi
+
+# find the maximum length of command line arguments
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5
+printf %s "checking the maximum length of command line arguments... " >&6; }
+if test ${lt_cv_sys_max_cmd_len+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+    i=0
+  teststring=ABCD
+
+  case $build_os in
+  msdosdjgpp*)
+    # On DJGPP, this test can blow up pretty badly due to problems in libc
+    # (any single argument exceeding 2000 bytes causes a buffer overrun
+    # during glob expansion).  Even if it were fixed, the result of this
+    # check would be larger than it should be.
+    lt_cv_sys_max_cmd_len=12288;    # 12K is about right
+    ;;
+
+  gnu*)
+    # Under GNU Hurd, this test is not required because there is
+    # no limit to the length of command line arguments.
+    # Libtool will interpret -1 as no limit whatsoever
+    lt_cv_sys_max_cmd_len=-1;
+    ;;
+
+  cygwin* | mingw* | cegcc*)
+    # On Win9x/ME, this test blows up -- it succeeds, but takes
+    # about 5 minutes as the teststring grows exponentially.
+    # Worse, since 9x/ME are not pre-emptively multitasking,
+    # you end up with a "frozen" computer, even though with patience
+    # the test eventually succeeds (with a max line length of 256k).
+    # Instead, let's just punt: use the minimum linelength reported by
+    # all of the supported platforms: 8192 (on NT/2K/XP).
+    lt_cv_sys_max_cmd_len=8192;
+    ;;
+
+  mint*)
+    # On MiNT this can take a long time and run out of memory.
+    lt_cv_sys_max_cmd_len=8192;
+    ;;
+
+  amigaos*)
+    # On AmigaOS with pdksh, this test takes hours, literally.
+    # So we just punt and use a minimum line length of 8192.
+    lt_cv_sys_max_cmd_len=8192;
+    ;;
+
+  bitrig* | darwin* | dragonfly* | freebsd* | netbsd* | openbsd*)
+    # This has been around since 386BSD, at least.  Likely further.
+    if test -x /sbin/sysctl; then
+      lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax`
+    elif test -x /usr/sbin/sysctl; then
+      lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax`
+    else
+      lt_cv_sys_max_cmd_len=65536	# usable default for all BSDs
+    fi
+    # And add a safety zone
+    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+    ;;
+
+  interix*)
+    # We know the value 262144 and hardcode it with a safety zone (like BSD)
+    lt_cv_sys_max_cmd_len=196608
+    ;;
+
+  os2*)
+    # The test takes a long time on OS/2.
+    lt_cv_sys_max_cmd_len=8192
+    ;;
+
+  osf*)
+    # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
+    # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
+    # nice to cause kernel panics, so let's avoid the loop below.
+    # First set a reasonable default.
+    lt_cv_sys_max_cmd_len=16384
+    #
+    if test -x /sbin/sysconfig; then
+      case `/sbin/sysconfig -q proc exec_disable_arg_limit` in
+        *1*) lt_cv_sys_max_cmd_len=-1 ;;
+      esac
+    fi
+    ;;
+  sco3.2v5*)
+    lt_cv_sys_max_cmd_len=102400
+    ;;
+  sysv5* | sco5v6* | sysv4.2uw2*)
+    kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null`
+    if test -n "$kargmax"; then
+      lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[	 ]//'`
+    else
+      lt_cv_sys_max_cmd_len=32768
+    fi
+    ;;
+  *)
+    lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
+    if test -n "$lt_cv_sys_max_cmd_len" && \
+       test undefined != "$lt_cv_sys_max_cmd_len"; then
+      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+    else
+      # Make teststring a little bigger before we do anything with it.
+      # a 1K string should be a reasonable start.
+      for i in 1 2 3 4 5 6 7 8; do
+        teststring=$teststring$teststring
+      done
+      SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}}
+      # If test is not a shell built-in, we'll probably end up computing a
+      # maximum length that is only half of the actual maximum length, but
+      # we can't tell.
+      while { test X`env echo "$teststring$teststring" 2>/dev/null` \
+	         = "X$teststring$teststring"; } >/dev/null 2>&1 &&
+	      test 17 != "$i" # 1/2 MB should be enough
+      do
+        i=`expr $i + 1`
+        teststring=$teststring$teststring
+      done
+      # Only check the string length outside the loop.
+      lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1`
+      teststring=
+      # Add a significant safety factor because C++ compilers can tack on
+      # massive amounts of additional arguments before passing them to the
+      # linker.  It appears as though 1/2 is a usable value.
+      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
+    fi
+    ;;
+  esac
+
+fi
+
+if test -n "$lt_cv_sys_max_cmd_len"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5
+printf "%s\n" "$lt_cv_sys_max_cmd_len" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none" >&5
+printf "%s\n" "none" >&6; }
+fi
+max_cmd_len=$lt_cv_sys_max_cmd_len
+
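On current POSIX systems the generic '*)' branch amounts to asking the kernel for ARG_MAX and keeping a safety margin; a sketch (getconf may be missing on the older systems handled by the explicit cases above):

    # Sketch only: take the kernel's argument-size limit and keep 3/4 of
    # it as a safety zone, as the generic branch above does.
    argmax=`getconf ARG_MAX 2>/dev/null`
    if test -n "$argmax" && test undefined != "$argmax"; then
      safe_len=`expr $argmax \/ 4`
      safe_len=`expr $safe_len \* 3`
      echo "usable command line length: $safe_len of $argmax bytes"
    fi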
+
+
+
+
+
+: ${CP="cp -f"}
+: ${MV="mv -f"}
+: ${RM="rm -f"}
+
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+  lt_unset=unset
+else
+  lt_unset=false
+fi
+
+
+
+
+
+# test EBCDIC or ASCII
+case `echo X|tr X '\101'` in
+ A) # ASCII based system
+    # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr
+  lt_SP2NL='tr \040 \012'
+  lt_NL2SP='tr \015\012 \040\040'
+  ;;
+ *) # EBCDIC based system
+  lt_SP2NL='tr \100 \n'
+  lt_NL2SP='tr \r\n \100\100'
+  ;;
+esac
+
+
+
+
+
+
+
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5
+printf %s "checking how to convert $build file names to $host format... " >&6; }
+if test ${lt_cv_to_host_file_cmd+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  case $host in
+  *-*-mingw* )
+    case $build in
+      *-*-mingw* ) # actually msys
+        lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32
+        ;;
+      *-*-cygwin* )
+        lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32
+        ;;
+      * ) # otherwise, assume *nix
+        lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32
+        ;;
+    esac
+    ;;
+  *-*-cygwin* )
+    case $build in
+      *-*-mingw* ) # actually msys
+        lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin
+        ;;
+      *-*-cygwin* )
+        lt_cv_to_host_file_cmd=func_convert_file_noop
+        ;;
+      * ) # otherwise, assume *nix
+        lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin
+        ;;
+    esac
+    ;;
+  * ) # unhandled hosts (and "normal" native builds)
+    lt_cv_to_host_file_cmd=func_convert_file_noop
+    ;;
+esac
+
+fi
+
+to_host_file_cmd=$lt_cv_to_host_file_cmd
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5
+printf "%s\n" "$lt_cv_to_host_file_cmd" >&6; }
+
+
+
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5
+printf %s "checking how to convert $build file names to toolchain format... " >&6; }
+if test ${lt_cv_to_tool_file_cmd+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  # Assume ordinary cross tools, or native build.
+lt_cv_to_tool_file_cmd=func_convert_file_noop
+case $host in
+  *-*-mingw* )
+    case $build in
+      *-*-mingw* ) # actually msys
+        lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32
+        ;;
+    esac
+    ;;
+esac
+
+fi
+
+to_tool_file_cmd=$lt_cv_to_tool_file_cmd
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5
+printf "%s\n" "$lt_cv_to_tool_file_cmd" >&6; }
+
+
+
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5
+printf %s "checking for $LD option to reload object files... " >&6; }
+if test ${lt_cv_ld_reload_flag+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_ld_reload_flag='-r'
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5
+printf "%s\n" "$lt_cv_ld_reload_flag" >&6; }
+reload_flag=$lt_cv_ld_reload_flag
+case $reload_flag in
+"" | " "*) ;;
+*) reload_flag=" $reload_flag" ;;
+esac
+reload_cmds='$LD$reload_flag -o $output$reload_objs'
+case $host_os in
+  cygwin* | mingw* | pw32* | cegcc*)
+    if test yes != "$GCC"; then
+      reload_cmds=false
+    fi
+    ;;
+  darwin*)
+    if test yes = "$GCC"; then
+      reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs'
+    else
+      reload_cmds='$LD$reload_flag -o $output$reload_objs'
+    fi
+    ;;
+esac
+
+
+
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args.
+set dummy ${ac_tool_prefix}objdump; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_OBJDUMP+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$OBJDUMP"; then
+  ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+OBJDUMP=$ac_cv_prog_OBJDUMP
+if test -n "$OBJDUMP"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5
+printf "%s\n" "$OBJDUMP" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_OBJDUMP"; then
+  ac_ct_OBJDUMP=$OBJDUMP
+  # Extract the first word of "objdump", so it can be a program name with args.
+set dummy objdump; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_OBJDUMP+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_OBJDUMP"; then
+  ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_OBJDUMP="objdump"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP
+if test -n "$ac_ct_OBJDUMP"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5
+printf "%s\n" "$ac_ct_OBJDUMP" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+  if test "x$ac_ct_OBJDUMP" = x; then
+    OBJDUMP="false"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    OBJDUMP=$ac_ct_OBJDUMP
+  fi
+else
+  OBJDUMP="$ac_cv_prog_OBJDUMP"
+fi
+
+test -z "$OBJDUMP" && OBJDUMP=objdump
+
+
+
+
+
+
+
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5
+printf %s "checking how to recognize dependent libraries... " >&6; }
+if test ${lt_cv_deplibs_check_method+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_file_magic_cmd='$MAGIC_CMD'
+lt_cv_file_magic_test_file=
+lt_cv_deplibs_check_method='unknown'
+# Need to set the preceding variable on all platforms that support
+# interlibrary dependencies.
+# 'none' -- dependencies not supported.
+# 'unknown' -- same as none, but documents that we really don't know.
+# 'pass_all' -- all dependencies passed with no checks.
+# 'test_compile' -- check by making test program.
+# 'file_magic [[regex]]' -- check by looking for files in library path
+# that respond to the $file_magic_cmd with a given extended regex.
+# If you have 'file' or equivalent on your system and you're not sure
+# whether 'pass_all' will *always* work, you probably want this one.
+
+case $host_os in
+aix[4-9]*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+beos*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+bsdi[45]*)
+  lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)'
+  lt_cv_file_magic_cmd='/usr/bin/file -L'
+  lt_cv_file_magic_test_file=/shlib/libc.so
+  ;;
+
+cygwin*)
+  # func_win32_libid is a shell function defined in ltmain.sh
+  lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+  lt_cv_file_magic_cmd='func_win32_libid'
+  ;;
+
+mingw* | pw32*)
+  # Base MSYS/MinGW do not provide the 'file' command needed by the
+  # func_win32_libid shell function, so use a weaker test based on 'objdump',
+  # unless we find 'file', for example because we are cross-compiling.
+  if ( file / ) >/dev/null 2>&1; then
+    lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+    lt_cv_file_magic_cmd='func_win32_libid'
+  else
+    # Keep this pattern in sync with the one in func_win32_libid.
+    lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)'
+    lt_cv_file_magic_cmd='$OBJDUMP -f'
+  fi
+  ;;
+
+cegcc*)
+  # use the weaker test based on 'objdump'. See mingw*.
+  lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?'
+  lt_cv_file_magic_cmd='$OBJDUMP -f'
+  ;;
+
+darwin* | rhapsody*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+freebsd* | dragonfly*)
+  if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+    case $host_cpu in
+    i*86 )
+      # Not sure whether the presence of OpenBSD here was a mistake.
+      # Let's accept both of them until this is cleared up.
+      lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library'
+      lt_cv_file_magic_cmd=/usr/bin/file
+      lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+      ;;
+    esac
+  else
+    lt_cv_deplibs_check_method=pass_all
+  fi
+  ;;
+
+haiku*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+hpux10.20* | hpux11*)
+  lt_cv_file_magic_cmd=/usr/bin/file
+  case $host_cpu in
+  ia64*)
+    lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64'
+    lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so
+    ;;
+  hppa*64*)
+    lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'
+    lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl
+    ;;
+  *)
+    lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library'
+    lt_cv_file_magic_test_file=/usr/lib/libc.sl
+    ;;
+  esac
+  ;;
+
+interix[3-9]*)
+  # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here
+  lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$'
+  ;;
+
+irix5* | irix6* | nonstopux*)
+  case $LD in
+  *-32|*"-32 ") libmagic=32-bit;;
+  *-n32|*"-n32 ") libmagic=N32;;
+  *-64|*"-64 ") libmagic=64-bit;;
+  *) libmagic=never-match;;
+  esac
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+netbsd*)
+  if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+    lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$'
+  else
+    lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$'
+  fi
+  ;;
+
+newos6*)
+  lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)'
+  lt_cv_file_magic_cmd=/usr/bin/file
+  lt_cv_file_magic_test_file=/usr/lib/libnls.so
+  ;;
+
+*nto* | *qnx*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+openbsd* | bitrig*)
+  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
+    lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$'
+  else
+    lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$'
+  fi
+  ;;
+
+osf3* | osf4* | osf5*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+rdos*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+solaris*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+sysv4 | sysv4.3*)
+  case $host_vendor in
+  motorola)
+    lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]'
+    lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*`
+    ;;
+  ncr)
+    lt_cv_deplibs_check_method=pass_all
+    ;;
+  sequent)
+    lt_cv_file_magic_cmd='/bin/file'
+    lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )'
+    ;;
+  sni)
+    lt_cv_file_magic_cmd='/bin/file'
+    lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib"
+    lt_cv_file_magic_test_file=/lib/libc.so
+    ;;
+  siemens)
+    lt_cv_deplibs_check_method=pass_all
+    ;;
+  pc)
+    lt_cv_deplibs_check_method=pass_all
+    ;;
+  esac
+  ;;
+
+tpf*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+os2*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+esac
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5
+printf "%s\n" "$lt_cv_deplibs_check_method" >&6; }
+
+file_magic_glob=
+want_nocaseglob=no
+if test "$build" = "$host"; then
+  case $host_os in
+  mingw* | pw32*)
+    if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then
+      want_nocaseglob=yes
+    else
+      file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"`
+    fi
+    ;;
+  esac
+fi
+
+file_magic_cmd=$lt_cv_file_magic_cmd
+deplibs_check_method=$lt_cv_deplibs_check_method
+test -z "$deplibs_check_method" && deplibs_check_method=unknown
+
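When a 'file_magic' method is selected, accepting a dependent library reduces to running the stored command on the candidate and matching its output against the stored extended regex (the glibc/ELF case above simply uses pass_all). A sketch with illustrative values; the path and pattern below are hypothetical, not taken from the table above:

    # Sketch only: apply a file_magic-style check to one candidate library.
    candidate=/usr/lib/libc.so          # hypothetical path
    magic_re='ELF .*shared object'      # hypothetical pattern
    if /usr/bin/file -L "$candidate" 2>/dev/null | grep -E "$magic_re" >/dev/null
    then
      echo "$candidate accepted by file_magic check"
    else
      echo "$candidate rejected by file_magic check"
    fi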
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args.
+set dummy ${ac_tool_prefix}dlltool; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_DLLTOOL+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$DLLTOOL"; then
+  ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+DLLTOOL=$ac_cv_prog_DLLTOOL
+if test -n "$DLLTOOL"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5
+printf "%s\n" "$DLLTOOL" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_DLLTOOL"; then
+  ac_ct_DLLTOOL=$DLLTOOL
+  # Extract the first word of "dlltool", so it can be a program name with args.
+set dummy dlltool; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_DLLTOOL+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_DLLTOOL"; then
+  ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_DLLTOOL="dlltool"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL
+if test -n "$ac_ct_DLLTOOL"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5
+printf "%s\n" "$ac_ct_DLLTOOL" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+  if test "x$ac_ct_DLLTOOL" = x; then
+    DLLTOOL="false"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    DLLTOOL=$ac_ct_DLLTOOL
+  fi
+else
+  DLLTOOL="$ac_cv_prog_DLLTOOL"
+fi
+
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+
+
+
+
+
+
+
+
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5
+printf %s "checking how to associate runtime and link libraries... " >&6; }
+if test ${lt_cv_sharedlib_from_linklib_cmd+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_sharedlib_from_linklib_cmd='unknown'
+
+case $host_os in
+cygwin* | mingw* | pw32* | cegcc*)
+  # two different shell functions defined in ltmain.sh;
+  # decide which one to use based on capabilities of $DLLTOOL
+  case `$DLLTOOL --help 2>&1` in
+  *--identify-strict*)
+    lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib
+    ;;
+  *)
+    lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback
+    ;;
+  esac
+  ;;
+*)
+  # fallback: assume linklib IS sharedlib
+  lt_cv_sharedlib_from_linklib_cmd=$ECHO
+  ;;
+esac
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5
+printf "%s\n" "$lt_cv_sharedlib_from_linklib_cmd" >&6; }
+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd
+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO
+
+
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+  for ac_prog in ar
+  do
+    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_AR+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$AR"; then
+  ac_cv_prog_AR="$AR" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_AR="$ac_tool_prefix$ac_prog"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+AR=$ac_cv_prog_AR
+if test -n "$AR"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AR" >&5
+printf "%s\n" "$AR" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+    test -n "$AR" && break
+  done
+fi
+if test -z "$AR"; then
+  ac_ct_AR=$AR
+  for ac_prog in ar
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_AR+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_AR"; then
+  ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_AR="$ac_prog"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_AR=$ac_cv_prog_ac_ct_AR
+if test -n "$ac_ct_AR"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5
+printf "%s\n" "$ac_ct_AR" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+  test -n "$ac_ct_AR" && break
+done
+
+  if test "x$ac_ct_AR" = x; then
+    AR="false"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    AR=$ac_ct_AR
+  fi
+fi
+
+: ${AR=ar}
+: ${AR_FLAGS=cru}
+
+
+
+
+
+
+
+
+
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5
+printf %s "checking for archiver @FILE support... " >&6; }
+if test ${lt_cv_ar_at_file+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_ar_at_file=no
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"
+then :
+  echo conftest.$ac_objext > conftest.lst
+      lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5'
+      { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5
+  (eval $lt_ar_try) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+      if test 0 -eq "$ac_status"; then
+	# Ensure the archiver fails upon bogus file names.
+	rm -f conftest.$ac_objext libconftest.a
+	{ { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5
+  (eval $lt_ar_try) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+	if test 0 -ne "$ac_status"; then
+          lt_cv_ar_at_file=@
+        fi
+      fi
+      rm -f conftest.* libconftest.a
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5
+printf "%s\n" "$lt_cv_ar_at_file" >&6; }
+
+if test no = "$lt_cv_ar_at_file"; then
+  archiver_list_spec=
+else
+  archiver_list_spec=$lt_cv_ar_at_file
+fi
+
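The probe above exercises '@FILE' twice: once with a valid object list, and once after deleting the object to make sure the archiver really fails on bogus names rather than silently ignoring the list. A stand-alone sketch of the first half, assuming GNU ar and gcc are on PATH (names are illustrative):

    # Sketch only: archive an object through an @FILE response list.
    echo 'int f(void) { return 1; }' > member.c
    gcc -c member.c
    echo member.o > members.lst
    if ar cru libattest.a @members.lst 2>/dev/null; then
      echo "archiver supports @FILE lists"
    fi
    rm -f member.c member.o members.lst libattest.a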
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
+set dummy ${ac_tool_prefix}strip; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_STRIP+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$STRIP"; then
+  ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_STRIP="${ac_tool_prefix}strip"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+STRIP=$ac_cv_prog_STRIP
+if test -n "$STRIP"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5
+printf "%s\n" "$STRIP" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_STRIP"; then
+  ac_ct_STRIP=$STRIP
+  # Extract the first word of "strip", so it can be a program name with args.
+set dummy strip; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_STRIP+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_STRIP"; then
+  ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_STRIP="strip"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
+if test -n "$ac_ct_STRIP"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5
+printf "%s\n" "$ac_ct_STRIP" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+  if test "x$ac_ct_STRIP" = x; then
+    STRIP=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    STRIP=$ac_ct_STRIP
+  fi
+else
+  STRIP="$ac_cv_prog_STRIP"
+fi
+
+test -z "$STRIP" && STRIP=:
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args.
+set dummy ${ac_tool_prefix}ranlib; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_RANLIB+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$RANLIB"; then
+  ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+RANLIB=$ac_cv_prog_RANLIB
+if test -n "$RANLIB"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5
+printf "%s\n" "$RANLIB" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_RANLIB"; then
+  ac_ct_RANLIB=$RANLIB
+  # Extract the first word of "ranlib", so it can be a program name with args.
+set dummy ranlib; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_RANLIB+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_RANLIB"; then
+  ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_RANLIB="ranlib"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB
+if test -n "$ac_ct_RANLIB"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5
+printf "%s\n" "$ac_ct_RANLIB" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+  if test "x$ac_ct_RANLIB" = x; then
+    RANLIB=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    RANLIB=$ac_ct_RANLIB
+  fi
+else
+  RANLIB="$ac_cv_prog_RANLIB"
+fi
+
+test -z "$RANLIB" && RANLIB=:
+
+
+
+
+
+
+# Determine commands to create old-style static archives.
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs'
+old_postinstall_cmds='chmod 644 $oldlib'
+old_postuninstall_cmds=
+
+if test -n "$RANLIB"; then
+  case $host_os in
+  bitrig* | openbsd*)
+    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib"
+    ;;
+  *)
+    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib"
+    ;;
+  esac
+  old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib"
+fi
+
+case $host_os in
+  darwin*)
+    lock_old_archive_extraction=yes ;;
+  *)
+    lock_old_archive_extraction=no ;;
+esac
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+
+
+# Check for a command to grab the raw symbol name, followed by the C symbol, from nm output.
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5
+printf %s "checking command to parse $NM output from $compiler object... " >&6; }
+if test ${lt_cv_sys_global_symbol_pipe+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+
+# These are sane defaults that work on at least a few old systems.
+# [They come from Ultrix.  What could be older than Ultrix?!! ;)]
+
+# Character class describing NM global symbol codes.
+symcode='[BCDEGRST]'
+
+# Regexp to match symbols that can be accessed directly from C.
+sympat='\([_A-Za-z][_A-Za-z0-9]*\)'
+
+# Define system-specific variables.
+case $host_os in
+aix*)
+  symcode='[BCDT]'
+  ;;
+cygwin* | mingw* | pw32* | cegcc*)
+  symcode='[ABCDGISTW]'
+  ;;
+hpux*)
+  if test ia64 = "$host_cpu"; then
+    symcode='[ABCDEGRST]'
+  fi
+  ;;
+irix* | nonstopux*)
+  symcode='[BCDEGRST]'
+  ;;
+osf*)
+  symcode='[BCDEGQRST]'
+  ;;
+solaris*)
+  symcode='[BDRT]'
+  ;;
+sco3.2v5*)
+  symcode='[DT]'
+  ;;
+sysv4.2uw2*)
+  symcode='[DT]'
+  ;;
+sysv5* | sco5v6* | unixware* | OpenUNIX*)
+  symcode='[ABDT]'
+  ;;
+sysv4)
+  symcode='[DFNSTU]'
+  ;;
+esac
+
+# If we're using GNU nm, then use its standard symbol codes.
+case `$NM -V 2>&1` in
+*GNU* | *'with BFD'*)
+  symcode='[ABCDGIRSTW]' ;;
+esac
+
+if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+  # Gets list of data symbols to import.
+  lt_cv_sys_global_symbol_to_import="sed -n -e 's/^I .* \(.*\)$/\1/p'"
+  # Adjust the below global symbol transforms to fixup imported variables.
+  lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'"
+  lt_c_name_hook=" -e 's/^I .* \(.*\)$/  {\"\1\", (void *) 0},/p'"
+  lt_c_name_lib_hook="\
+  -e 's/^I .* \(lib.*\)$/  {\"\1\", (void *) 0},/p'\
+  -e 's/^I .* \(.*\)$/  {\"lib\1\", (void *) 0},/p'"
+else
+  # Disable hooks by default.
+  lt_cv_sys_global_symbol_to_import=
+  lt_cdecl_hook=
+  lt_c_name_hook=
+  lt_c_name_lib_hook=
+fi
+
+# Transform an extracted symbol line into a proper C declaration.
+# Some systems (esp. on ia64) link data and code symbols differently,
+# so use this general approach.
+lt_cv_sys_global_symbol_to_cdecl="sed -n"\
+$lt_cdecl_hook\
+" -e 's/^T .* \(.*\)$/extern int \1();/p'"\
+" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'"
+
+# Transform an extracted symbol line into symbol name and symbol address
+lt_cv_sys_global_symbol_to_c_name_address="sed -n"\
+$lt_c_name_hook\
+" -e 's/^: \(.*\) .*$/  {\"\1\", (void *) 0},/p'"\
+" -e 's/^$symcode$symcode* .* \(.*\)$/  {\"\1\", (void *) \&\1},/p'"
+
+# Transform an extracted symbol line into symbol name with lib prefix and
+# symbol address.
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n"\
+$lt_c_name_lib_hook\
+" -e 's/^: \(.*\) .*$/  {\"\1\", (void *) 0},/p'"\
+" -e 's/^$symcode$symcode* .* \(lib.*\)$/  {\"\1\", (void *) \&\1},/p'"\
+" -e 's/^$symcode$symcode* .* \(.*\)$/  {\"lib\1\", (void *) \&\1},/p'"
+
+# Handle CRLF in mingw tool chain
+opt_cr=
+case $build_os in
+mingw*)
+  opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp
+  ;;
+esac
+
+# Try without a prefix underscore, then with it.
+for ac_symprfx in "" "_"; do
+
+  # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol.
+  symxfrm="\\1 $ac_symprfx\\2 \\2"
+
+  # Write the raw and C identifiers.
+  if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+    # Fake it for dumpbin and say T for any non-static function,
+    # D for any global variable and I for any imported variable.
+    # Also find C++ and __fastcall symbols from MSVC++,
+    # which start with @ or ?.
+    lt_cv_sys_global_symbol_pipe="$AWK '"\
+"     {last_section=section; section=\$ 3};"\
+"     /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\
+"     /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\
+"     /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\
+"     /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\
+"     /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\
+"     \$ 0!~/External *\|/{next};"\
+"     / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\
+"     {if(hide[section]) next};"\
+"     {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\
+"     {split(\$ 0,a,/\||\r/); split(a[2],s)};"\
+"     s[1]~/^[@?]/{print f,s[1],s[1]; next};"\
+"     s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\
+"     ' prfx=^$ac_symprfx"
+  else
+    lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[	 ]\($symcode$symcode*\)[	 ][	 ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
+  fi
+  lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'"
+
+  # Check to see that the pipe works correctly.
+  pipe_works=no
+
+  rm -f conftest*
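+  # Small C test program providing one data symbol (nm_test_var) and one
+  # function symbol (nm_test_func) for the candidate nm pipeline to extract.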
+  cat > conftest.$ac_ext <<_LT_EOF
+#ifdef __cplusplus
+extern "C" {
+#endif
+char nm_test_var;
+void nm_test_func(void);
+void nm_test_func(void){}
+#ifdef __cplusplus
+}
+#endif
+int main(){nm_test_var='a';nm_test_func();return(0);}
+_LT_EOF
+
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then
+    # Now try to grab the symbols.
+    nlist=conftest.nm
+    if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5
+  (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && test -s "$nlist"; then
+      # Try sorting and uniquifying the output.
+      if sort "$nlist" | uniq > "$nlist"T; then
+	mv -f "$nlist"T "$nlist"
+      else
+	rm -f "$nlist"T
+      fi
+
+      # Make sure that we snagged all the symbols we need.
+      if $GREP ' nm_test_var$' "$nlist" >/dev/null; then
+	if $GREP ' nm_test_func$' "$nlist" >/dev/null; then
+	  cat <<_LT_EOF > conftest.$ac_ext
+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests.  */
+#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE
+/* DATA imports from DLLs on WIN32 can't be const, because runtime
+   relocations are performed -- see ld's documentation on pseudo-relocs.  */
+# define LT_DLSYM_CONST
+#elif defined __osf__
+/* This system does not cope well with relocations in const data.  */
+# define LT_DLSYM_CONST
+#else
+# define LT_DLSYM_CONST const
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+_LT_EOF
+	  # Now generate the symbol file.
+	  eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext'
+
+	  cat <<_LT_EOF >> conftest.$ac_ext
+
+/* The mapping between symbol names and symbols.  */
+LT_DLSYM_CONST struct {
+  const char *name;
+  void       *address;
+}
+lt__PROGRAM__LTX_preloaded_symbols[] =
+{
+  { "@PROGRAM@", (void *) 0 },
+_LT_EOF
+	  $SED "s/^$symcode$symcode* .* \(.*\)$/  {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext
+	  cat <<\_LT_EOF >> conftest.$ac_ext
+  {0, (void *) 0}
+};
+
+/* This works around a problem in the FreeBSD linker.  */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+  return lt__PROGRAM__LTX_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+_LT_EOF
+	  # Now try linking the two files.
+	  mv conftest.$ac_objext conftstm.$ac_objext
+	  lt_globsym_save_LIBS=$LIBS
+	  lt_globsym_save_CFLAGS=$CFLAGS
+	  LIBS=conftstm.$ac_objext
+	  CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag"
+	  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
+  (eval $ac_link) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && test -s conftest$ac_exeext; then
+	    pipe_works=yes
+	  fi
+	  LIBS=$lt_globsym_save_LIBS
+	  CFLAGS=$lt_globsym_save_CFLAGS
+	else
+	  echo "cannot find nm_test_func in $nlist" >&5
+	fi
+      else
+	echo "cannot find nm_test_var in $nlist" >&5
+      fi
+    else
+      echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5
+    fi
+  else
+    echo "$progname: failed program was:" >&5
+    cat conftest.$ac_ext >&5
+  fi
+  rm -rf conftest* conftst*
+
+  # Do not use the global_symbol_pipe unless it works.
+  if test yes = "$pipe_works"; then
+    break
+  else
+    lt_cv_sys_global_symbol_pipe=
+  fi
+done
+
+fi
+
+if test -z "$lt_cv_sys_global_symbol_pipe"; then
+  lt_cv_sys_global_symbol_to_cdecl=
+fi
+if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: failed" >&5
+printf "%s\n" "failed" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ok" >&5
+printf "%s\n" "ok" >&6; }
+fi
+
+# Response file support.
+if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+  nm_file_list_spec='@'
+elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then
+  nm_file_list_spec='@'
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5
+printf %s "checking for sysroot... " >&6; }
+
+# Check whether --with-sysroot was given.
+if test ${with_sysroot+y}
+then :
+  withval=$with_sysroot;
+else $as_nop
+  with_sysroot=no
+fi
+
+
+lt_sysroot=
+case $with_sysroot in #(
+ yes)
+   if test yes = "$GCC"; then
+     lt_sysroot=`$CC --print-sysroot 2>/dev/null`
+   fi
+   ;; #(
+ /*)
+   lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"`
+   ;; #(
+ no|'')
+   ;; #(
+ *)
+   { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $with_sysroot" >&5
+printf "%s\n" "$with_sysroot" >&6; }
+   as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5
+   ;;
+esac
+
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5
+printf "%s\n" "${lt_sysroot:-no}" >&6; }
+
+
+
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a working dd" >&5
+printf %s "checking for a working dd... " >&6; }
+if test ${ac_cv_path_lt_DD+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  printf 0123456789abcdef0123456789abcdef >conftest.i
+cat conftest.i conftest.i >conftest2.i
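+# A usable dd must copy back exactly the first 32 bytes of the doubled file
+# (verified with cmp below) when invoked as 'dd bs=32 count=1'.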
+: ${lt_DD:=$DD}
+if test -z "$lt_DD"; then
+  ac_path_lt_DD_found=false
+  # Loop through the user's path and test for each of PROGNAME-LIST
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_prog in dd
+   do
+    for ac_exec_ext in '' $ac_executable_extensions; do
+      ac_path_lt_DD="$as_dir$ac_prog$ac_exec_ext"
+      as_fn_executable_p "$ac_path_lt_DD" || continue
+if "$ac_path_lt_DD" bs=32 count=1 <conftest2.i >conftest.out 2>/dev/null; then
+  cmp -s conftest.i conftest.out \
+  && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=:
+fi
+      $ac_path_lt_DD_found && break 3
+    done
+  done
+  done
+IFS=$as_save_IFS
+  if test -z "$ac_cv_path_lt_DD"; then
+    :
+  fi
+else
+  ac_cv_path_lt_DD=$lt_DD
+fi
+
+rm -f conftest.i conftest2.i conftest.out
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_lt_DD" >&5
+printf "%s\n" "$ac_cv_path_lt_DD" >&6; }
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to truncate binary pipes" >&5
+printf %s "checking how to truncate binary pipes... " >&6; }
+if test ${lt_cv_truncate_bin+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  printf 0123456789abcdef0123456789abcdef >conftest.i
+cat conftest.i conftest.i >conftest2.i
+lt_cv_truncate_bin=
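+# Prefer the dd found above for truncating binary pipes; if it cannot be
+# verified, fall back to '$SED -e 4q' below.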
+if "$ac_cv_path_lt_DD" bs=32 count=1 <conftest2.i >conftest.out 2>/dev/null; then
+  cmp -s conftest.i conftest.out \
+  && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1"
+fi
+rm -f conftest.i conftest2.i conftest.out
+test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q"
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_truncate_bin" >&5
+printf "%s\n" "$lt_cv_truncate_bin" >&6; }
+
+
+
+
+
+
+
+# Calculate cc_basename.  Skip known compiler wrappers and cross-prefix.
+func_cc_basename ()
+{
+    for cc_temp in $*""; do
+      case $cc_temp in
+        compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+        distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+        \-*) ;;
+        *) break;;
+      esac
+    done
+    func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+}
+
+# Check whether --enable-libtool-lock was given.
+if test ${enable_libtool_lock+y}
+then :
+  enableval=$enable_libtool_lock;
+fi
+
+test no = "$enable_libtool_lock" || enable_libtool_lock=yes
+
+# Some flags need to be propagated to the compiler or linker for good
+# libtool support.
+case $host in
+ia64-*-hpux*)
+  # Find out what ABI is being produced by ac_compile, and set mode
+  # options accordingly.
+  echo 'int i;' > conftest.$ac_ext
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then
+    case `/usr/bin/file conftest.$ac_objext` in
+      *ELF-32*)
+	HPUX_IA64_MODE=32
+	;;
+      *ELF-64*)
+	HPUX_IA64_MODE=64
+	;;
+    esac
+  fi
+  rm -rf conftest*
+  ;;
+*-*-irix6*)
+  # Find out what ABI is being produced by ac_compile, and set linker
+  # options accordingly.
+  echo '#line '$LINENO' "configure"' > conftest.$ac_ext
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then
+    if test yes = "$lt_cv_prog_gnu_ld"; then
+      case `/usr/bin/file conftest.$ac_objext` in
+	*32-bit*)
+	  LD="${LD-ld} -melf32bsmip"
+	  ;;
+	*N32*)
+	  LD="${LD-ld} -melf32bmipn32"
+	  ;;
+	*64-bit*)
+	  LD="${LD-ld} -melf64bmip"
+	;;
+      esac
+    else
+      case `/usr/bin/file conftest.$ac_objext` in
+	*32-bit*)
+	  LD="${LD-ld} -32"
+	  ;;
+	*N32*)
+	  LD="${LD-ld} -n32"
+	  ;;
+	*64-bit*)
+	  LD="${LD-ld} -64"
+	  ;;
+      esac
+    fi
+  fi
+  rm -rf conftest*
+  ;;
+
+mips64*-*linux*)
+  # Find out what ABI is being produced by ac_compile, and set linker
+  # options accordingly.
+  echo '#line '$LINENO' "configure"' > conftest.$ac_ext
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then
+    emul=elf
+    case `/usr/bin/file conftest.$ac_objext` in
+      *32-bit*)
+	emul="${emul}32"
+	;;
+      *64-bit*)
+	emul="${emul}64"
+	;;
+    esac
+    case `/usr/bin/file conftest.$ac_objext` in
+      *MSB*)
+	emul="${emul}btsmip"
+	;;
+      *LSB*)
+	emul="${emul}ltsmip"
+	;;
+    esac
+    case `/usr/bin/file conftest.$ac_objext` in
+      *N32*)
+	emul="${emul}n32"
+	;;
+    esac
+    LD="${LD-ld} -m $emul"
+  fi
+  rm -rf conftest*
+  ;;
+
+x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \
+s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
+  # Find out what ABI is being produced by ac_compile, and set linker
+  # options accordingly.  Note that the listed cases only cover the
+  # situations where additional linker options are needed (such as when
+  # doing 32-bit compilation for a host where ld defaults to 64-bit, or
+  # vice versa); the common cases where no linker options are needed do
+  # not appear in the list.
+  echo 'int i;' > conftest.$ac_ext
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then
+    case `/usr/bin/file conftest.o` in
+      *32-bit*)
+	case $host in
+	  x86_64-*kfreebsd*-gnu)
+	    LD="${LD-ld} -m elf_i386_fbsd"
+	    ;;
+	  x86_64-*linux*)
+	    case `/usr/bin/file conftest.o` in
+	      *x86-64*)
+		LD="${LD-ld} -m elf32_x86_64"
+		;;
+	      *)
+		LD="${LD-ld} -m elf_i386"
+		;;
+	    esac
+	    ;;
+	  powerpc64le-*linux*)
+	    LD="${LD-ld} -m elf32lppclinux"
+	    ;;
+	  powerpc64-*linux*)
+	    LD="${LD-ld} -m elf32ppclinux"
+	    ;;
+	  s390x-*linux*)
+	    LD="${LD-ld} -m elf_s390"
+	    ;;
+	  sparc64-*linux*)
+	    LD="${LD-ld} -m elf32_sparc"
+	    ;;
+	esac
+	;;
+      *64-bit*)
+	case $host in
+	  x86_64-*kfreebsd*-gnu)
+	    LD="${LD-ld} -m elf_x86_64_fbsd"
+	    ;;
+	  x86_64-*linux*)
+	    LD="${LD-ld} -m elf_x86_64"
+	    ;;
+	  powerpcle-*linux*)
+	    LD="${LD-ld} -m elf64lppc"
+	    ;;
+	  powerpc-*linux*)
+	    LD="${LD-ld} -m elf64ppc"
+	    ;;
+	  s390*-*linux*|s390*-*tpf*)
+	    LD="${LD-ld} -m elf64_s390"
+	    ;;
+	  sparc*-*linux*)
+	    LD="${LD-ld} -m elf64_sparc"
+	    ;;
+	esac
+	;;
+    esac
+  fi
+  rm -rf conftest*
+  ;;
+
+*-*-sco3.2v5*)
+  # On SCO OpenServer 5, we need -belf to get full-featured binaries.
+  SAVE_CFLAGS=$CFLAGS
+  CFLAGS="$CFLAGS -belf"
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5
+printf %s "checking whether the C compiler needs -belf... " >&6; }
+if test ${lt_cv_cc_needs_belf+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+     cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"
+then :
+  lt_cv_cc_needs_belf=yes
+else $as_nop
+  lt_cv_cc_needs_belf=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext conftest.$ac_ext
+     ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5
+printf "%s\n" "$lt_cv_cc_needs_belf" >&6; }
+  if test yes != "$lt_cv_cc_needs_belf"; then
+    # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
+    CFLAGS=$SAVE_CFLAGS
+  fi
+  ;;
+*-*solaris*)
+  # Find out what ABI is being produced by ac_compile, and set linker
+  # options accordingly.
+  echo 'int i;' > conftest.$ac_ext
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then
+    case `/usr/bin/file conftest.o` in
+    *64-bit*)
+      case $lt_cv_prog_gnu_ld in
+      yes*)
+        case $host in
+        i?86-*-solaris*|x86_64-*-solaris*)
+          LD="${LD-ld} -m elf_x86_64"
+          ;;
+        sparc*-*-solaris*)
+          LD="${LD-ld} -m elf64_sparc"
+          ;;
+        esac
+        # GNU ld 2.21 introduced _sol2 emulations.  Use them if available.
+        if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then
+          LD=${LD-ld}_sol2
+        fi
+        ;;
+      *)
+	if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then
+	  LD="${LD-ld} -64"
+	fi
+	;;
+      esac
+      ;;
+    esac
+  fi
+  rm -rf conftest*
+  ;;
+esac
+
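+# Propagate the libtool-lock setting; locking matters when the compiler
+# cannot use -c and -o together.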
+need_locks=$enable_libtool_lock
+
+if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args.
+set dummy ${ac_tool_prefix}mt; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_MANIFEST_TOOL+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$MANIFEST_TOOL"; then
+  ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL
+if test -n "$MANIFEST_TOOL"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5
+printf "%s\n" "$MANIFEST_TOOL" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_MANIFEST_TOOL"; then
+  ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL
+  # Extract the first word of "mt", so it can be a program name with args.
+set dummy mt; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_MANIFEST_TOOL+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_MANIFEST_TOOL"; then
+  ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_MANIFEST_TOOL="mt"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL
+if test -n "$ac_ct_MANIFEST_TOOL"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5
+printf "%s\n" "$ac_ct_MANIFEST_TOOL" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+  if test "x$ac_ct_MANIFEST_TOOL" = x; then
+    MANIFEST_TOOL=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL
+  fi
+else
+  MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL"
+fi
+
+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5
+printf %s "checking if $MANIFEST_TOOL is a manifest tool... " >&6; }
+if test ${lt_cv_path_mainfest_tool+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_path_mainfest_tool=no
+  echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5
+  $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out
+  cat conftest.err >&5
+  if $GREP 'Manifest Tool' conftest.out > /dev/null; then
+    lt_cv_path_mainfest_tool=yes
+  fi
+  rm -f conftest*
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5
+printf "%s\n" "$lt_cv_path_mainfest_tool" >&6; }
+if test yes != "$lt_cv_path_mainfest_tool"; then
+  MANIFEST_TOOL=:
+fi
+
+
+
+
+
+
+  case $host_os in
+    rhapsody* | darwin*)
+    if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args.
+set dummy ${ac_tool_prefix}dsymutil; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_DSYMUTIL+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$DSYMUTIL"; then
+  ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+DSYMUTIL=$ac_cv_prog_DSYMUTIL
+if test -n "$DSYMUTIL"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5
+printf "%s\n" "$DSYMUTIL" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_DSYMUTIL"; then
+  ac_ct_DSYMUTIL=$DSYMUTIL
+  # Extract the first word of "dsymutil", so it can be a program name with args.
+set dummy dsymutil; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_DSYMUTIL+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_DSYMUTIL"; then
+  ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_DSYMUTIL="dsymutil"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL
+if test -n "$ac_ct_DSYMUTIL"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5
+printf "%s\n" "$ac_ct_DSYMUTIL" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+  if test "x$ac_ct_DSYMUTIL" = x; then
+    DSYMUTIL=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    DSYMUTIL=$ac_ct_DSYMUTIL
+  fi
+else
+  DSYMUTIL="$ac_cv_prog_DSYMUTIL"
+fi
+
+    if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args.
+set dummy ${ac_tool_prefix}nmedit; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_NMEDIT+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$NMEDIT"; then
+  ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+NMEDIT=$ac_cv_prog_NMEDIT
+if test -n "$NMEDIT"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5
+printf "%s\n" "$NMEDIT" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_NMEDIT"; then
+  ac_ct_NMEDIT=$NMEDIT
+  # Extract the first word of "nmedit", so it can be a program name with args.
+set dummy nmedit; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_NMEDIT+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_NMEDIT"; then
+  ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_NMEDIT="nmedit"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT
+if test -n "$ac_ct_NMEDIT"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5
+printf "%s\n" "$ac_ct_NMEDIT" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+  if test "x$ac_ct_NMEDIT" = x; then
+    NMEDIT=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    NMEDIT=$ac_ct_NMEDIT
+  fi
+else
+  NMEDIT="$ac_cv_prog_NMEDIT"
+fi
+
+    if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args.
+set dummy ${ac_tool_prefix}lipo; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_LIPO+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$LIPO"; then
+  ac_cv_prog_LIPO="$LIPO" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_LIPO="${ac_tool_prefix}lipo"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+LIPO=$ac_cv_prog_LIPO
+if test -n "$LIPO"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5
+printf "%s\n" "$LIPO" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_LIPO"; then
+  ac_ct_LIPO=$LIPO
+  # Extract the first word of "lipo", so it can be a program name with args.
+set dummy lipo; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_LIPO+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_LIPO"; then
+  ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_LIPO="lipo"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO
+if test -n "$ac_ct_LIPO"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5
+printf "%s\n" "$ac_ct_LIPO" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+  if test "x$ac_ct_LIPO" = x; then
+    LIPO=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    LIPO=$ac_ct_LIPO
+  fi
+else
+  LIPO="$ac_cv_prog_LIPO"
+fi
+
+    if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args.
+set dummy ${ac_tool_prefix}otool; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_OTOOL+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$OTOOL"; then
+  ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_OTOOL="${ac_tool_prefix}otool"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+OTOOL=$ac_cv_prog_OTOOL
+if test -n "$OTOOL"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5
+printf "%s\n" "$OTOOL" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_OTOOL"; then
+  ac_ct_OTOOL=$OTOOL
+  # Extract the first word of "otool", so it can be a program name with args.
+set dummy otool; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_OTOOL+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_OTOOL"; then
+  ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_OTOOL="otool"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL
+if test -n "$ac_ct_OTOOL"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5
+printf "%s\n" "$ac_ct_OTOOL" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+  if test "x$ac_ct_OTOOL" = x; then
+    OTOOL=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    OTOOL=$ac_ct_OTOOL
+  fi
+else
+  OTOOL="$ac_cv_prog_OTOOL"
+fi
+
+    if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args.
+set dummy ${ac_tool_prefix}otool64; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_OTOOL64+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$OTOOL64"; then
+  ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+OTOOL64=$ac_cv_prog_OTOOL64
+if test -n "$OTOOL64"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5
+printf "%s\n" "$OTOOL64" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_OTOOL64"; then
+  ac_ct_OTOOL64=$OTOOL64
+  # Extract the first word of "otool64", so it can be a program name with args.
+set dummy otool64; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_prog_ac_ct_OTOOL64+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -n "$ac_ct_OTOOL64"; then
+  ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_OTOOL64="otool64"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64
+if test -n "$ac_ct_OTOOL64"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5
+printf "%s\n" "$ac_ct_OTOOL64" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+  if test "x$ac_ct_OTOOL64" = x; then
+    OTOOL64=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    OTOOL64=$ac_ct_OTOOL64
+  fi
+else
+  OTOOL64="$ac_cv_prog_OTOOL64"
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5
+printf %s "checking for -single_module linker flag... " >&6; }
+if test ${lt_cv_apple_cc_single_mod+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_apple_cc_single_mod=no
+      if test -z "$LT_MULTI_MODULE"; then
+	# By default we will add the -single_module flag. You can override
+	# by either setting the environment variable LT_MULTI_MODULE
+	# non-empty at configure time, or by adding -multi_module to the
+	# link flags.
+	rm -rf libconftest.dylib*
+	echo "int foo(void){return 1;}" > conftest.c
+	echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+-dynamiclib -Wl,-single_module conftest.c" >&5
+	$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+	  -dynamiclib -Wl,-single_module conftest.c 2>conftest.err
+        _lt_result=$?
+	# If there is a non-empty error log, and "single_module"
+	# appears in it, assume the flag caused a linker warning
+        if test -s conftest.err && $GREP single_module conftest.err; then
+	  cat conftest.err >&5
+	# Otherwise, if the output was created with a 0 exit code from
+	# the compiler, it worked.
+	elif test -f libconftest.dylib && test 0 = "$_lt_result"; then
+	  lt_cv_apple_cc_single_mod=yes
+	else
+	  cat conftest.err >&5
+	fi
+	rm -rf libconftest.dylib*
+	rm -f conftest.*
+      fi
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5
+printf "%s\n" "$lt_cv_apple_cc_single_mod" >&6; }
+
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5
+printf %s "checking for -exported_symbols_list linker flag... " >&6; }
+if test ${lt_cv_ld_exported_symbols_list+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_ld_exported_symbols_list=no
+      save_LDFLAGS=$LDFLAGS
+      echo "_main" > conftest.sym
+      LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym"
+      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"
+then :
+  lt_cv_ld_exported_symbols_list=yes
+else $as_nop
+  lt_cv_ld_exported_symbols_list=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext conftest.$ac_ext
+	LDFLAGS=$save_LDFLAGS
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5
+printf "%s\n" "$lt_cv_ld_exported_symbols_list" >&6; }
+
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5
+printf %s "checking for -force_load linker flag... " >&6; }
+if test ${lt_cv_ld_force_load+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_ld_force_load=no
+      cat > conftest.c << _LT_EOF
+int forced_loaded() { return 2;}
+_LT_EOF
+      echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5
+      $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5
+      echo "$AR cru libconftest.a conftest.o" >&5
+      $AR cru libconftest.a conftest.o 2>&5
+      echo "$RANLIB libconftest.a" >&5
+      $RANLIB libconftest.a 2>&5
+      cat > conftest.c << _LT_EOF
+int main() { return 0;}
+_LT_EOF
+      echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5
+      $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err
+      _lt_result=$?
+      if test -s conftest.err && $GREP force_load conftest.err; then
+	cat conftest.err >&5
+      elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then
+	lt_cv_ld_force_load=yes
+      else
+	cat conftest.err >&5
+      fi
+        rm -f conftest.err libconftest.a conftest conftest.c
+        rm -rf conftest.dSYM
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5
+printf "%s\n" "$lt_cv_ld_force_load" >&6; }
+    case $host_os in
+    rhapsody* | darwin1.[012])
+      _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;;
+    darwin1.*)
+      _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;;
+    darwin*)
+      case $MACOSX_DEPLOYMENT_TARGET,$host in
+	10.[012],*|,*powerpc*-darwin[5-8]*)
+	  _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;;
+	*)
+	  _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;;
+      esac
+    ;;
+  esac
+    if test yes = "$lt_cv_apple_cc_single_mod"; then
+      _lt_dar_single_mod='$single_module'
+    fi
+    if test yes = "$lt_cv_ld_exported_symbols_list"; then
+      _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym'
+    else
+      _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib'
+    fi
+    if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then
+      _lt_dsymutil='~$DSYMUTIL $lib || :'
+    else
+      _lt_dsymutil=
+    fi
+    ;;
+  esac
+
+# func_munge_path_list VARIABLE PATH
+# -----------------------------------
+# VARIABLE is the name of a variable containing a _space_-separated list of
+# directories to be munged by the contents of PATH, which is a string
+# in one of the following formats:
+# "DIR[:DIR]:"
+#       string "DIR[ DIR]" will be prepended to VARIABLE
+# ":DIR[:DIR]"
+#       string "DIR[ DIR]" will be appended to VARIABLE
+# "DIRP[:DIRP]::[DIRA:]DIRA"
+#       string "DIRP[ DIRP]" will be prepended to VARIABLE and string
+#       "DIRA[ DIRA]" will be appended to VARIABLE
+# "DIR[:DIR]"
+#       VARIABLE will be replaced by "DIR[ DIR]"
+func_munge_path_list ()
+{
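+    # Dispatch on the colon pattern of PATH as documented above: a trailing ':'
+    # prepends, a leading ':' appends, '::' does both, and no colon replaces.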
+    case x$2 in
+    x)
+        ;;
+    *:)
+        eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\"
+        ;;
+    x:*)
+        eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\"
+        ;;
+    *::*)
+        eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\"
+        eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\"
+        ;;
+    *)
+        eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\"
+        ;;
+    esac
+}
+
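+# $ac_header_c_list is a flat list of (header, cache-variable suffix, C macro)
+# triples; walk it and define each macro whose header compiles.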
+ac_header= ac_cache=
+for ac_item in $ac_header_c_list
+do
+  if test $ac_cache; then
+    ac_fn_c_check_header_compile "$LINENO" $ac_header ac_cv_header_$ac_cache "$ac_includes_default"
+    if eval test \"x\$ac_cv_header_$ac_cache\" = xyes; then
+      printf "%s\n" "#define $ac_item 1" >> confdefs.h
+    fi
+    ac_header= ac_cache=
+  elif test $ac_header; then
+    ac_cache=$ac_item
+  else
+    ac_header=$ac_item
+  fi
+done
+
+
+
+
+
+
+
+
+if test $ac_cv_header_stdlib_h = yes && test $ac_cv_header_string_h = yes
+then :
+
+printf "%s\n" "#define STDC_HEADERS 1" >>confdefs.h
+
+fi
+ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default
+"
+if test "x$ac_cv_header_dlfcn_h" = xyes
+then :
+  printf "%s\n" "#define HAVE_DLFCN_H 1" >>confdefs.h
+
+fi
+
+
+
+
+func_stripname_cnf ()
+{
+  case $2 in
+  .*) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%\\\\$2\$%%"`;;
+  *)  func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%$2\$%%"`;;
+  esac
+} # func_stripname_cnf
+
+
+
+
+
+# Set options
+
+
+
+        enable_dlopen=no
+
+
+  enable_win32_dll=no
+
+
+            # Check whether --enable-shared was given.
+if test ${enable_shared+y}
+then :
+  enableval=$enable_shared; p=${PACKAGE-default}
+    case $enableval in
+    yes) enable_shared=yes ;;
+    no) enable_shared=no ;;
+    *)
+      enable_shared=no
+      # Look at the argument we got.  We use all the common list separators.
+      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
+      for pkg in $enableval; do
+	IFS=$lt_save_ifs
+	if test "X$pkg" = "X$p"; then
+	  enable_shared=yes
+	fi
+      done
+      IFS=$lt_save_ifs
+      ;;
+    esac
+else $as_nop
+  enable_shared=yes
+fi
+
+
+
+
+
+
+
+
+
+  # Check whether --enable-static was given.
+if test ${enable_static+y}
+then :
+  enableval=$enable_static; p=${PACKAGE-default}
+    case $enableval in
+    yes) enable_static=yes ;;
+    no) enable_static=no ;;
+    *)
+     enable_static=no
+      # Look at the argument we got.  We use all the common list separators.
+      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
+      for pkg in $enableval; do
+	IFS=$lt_save_ifs
+	if test "X$pkg" = "X$p"; then
+	  enable_static=yes
+	fi
+      done
+      IFS=$lt_save_ifs
+      ;;
+    esac
+else $as_nop
+  enable_static=yes
+fi
+
+
+
+
+
+
+
+
+
+
+# Check whether --with-pic was given.
+if test ${with_pic+y}
+then :
+  withval=$with_pic; lt_p=${PACKAGE-default}
+    case $withval in
+    yes|no) pic_mode=$withval ;;
+    *)
+      pic_mode=default
+      # Look at the argument we got.  We use all the common list separators.
+      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
+      for lt_pkg in $withval; do
+	IFS=$lt_save_ifs
+	if test "X$lt_pkg" = "X$lt_p"; then
+	  pic_mode=yes
+	fi
+      done
+      IFS=$lt_save_ifs
+      ;;
+    esac
+else $as_nop
+  pic_mode=default
+fi
+
+
+
+
+
+
+
+
+  # Check whether --enable-fast-install was given.
+if test ${enable_fast_install+y}
+then :
+  enableval=$enable_fast_install; p=${PACKAGE-default}
+    case $enableval in
+    yes) enable_fast_install=yes ;;
+    no) enable_fast_install=no ;;
+    *)
+      enable_fast_install=no
+      # Look at the argument we got.  We use all the common list separators.
+      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
+      for pkg in $enableval; do
+	IFS=$lt_save_ifs
+	if test "X$pkg" = "X$p"; then
+	  enable_fast_install=yes
+	fi
+      done
+      IFS=$lt_save_ifs
+      ;;
+    esac
+else $as_nop
+  enable_fast_install=yes
+fi
+
+
+
+
+
+
+
+
+  shared_archive_member_spec=
+case $host,$enable_shared in
+power*-*-aix[5-9]*,yes)
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking which variant of shared library versioning to provide" >&5
+printf %s "checking which variant of shared library versioning to provide... " >&6; }
+
+# Check whether --with-aix-soname was given.
+if test ${with_aix_soname+y}
+then :
+  withval=$with_aix_soname; case $withval in
+    aix|svr4|both)
+      ;;
+    *)
+      as_fn_error $? "Unknown argument to --with-aix-soname" "$LINENO" 5
+      ;;
+    esac
+    lt_cv_with_aix_soname=$with_aix_soname
+else $as_nop
+  if test ${lt_cv_with_aix_soname+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_with_aix_soname=aix
+fi
+
+    with_aix_soname=$lt_cv_with_aix_soname
+fi
+
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $with_aix_soname" >&5
+printf "%s\n" "$with_aix_soname" >&6; }
+  if test aix != "$with_aix_soname"; then
+    # For the AIX way of multilib, we name the shared archive member
+    # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o',
+    # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File.
+    # Even though GNU compilers ignore OBJECT_MODE and need the '-maix64' flag
+    # instead, the AIX toolchain works better with OBJECT_MODE set (default 32).
+    if test 64 = "${OBJECT_MODE-32}"; then
+      shared_archive_member_spec=shr_64
+    else
+      shared_archive_member_spec=shr
+    fi
+  fi
+  ;;
+*)
+  with_aix_soname=aix
+  ;;
+esac
+
+
+
+
+
+
+
+
+
+
+# This can be used to rebuild libtool when needed
+LIBTOOL_DEPS=$ltmain
+
+# Always use our own libtool.
+LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+test -z "$LN_S" && LN_S="ln -s"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+if test -n "${ZSH_VERSION+set}"; then
+   setopt NO_GLOB_SUBST
+fi
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5
+printf %s "checking for objdir... " >&6; }
+if test ${lt_cv_objdir+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  rm -f .libs 2>/dev/null
+mkdir .libs 2>/dev/null
+if test -d .libs; then
+  lt_cv_objdir=.libs
+else
+  # MS-DOS does not allow filenames that begin with a dot.
+  lt_cv_objdir=_libs
+fi
+rmdir .libs 2>/dev/null
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5
+printf "%s\n" "$lt_cv_objdir" >&6; }
+objdir=$lt_cv_objdir
+
+
+
+
+
+printf "%s\n" "#define LT_OBJDIR \"$lt_cv_objdir/\"" >>confdefs.h
+
+
+
+
+case $host_os in
+aix3*)
+  # AIX sometimes has problems with the GCC collect2 program.  For some
+  # reason, if we set the COLLECT_NAMES environment variable, the problems
+  # vanish in a puff of smoke.
+  if test set != "${COLLECT_NAMES+set}"; then
+    COLLECT_NAMES=
+    export COLLECT_NAMES
+  fi
+  ;;
+esac
+
+# Global variables:
+ofile=libtool
+can_build_shared=yes
+
+# All known linkers require a '.a' archive for static linking (except MSVC,
+# which needs '.lib').
+libext=a
+
+with_gnu_ld=$lt_cv_prog_gnu_ld
+
+old_CC=$CC
+old_CFLAGS=$CFLAGS
+
+# Set sane defaults for various variables
+test -z "$CC" && CC=cc
+test -z "$LTCC" && LTCC=$CC
+test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS
+test -z "$LD" && LD=ld
+test -z "$ac_objext" && ac_objext=o
+
+func_cc_basename $compiler
+cc_basename=$func_cc_basename_result
+
+
+# Only perform the check for file, if the check method requires it
+test -z "$MAGIC_CMD" && MAGIC_CMD=file
+case $deplibs_check_method in
+file_magic*)
+  if test "$file_magic_cmd" = '$MAGIC_CMD'; then
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5
+printf %s "checking for ${ac_tool_prefix}file... " >&6; }
+if test ${lt_cv_path_MAGIC_CMD+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  case $MAGIC_CMD in
+[\\/*] |  ?:[\\/]*)
+  lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path.
+  ;;
+*)
+  lt_save_MAGIC_CMD=$MAGIC_CMD
+  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
+  ac_dummy="/usr/bin$PATH_SEPARATOR$PATH"
+  for ac_dir in $ac_dummy; do
+    IFS=$lt_save_ifs
+    test -z "$ac_dir" && ac_dir=.
+    if test -f "$ac_dir/${ac_tool_prefix}file"; then
+      lt_cv_path_MAGIC_CMD=$ac_dir/"${ac_tool_prefix}file"
+      if test -n "$file_magic_test_file"; then
+	case $deplibs_check_method in
+	"file_magic "*)
+	  file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
+	  MAGIC_CMD=$lt_cv_path_MAGIC_CMD
+	  if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+	    $EGREP "$file_magic_regex" > /dev/null; then
+	    :
+	  else
+	    cat <<_LT_EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such.  This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem.  Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+_LT_EOF
+	  fi ;;
+	esac
+      fi
+      break
+    fi
+  done
+  IFS=$lt_save_ifs
+  MAGIC_CMD=$lt_save_MAGIC_CMD
+  ;;
+esac
+fi
+
+MAGIC_CMD=$lt_cv_path_MAGIC_CMD
+if test -n "$MAGIC_CMD"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5
+printf "%s\n" "$MAGIC_CMD" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+
+
+
+if test -z "$lt_cv_path_MAGIC_CMD"; then
+  if test -n "$ac_tool_prefix"; then
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for file" >&5
+printf %s "checking for file... " >&6; }
+if test ${lt_cv_path_MAGIC_CMD+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  case $MAGIC_CMD in
+[\\/*] |  ?:[\\/]*)
+  lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path.
+  ;;
+*)
+  lt_save_MAGIC_CMD=$MAGIC_CMD
+  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
+  ac_dummy="/usr/bin$PATH_SEPARATOR$PATH"
+  for ac_dir in $ac_dummy; do
+    IFS=$lt_save_ifs
+    test -z "$ac_dir" && ac_dir=.
+    if test -f "$ac_dir/file"; then
+      lt_cv_path_MAGIC_CMD=$ac_dir/"file"
+      if test -n "$file_magic_test_file"; then
+	case $deplibs_check_method in
+	"file_magic "*)
+	  file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
+	  MAGIC_CMD=$lt_cv_path_MAGIC_CMD
+	  if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+	    $EGREP "$file_magic_regex" > /dev/null; then
+	    :
+	  else
+	    cat <<_LT_EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such.  This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem.  Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+_LT_EOF
+	  fi ;;
+	esac
+      fi
+      break
+    fi
+  done
+  IFS=$lt_save_ifs
+  MAGIC_CMD=$lt_save_MAGIC_CMD
+  ;;
+esac
+fi
+
+MAGIC_CMD=$lt_cv_path_MAGIC_CMD
+if test -n "$MAGIC_CMD"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5
+printf "%s\n" "$MAGIC_CMD" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+  else
+    MAGIC_CMD=:
+  fi
+fi
+
+  fi
+  ;;
+esac
+
+# Use C for the default configuration in the libtool script
+
+lt_save_CC=$CC
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+# Source file extension for C test sources.
+ac_ext=c
+
+# Object file extension for compiled C test sources.
+objext=o
+objext=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="int some_variable = 0;"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='int main(){return(0);}'
+
+
+
+
+
+
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+
+# Save the default compiler, since it gets overwritten when the other
+# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP.
+compiler_DEFAULT=$CC
+
+# save warnings/boilerplate of simple test code
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_compile_test_code" >conftest.$ac_ext
+eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_compiler_boilerplate=`cat conftest.err`
+$RM conftest*
+
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_link_test_code" >conftest.$ac_ext
+eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_linker_boilerplate=`cat conftest.err`
+$RM -r conftest*
+
+
+if test -n "$compiler"; then
+
+lt_prog_compiler_no_builtin_flag=
+
+if test yes = "$GCC"; then
+  case $cc_basename in
+  nvcc*)
+    lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;;
+  *)
+    lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;;
+  esac
+
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5
+printf %s "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; }
+if test ${lt_cv_prog_compiler_rtti_exceptions+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_prog_compiler_rtti_exceptions=no
+   ac_outfile=conftest.$ac_objext
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+   lt_compiler_flag="-fno-rtti -fno-exceptions"  ## exclude from sc_useless_quotes_in_assignment
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   # The option is referenced via a variable to avoid confusing sed.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+   (eval "$lt_compile" 2>conftest.err)
+   ac_status=$?
+   cat conftest.err >&5
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   if (exit $ac_status) && test -s "$ac_outfile"; then
+     # The compiler can only warn and ignore the option if not recognized
+     # So say no if there are warnings other than the usual output.
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
+     $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+     if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+       lt_cv_prog_compiler_rtti_exceptions=yes
+     fi
+   fi
+   $RM conftest*
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5
+printf "%s\n" "$lt_cv_prog_compiler_rtti_exceptions" >&6; }
+
+if test yes = "$lt_cv_prog_compiler_rtti_exceptions"; then
+    lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions"
+else
+    :
+fi
+
+fi
+
+
+
+
+
+
+  lt_prog_compiler_wl=
+lt_prog_compiler_pic=
+lt_prog_compiler_static=
+
+
+  if test yes = "$GCC"; then
+    lt_prog_compiler_wl='-Wl,'
+    lt_prog_compiler_static='-static'
+
+    case $host_os in
+      aix*)
+      # All AIX code is PIC.
+      if test ia64 = "$host_cpu"; then
+	# AIX 5 now supports IA64 processor
+	lt_prog_compiler_static='-Bstatic'
+      fi
+      lt_prog_compiler_pic='-fPIC'
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            lt_prog_compiler_pic='-fPIC'
+        ;;
+      m68k)
+            # FIXME: we need at least 68020 code to build shared libraries, but
+            # adding the '-m68020' flag to GCC prevents building anything better,
+            # like '-m68040'.
+            lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4'
+        ;;
+      esac
+      ;;
+
+    beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+      # PIC is the default for these OSes.
+      ;;
+
+    mingw* | cygwin* | pw32* | os2* | cegcc*)
+      # This hack is so that the source file can tell whether it is being
+      # built for inclusion in a dll (and should export symbols for example).
+      # Although the cygwin gcc ignores -fPIC, still need this for old-style
+      # (--disable-auto-import) libraries
+      lt_prog_compiler_pic='-DDLL_EXPORT'
+      case $host_os in
+      os2*)
+	lt_prog_compiler_static='$wl-static'
+	;;
+      esac
+      ;;
+
+    darwin* | rhapsody*)
+      # PIC is the default on this platform
+      # Common symbols not allowed in MH_DYLIB files
+      lt_prog_compiler_pic='-fno-common'
+      ;;
+
+    haiku*)
+      # PIC is the default for Haiku.
+      # The "-static" flag exists, but is broken.
+      lt_prog_compiler_static=
+      ;;
+
+    hpux*)
+      # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+      # PA HP-UX.  On IA64 HP-UX, PIC is the default but the pic flag
+      # sets the default TLS model and affects inlining.
+      case $host_cpu in
+      hppa*64*)
+	# +Z the default
+	;;
+      *)
+	lt_prog_compiler_pic='-fPIC'
+	;;
+      esac
+      ;;
+
+    interix[3-9]*)
+      # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+      # Instead, we relocate shared libraries at runtime.
+      ;;
+
+    msdosdjgpp*)
+      # Just because we use GCC doesn't mean we suddenly get shared libraries
+      # on systems that don't support them.
+      lt_prog_compiler_can_build_shared=no
+      enable_shared=no
+      ;;
+
+    *nto* | *qnx*)
+      # QNX uses GNU C++, but need to define -shared option too, otherwise
+      # it will coredump.
+      lt_prog_compiler_pic='-fPIC -shared'
+      ;;
+
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	lt_prog_compiler_pic=-Kconform_pic
+      fi
+      ;;
+
+    *)
+      lt_prog_compiler_pic='-fPIC'
+      ;;
+    esac
+
+    case $cc_basename in
+    nvcc*) # Cuda Compiler Driver 2.2
+      lt_prog_compiler_wl='-Xlinker '
+      if test -n "$lt_prog_compiler_pic"; then
+        lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic"
+      fi
+      ;;
+    esac
+  else
+    # PORTME Check for flag to pass linker flags through the system compiler.
+    case $host_os in
+    aix*)
+      lt_prog_compiler_wl='-Wl,'
+      if test ia64 = "$host_cpu"; then
+	# AIX 5 now supports IA64 processor
+	lt_prog_compiler_static='-Bstatic'
+      else
+	lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp'
+      fi
+      ;;
+
+    darwin* | rhapsody*)
+      # PIC is the default on this platform
+      # Common symbols not allowed in MH_DYLIB files
+      lt_prog_compiler_pic='-fno-common'
+      case $cc_basename in
+      nagfor*)
+        # NAG Fortran compiler
+        lt_prog_compiler_wl='-Wl,-Wl,,'
+        lt_prog_compiler_pic='-PIC'
+        lt_prog_compiler_static='-Bstatic'
+        ;;
+      esac
+      ;;
+
+    mingw* | cygwin* | pw32* | os2* | cegcc*)
+      # This hack is so that the source file can tell whether it is being
+      # built for inclusion in a dll (and should export symbols for example).
+      lt_prog_compiler_pic='-DDLL_EXPORT'
+      case $host_os in
+      os2*)
+	lt_prog_compiler_static='$wl-static'
+	;;
+      esac
+      ;;
+
+    hpux9* | hpux10* | hpux11*)
+      lt_prog_compiler_wl='-Wl,'
+      # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but
+      # not for PA HP-UX.
+      case $host_cpu in
+      hppa*64*|ia64*)
+	# +Z the default
+	;;
+      *)
+	lt_prog_compiler_pic='+Z'
+	;;
+      esac
+      # Is there a better lt_prog_compiler_static that works with the bundled CC?
+      lt_prog_compiler_static='$wl-a ${wl}archive'
+      ;;
+
+    irix5* | irix6* | nonstopux*)
+      lt_prog_compiler_wl='-Wl,'
+      # PIC (with -KPIC) is the default.
+      lt_prog_compiler_static='-non_shared'
+      ;;
+
+    linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+      case $cc_basename in
+      # old Intel for x86_64, which still supported -KPIC.
+      ecc*)
+	lt_prog_compiler_wl='-Wl,'
+	lt_prog_compiler_pic='-KPIC'
+	lt_prog_compiler_static='-static'
+        ;;
+      # icc used to be incompatible with GCC.
+      # ICC 10 doesn't accept -KPIC any more.
+      icc* | ifort*)
+	lt_prog_compiler_wl='-Wl,'
+	lt_prog_compiler_pic='-fPIC'
+	lt_prog_compiler_static='-static'
+        ;;
+      # Lahey Fortran 8.1.
+      lf95*)
+	lt_prog_compiler_wl='-Wl,'
+	lt_prog_compiler_pic='--shared'
+	lt_prog_compiler_static='--static'
+	;;
+      nagfor*)
+	# NAG Fortran compiler
+	lt_prog_compiler_wl='-Wl,-Wl,,'
+	lt_prog_compiler_pic='-PIC'
+	lt_prog_compiler_static='-Bstatic'
+	;;
+      tcc*)
+	# Fabrice Bellard et al's Tiny C Compiler
+	lt_prog_compiler_wl='-Wl,'
+	lt_prog_compiler_pic='-fPIC'
+	lt_prog_compiler_static='-static'
+	;;
+      pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*)
+        # Portland Group compilers (*not* the Pentium gcc compiler,
+	# which looks to be a dead project)
+	lt_prog_compiler_wl='-Wl,'
+	lt_prog_compiler_pic='-fpic'
+	lt_prog_compiler_static='-Bstatic'
+        ;;
+      ccc*)
+        lt_prog_compiler_wl='-Wl,'
+        # All Alpha code is PIC.
+        lt_prog_compiler_static='-non_shared'
+        ;;
+      xl* | bgxl* | bgf* | mpixl*)
+	# IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene
+	lt_prog_compiler_wl='-Wl,'
+	lt_prog_compiler_pic='-qpic'
+	lt_prog_compiler_static='-qstaticlink'
+	;;
+      *)
+	case `$CC -V 2>&1 | sed 5q` in
+	*Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*)
+	  # Sun Fortran 8.3 passes all unrecognized flags to the linker
+	  lt_prog_compiler_pic='-KPIC'
+	  lt_prog_compiler_static='-Bstatic'
+	  lt_prog_compiler_wl=''
+	  ;;
+	*Sun\ F* | *Sun*Fortran*)
+	  lt_prog_compiler_pic='-KPIC'
+	  lt_prog_compiler_static='-Bstatic'
+	  lt_prog_compiler_wl='-Qoption ld '
+	  ;;
+	*Sun\ C*)
+	  # Sun C 5.9
+	  lt_prog_compiler_pic='-KPIC'
+	  lt_prog_compiler_static='-Bstatic'
+	  lt_prog_compiler_wl='-Wl,'
+	  ;;
+        *Intel*\ [CF]*Compiler*)
+	  lt_prog_compiler_wl='-Wl,'
+	  lt_prog_compiler_pic='-fPIC'
+	  lt_prog_compiler_static='-static'
+	  ;;
+	*Portland\ Group*)
+	  lt_prog_compiler_wl='-Wl,'
+	  lt_prog_compiler_pic='-fpic'
+	  lt_prog_compiler_static='-Bstatic'
+	  ;;
+	esac
+	;;
+      esac
+      ;;
+
+    newsos6)
+      lt_prog_compiler_pic='-KPIC'
+      lt_prog_compiler_static='-Bstatic'
+      ;;
+
+    *nto* | *qnx*)
+      # QNX uses GNU C++, but need to define -shared option too, otherwise
+      # it will coredump.
+      lt_prog_compiler_pic='-fPIC -shared'
+      ;;
+
+    osf3* | osf4* | osf5*)
+      lt_prog_compiler_wl='-Wl,'
+      # All OSF/1 code is PIC.
+      lt_prog_compiler_static='-non_shared'
+      ;;
+
+    rdos*)
+      lt_prog_compiler_static='-non_shared'
+      ;;
+
+    solaris*)
+      lt_prog_compiler_pic='-KPIC'
+      lt_prog_compiler_static='-Bstatic'
+      case $cc_basename in
+      f77* | f90* | f95* | sunf77* | sunf90* | sunf95*)
+	lt_prog_compiler_wl='-Qoption ld ';;
+      *)
+	lt_prog_compiler_wl='-Wl,';;
+      esac
+      ;;
+
+    sunos4*)
+      lt_prog_compiler_wl='-Qoption ld '
+      lt_prog_compiler_pic='-PIC'
+      lt_prog_compiler_static='-Bstatic'
+      ;;
+
+    sysv4 | sysv4.2uw2* | sysv4.3*)
+      lt_prog_compiler_wl='-Wl,'
+      lt_prog_compiler_pic='-KPIC'
+      lt_prog_compiler_static='-Bstatic'
+      ;;
+
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	lt_prog_compiler_pic='-Kconform_pic'
+	lt_prog_compiler_static='-Bstatic'
+      fi
+      ;;
+
+    sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+      lt_prog_compiler_wl='-Wl,'
+      lt_prog_compiler_pic='-KPIC'
+      lt_prog_compiler_static='-Bstatic'
+      ;;
+
+    unicos*)
+      lt_prog_compiler_wl='-Wl,'
+      lt_prog_compiler_can_build_shared=no
+      ;;
+
+    uts4*)
+      lt_prog_compiler_pic='-pic'
+      lt_prog_compiler_static='-Bstatic'
+      ;;
+
+    *)
+      lt_prog_compiler_can_build_shared=no
+      ;;
+    esac
+  fi
+
+case $host_os in
+  # For platforms that do not support PIC, -DPIC is meaningless:
+  *djgpp*)
+    lt_prog_compiler_pic=
+    ;;
+  *)
+    lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC"
+    ;;
+esac
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5
+printf %s "checking for $compiler option to produce PIC... " >&6; }
+if test ${lt_cv_prog_compiler_pic+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_prog_compiler_pic=$lt_prog_compiler_pic
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5
+printf "%s\n" "$lt_cv_prog_compiler_pic" >&6; }
+lt_prog_compiler_pic=$lt_cv_prog_compiler_pic
+
+#
+# Check to make sure the PIC flag actually works.
+#
+if test -n "$lt_prog_compiler_pic"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5
+printf %s "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; }
+if test ${lt_cv_prog_compiler_pic_works+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_prog_compiler_pic_works=no
+   ac_outfile=conftest.$ac_objext
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+   lt_compiler_flag="$lt_prog_compiler_pic -DPIC"  ## exclude from sc_useless_quotes_in_assignment
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   # The option is referenced via a variable to avoid confusing sed.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+   (eval "$lt_compile" 2>conftest.err)
+   ac_status=$?
+   cat conftest.err >&5
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   if (exit $ac_status) && test -s "$ac_outfile"; then
+     # The compiler can only warn and ignore the option if not recognized
+     # So say no if there are warnings other than the usual output.
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
+     $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+     if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+       lt_cv_prog_compiler_pic_works=yes
+     fi
+   fi
+   $RM conftest*
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5
+printf "%s\n" "$lt_cv_prog_compiler_pic_works" >&6; }
+
+if test yes = "$lt_cv_prog_compiler_pic_works"; then
+    case $lt_prog_compiler_pic in
+     "" | " "*) ;;
+     *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;;
+     esac
+else
+    lt_prog_compiler_pic=
+     lt_prog_compiler_can_build_shared=no
+fi
+
+fi
+
+
+
+
+
+
+
+
+
+
+
+#
+# Check to make sure the static flag actually works.
+#
+wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\"
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5
+printf %s "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; }
+if test ${lt_cv_prog_compiler_static_works+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_prog_compiler_static_works=no
+   save_LDFLAGS=$LDFLAGS
+   LDFLAGS="$LDFLAGS $lt_tmp_static_flag"
+   echo "$lt_simple_link_test_code" > conftest.$ac_ext
+   if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+     # The linker can only warn and ignore the option if not recognized
+     # So say no if there are warnings
+     if test -s conftest.err; then
+       # Append any errors to the config.log.
+       cat conftest.err 1>&5
+       $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
+       $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+       if diff conftest.exp conftest.er2 >/dev/null; then
+         lt_cv_prog_compiler_static_works=yes
+       fi
+     else
+       lt_cv_prog_compiler_static_works=yes
+     fi
+   fi
+   $RM -r conftest*
+   LDFLAGS=$save_LDFLAGS
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5
+printf "%s\n" "$lt_cv_prog_compiler_static_works" >&6; }
+
+if test yes = "$lt_cv_prog_compiler_static_works"; then
+    :
+else
+    lt_prog_compiler_static=
+fi
+
+
+
+
+
+
+
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
+printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+if test ${lt_cv_prog_compiler_c_o+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_prog_compiler_c_o=no
+   $RM -r conftest 2>/dev/null
+   mkdir conftest
+   cd conftest
+   mkdir out
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+   lt_compiler_flag="-o out/conftest2.$ac_objext"
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+   (eval "$lt_compile" 2>out/conftest.err)
+   ac_status=$?
+   cat out/conftest.err >&5
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   if (exit $ac_status) && test -s out/conftest2.$ac_objext
+   then
+     # The compiler can only warn and ignore the option if not recognized
+     # So say no if there are warnings
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
+     $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+     if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+       lt_cv_prog_compiler_c_o=yes
+     fi
+   fi
+   chmod u+w . 2>&5
+   $RM conftest*
+   # SGI C++ compiler will create directory out/ii_files/ for
+   # template instantiation
+   test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+   $RM out/* && rmdir out
+   cd ..
+   $RM -r conftest
+   $RM conftest*
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5
+printf "%s\n" "$lt_cv_prog_compiler_c_o" >&6; }
+
+
+
+
+
+
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
+printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+if test ${lt_cv_prog_compiler_c_o+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_prog_compiler_c_o=no
+   $RM -r conftest 2>/dev/null
+   mkdir conftest
+   cd conftest
+   mkdir out
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+   lt_compiler_flag="-o out/conftest2.$ac_objext"
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+   (eval "$lt_compile" 2>out/conftest.err)
+   ac_status=$?
+   cat out/conftest.err >&5
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   if (exit $ac_status) && test -s out/conftest2.$ac_objext
+   then
+     # The compiler can only warn and ignore the option if not recognized
+     # So say no if there are warnings
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
+     $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+     if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+       lt_cv_prog_compiler_c_o=yes
+     fi
+   fi
+   chmod u+w . 2>&5
+   $RM conftest*
+   # SGI C++ compiler will create directory out/ii_files/ for
+   # template instantiation
+   test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+   $RM out/* && rmdir out
+   cd ..
+   $RM -r conftest
+   $RM conftest*
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5
+printf "%s\n" "$lt_cv_prog_compiler_c_o" >&6; }
+
+
+
+
+hard_links=nottested
+if test no = "$lt_cv_prog_compiler_c_o" && test no != "$need_locks"; then
+  # do not overwrite the value of need_locks provided by the user
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5
+printf %s "checking if we can lock with hard links... " >&6; }
+  hard_links=yes
+  $RM conftest*
+  ln conftest.a conftest.b 2>/dev/null && hard_links=no
+  touch conftest.a
+  ln conftest.a conftest.b 2>&5 || hard_links=no
+  ln conftest.a conftest.b 2>/dev/null && hard_links=no
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5
+printf "%s\n" "$hard_links" >&6; }
+  if test no = "$hard_links"; then
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5
+printf "%s\n" "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;}
+    need_locks=warn
+  fi
+else
+  need_locks=no
+fi
+
+
+
+
+
+
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+
+  runpath_var=
+  allow_undefined_flag=
+  always_export_symbols=no
+  archive_cmds=
+  archive_expsym_cmds=
+  compiler_needs_object=no
+  enable_shared_with_static_runtimes=no
+  export_dynamic_flag_spec=
+  export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+  hardcode_automatic=no
+  hardcode_direct=no
+  hardcode_direct_absolute=no
+  hardcode_libdir_flag_spec=
+  hardcode_libdir_separator=
+  hardcode_minus_L=no
+  hardcode_shlibpath_var=unsupported
+  inherit_rpath=no
+  link_all_deplibs=unknown
+  module_cmds=
+  module_expsym_cmds=
+  old_archive_from_new_cmds=
+  old_archive_from_expsyms_cmds=
+  thread_safe_flag_spec=
+  whole_archive_flag_spec=
+  # include_expsyms should be a list of space-separated symbols to be *always*
+  # included in the symbol list
+  include_expsyms=
+  # exclude_expsyms can be an extended regexp of symbols to exclude
+  # it will be wrapped by ' (' and ')$', so one must not match beginning or
+  # end of line.  Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc',
+  # as well as any symbol that contains 'd'.
+  exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'
+  # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out
+  # platforms (ab)use it in PIC code, but their linkers get confused if
+  # the symbol is explicitly referenced.  Since portable code cannot
+  # rely on this symbol name, it's probably fine to never include it in
+  # preloaded symbol tables.
+  # Exclude shared library initialization/finalization symbols.
+  extract_expsyms_cmds=
+
+  case $host_os in
+  cygwin* | mingw* | pw32* | cegcc*)
+    # FIXME: the MSVC++ port hasn't been tested in a loooong time
+    # When not using gcc, we currently assume that we are using
+    # Microsoft Visual C++.
+    if test yes != "$GCC"; then
+      with_gnu_ld=no
+    fi
+    ;;
+  interix*)
+    # we just hope/assume this is gcc and not c89 (= MSVC++)
+    with_gnu_ld=yes
+    ;;
+  openbsd* | bitrig*)
+    with_gnu_ld=no
+    ;;
+  esac
+
+  ld_shlibs=yes
+
+  # On some targets, GNU ld is compatible enough with the native linker
+  # that we're better off using the native interface for both.
+  lt_use_gnu_ld_interface=no
+  if test yes = "$with_gnu_ld"; then
+    case $host_os in
+      aix*)
+	# The AIX port of GNU ld has always aspired to compatibility
+	# with the native linker.  However, as the warning in the GNU ld
+	# block says, versions before 2.19.5* couldn't really create working
+	# shared libraries, regardless of the interface used.
+	case `$LD -v 2>&1` in
+	  *\ \(GNU\ Binutils\)\ 2.19.5*) ;;
+	  *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;;
+	  *\ \(GNU\ Binutils\)\ [3-9]*) ;;
+	  *)
+	    lt_use_gnu_ld_interface=yes
+	    ;;
+	esac
+	;;
+      *)
+	lt_use_gnu_ld_interface=yes
+	;;
+    esac
+  fi
+
+  if test yes = "$lt_use_gnu_ld_interface"; then
+    # If archive_cmds runs LD, not CC, wlarc should be empty
+    wlarc='$wl'
+
+    # Set some defaults for GNU ld with shared library support. These
+    # are reset later if shared libraries are not supported. Putting them
+    # here allows them to be overridden if necessary.
+    runpath_var=LD_RUN_PATH
+    hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
+    export_dynamic_flag_spec='$wl--export-dynamic'
+    # ancient GNU ld didn't support --whole-archive et. al.
+    if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then
+      whole_archive_flag_spec=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
+    else
+      whole_archive_flag_spec=
+    fi
+    supports_anon_versioning=no
+    case `$LD -v | $SED -e 's/([^)]\+)\s\+//' 2>&1` in
+      *GNU\ gold*) supports_anon_versioning=yes ;;
+      *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11
+      *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
+      *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ...
+      *\ 2.11.*) ;; # other 2.11 versions
+      *) supports_anon_versioning=yes ;;
+    esac
+
+    # See if GNU ld supports shared libraries.
+    case $host_os in
+    aix[3-9]*)
+      # On AIX/PPC, the GNU linker is very broken
+      if test ia64 != "$host_cpu"; then
+	ld_shlibs=no
+	cat <<_LT_EOF 1>&2
+
+*** Warning: the GNU linker, at least up to release 2.19, is reported
+*** to be unable to reliably create shared libraries on AIX.
+*** Therefore, libtool is disabling shared libraries support.  If you
+*** really care for shared libraries, you may want to install binutils
+*** 2.20 or above, or modify your PATH so that a non-GNU linker is found.
+*** You will then need to restart the configuration process.
+
+_LT_EOF
+      fi
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+            archive_expsym_cmds=''
+        ;;
+      m68k)
+            archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+            hardcode_libdir_flag_spec='-L$libdir'
+            hardcode_minus_L=yes
+        ;;
+      esac
+      ;;
+
+    beos*)
+      if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	allow_undefined_flag=unsupported
+	# Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+	# support --undefined.  This deserves some investigation.  FIXME
+	archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+      else
+	ld_shlibs=no
+      fi
+      ;;
+
+    cygwin* | mingw* | pw32* | cegcc*)
+      # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless,
+      # as there is no search path for DLLs.
+      hardcode_libdir_flag_spec='-L$libdir'
+      export_dynamic_flag_spec='$wl--export-all-symbols'
+      allow_undefined_flag=unsupported
+      always_export_symbols=no
+      enable_shared_with_static_runtimes=yes
+      export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols'
+      exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'
+
+      if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+        archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+	# If the export-symbols file already is a .def file, use it as
+	# is; otherwise, prepend EXPORTS...
+	archive_expsym_cmds='if   test DEF = "`$SED -n     -e '\''s/^[	 ]*//'\''     -e '\''/^\(;.*\)*$/d'\''     -e '\''s/^\(EXPORTS\|LIBRARY\)\([	 ].*\)*$/DEF/p'\''     -e q     $export_symbols`" ; then
+          cp $export_symbols $output_objdir/$soname.def;
+        else
+          echo EXPORTS > $output_objdir/$soname.def;
+          cat $export_symbols >> $output_objdir/$soname.def;
+        fi~
+        $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+      else
+	ld_shlibs=no
+      fi
+      ;;
+
+    haiku*)
+      archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+      link_all_deplibs=yes
+      ;;
+
+    os2*)
+      hardcode_libdir_flag_spec='-L$libdir'
+      hardcode_minus_L=yes
+      allow_undefined_flag=unsupported
+      shrext_cmds=.dll
+      archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+	$ECHO EXPORTS >> $output_objdir/$libname.def~
+	emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
+	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+	emximp -o $lib $output_objdir/$libname.def'
+      archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+	$ECHO EXPORTS >> $output_objdir/$libname.def~
+	prefix_cmds="$SED"~
+	if test EXPORTS = "`$SED 1q $export_symbols`"; then
+	  prefix_cmds="$prefix_cmds -e 1d";
+	fi~
+	prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
+	cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
+	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+	emximp -o $lib $output_objdir/$libname.def'
+      old_archive_from_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
+      enable_shared_with_static_runtimes=yes
+      ;;
+
+    interix[3-9]*)
+      hardcode_direct=no
+      hardcode_shlibpath_var=no
+      hardcode_libdir_flag_spec='$wl-rpath,$libdir'
+      export_dynamic_flag_spec='$wl-E'
+      # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+      # Instead, shared libraries are loaded at an image base (0x10000000 by
+      # default) and relocated if they conflict, which is a slow very memory
+      # consuming and fragmenting process.  To avoid this, we pick a random,
+      # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+      # time.  Moving up from 0x10000000 also allows more sbrk(2) space.
+      archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+      archive_expsym_cmds='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+      ;;
+
+    gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu)
+      tmp_diet=no
+      if test linux-dietlibc = "$host_os"; then
+	case $cc_basename in
+	  diet\ *) tmp_diet=yes;;	# linux-dietlibc with static linking (!diet-dyn)
+	esac
+      fi
+      if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \
+	 && test no = "$tmp_diet"
+      then
+	tmp_addflag=' $pic_flag'
+	tmp_sharedflag='-shared'
+	case $cc_basename,$host_cpu in
+        pgcc*)				# Portland Group C compiler
+	  whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+	  tmp_addflag=' $pic_flag'
+	  ;;
+	pgf77* | pgf90* | pgf95* | pgfortran*)
+					# Portland Group f77 and f90 compilers
+	  whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+	  tmp_addflag=' $pic_flag -Mnomain' ;;
+	ecc*,ia64* | icc*,ia64*)	# Intel C compiler on ia64
+	  tmp_addflag=' -i_dynamic' ;;
+	efc*,ia64* | ifort*,ia64*)	# Intel Fortran compiler on ia64
+	  tmp_addflag=' -i_dynamic -nofor_main' ;;
+	ifc* | ifort*)			# Intel Fortran compiler
+	  tmp_addflag=' -nofor_main' ;;
+	lf95*)				# Lahey Fortran 8.1
+	  whole_archive_flag_spec=
+	  tmp_sharedflag='--shared' ;;
+        nagfor*)                        # NAGFOR 5.3
+          tmp_sharedflag='-Wl,-shared' ;;
+	xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below)
+	  tmp_sharedflag='-qmkshrobj'
+	  tmp_addflag= ;;
+	nvcc*)	# Cuda Compiler Driver 2.2
+	  whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+	  compiler_needs_object=yes
+	  ;;
+	esac
+	case `$CC -V 2>&1 | sed 5q` in
+	*Sun\ C*)			# Sun C 5.9
+	  whole_archive_flag_spec='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+	  compiler_needs_object=yes
+	  tmp_sharedflag='-G' ;;
+	*Sun\ F*)			# Sun Fortran 8.3
+	  tmp_sharedflag='-G' ;;
+	esac
+	archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+
+        if test yes = "$supports_anon_versioning"; then
+          archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~
+            cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+            echo "local: *; };" >> $output_objdir/$libname.ver~
+            $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib'
+        fi
+
+	case $cc_basename in
+	tcc*)
+	  export_dynamic_flag_spec='-rdynamic'
+	  ;;
+	xlf* | bgf* | bgxlf* | mpixlf*)
+	  # IBM XL Fortran 10.1 on PPC cannot create shared libs itself
+	  whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive'
+	  hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
+	  archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib'
+	  if test yes = "$supports_anon_versioning"; then
+	    archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~
+              cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+              echo "local: *; };" >> $output_objdir/$libname.ver~
+              $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
+	  fi
+	  ;;
+	esac
+      else
+        ld_shlibs=no
+      fi
+      ;;
+
+    netbsd*)
+      if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+	archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
+	wlarc=
+      else
+	archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+	archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+      fi
+      ;;
+
+    solaris*)
+      if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then
+	ld_shlibs=no
+	cat <<_LT_EOF 1>&2
+
+*** Warning: The releases 2.8.* of the GNU linker cannot reliably
+*** create shared libraries on Solaris systems.  Therefore, libtool
+*** is disabling shared libraries support.  We urge you to upgrade GNU
+*** binutils to release 2.9.1 or newer.  Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+      elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+	archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+      else
+	ld_shlibs=no
+      fi
+      ;;
+
+    sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*)
+      case `$LD -v 2>&1` in
+        *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*)
+	ld_shlibs=no
+	cat <<_LT_EOF 1>&2
+
+*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot
+*** reliably create shared libraries on SCO systems.  Therefore, libtool
+*** is disabling shared libraries support.  We urge you to upgrade GNU
+*** binutils to release 2.16.91.0.3 or newer.  Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+	;;
+	*)
+	  # For security reasons, it is highly recommended that you always
+	  # use absolute paths for naming shared libraries, and exclude the
+	  # DT_RUNPATH tag from executables and libraries.  But doing so
+	  # requires that you compile everything twice, which is a pain.
+	  if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	    hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
+	    archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+	    archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+	  else
+	    ld_shlibs=no
+	  fi
+	;;
+      esac
+      ;;
+
+    sunos4*)
+      archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+      wlarc=
+      hardcode_direct=yes
+      hardcode_shlibpath_var=no
+      ;;
+
+    *)
+      if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+	archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+      else
+	ld_shlibs=no
+      fi
+      ;;
+    esac
+
+    if test no = "$ld_shlibs"; then
+      runpath_var=
+      hardcode_libdir_flag_spec=
+      export_dynamic_flag_spec=
+      whole_archive_flag_spec=
+    fi
+  else
+    # PORTME fill in a description of your system's linker (not GNU ld)
+    case $host_os in
+    aix3*)
+      allow_undefined_flag=unsupported
+      always_export_symbols=yes
+      archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
+      # Note: this linker hardcodes the directories in LIBPATH if there
+      # are no directories specified by -L.
+      hardcode_minus_L=yes
+      if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then
+	# Neither direct hardcoding nor static linking is supported with a
+	# broken collect2.
+	hardcode_direct=unsupported
+      fi
+      ;;
+
+    aix[4-9]*)
+      if test ia64 = "$host_cpu"; then
+	# On IA64, the linker does run time linking by default, so we don't
+	# have to do anything special.
+	aix_use_runtimelinking=no
+	exp_sym_flag='-Bexport'
+	no_entry_flag=
+      else
+	# If we're using GNU nm, then we don't want the "-C" option.
+	# -C means demangle to GNU nm, but means don't demangle to AIX nm.
+	# Without the "-l" option, or with the "-B" option, AIX nm treats
+	# weak defined symbols like other global defined symbols, whereas
+	# GNU nm marks them as "W".
+	# While the 'weak' keyword is ignored in the Export File, we need
+	# it in the Import File for the 'aix-soname' feature, so we have
+	# to replace the "-B" option with "-P" for AIX nm.
+	if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+	  export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols'
+	else
+	  export_symbols_cmds='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols'
+	fi
+	aix_use_runtimelinking=no
+
+	# Test if we are trying to use run time linking or normal
+	# AIX style linking. If -brtl is somewhere in LDFLAGS, we
+	# have runtime linking enabled, and use it for executables.
+	# For shared libraries, we enable/disable runtime linking
+	# depending on the kind of the shared library created -
+	# when "with_aix_soname,aix_use_runtimelinking" is:
+	# "aix,no"   lib.a(lib.so.V) shared, rtl:no,  for executables
+	# "aix,yes"  lib.so          shared, rtl:yes, for executables
+	#            lib.a           static archive
+	# "both,no"  lib.so.V(shr.o) shared, rtl:yes
+	#            lib.a(lib.so.V) shared, rtl:no,  for executables
+	# "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables
+	#            lib.a(lib.so.V) shared, rtl:no
+	# "svr4,*"   lib.so.V(shr.o) shared, rtl:yes, for executables
+	#            lib.a           static archive
+	case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*)
+	  for ld_flag in $LDFLAGS; do
+	  if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then
+	    aix_use_runtimelinking=yes
+	    break
+	  fi
+	  done
+	  if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then
+	    # With aix-soname=svr4, we create the lib.so.V shared archives only,
+	    # so we don't have lib.a shared libs to link our executables.
+	    # We have to force runtime linking in this case.
+	    aix_use_runtimelinking=yes
+	    LDFLAGS="$LDFLAGS -Wl,-brtl"
+	  fi
+	  ;;
+	esac
+
+	exp_sym_flag='-bexport'
+	no_entry_flag='-bnoentry'
+      fi
+
+      # When large executables or shared objects are built, AIX ld can
+      # have problems creating the table of contents.  If linking a library
+      # or program results in "error TOC overflow" add -mminimal-toc to
+      # CXXFLAGS/CFLAGS for g++/gcc.  In the cases where that is not
+      # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+      archive_cmds=''
+      hardcode_direct=yes
+      hardcode_direct_absolute=yes
+      hardcode_libdir_separator=':'
+      link_all_deplibs=yes
+      file_list_spec='$wl-f,'
+      case $with_aix_soname,$aix_use_runtimelinking in
+      aix,*) ;; # traditional, no import file
+      svr4,* | *,yes) # use import file
+	# The Import File defines what to hardcode.
+	hardcode_direct=no
+	hardcode_direct_absolute=no
+	;;
+      esac
+
+      if test yes = "$GCC"; then
+	case $host_os in aix4.[012]|aix4.[012].*)
+	# We only want to do this on AIX 4.2 and lower, the check
+	# below for broken collect2 doesn't work under 4.3+
+	  collect2name=`$CC -print-prog-name=collect2`
+	  if test -f "$collect2name" &&
+	   strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+	  then
+	  # We have reworked collect2
+	  :
+	  else
+	  # We have old collect2
+	  hardcode_direct=unsupported
+	  # It fails to find uninstalled libraries when the uninstalled
+	  # path is not listed in the libpath.  Setting hardcode_minus_L
+	  # to unsupported forces relinking
+	  hardcode_minus_L=yes
+	  hardcode_libdir_flag_spec='-L$libdir'
+	  hardcode_libdir_separator=
+	  fi
+	  ;;
+	esac
+	shared_flag='-shared'
+	if test yes = "$aix_use_runtimelinking"; then
+	  shared_flag="$shared_flag "'$wl-G'
+	fi
+	# Need to ensure runtime linking is disabled for the traditional
+	# shared library, or the linker may eventually find shared libraries
+	# /with/ Import File - we do not want to mix them.
+	shared_flag_aix='-shared'
+	shared_flag_svr4='-shared $wl-G'
+      else
+	# not using gcc
+	if test ia64 = "$host_cpu"; then
+	# VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+	# chokes on -Wl,-G. The following line is correct:
+	  shared_flag='-G'
+	else
+	  if test yes = "$aix_use_runtimelinking"; then
+	    shared_flag='$wl-G'
+	  else
+	    shared_flag='$wl-bM:SRE'
+	  fi
+	  shared_flag_aix='$wl-bM:SRE'
+	  shared_flag_svr4='$wl-G'
+	fi
+      fi
+
+      export_dynamic_flag_spec='$wl-bexpall'
+      # It seems that -bexpall does not export symbols beginning with
+      # underscore (_), so it is better to generate a list of symbols to export.
+      always_export_symbols=yes
+      if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then
+	# Warning - without using the other runtime loading flags (-brtl),
+	# -berok will link without error, but may produce a broken library.
+	allow_undefined_flag='-berok'
+        # Determine the default libpath from the value encoded in an
+        # empty executable.
+        if test set = "${lt_cv_aix_libpath+set}"; then
+  aix_libpath=$lt_cv_aix_libpath
+else
+  if test ${lt_cv_aix_libpath_+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"
+then :
+
+  lt_aix_libpath_sed='
+      /Import File Strings/,/^$/ {
+	  /^0/ {
+	      s/^0  *\([^ ]*\) *$/\1/
+	      p
+	  }
+      }'
+  lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  # Check for a 64-bit object if we didn't find anything.
+  if test -z "$lt_cv_aix_libpath_"; then
+    lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  fi
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext conftest.$ac_ext
+  if test -z "$lt_cv_aix_libpath_"; then
+    lt_cv_aix_libpath_=/usr/lib:/lib
+  fi
+
+fi
+
+  aix_libpath=$lt_cv_aix_libpath_
+fi
+
+        hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath"
+        archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag
+      else
+	if test ia64 = "$host_cpu"; then
+	  hardcode_libdir_flag_spec='$wl-R $libdir:/usr/lib:/lib'
+	  allow_undefined_flag="-z nodefs"
+	  archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols"
+	else
+	 # Determine the default libpath from the value encoded in an
+	 # empty executable.
+	 if test set = "${lt_cv_aix_libpath+set}"; then
+  aix_libpath=$lt_cv_aix_libpath
+else
+  if test ${lt_cv_aix_libpath_+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"
+then :
+
+  lt_aix_libpath_sed='
+      /Import File Strings/,/^$/ {
+	  /^0/ {
+	      s/^0  *\([^ ]*\) *$/\1/
+	      p
+	  }
+      }'
+  lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  # Check for a 64-bit object if we didn't find anything.
+  if test -z "$lt_cv_aix_libpath_"; then
+    lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  fi
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext conftest.$ac_ext
+  if test -z "$lt_cv_aix_libpath_"; then
+    lt_cv_aix_libpath_=/usr/lib:/lib
+  fi
+
+fi
+
+  aix_libpath=$lt_cv_aix_libpath_
+fi
+
+	 hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath"
+	  # Warning - without using the other run time loading flags,
+	  # -berok will link without error, but may produce a broken library.
+	  no_undefined_flag=' $wl-bernotok'
+	  allow_undefined_flag=' $wl-berok'
+	  if test yes = "$with_gnu_ld"; then
+	    # We only use this code for GNU lds that support --whole-archive.
+	    whole_archive_flag_spec='$wl--whole-archive$convenience $wl--no-whole-archive'
+	  else
+	    # Exported symbols can be pulled into shared objects from archives
+	    whole_archive_flag_spec='$convenience'
+	  fi
+	  archive_cmds_need_lc=yes
+	  archive_expsym_cmds='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d'
+	  # -brtl affects multiple linker settings, -berok does not and is overridden later
+	  compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`'
+	  if test svr4 != "$with_aix_soname"; then
+	    # This is similar to how AIX traditionally builds its shared libraries.
+	    archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname'
+	  fi
+	  if test aix != "$with_aix_soname"; then
+	    archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp'
+	  else
+	    # used by -dlpreopen to get the symbols
+	    archive_expsym_cmds="$archive_expsym_cmds"'~$MV  $output_objdir/$realname.d/$soname $output_objdir'
+	  fi
+	  archive_expsym_cmds="$archive_expsym_cmds"'~$RM -r $output_objdir/$realname.d'
+	fi
+      fi
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+            archive_expsym_cmds=''
+        ;;
+      m68k)
+            archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+            hardcode_libdir_flag_spec='-L$libdir'
+            hardcode_minus_L=yes
+        ;;
+      esac
+      ;;
+
+    bsdi[45]*)
+      export_dynamic_flag_spec=-rdynamic
+      ;;
+
+    cygwin* | mingw* | pw32* | cegcc*)
+      # When not using gcc, we currently assume that we are using
+      # Microsoft Visual C++.
+      # hardcode_libdir_flag_spec is actually meaningless, as there is
+      # no search path for DLLs.
+      case $cc_basename in
+      cl*)
+	# Native MSVC
+	hardcode_libdir_flag_spec=' '
+	allow_undefined_flag=unsupported
+	always_export_symbols=yes
+	file_list_spec='@'
+	# Tell ltmain to make .lib files, not .a files.
+	libext=lib
+	# Tell ltmain to make .dll files, not .so files.
+	shrext_cmds=.dll
+	# FIXME: Setting linknames here is a bad hack.
+	archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames='
+	archive_expsym_cmds='if   test DEF = "`$SED -n     -e '\''s/^[	 ]*//'\''     -e '\''/^\(;.*\)*$/d'\''     -e '\''s/^\(EXPORTS\|LIBRARY\)\([	 ].*\)*$/DEF/p'\''     -e q     $export_symbols`" ; then
+            cp "$export_symbols" "$output_objdir/$soname.def";
+            echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp";
+          else
+            $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp;
+          fi~
+          $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+          linknames='
+	# The linker will not automatically build a static lib if we build a DLL.
+	# _LT_TAGVAR(old_archive_from_new_cmds, )='true'
+	enable_shared_with_static_runtimes=yes
+	exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+	export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols'
+	# Don't use ranlib
+	old_postinstall_cmds='chmod 644 $oldlib'
+	postlink_cmds='lt_outputfile="@OUTPUT@"~
+          lt_tool_outputfile="@TOOL_OUTPUT@"~
+          case $lt_outputfile in
+            *.exe|*.EXE) ;;
+            *)
+              lt_outputfile=$lt_outputfile.exe
+              lt_tool_outputfile=$lt_tool_outputfile.exe
+              ;;
+          esac~
+          if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then
+            $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+            $RM "$lt_outputfile.manifest";
+          fi'
+	;;
+      *)
+	# Assume MSVC wrapper
+	hardcode_libdir_flag_spec=' '
+	allow_undefined_flag=unsupported
+	# Tell ltmain to make .lib files, not .a files.
+	libext=lib
+	# Tell ltmain to make .dll files, not .so files.
+	shrext_cmds=.dll
+	# FIXME: Setting linknames here is a bad hack.
+	archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames='
+	# The linker will automatically build a .lib file if we build a DLL.
+	old_archive_from_new_cmds='true'
+	# FIXME: Should let the user specify the lib program.
+	old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs'
+	enable_shared_with_static_runtimes=yes
+	;;
+      esac
+      ;;
+
+    darwin* | rhapsody*)
+
+
+  archive_cmds_need_lc=no
+  hardcode_direct=no
+  hardcode_automatic=yes
+  hardcode_shlibpath_var=unsupported
+  if test yes = "$lt_cv_ld_force_load"; then
+    whole_archive_flag_spec='`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+
+  else
+    whole_archive_flag_spec=''
+  fi
+  link_all_deplibs=yes
+  allow_undefined_flag=$_lt_dar_allow_undefined
+  case $cc_basename in
+     ifort*|nagfor*) _lt_dar_can_shared=yes ;;
+     *) _lt_dar_can_shared=$GCC ;;
+  esac
+  if test yes = "$_lt_dar_can_shared"; then
+    output_verbose_link_cmd=func_echo_all
+    archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil"
+    module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil"
+    archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil"
+    module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil"
+
+  else
+  ld_shlibs=no
+  fi
+
+      ;;
+
+    dgux*)
+      archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      hardcode_libdir_flag_spec='-L$libdir'
+      hardcode_shlibpath_var=no
+      ;;
+
+    # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
+    # support.  Future versions do this automatically, but an explicit c++rt0.o
+    # does not break anything, and helps significantly (at the cost of a little
+    # extra space).
+    freebsd2.2*)
+      archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
+      hardcode_libdir_flag_spec='-R$libdir'
+      hardcode_direct=yes
+      hardcode_shlibpath_var=no
+      ;;
+
+    # Unfortunately, older versions of FreeBSD 2 do not have this feature.
+    freebsd2.*)
+      archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+      hardcode_direct=yes
+      hardcode_minus_L=yes
+      hardcode_shlibpath_var=no
+      ;;
+
+    # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
+    freebsd* | dragonfly*)
+      archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+      hardcode_libdir_flag_spec='-R$libdir'
+      hardcode_direct=yes
+      hardcode_shlibpath_var=no
+      ;;
+
+    hpux9*)
+      if test yes = "$GCC"; then
+	archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
+      else
+	archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
+      fi
+      hardcode_libdir_flag_spec='$wl+b $wl$libdir'
+      hardcode_libdir_separator=:
+      hardcode_direct=yes
+
+      # hardcode_minus_L: Not really in the search PATH,
+      # but as the default location of the library.
+      hardcode_minus_L=yes
+      export_dynamic_flag_spec='$wl-E'
+      ;;
+
+    hpux10*)
+      if test yes,no = "$GCC,$with_gnu_ld"; then
+	archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
+      fi
+      if test no = "$with_gnu_ld"; then
+	hardcode_libdir_flag_spec='$wl+b $wl$libdir'
+	hardcode_libdir_separator=:
+	hardcode_direct=yes
+	hardcode_direct_absolute=yes
+	export_dynamic_flag_spec='$wl-E'
+	# hardcode_minus_L: Not really in the search PATH,
+	# but as the default location of the library.
+	hardcode_minus_L=yes
+      fi
+      ;;
+
+    hpux11*)
+      if test yes,no = "$GCC,$with_gnu_ld"; then
+	case $host_cpu in
+	hppa*64*)
+	  archive_cmds='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	ia64*)
+	  archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	*)
+	  archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	esac
+      else
+	case $host_cpu in
+	hppa*64*)
+	  archive_cmds='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	ia64*)
+	  archive_cmds='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	*)
+
+	  # Older versions of the 11.00 compiler do not understand -b yet
+	  # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does)
+	  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5
+printf %s "checking if $CC understands -b... " >&6; }
+if test ${lt_cv_prog_compiler__b+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_prog_compiler__b=no
+   save_LDFLAGS=$LDFLAGS
+   LDFLAGS="$LDFLAGS -b"
+   echo "$lt_simple_link_test_code" > conftest.$ac_ext
+   if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+     # The linker can only warn and ignore the option if not recognized
+     # So say no if there are warnings
+     if test -s conftest.err; then
+       # Append any errors to the config.log.
+       cat conftest.err 1>&5
+       $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
+       $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+       if diff conftest.exp conftest.er2 >/dev/null; then
+         lt_cv_prog_compiler__b=yes
+       fi
+     else
+       lt_cv_prog_compiler__b=yes
+     fi
+   fi
+   $RM -r conftest*
+   LDFLAGS=$save_LDFLAGS
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5
+printf "%s\n" "$lt_cv_prog_compiler__b" >&6; }
+
+if test yes = "$lt_cv_prog_compiler__b"; then
+    archive_cmds='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+else
+    archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
+fi
+
+	  ;;
+	esac
+      fi
+      if test no = "$with_gnu_ld"; then
+	hardcode_libdir_flag_spec='$wl+b $wl$libdir'
+	hardcode_libdir_separator=:
+
+	case $host_cpu in
+	hppa*64*|ia64*)
+	  hardcode_direct=no
+	  hardcode_shlibpath_var=no
+	  ;;
+	*)
+	  hardcode_direct=yes
+	  hardcode_direct_absolute=yes
+	  export_dynamic_flag_spec='$wl-E'
+
+	  # hardcode_minus_L: Not really in the search PATH,
+	  # but as the default location of the library.
+	  hardcode_minus_L=yes
+	  ;;
+	esac
+      fi
+      ;;
+
+    irix5* | irix6* | nonstopux*)
+      if test yes = "$GCC"; then
+	archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+	# Try to use the -exported_symbol ld option, if it does not
+	# work, assume that -exports_file does not work either and
+	# implicitly export all symbols.
+	# This should be the same for all languages, so no per-tag cache variable.
+	{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5
+printf %s "checking whether the $host_os linker accepts -exported_symbol... " >&6; }
+if test ${lt_cv_irix_exported_symbol+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  save_LDFLAGS=$LDFLAGS
+	   LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null"
+	   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+int foo (void) { return 0; }
+_ACEOF
+if ac_fn_c_try_link "$LINENO"
+then :
+  lt_cv_irix_exported_symbol=yes
+else $as_nop
+  lt_cv_irix_exported_symbol=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext conftest.$ac_ext
+           LDFLAGS=$save_LDFLAGS
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5
+printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; }
+	if test yes = "$lt_cv_irix_exported_symbol"; then
+          archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib'
+	fi
+      else
+	archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+	archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib'
+      fi
+      archive_cmds_need_lc='no'
+      hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
+      hardcode_libdir_separator=:
+      inherit_rpath=yes
+      link_all_deplibs=yes
+      ;;
+
+    linux*)
+      case $cc_basename in
+      tcc*)
+	# Fabrice Bellard et al's Tiny C Compiler
+	ld_shlibs=yes
+	archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+	;;
+      esac
+      ;;
+
+    netbsd*)
+      if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+	archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'  # a.out
+      else
+	archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags'      # ELF
+      fi
+      hardcode_libdir_flag_spec='-R$libdir'
+      hardcode_direct=yes
+      hardcode_shlibpath_var=no
+      ;;
+
+    newsos6)
+      archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      hardcode_direct=yes
+      hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
+      hardcode_libdir_separator=:
+      hardcode_shlibpath_var=no
+      ;;
+
+    *nto* | *qnx*)
+      ;;
+
+    openbsd* | bitrig*)
+      if test -f /usr/libexec/ld.so; then
+	hardcode_direct=yes
+	hardcode_shlibpath_var=no
+	hardcode_direct_absolute=yes
+	if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
+	  archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+	  archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols'
+	  hardcode_libdir_flag_spec='$wl-rpath,$libdir'
+	  export_dynamic_flag_spec='$wl-E'
+	else
+	  archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+	  hardcode_libdir_flag_spec='$wl-rpath,$libdir'
+	fi
+      else
+	ld_shlibs=no
+      fi
+      ;;
+
+    os2*)
+      hardcode_libdir_flag_spec='-L$libdir'
+      hardcode_minus_L=yes
+      allow_undefined_flag=unsupported
+      shrext_cmds=.dll
+      archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+	$ECHO EXPORTS >> $output_objdir/$libname.def~
+	emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
+	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+	emximp -o $lib $output_objdir/$libname.def'
+      archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+	$ECHO EXPORTS >> $output_objdir/$libname.def~
+	prefix_cmds="$SED"~
+	if test EXPORTS = "`$SED 1q $export_symbols`"; then
+	  prefix_cmds="$prefix_cmds -e 1d";
+	fi~
+	prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
+	cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
+	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+	emximp -o $lib $output_objdir/$libname.def'
+      old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
+      enable_shared_with_static_runtimes=yes
+      ;;
+
+    osf3*)
+      if test yes = "$GCC"; then
+	allow_undefined_flag=' $wl-expect_unresolved $wl\*'
+	archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+      else
+	allow_undefined_flag=' -expect_unresolved \*'
+	archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+      fi
+      archive_cmds_need_lc='no'
+      hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
+      hardcode_libdir_separator=:
+      ;;
+
+    osf4* | osf5*)	# as osf3* with the addition of -msym flag
+      if test yes = "$GCC"; then
+	allow_undefined_flag=' $wl-expect_unresolved $wl\*'
+	archive_cmds='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+	hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
+      else
+	allow_undefined_flag=' -expect_unresolved \*'
+	archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+	archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~
+          $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp'
+
+	# Both c and cxx compiler support -rpath directly
+	hardcode_libdir_flag_spec='-rpath $libdir'
+      fi
+      archive_cmds_need_lc='no'
+      hardcode_libdir_separator=:
+      ;;
+
+    solaris*)
+      no_undefined_flag=' -z defs'
+      if test yes = "$GCC"; then
+	wlarc='$wl'
+	archive_cmds='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
+	archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+          $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+      else
+	case `$CC -V 2>&1` in
+	*"Compilers 5.0"*)
+	  wlarc=''
+	  archive_cmds='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	  archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+            $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp'
+	  ;;
+	*)
+	  wlarc='$wl'
+	  archive_cmds='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags'
+	  archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+            $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+	  ;;
+	esac
+      fi
+      hardcode_libdir_flag_spec='-R$libdir'
+      hardcode_shlibpath_var=no
+      case $host_os in
+      solaris2.[0-5] | solaris2.[0-5].*) ;;
+      *)
+	# The compiler driver will combine and reorder linker options,
+	# but understands '-z linker_flag'.  GCC discards it without '$wl',
+	# but is careful enough not to reorder.
+	# Supported since Solaris 2.6 (maybe 2.5.1?)
+	if test yes = "$GCC"; then
+	  whole_archive_flag_spec='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract'
+	else
+	  whole_archive_flag_spec='-z allextract$convenience -z defaultextract'
+	fi
+	;;
+      esac
+      link_all_deplibs=yes
+      ;;
+
+    sunos4*)
+      if test sequent = "$host_vendor"; then
+	# Use $CC to link under sequent, because it throws in some extra .o
+	# files that make .init and .fini sections work.
+	archive_cmds='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
+      fi
+      hardcode_libdir_flag_spec='-L$libdir'
+      hardcode_direct=yes
+      hardcode_minus_L=yes
+      hardcode_shlibpath_var=no
+      ;;
+
+    sysv4)
+      case $host_vendor in
+	sni)
+	  archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	  hardcode_direct=yes # is this really true???
+	;;
+	siemens)
+	  ## LD is ld it makes a PLAMLIB
+	  ## CC just makes a GrossModule.
+	  archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags'
+	  reload_cmds='$CC -r -o $output$reload_objs'
+	  hardcode_direct=no
+        ;;
+	motorola)
+	  archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	  hardcode_direct=no #Motorola manual says yes, but my tests say they lie
+	;;
+      esac
+      runpath_var='LD_RUN_PATH'
+      hardcode_shlibpath_var=no
+      ;;
+
+    sysv4.3*)
+      archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      hardcode_shlibpath_var=no
+      export_dynamic_flag_spec='-Bexport'
+      ;;
+
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	hardcode_shlibpath_var=no
+	runpath_var=LD_RUN_PATH
+	hardcode_runpath_var=yes
+	ld_shlibs=yes
+      fi
+      ;;
+
+    sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*)
+      no_undefined_flag='$wl-z,text'
+      archive_cmds_need_lc=no
+      hardcode_shlibpath_var=no
+      runpath_var='LD_RUN_PATH'
+
+      if test yes = "$GCC"; then
+	archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      fi
+      ;;
+
+    sysv5* | sco3.2v5* | sco5v6*)
+      # Note: We CANNOT use -z defs as we might desire, because we do not
+      # link with -lc, and that would cause any symbols used from libc to
+      # always be unresolved, which means just about no library would
+      # ever link correctly.  If we're not using GNU ld we use -z text
+      # though, which does catch some bad symbols but isn't as heavy-handed
+      # as -z defs.
+      no_undefined_flag='$wl-z,text'
+      allow_undefined_flag='$wl-z,nodefs'
+      archive_cmds_need_lc=no
+      hardcode_shlibpath_var=no
+      hardcode_libdir_flag_spec='$wl-R,$libdir'
+      hardcode_libdir_separator=':'
+      link_all_deplibs=yes
+      export_dynamic_flag_spec='$wl-Bexport'
+      runpath_var='LD_RUN_PATH'
+
+      if test yes = "$GCC"; then
+	archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      fi
+      ;;
+
+    uts4*)
+      archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      hardcode_libdir_flag_spec='-L$libdir'
+      hardcode_shlibpath_var=no
+      ;;
+
+    *)
+      ld_shlibs=no
+      ;;
+    esac
+
+    if test sni = "$host_vendor"; then
+      case $host in
+      sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+	export_dynamic_flag_spec='$wl-Blargedynsym'
+	;;
+      esac
+    fi
+  fi
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5
+printf "%s\n" "$ld_shlibs" >&6; }
+test no = "$ld_shlibs" && can_build_shared=no
+
+with_gnu_ld=$with_gnu_ld
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#
+# Do we need to explicitly link libc?
+#
+case "x$archive_cmds_need_lc" in
+x|xyes)
+  # Assume -lc should be added
+  archive_cmds_need_lc=yes
+
+  if test yes,yes = "$GCC,$enable_shared"; then
+    case $archive_cmds in
+    *'~'*)
+      # FIXME: we may have to deal with multi-command sequences.
+      ;;
+    '$CC '*)
+      # Test whether the compiler implicitly links with -lc since on some
+      # systems, -lgcc has to come before -lc. If gcc already passes -lc
+      # to ld, don't add -lc before -lgcc.
+      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5
+printf %s "checking whether -lc should be explicitly linked in... " >&6; }
+if test ${lt_cv_archive_cmds_need_lc+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  $RM conftest*
+	echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+	if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } 2>conftest.err; then
+	  soname=conftest
+	  lib=conftest
+	  libobjs=conftest.$ac_objext
+	  deplibs=
+	  wl=$lt_prog_compiler_wl
+	  pic_flag=$lt_prog_compiler_pic
+	  compiler_flags=-v
+	  linker_flags=-v
+	  verstring=
+	  output_objdir=.
+	  libname=conftest
+	  lt_save_allow_undefined_flag=$allow_undefined_flag
+	  allow_undefined_flag=
+	  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5
+  (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+	  then
+	    lt_cv_archive_cmds_need_lc=no
+	  else
+	    lt_cv_archive_cmds_need_lc=yes
+	  fi
+	  allow_undefined_flag=$lt_save_allow_undefined_flag
+	else
+	  cat conftest.err 1>&5
+	fi
+	$RM conftest*
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5
+printf "%s\n" "$lt_cv_archive_cmds_need_lc" >&6; }
+      archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc
+      ;;
+    esac
+  fi
+  ;;
+esac
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5
+printf %s "checking dynamic linker characteristics... " >&6; }
+
+if test yes = "$GCC"; then
+  case $host_os in
+    darwin*) lt_awk_arg='/^libraries:/,/LR/' ;;
+    *) lt_awk_arg='/^libraries:/' ;;
+  esac
+  case $host_os in
+    mingw* | cegcc*) lt_sed_strip_eq='s|=\([A-Za-z]:\)|\1|g' ;;
+    *) lt_sed_strip_eq='s|=/|/|g' ;;
+  esac
+  lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq`
+  case $lt_search_path_spec in
+  *\;*)
+    # if the path contains ";" then we assume it to be the separator
+    # otherwise default to the standard path separator (i.e. ":") - it is
+    # assumed that no part of a normal pathname contains ";" but that should
+    # okay in the real world where ";" in dirpaths is itself problematic.
+    lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'`
+    ;;
+  *)
+    lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"`
+    ;;
+  esac
+  # Ok, now we have the path, separated by spaces, we can step through it
+  # and add multilib dir if necessary...
+  lt_tmp_lt_search_path_spec=
+  lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null`
+  # ...but if some path component already ends with the multilib dir we assume
+  # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer).
+  case "$lt_multi_os_dir; $lt_search_path_spec " in
+  "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*)
+    lt_multi_os_dir=
+    ;;
+  esac
+  for lt_sys_path in $lt_search_path_spec; do
+    if test -d "$lt_sys_path$lt_multi_os_dir"; then
+      lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir"
+    elif test -n "$lt_multi_os_dir"; then
+      test -d "$lt_sys_path" && \
+	lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path"
+    fi
+  done
+  lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk '
+BEGIN {RS = " "; FS = "/|\n";} {
+  lt_foo = "";
+  lt_count = 0;
+  for (lt_i = NF; lt_i > 0; lt_i--) {
+    if ($lt_i != "" && $lt_i != ".") {
+      if ($lt_i == "..") {
+        lt_count++;
+      } else {
+        if (lt_count == 0) {
+          lt_foo = "/" $lt_i lt_foo;
+        } else {
+          lt_count--;
+        }
+      }
+    }
+  }
+  if (lt_foo != "") { lt_freq[lt_foo]++; }
+  if (lt_freq[lt_foo] == 1) { print lt_foo; }
+}'`
+  # AWK program above erroneously prepends '/' to C:/dos/paths
+  # for these hosts.
+  case $host_os in
+    mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\
+      $SED 's|/\([A-Za-z]:\)|\1|g'` ;;
+  esac
+  sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP`
+else
+  sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+fi
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+shrext_cmds=.so
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+need_lib_prefix=unknown
+hardcode_into_libs=no
+
+# when you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments
+need_version=unknown
+
+
+
+case $host_os in
+aix3*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='$libname$release$shared_ext$versuffix $libname.a'
+  shlibpath_var=LIBPATH
+
+  # AIX 3 has no versioning support, so we append a major version to the name.
+  soname_spec='$libname$release$shared_ext$major'
+  ;;
+
+aix[4-9]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  hardcode_into_libs=yes
+  if test ia64 = "$host_cpu"; then
+    # AIX 5 supports IA64
+    library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext'
+    shlibpath_var=LD_LIBRARY_PATH
+  else
+    # With GCC up to 2.95.x, collect2 would create an import file
+    # for dependence libraries.  The import file would start with
+    # the line '#! .'.  This would cause the generated library to
+    # depend on '.', always an invalid library.  This was fixed in
+    # development snapshots of GCC prior to 3.0.
+    case $host_os in
+      aix4 | aix4.[01] | aix4.[01].*)
+      if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+	   echo ' yes '
+	   echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then
+	:
+      else
+	can_build_shared=no
+      fi
+      ;;
+    esac
+    # Using Import Files as archive members, it is possible to support
+    # filename-based versioning of shared library archives on AIX. While
+    # this would work both with and without runtime linking, it will
+    # prevent static linking of such archives. So we do filename-based
+    # shared library versioning with .so extension only, which is used
+    # when both runtime linking and shared linking are enabled.
+    # Unfortunately, runtime linking may impact performance, so we do
+    # not want this to be the default eventually. Also, we use the
+    # versioned .so libs for executables only if there is the -brtl
+    # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only.
+    # To allow for filename-based versioning support, we need to create
+    # libNAME.so.V as an archive file, containing:
+    # *) an Import File, referring to the versioned filename of the
+    #    archive as well as the shared archive member, telling the
+    #    bitwidth (32 or 64) of that shared object, and providing the
+    #    list of exported symbols of that shared object, possibly
+    #    decorated with the 'weak' keyword
+    # *) the shared object with the F_LOADONLY flag set, to really avoid
+    #    it being seen by the linker.
+    # At run time we'd better use the real file rather than another symlink,
+    # but for link time we create the symlink libNAME.so -> libNAME.so.V
+
+    case $with_aix_soname,$aix_use_runtimelinking in
+    # AIX (on Power*) has no versioning support, so currently we cannot hardcode the correct
+    # soname into the executable. Probably we can add versioning support to
+    # collect2, so additional links can be useful in the future.
+    aix,yes) # traditional libtool
+      dynamic_linker='AIX unversionable lib.so'
+      # If using run time linking (on AIX 4.2 or later) use lib<name>.so
+      # instead of lib<name>.a to let people know that these are not
+      # typical AIX shared libraries.
+      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+      ;;
+    aix,no) # traditional AIX only
+      dynamic_linker='AIX lib.a(lib.so.V)'
+      # We preserve .a as extension for shared libraries through AIX4.2
+      # and later when we are not doing run time linking.
+      library_names_spec='$libname$release.a $libname.a'
+      soname_spec='$libname$release$shared_ext$major'
+      ;;
+    svr4,*) # full svr4 only
+      dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)"
+      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
+      # We do not specify a path in Import Files, so LIBPATH fires.
+      shlibpath_overrides_runpath=yes
+      ;;
+    *,yes) # both, prefer svr4
+      dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)"
+      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
+      # unpreferred sharedlib libNAME.a needs extra handling
+      postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"'
+      postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"'
+      # We do not specify a path in Import Files, so LIBPATH fires.
+      shlibpath_overrides_runpath=yes
+      ;;
+    *,no) # both, prefer aix
+      dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)"
+      library_names_spec='$libname$release.a $libname.a'
+      soname_spec='$libname$release$shared_ext$major'
+      # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling
+      postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)'
+      postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"'
+      ;;
+    esac
+    shlibpath_var=LIBPATH
+  fi
+  ;;
+
+amigaos*)
+  case $host_cpu in
+  powerpc)
+    # Since July 2007 AmigaOS4 officially supports .so libraries.
+    # When compiling the executable, add -use-dynld -Lsobjs: to the compile line.
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+    ;;
+  m68k)
+    library_names_spec='$libname.ixlibrary $libname.a'
+    # Create ${libname}_ixlibrary.a entries in /sys/libs.
+    finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+    ;;
+  esac
+  ;;
+
+beos*)
+  library_names_spec='$libname$shared_ext'
+  dynamic_linker="$host_os ld.so"
+  shlibpath_var=LIBRARY_PATH
+  ;;
+
+bsdi[45]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+  sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+  # the default ld.so.conf also contains /usr/contrib/lib and
+  # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+  # libtool to hard-code these into programs
+  ;;
+
+cygwin* | mingw* | pw32* | cegcc*)
+  version_type=windows
+  shrext_cmds=.dll
+  need_version=no
+  need_lib_prefix=no
+
+  case $GCC,$cc_basename in
+  yes,*)
+    # gcc
+    library_names_spec='$libname.dll.a'
+    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+    postinstall_cmds='base_file=`basename \$file`~
+      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
+      dldir=$destdir/`dirname \$dlpath`~
+      test -d \$dldir || mkdir -p \$dldir~
+      $install_prog $dir/$dlname \$dldir/$dlname~
+      chmod a+x \$dldir/$dlname~
+      if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+        eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+      fi'
+    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+      dlpath=$dir/\$dldll~
+       $RM \$dlpath'
+    shlibpath_overrides_runpath=yes
+
+    case $host_os in
+    cygwin*)
+      # Cygwin DLLs use 'cyg' prefix rather than 'lib'
+      soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
+
+      sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"
+      ;;
+    mingw* | cegcc*)
+      # MinGW DLLs use traditional 'lib' prefix
+      soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
+      ;;
+    pw32*)
+      # pw32 DLLs use 'pw' prefix rather than 'lib'
+      library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
+      ;;
+    esac
+    dynamic_linker='Win32 ld.exe'
+    ;;
+
+  *,cl*)
+    # Native MSVC
+    libname_spec='$name'
+    soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
+    library_names_spec='$libname.dll.lib'
+
+    case $build_os in
+    mingw*)
+      sys_lib_search_path_spec=
+      lt_save_ifs=$IFS
+      IFS=';'
+      for lt_path in $LIB
+      do
+        IFS=$lt_save_ifs
+        # Let DOS variable expansion print the short 8.3 style file name.
+        lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
+        sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
+      done
+      IFS=$lt_save_ifs
+      # Convert to MSYS style.
+      sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'`
+      ;;
+    cygwin*)
+      # Convert to unix form, then to dos form, then back to unix form
+      # but this time dos style (no spaces!) so that the unix form looks
+      # like /cygdrive/c/PROGRA~1:/cygdr...
+      sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
+      sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
+      sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+      ;;
+    *)
+      sys_lib_search_path_spec=$LIB
+      if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then
+        # It is most probably a Windows format PATH.
+        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+      else
+        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+      fi
+      # FIXME: find the short name or the path components, as spaces are
+      # common. (e.g. "Program Files" -> "PROGRA~1")
+      ;;
+    esac
+
+    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+    postinstall_cmds='base_file=`basename \$file`~
+      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
+      dldir=$destdir/`dirname \$dlpath`~
+      test -d \$dldir || mkdir -p \$dldir~
+      $install_prog $dir/$dlname \$dldir/$dlname'
+    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+      dlpath=$dir/\$dldll~
+       $RM \$dlpath'
+    shlibpath_overrides_runpath=yes
+    dynamic_linker='Win32 link.exe'
+    ;;
+
+  *)
+    # Assume MSVC wrapper
+    library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib'
+    dynamic_linker='Win32 ld.exe'
+    ;;
+  esac
+  # FIXME: first we should search . and the directory the executable is in
+  shlibpath_var=PATH
+  ;;
+
+darwin* | rhapsody*)
+  dynamic_linker="$host_os dyld"
+  version_type=darwin
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$major$shared_ext $libname$shared_ext'
+  soname_spec='$libname$release$major$shared_ext'
+  shlibpath_overrides_runpath=yes
+  shlibpath_var=DYLD_LIBRARY_PATH
+  shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
+
+  sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"
+  sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib'
+  ;;
+
+dgux*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  ;;
+
+freebsd* | dragonfly*)
+  # DragonFly does not have aout.  When/if they implement a new
+  # versioning mechanism, adjust this.
+  if test -x /usr/bin/objformat; then
+    objformat=`/usr/bin/objformat`
+  else
+    case $host_os in
+    freebsd[23].*) objformat=aout ;;
+    *) objformat=elf ;;
+    esac
+  fi
+  version_type=freebsd-$objformat
+  case $version_type in
+    freebsd-elf*)
+      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+      soname_spec='$libname$release$shared_ext$major'
+      need_version=no
+      need_lib_prefix=no
+      ;;
+    freebsd-*)
+      library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+      need_version=yes
+      ;;
+  esac
+  shlibpath_var=LD_LIBRARY_PATH
+  case $host_os in
+  freebsd2.*)
+    shlibpath_overrides_runpath=yes
+    ;;
+  freebsd3.[01]* | freebsdelf3.[01]*)
+    shlibpath_overrides_runpath=yes
+    hardcode_into_libs=yes
+    ;;
+  freebsd3.[2-9]* | freebsdelf3.[2-9]* | \
+  freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1)
+    shlibpath_overrides_runpath=no
+    hardcode_into_libs=yes
+    ;;
+  *) # from 4.6 on, and DragonFly
+    shlibpath_overrides_runpath=yes
+    hardcode_into_libs=yes
+    ;;
+  esac
+  ;;
+
+haiku*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  dynamic_linker="$host_os runtime_loader"
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
+  hardcode_into_libs=yes
+  ;;
+
+hpux9* | hpux10* | hpux11*)
+  # Give a soname corresponding to the major version so that dld.sl refuses to
+  # link against other versions.
+  version_type=sunos
+  need_lib_prefix=no
+  need_version=no
+  case $host_cpu in
+  ia64*)
+    shrext_cmds='.so'
+    hardcode_into_libs=yes
+    dynamic_linker="$host_os dld.so"
+    shlibpath_var=LD_LIBRARY_PATH
+    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+    soname_spec='$libname$release$shared_ext$major'
+    if test 32 = "$HPUX_IA64_MODE"; then
+      sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
+      sys_lib_dlsearch_path_spec=/usr/lib/hpux32
+    else
+      sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
+      sys_lib_dlsearch_path_spec=/usr/lib/hpux64
+    fi
+    ;;
+  hppa*64*)
+    shrext_cmds='.sl'
+    hardcode_into_libs=yes
+    dynamic_linker="$host_os dld.sl"
+    shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH?
+    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+    soname_spec='$libname$release$shared_ext$major'
+    sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
+    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+    ;;
+  *)
+    shrext_cmds='.sl'
+    dynamic_linker="$host_os dld.sl"
+    shlibpath_var=SHLIB_PATH
+    shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+    soname_spec='$libname$release$shared_ext$major'
+    ;;
+  esac
+  # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
+  postinstall_cmds='chmod 555 $lib'
+  # or fails outright, so override atomically:
+  install_override_mode=555
+  ;;
+
+interix[3-9]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  ;;
+
+irix5* | irix6* | nonstopux*)
+  case $host_os in
+    nonstopux*) version_type=nonstopux ;;
+    *)
+	if test yes = "$lt_cv_prog_gnu_ld"; then
+		version_type=linux # correct to gnu/linux during the next big refactor
+	else
+		version_type=irix
+	fi ;;
+  esac
+  need_lib_prefix=no
+  need_version=no
+  soname_spec='$libname$release$shared_ext$major'
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext'
+  case $host_os in
+  irix5* | nonstopux*)
+    libsuff= shlibsuff=
+    ;;
+  *)
+    case $LD in # libtool.m4 will add one of these switches to LD
+    *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ")
+      libsuff= shlibsuff= libmagic=32-bit;;
+    *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ")
+      libsuff=32 shlibsuff=N32 libmagic=N32;;
+    *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ")
+      libsuff=64 shlibsuff=64 libmagic=64-bit;;
+    *) libsuff= shlibsuff= libmagic=never-match;;
+    esac
+    ;;
+  esac
+  shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+  shlibpath_overrides_runpath=no
+  sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff"
+  sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff"
+  hardcode_into_libs=yes
+  ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux*oldld* | linux*aout* | linux*coff*)
+  dynamic_linker=no
+  ;;
+
+linux*android*)
+  version_type=none # Android doesn't support versioned libraries.
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext'
+  soname_spec='$libname$release$shared_ext'
+  finish_cmds=
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+
+  # This implies no fast_install, which is unacceptable.
+  # Some rework will be needed to allow for fast_install
+  # before this can be enabled.
+  hardcode_into_libs=yes
+
+  dynamic_linker='Android linker'
+  # Don't embed -rpath directories since the linker doesn't support them.
+  hardcode_libdir_flag_spec='-L$libdir'
+  ;;
+
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+
+  # Some binutils ld are patched to set DT_RUNPATH
+  if test ${lt_cv_shlibpath_overrides_runpath+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_shlibpath_overrides_runpath=no
+    save_LDFLAGS=$LDFLAGS
+    save_libdir=$libdir
+    eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \
+	 LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\""
+    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"
+then :
+  if  ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null
+then :
+  lt_cv_shlibpath_overrides_runpath=yes
+fi
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext conftest.$ac_ext
+    LDFLAGS=$save_LDFLAGS
+    libdir=$save_libdir
+
+fi
+
+  shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
+
+  # This implies no fast_install, which is unacceptable.
+  # Some rework will be needed to allow for fast_install
+  # before this can be enabled.
+  hardcode_into_libs=yes
+
+  # Ideally, we could use ldconfig to report *all* directories which are
+  # searched for libraries; however, this is still not possible.  Aside from not
+  # being certain /sbin/ldconfig is available, the command
+  # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64,
+  # even though it is searched at run-time.  Try to make the best guess by
+  # appending ld.so.conf contents (and includes) to the search path.
+  if test -f /etc/ld.so.conf; then
+    lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[	 ]*hwcap[	 ]/d;s/[:,	]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
+    sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra"
+  fi
+
+  # We used to test for /lib/ld.so.1 and disable shared libraries on
+  # powerpc, because MkLinux only supported shared libraries with the
+  # GNU dynamic linker.  Since this was broken with cross compilers, since
+  # most powerpc-linux boxes support dynamic linking these days, and since
+  # people can always --disable-shared, the test was removed, and we
+  # assume the GNU/Linux dynamic linker is in use.
+  dynamic_linker='GNU/Linux ld.so'
+  ;;
+
+netbsd*)
+  version_type=sunos
+  need_lib_prefix=no
+  need_version=no
+  if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+    finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+    dynamic_linker='NetBSD (a.out) ld.so'
+  else
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+    soname_spec='$libname$release$shared_ext$major'
+    dynamic_linker='NetBSD ld.elf_so'
+  fi
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  ;;
+
+newsos6)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  ;;
+
+*nto* | *qnx*)
+  version_type=qnx
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  dynamic_linker='ldqnx.so'
+  ;;
+
+openbsd* | bitrig*)
+  version_type=sunos
+  sys_lib_dlsearch_path_spec=/usr/lib
+  need_lib_prefix=no
+  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
+    need_version=no
+  else
+    need_version=yes
+  fi
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  ;;
+
+os2*)
+  libname_spec='$name'
+  version_type=windows
+  shrext_cmds=.dll
+  need_version=no
+  need_lib_prefix=no
+  # OS/2 can only load a DLL with a base name of 8 characters or less.
+  soname_spec='`test -n "$os2dllname" && libname="$os2dllname";
+    v=$($ECHO $release$versuffix | tr -d .-);
+    n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _);
+    $ECHO $n$v`$shared_ext'
+  library_names_spec='${libname}_dll.$libext'
+  dynamic_linker='OS/2 ld.exe'
+  shlibpath_var=BEGINLIBPATH
+  sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+  postinstall_cmds='base_file=`basename \$file`~
+    dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~
+    dldir=$destdir/`dirname \$dlpath`~
+    test -d \$dldir || mkdir -p \$dldir~
+    $install_prog $dir/$dlname \$dldir/$dlname~
+    chmod a+x \$dldir/$dlname~
+    if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+      eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+    fi'
+  postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~
+    dlpath=$dir/\$dldll~
+    $RM \$dlpath'
+  ;;
+
+osf3* | osf4* | osf5*)
+  version_type=osf
+  need_lib_prefix=no
+  need_version=no
+  soname_spec='$libname$release$shared_ext$major'
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  shlibpath_var=LD_LIBRARY_PATH
+  sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+  ;;
+
+rdos*)
+  dynamic_linker=no
+  ;;
+
+solaris*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  # ldd complains unless libraries are executable
+  postinstall_cmds='chmod +x $lib'
+  ;;
+
+sunos4*)
+  version_type=sunos
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+  finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  if test yes = "$with_gnu_ld"; then
+    need_lib_prefix=no
+  fi
+  need_version=yes
+  ;;
+
+sysv4 | sysv4.3*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  case $host_vendor in
+    sni)
+      shlibpath_overrides_runpath=no
+      need_lib_prefix=no
+      runpath_var=LD_RUN_PATH
+      ;;
+    siemens)
+      need_lib_prefix=no
+      ;;
+    motorola)
+      need_lib_prefix=no
+      need_version=no
+      shlibpath_overrides_runpath=no
+      sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+      ;;
+  esac
+  ;;
+
+sysv4*MP*)
+  if test -d /usr/nec; then
+    version_type=linux # correct to gnu/linux during the next big refactor
+    library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext'
+    soname_spec='$libname$shared_ext.$major'
+    shlibpath_var=LD_LIBRARY_PATH
+  fi
+  ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+  version_type=sco
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  if test yes = "$with_gnu_ld"; then
+    sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
+  else
+    sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
+    case $host_os in
+      sco3.2v5*)
+        sys_lib_search_path_spec="$sys_lib_search_path_spec /lib"
+	;;
+    esac
+  fi
+  sys_lib_dlsearch_path_spec='/usr/lib'
+  ;;
+
+tpf*)
+  # TPF is a cross-target only.  Preferred cross-host = GNU/Linux.
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  ;;
+
+uts4*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  ;;
+
+*)
+  dynamic_linker=no
+  ;;
+esac
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5
+printf "%s\n" "$dynamic_linker" >&6; }
+test no = "$dynamic_linker" && can_build_shared=no
+
+variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+if test yes = "$GCC"; then
+  variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+fi
+
+if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then
+  sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec
+fi
+
+if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then
+  sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec
+fi
+
+# remember unaugmented sys_lib_dlsearch_path content for libtool script decls...
+configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec
+
+# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code
+func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH"
+
+# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool
+configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5
+printf %s "checking how to hardcode library paths into programs... " >&6; }
+hardcode_action=
+if test -n "$hardcode_libdir_flag_spec" ||
+   test -n "$runpath_var" ||
+   test yes = "$hardcode_automatic"; then
+
+  # We can hardcode non-existent directories.
+  if test no != "$hardcode_direct" &&
+     # If the only mechanism to avoid hardcoding is shlibpath_var, we
+     # have to relink; otherwise we might link with an installed library
+     # when we should be linking with a yet-to-be-installed one
+     ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, )" &&
+     test no != "$hardcode_minus_L"; then
+    # Linking always hardcodes the temporary library directory.
+    hardcode_action=relink
+  else
+    # We can link without hardcoding, and we can hardcode non-existent directories.
+    hardcode_action=immediate
+  fi
+else
+  # We cannot hardcode anything, or else we can only hardcode existing
+  # directories.
+  hardcode_action=unsupported
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5
+printf "%s\n" "$hardcode_action" >&6; }
+
+if test relink = "$hardcode_action" ||
+   test yes = "$inherit_rpath"; then
+  # Fast installation is not supported
+  enable_fast_install=no
+elif test yes = "$shlibpath_overrides_runpath" ||
+     test no = "$enable_shared"; then
+  # Fast installation is not necessary
+  enable_fast_install=needless
+fi
+
+
+
+
+
+
+  if test yes != "$enable_dlopen"; then
+  enable_dlopen=unknown
+  enable_dlopen_self=unknown
+  enable_dlopen_self_static=unknown
+else
+  lt_cv_dlopen=no
+  lt_cv_dlopen_libs=
+
+  case $host_os in
+  beos*)
+    lt_cv_dlopen=load_add_on
+    lt_cv_dlopen_libs=
+    lt_cv_dlopen_self=yes
+    ;;
+
+  mingw* | pw32* | cegcc*)
+    lt_cv_dlopen=LoadLibrary
+    lt_cv_dlopen_libs=
+    ;;
+
+  cygwin*)
+    lt_cv_dlopen=dlopen
+    lt_cv_dlopen_libs=
+    ;;
+
+  darwin*)
+    # if libdl is installed we need to link against it
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5
+printf %s "checking for dlopen in -ldl... " >&6; }
+if test ${ac_cv_lib_dl_dlopen+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldl  $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+char dlopen ();
+int
+main (void)
+{
+return dlopen ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"
+then :
+  ac_cv_lib_dl_dlopen=yes
+else $as_nop
+  ac_cv_lib_dl_dlopen=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5
+printf "%s\n" "$ac_cv_lib_dl_dlopen" >&6; }
+if test "x$ac_cv_lib_dl_dlopen" = xyes
+then :
+  lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl
+else $as_nop
+
+    lt_cv_dlopen=dyld
+    lt_cv_dlopen_libs=
+    lt_cv_dlopen_self=yes
+
+fi
+
+    ;;
+
+  tpf*)
+    # Don't try to run any link tests for TPF.  We know it's impossible
+    # because TPF is a cross-compiler, and we know how we open DSOs.
+    lt_cv_dlopen=dlopen
+    lt_cv_dlopen_libs=
+    lt_cv_dlopen_self=no
+    ;;
+
+  *)
+    ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load"
+if test "x$ac_cv_func_shl_load" = xyes
+then :
+  lt_cv_dlopen=shl_load
+else $as_nop
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5
+printf %s "checking for shl_load in -ldld... " >&6; }
+if test ${ac_cv_lib_dld_shl_load+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldld  $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+char shl_load ();
+int
+main (void)
+{
+return shl_load ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"
+then :
+  ac_cv_lib_dld_shl_load=yes
+else $as_nop
+  ac_cv_lib_dld_shl_load=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5
+printf "%s\n" "$ac_cv_lib_dld_shl_load" >&6; }
+if test "x$ac_cv_lib_dld_shl_load" = xyes
+then :
+  lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld
+else $as_nop
+  ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen"
+if test "x$ac_cv_func_dlopen" = xyes
+then :
+  lt_cv_dlopen=dlopen
+else $as_nop
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5
+printf %s "checking for dlopen in -ldl... " >&6; }
+if test ${ac_cv_lib_dl_dlopen+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldl  $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+char dlopen ();
+int
+main (void)
+{
+return dlopen ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"
+then :
+  ac_cv_lib_dl_dlopen=yes
+else $as_nop
+  ac_cv_lib_dl_dlopen=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5
+printf "%s\n" "$ac_cv_lib_dl_dlopen" >&6; }
+if test "x$ac_cv_lib_dl_dlopen" = xyes
+then :
+  lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl
+else $as_nop
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5
+printf %s "checking for dlopen in -lsvld... " >&6; }
+if test ${ac_cv_lib_svld_dlopen+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  ac_check_lib_save_LIBS=$LIBS
+LIBS="-lsvld  $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+char dlopen ();
+int
+main (void)
+{
+return dlopen ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"
+then :
+  ac_cv_lib_svld_dlopen=yes
+else $as_nop
+  ac_cv_lib_svld_dlopen=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5
+printf "%s\n" "$ac_cv_lib_svld_dlopen" >&6; }
+if test "x$ac_cv_lib_svld_dlopen" = xyes
+then :
+  lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld
+else $as_nop
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5
+printf %s "checking for dld_link in -ldld... " >&6; }
+if test ${ac_cv_lib_dld_dld_link+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldld  $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+char dld_link ();
+int
+main (void)
+{
+return dld_link ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"
+then :
+  ac_cv_lib_dld_dld_link=yes
+else $as_nop
+  ac_cv_lib_dld_dld_link=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5
+printf "%s\n" "$ac_cv_lib_dld_dld_link" >&6; }
+if test "x$ac_cv_lib_dld_dld_link" = xyes
+then :
+  lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+    ;;
+  esac
+
+  if test no = "$lt_cv_dlopen"; then
+    enable_dlopen=no
+  else
+    enable_dlopen=yes
+  fi
+
+  case $lt_cv_dlopen in
+  dlopen)
+    save_CPPFLAGS=$CPPFLAGS
+    test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+
+    save_LDFLAGS=$LDFLAGS
+    wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+
+    save_LIBS=$LIBS
+    LIBS="$lt_cv_dlopen_libs $LIBS"
+
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5
+printf %s "checking whether a program can dlopen itself... " >&6; }
+if test ${lt_cv_dlopen_self+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  	  if test yes = "$cross_compiling"; then :
+  lt_cv_dlopen_self=cross
+else
+  lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+  lt_status=$lt_dlunknown
+  cat > conftest.$ac_ext <<_LT_EOF
+#line $LINENO "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+#  define LT_DLGLOBAL		RTLD_GLOBAL
+#else
+#  ifdef DL_GLOBAL
+#    define LT_DLGLOBAL		DL_GLOBAL
+#  else
+#    define LT_DLGLOBAL		0
+#  endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+   find out it does not work on some platforms. */
+#ifndef LT_DLLAZY_OR_NOW
+#  ifdef RTLD_LAZY
+#    define LT_DLLAZY_OR_NOW		RTLD_LAZY
+#  else
+#    ifdef DL_LAZY
+#      define LT_DLLAZY_OR_NOW		DL_LAZY
+#    else
+#      ifdef RTLD_NOW
+#        define LT_DLLAZY_OR_NOW	RTLD_NOW
+#      else
+#        ifdef DL_NOW
+#          define LT_DLLAZY_OR_NOW	DL_NOW
+#        else
+#          define LT_DLLAZY_OR_NOW	0
+#        endif
+#      endif
+#    endif
+#  endif
+#endif
+
+/* When -fvisibility=hidden is used, assume the code has been annotated
+   correspondingly for the symbols needed.  */
+#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
+int main ()
+{
+  void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+  int status = $lt_dlunknown;
+
+  if (self)
+    {
+      if (dlsym (self,"fnord"))       status = $lt_dlno_uscore;
+      else
+        {
+	  if (dlsym( self,"_fnord"))  status = $lt_dlneed_uscore;
+          else puts (dlerror ());
+	}
+      /* dlclose (self); */
+    }
+  else
+    puts (dlerror ());
+
+  return status;
+}
+_LT_EOF
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
+  (eval $ac_link) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then
+    (./conftest; exit; ) >&5 2>/dev/null
+    lt_status=$?
+    case x$lt_status in
+      x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;;
+      x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;;
+      x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;;
+    esac
+  else :
+    # compilation failed
+    lt_cv_dlopen_self=no
+  fi
+fi
+rm -fr conftest*
+
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5
+printf "%s\n" "$lt_cv_dlopen_self" >&6; }
+
+    if test yes = "$lt_cv_dlopen_self"; then
+      wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\"
+      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5
+printf %s "checking whether a statically linked program can dlopen itself... " >&6; }
+if test ${lt_cv_dlopen_self_static+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  	  if test yes = "$cross_compiling"; then :
+  lt_cv_dlopen_self_static=cross
+else
+  lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+  lt_status=$lt_dlunknown
+  cat > conftest.$ac_ext <<_LT_EOF
+#line $LINENO "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+#  define LT_DLGLOBAL		RTLD_GLOBAL
+#else
+#  ifdef DL_GLOBAL
+#    define LT_DLGLOBAL		DL_GLOBAL
+#  else
+#    define LT_DLGLOBAL		0
+#  endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+   find out it does not work on some platforms. */
+#ifndef LT_DLLAZY_OR_NOW
+#  ifdef RTLD_LAZY
+#    define LT_DLLAZY_OR_NOW		RTLD_LAZY
+#  else
+#    ifdef DL_LAZY
+#      define LT_DLLAZY_OR_NOW		DL_LAZY
+#    else
+#      ifdef RTLD_NOW
+#        define LT_DLLAZY_OR_NOW	RTLD_NOW
+#      else
+#        ifdef DL_NOW
+#          define LT_DLLAZY_OR_NOW	DL_NOW
+#        else
+#          define LT_DLLAZY_OR_NOW	0
+#        endif
+#      endif
+#    endif
+#  endif
+#endif
+
+/* When -fvisibility=hidden is used, assume the code has been annotated
+   correspondingly for the symbols needed.  */
+#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
+int main ()
+{
+  void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+  int status = $lt_dlunknown;
+
+  if (self)
+    {
+      if (dlsym (self,"fnord"))       status = $lt_dlno_uscore;
+      else
+        {
+	  if (dlsym( self,"_fnord"))  status = $lt_dlneed_uscore;
+          else puts (dlerror ());
+	}
+      /* dlclose (self); */
+    }
+  else
+    puts (dlerror ());
+
+  return status;
+}
+_LT_EOF
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
+  (eval $ac_link) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then
+    (./conftest; exit; ) >&5 2>/dev/null
+    lt_status=$?
+    case x$lt_status in
+      x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;;
+      x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;;
+      x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;;
+    esac
+  else :
+    # compilation failed
+    lt_cv_dlopen_self_static=no
+  fi
+fi
+rm -fr conftest*
+
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5
+printf "%s\n" "$lt_cv_dlopen_self_static" >&6; }
+    fi
+
+    CPPFLAGS=$save_CPPFLAGS
+    LDFLAGS=$save_LDFLAGS
+    LIBS=$save_LIBS
+    ;;
+  esac
+
+  case $lt_cv_dlopen_self in
+  yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
+  *) enable_dlopen_self=unknown ;;
+  esac
+
+  case $lt_cv_dlopen_self_static in
+  yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
+  *) enable_dlopen_self_static=unknown ;;
+  esac
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+striplib=
+old_striplib=
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5
+printf %s "checking whether stripping libraries is possible... " >&6; }
+if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then
+  test -z "$old_striplib" && old_striplib="$STRIP --strip-debug"
+  test -z "$striplib" && striplib="$STRIP --strip-unneeded"
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+printf "%s\n" "yes" >&6; }
+else
+# FIXME - insert some real tests, host_os isn't really good enough
+  case $host_os in
+  darwin*)
+    if test -n "$STRIP"; then
+      striplib="$STRIP -x"
+      old_striplib="$STRIP -S"
+      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+printf "%s\n" "yes" >&6; }
+    else
+      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+    fi
+    ;;
+  *)
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+    ;;
+  esac
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+  # Report what library types will actually be built
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5
+printf %s "checking if libtool supports shared libraries... " >&6; }
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5
+printf "%s\n" "$can_build_shared" >&6; }
+
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5
+printf %s "checking whether to build shared libraries... " >&6; }
+  test no = "$can_build_shared" && enable_shared=no
+
+  # On AIX, shared libraries and static libraries use the same namespace, and
+  # are all built from PIC.
+  case $host_os in
+  aix3*)
+    test yes = "$enable_shared" && enable_static=no
+    if test -n "$RANLIB"; then
+      archive_cmds="$archive_cmds~\$RANLIB \$lib"
+      postinstall_cmds='$RANLIB $lib'
+    fi
+    ;;
+
+  aix[4-9]*)
+    if test ia64 != "$host_cpu"; then
+      case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in
+      yes,aix,yes) ;;			# shared object as lib.so file only
+      yes,svr4,*) ;;			# shared object as lib.so archive member only
+      yes,*) enable_static=no ;;	# shared object in lib.a archive as well
+      esac
+    fi
+    ;;
+  esac
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5
+printf "%s\n" "$enable_shared" >&6; }
+
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5
+printf %s "checking whether to build static libraries... " >&6; }
+  # Make sure either enable_shared or enable_static is yes.
+  test yes = "$enable_shared" || enable_static=yes
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5
+printf "%s\n" "$enable_static" >&6; }
+
+
+
+
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+CC=$lt_save_CC
+
+      if test -n "$CXX" && ( test no != "$CXX" &&
+    ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) ||
+    (test g++ != "$CXX"))); then
+  ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5
+printf %s "checking how to run the C++ preprocessor... " >&6; }
+if test -z "$CXXCPP"; then
+  if test ${ac_cv_prog_CXXCPP+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+      # Double quotes because $CXX needs to be expanded
+    for CXXCPP in "$CXX -E" cpp /lib/cpp
+    do
+      ac_preproc_ok=false
+for ac_cxx_preproc_warn_flag in '' yes
+do
+  # Use a header file that comes with gcc, so configuring glibc
+  # with a fresh cross-compiler works.
+  # On the NeXT, cc -E runs the code through the compiler's parser,
+  # not just through cpp. "Syntax error" is here to catch this case.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <limits.h>
+		     Syntax error
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"
+then :
+
+else $as_nop
+  # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+  # OK, works on sane cases.  Now check whether nonexistent headers
+  # can be detected and how.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"
+then :
+  # Broken: success on invalid input.
+continue
+else $as_nop
+  # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok
+then :
+  break
+fi
+
+    done
+    ac_cv_prog_CXXCPP=$CXXCPP
+
+fi
+  CXXCPP=$ac_cv_prog_CXXCPP
+else
+  ac_cv_prog_CXXCPP=$CXXCPP
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5
+printf "%s\n" "$CXXCPP" >&6; }
+ac_preproc_ok=false
+for ac_cxx_preproc_warn_flag in '' yes
+do
+  # Use a header file that comes with gcc, so configuring glibc
+  # with a fresh cross-compiler works.
+  # On the NeXT, cc -E runs the code through the compiler's parser,
+  # not just through cpp. "Syntax error" is here to catch this case.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <limits.h>
+		     Syntax error
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"
+then :
+
+else $as_nop
+  # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+  # OK, works on sane cases.  Now check whether nonexistent headers
+  # can be detected and how.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"
+then :
+  # Broken: success on invalid input.
+continue
+else $as_nop
+  # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok
+then :
+
+else $as_nop
+  { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+else
+  _lt_caught_CXX_error=yes
+fi
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+archive_cmds_need_lc_CXX=no
+allow_undefined_flag_CXX=
+always_export_symbols_CXX=no
+archive_expsym_cmds_CXX=
+compiler_needs_object_CXX=no
+export_dynamic_flag_spec_CXX=
+hardcode_direct_CXX=no
+hardcode_direct_absolute_CXX=no
+hardcode_libdir_flag_spec_CXX=
+hardcode_libdir_separator_CXX=
+hardcode_minus_L_CXX=no
+hardcode_shlibpath_var_CXX=unsupported
+hardcode_automatic_CXX=no
+inherit_rpath_CXX=no
+module_cmds_CXX=
+module_expsym_cmds_CXX=
+link_all_deplibs_CXX=unknown
+old_archive_cmds_CXX=$old_archive_cmds
+reload_flag_CXX=$reload_flag
+reload_cmds_CXX=$reload_cmds
+no_undefined_flag_CXX=
+whole_archive_flag_spec_CXX=
+enable_shared_with_static_runtimes_CXX=no
+
+# Source file extension for C++ test sources.
+ac_ext=cpp
+
+# Object file extension for compiled C++ test sources.
+objext=o
+objext_CXX=$objext
+
+# No sense in running all these tests if we already determined that
+# the CXX compiler isn't working.  Some variables (like enable_shared)
+# are currently assumed to apply to all compilers on this platform,
+# and will be corrupted by setting them based on a non-working compiler.
+if test yes != "$_lt_caught_CXX_error"; then
+  # Code to be used in simple compile tests
+  lt_simple_compile_test_code="int some_variable = 0;"
+
+  # Code to be used in simple link tests
+  lt_simple_link_test_code='int main(int, char *[]) { return(0); }'
+
+  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+
+
+
+
+
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+
+
+  # save warnings/boilerplate of simple test code
+  ac_outfile=conftest.$ac_objext
+echo "$lt_simple_compile_test_code" >conftest.$ac_ext
+eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_compiler_boilerplate=`cat conftest.err`
+$RM conftest*
+
+  ac_outfile=conftest.$ac_objext
+echo "$lt_simple_link_test_code" >conftest.$ac_ext
+eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_linker_boilerplate=`cat conftest.err`
+$RM -r conftest*
+
+
+  # Allow CC to be a program name with arguments.
+  lt_save_CC=$CC
+  lt_save_CFLAGS=$CFLAGS
+  lt_save_LD=$LD
+  lt_save_GCC=$GCC
+  GCC=$GXX
+  lt_save_with_gnu_ld=$with_gnu_ld
+  lt_save_path_LD=$lt_cv_path_LD
+  if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then
+    lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx
+  else
+    $as_unset lt_cv_prog_gnu_ld
+  fi
+  if test -n "${lt_cv_path_LDCXX+set}"; then
+    lt_cv_path_LD=$lt_cv_path_LDCXX
+  else
+    $as_unset lt_cv_path_LD
+  fi
+  test -z "${LDCXX+set}" || LD=$LDCXX
+  CC=${CXX-"c++"}
+  CFLAGS=$CXXFLAGS
+  compiler=$CC
+  compiler_CXX=$CC
+  func_cc_basename $compiler
+cc_basename=$func_cc_basename_result
+
+
+  if test -n "$compiler"; then
+    # We don't want -fno-exception when compiling C++ code, so set the
+    # no_builtin_flag separately
+    if test yes = "$GXX"; then
+      lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin'
+    else
+      lt_prog_compiler_no_builtin_flag_CXX=
+    fi
+
+    if test yes = "$GXX"; then
+      # Set up default GNU C++ configuration
+
+
+
+# Check whether --with-gnu-ld was given.
+if test ${with_gnu_ld+y}
+then :
+  withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes
+else $as_nop
+  with_gnu_ld=no
+fi
+
+ac_prog=ld
+if test yes = "$GCC"; then
+  # Check if gcc -print-prog-name=ld gives a path.
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
+printf %s "checking for ld used by $CC... " >&6; }
+  case $host in
+  *-*-mingw*)
+    # gcc leaves a trailing carriage return, which upsets mingw
+    ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+  *)
+    ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+  esac
+  case $ac_prog in
+    # Accept absolute paths.
+    [\\/]* | ?:[\\/]*)
+      re_direlt='/[^/][^/]*/\.\./'
+      # Canonicalize the pathname of ld
+      ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+      while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+	ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+      done
+      test -z "$LD" && LD=$ac_prog
+      ;;
+  "")
+    # If it fails, then pretend we aren't using GCC.
+    ac_prog=ld
+    ;;
+  *)
+    # If it is relative, then search for the first ld in PATH.
+    with_gnu_ld=unknown
+    ;;
+  esac
+elif test yes = "$with_gnu_ld"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
+printf %s "checking for GNU ld... " >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
+printf %s "checking for non-GNU ld... " >&6; }
+fi
+if test ${lt_cv_path_LD+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if test -z "$LD"; then
+  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
+  for ac_dir in $PATH; do
+    IFS=$lt_save_ifs
+    test -z "$ac_dir" && ac_dir=.
+    if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+      lt_cv_path_LD=$ac_dir/$ac_prog
+      # Check to see if the program is GNU ld.  I'd rather use --version,
+      # but apparently some variants of GNU ld only accept -v.
+      # Break only if it was the GNU/non-GNU ld that we prefer.
+      case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+      *GNU* | *'with BFD'*)
+	test no != "$with_gnu_ld" && break
+	;;
+      *)
+	test yes != "$with_gnu_ld" && break
+	;;
+      esac
+    fi
+  done
+  IFS=$lt_save_ifs
+else
+  lt_cv_path_LD=$LD # Let the user override the test with a path.
+fi
+fi
+
+LD=$lt_cv_path_LD
+if test -n "$LD"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
+printf "%s\n" "$LD" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
+printf %s "checking if the linker ($LD) is GNU ld... " >&6; }
+if test ${lt_cv_prog_gnu_ld+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  # I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+  lt_cv_prog_gnu_ld=yes
+  ;;
+*)
+  lt_cv_prog_gnu_ld=no
+  ;;
+esac
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5
+printf "%s\n" "$lt_cv_prog_gnu_ld" >&6; }
+with_gnu_ld=$lt_cv_prog_gnu_ld
+
+
+
+
+
+
+
+      # Check if GNU C++ uses GNU ld as the underlying linker, since the
+      # archiving commands below assume that GNU ld is being used.
+      if test yes = "$with_gnu_ld"; then
+        archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+        archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+
+        hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
+        export_dynamic_flag_spec_CXX='$wl--export-dynamic'
+
+        # If archive_cmds runs LD, not CC, wlarc should be empty
+        # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to
+        #     investigate it a little bit more. (MM)
+        wlarc='$wl'
+
+        # ancient GNU ld didn't support --whole-archive et al.
+        if eval "`$CC -print-prog-name=ld` --help 2>&1" |
+	  $GREP 'no-whole-archive' > /dev/null; then
+          whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
+        else
+          whole_archive_flag_spec_CXX=
+        fi
+      else
+        with_gnu_ld=no
+        wlarc=
+
+        # A generic and very simple default shared library creation
+        # command for GNU C++ for the case where it uses the native
+        # linker, instead of GNU ld.  If possible, this setting should
+        # be overridden to take advantage of the native linker features on
+        # the platform it is being used on.
+        archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+      fi
+
+      # Commands to make compiler produce verbose output that lists
+      # what "hidden" libraries, object files and flags are used when
+      # linking a shared library.
+      output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+
+    else
+      GXX=no
+      with_gnu_ld=no
+      wlarc=
+    fi
+
+    # PORTME: fill in a description of your system's C++ link characteristics
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+    ld_shlibs_CXX=yes
+    case $host_os in
+      aix3*)
+        # FIXME: insert proper C++ library support
+        ld_shlibs_CXX=no
+        ;;
+      aix[4-9]*)
+        if test ia64 = "$host_cpu"; then
+          # On IA64, the linker does run time linking by default, so we don't
+          # have to do anything special.
+          aix_use_runtimelinking=no
+          exp_sym_flag='-Bexport'
+          no_entry_flag=
+        else
+          aix_use_runtimelinking=no
+
+          # Test if we are trying to use run time linking or normal
+          # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+          # have runtime linking enabled, and use it for executables.
+          # For shared libraries, we enable/disable runtime linking
+          # depending on the kind of the shared library created -
+          # when "with_aix_soname,aix_use_runtimelinking" is:
+          # "aix,no"   lib.a(lib.so.V) shared, rtl:no,  for executables
+          # "aix,yes"  lib.so          shared, rtl:yes, for executables
+          #            lib.a           static archive
+          # "both,no"  lib.so.V(shr.o) shared, rtl:yes
+          #            lib.a(lib.so.V) shared, rtl:no,  for executables
+          # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables
+          #            lib.a(lib.so.V) shared, rtl:no
+          # "svr4,*"   lib.so.V(shr.o) shared, rtl:yes, for executables
+          #            lib.a           static archive
+          case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*)
+	    for ld_flag in $LDFLAGS; do
+	      case $ld_flag in
+	      *-brtl*)
+	        aix_use_runtimelinking=yes
+	        break
+	        ;;
+	      esac
+	    done
+	    if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then
+	      # With aix-soname=svr4, we create the lib.so.V shared archives only,
+	      # so we don't have lib.a shared libs to link our executables.
+	      # We have to force runtime linking in this case.
+	      aix_use_runtimelinking=yes
+	      LDFLAGS="$LDFLAGS -Wl,-brtl"
+	    fi
+	    ;;
+          esac
+
+          exp_sym_flag='-bexport'
+          no_entry_flag='-bnoentry'
+        fi
+
+        # When large executables or shared objects are built, AIX ld can
+        # have problems creating the table of contents.  If linking a library
+        # or program results in "error TOC overflow" add -mminimal-toc to
+        # CXXFLAGS/CFLAGS for g++/gcc.  In the cases where that is not
+        # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+        archive_cmds_CXX=''
+        hardcode_direct_CXX=yes
+        hardcode_direct_absolute_CXX=yes
+        hardcode_libdir_separator_CXX=':'
+        link_all_deplibs_CXX=yes
+        file_list_spec_CXX='$wl-f,'
+        case $with_aix_soname,$aix_use_runtimelinking in
+        aix,*) ;;	# no import file
+        svr4,* | *,yes) # use import file
+          # The Import File defines what to hardcode.
+          hardcode_direct_CXX=no
+          hardcode_direct_absolute_CXX=no
+          ;;
+        esac
+
+        if test yes = "$GXX"; then
+          case $host_os in aix4.[012]|aix4.[012].*)
+          # We only want to do this on AIX 4.2 and lower; the check
+          # below for broken collect2 doesn't work under 4.3+
+	  collect2name=`$CC -print-prog-name=collect2`
+	  if test -f "$collect2name" &&
+	     strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+	  then
+	    # We have reworked collect2
+	    :
+	  else
+	    # We have old collect2
+	    hardcode_direct_CXX=unsupported
+	    # It fails to find uninstalled libraries when the uninstalled
+	    # path is not listed in the libpath.  Setting hardcode_minus_L
+	    # to unsupported forces relinking
+	    hardcode_minus_L_CXX=yes
+	    hardcode_libdir_flag_spec_CXX='-L$libdir'
+	    hardcode_libdir_separator_CXX=
+	  fi
+          esac
+          shared_flag='-shared'
+	  if test yes = "$aix_use_runtimelinking"; then
+	    shared_flag=$shared_flag' $wl-G'
+	  fi
+	  # Need to ensure runtime linking is disabled for the traditional
+	  # shared library, or the linker may eventually find shared libraries
+	  # /with/ Import File - we do not want to mix them.
+	  shared_flag_aix='-shared'
+	  shared_flag_svr4='-shared $wl-G'
+        else
+          # not using gcc
+          if test ia64 = "$host_cpu"; then
+	  # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+	  # chokes on -Wl,-G. The following line is correct:
+	  shared_flag='-G'
+          else
+	    if test yes = "$aix_use_runtimelinking"; then
+	      shared_flag='$wl-G'
+	    else
+	      shared_flag='$wl-bM:SRE'
+	    fi
+	    shared_flag_aix='$wl-bM:SRE'
+	    shared_flag_svr4='$wl-G'
+          fi
+        fi
+
+        export_dynamic_flag_spec_CXX='$wl-bexpall'
+        # It seems that -bexpall does not export symbols beginning with
+        # underscore (_), so it is better to generate a list of symbols to
+	# export.
+        always_export_symbols_CXX=yes
+	if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then
+          # Warning - without using the other runtime loading flags (-brtl),
+          # -berok will link without error, but may produce a broken library.
+          # The "-G" linker flag allows undefined symbols.
+          no_undefined_flag_CXX='-bernotok'
+          # Determine the default libpath from the value encoded in an empty
+          # executable.
+          if test set = "${lt_cv_aix_libpath+set}"; then
+  aix_libpath=$lt_cv_aix_libpath
+else
+  if test ${lt_cv_aix_libpath__CXX+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"
+then :
+
+  lt_aix_libpath_sed='
+      /Import File Strings/,/^$/ {
+	  /^0/ {
+	      s/^0  *\([^ ]*\) *$/\1/
+	      p
+	  }
+      }'
+  lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  # Check for a 64-bit object if we didn't find anything.
+  if test -z "$lt_cv_aix_libpath__CXX"; then
+    lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  fi
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext conftest.$ac_ext
+  if test -z "$lt_cv_aix_libpath__CXX"; then
+    lt_cv_aix_libpath__CXX=/usr/lib:/lib
+  fi
+
+fi
+
+  aix_libpath=$lt_cv_aix_libpath__CXX
+fi
+
+          hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath"
+
+          archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag
+        else
+          if test ia64 = "$host_cpu"; then
+	    hardcode_libdir_flag_spec_CXX='$wl-R $libdir:/usr/lib:/lib'
+	    allow_undefined_flag_CXX="-z nodefs"
+	    archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols"
+          else
+	    # Determine the default libpath from the value encoded in an
+	    # empty executable.
+	    if test set = "${lt_cv_aix_libpath+set}"; then
+  aix_libpath=$lt_cv_aix_libpath
+else
+  if test ${lt_cv_aix_libpath__CXX+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"
+then :
+
+  lt_aix_libpath_sed='
+      /Import File Strings/,/^$/ {
+	  /^0/ {
+	      s/^0  *\([^ ]*\) *$/\1/
+	      p
+	  }
+      }'
+  lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  # Check for a 64-bit object if we didn't find anything.
+  if test -z "$lt_cv_aix_libpath__CXX"; then
+    lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  fi
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext conftest.$ac_ext
+  if test -z "$lt_cv_aix_libpath__CXX"; then
+    lt_cv_aix_libpath__CXX=/usr/lib:/lib
+  fi
+
+fi
+
+  aix_libpath=$lt_cv_aix_libpath__CXX
+fi
+
+	    hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath"
+	    # Warning - without using the other run time loading flags,
+	    # -berok will link without error, but may produce a broken library.
+	    no_undefined_flag_CXX=' $wl-bernotok'
+	    allow_undefined_flag_CXX=' $wl-berok'
+	    if test yes = "$with_gnu_ld"; then
+	      # We only use this code for GNU lds that support --whole-archive.
+	      whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive'
+	    else
+	      # Exported symbols can be pulled into shared objects from archives
+	      whole_archive_flag_spec_CXX='$convenience'
+	    fi
+	    archive_cmds_need_lc_CXX=yes
+	    archive_expsym_cmds_CXX='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d'
+	    # -brtl affects multiple linker settings, -berok does not and is overridden later
+	    compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`'
+	    if test svr4 != "$with_aix_soname"; then
+	      # This is similar to how AIX traditionally builds its shared
+	      # libraries. Need -bnortl late, we may have -brtl in LDFLAGS.
+	      archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname'
+	    fi
+	    if test aix != "$with_aix_soname"; then
+	      archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp'
+	    else
+	      # used by -dlpreopen to get the symbols
+	      archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$MV  $output_objdir/$realname.d/$soname $output_objdir'
+	    fi
+	    archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$RM -r $output_objdir/$realname.d'
+          fi
+        fi
+        ;;
+
+      beos*)
+	if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	  allow_undefined_flag_CXX=unsupported
+	  # Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+	  # support --undefined.  This deserves some investigation.  FIXME
+	  archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+	else
+	  ld_shlibs_CXX=no
+	fi
+	;;
+
+      chorus*)
+        case $cc_basename in
+          *)
+	  # FIXME: insert proper C++ library support
+	  ld_shlibs_CXX=no
+	  ;;
+        esac
+        ;;
+
+      cygwin* | mingw* | pw32* | cegcc*)
+	case $GXX,$cc_basename in
+	,cl* | no,cl*)
+	  # Native MSVC
+	  # hardcode_libdir_flag_spec is actually meaningless, as there is
+	  # no search path for DLLs.
+	  hardcode_libdir_flag_spec_CXX=' '
+	  allow_undefined_flag_CXX=unsupported
+	  always_export_symbols_CXX=yes
+	  file_list_spec_CXX='@'
+	  # Tell ltmain to make .lib files, not .a files.
+	  libext=lib
+	  # Tell ltmain to make .dll files, not .so files.
+	  shrext_cmds=.dll
+	  # FIXME: Setting linknames here is a bad hack.
+	  archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames='
+	  archive_expsym_cmds_CXX='if   test DEF = "`$SED -n     -e '\''s/^[	 ]*//'\''     -e '\''/^\(;.*\)*$/d'\''     -e '\''s/^\(EXPORTS\|LIBRARY\)\([	 ].*\)*$/DEF/p'\''     -e q     $export_symbols`" ; then
+              cp "$export_symbols" "$output_objdir/$soname.def";
+              echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp";
+            else
+              $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp;
+            fi~
+            $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+            linknames='
+	  # The linker will not automatically build a static lib if we build a DLL.
+	  # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true'
+	  enable_shared_with_static_runtimes_CXX=yes
+	  # Don't use ranlib
+	  old_postinstall_cmds_CXX='chmod 644 $oldlib'
+	  postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~
+            lt_tool_outputfile="@TOOL_OUTPUT@"~
+            case $lt_outputfile in
+              *.exe|*.EXE) ;;
+              *)
+                lt_outputfile=$lt_outputfile.exe
+                lt_tool_outputfile=$lt_tool_outputfile.exe
+                ;;
+            esac~
+            func_to_tool_file "$lt_outputfile"~
+            if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then
+              $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+              $RM "$lt_outputfile.manifest";
+            fi'
+	  ;;
+	*)
+	  # g++
+	  # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless,
+	  # as there is no search path for DLLs.
+	  hardcode_libdir_flag_spec_CXX='-L$libdir'
+	  export_dynamic_flag_spec_CXX='$wl--export-all-symbols'
+	  allow_undefined_flag_CXX=unsupported
+	  always_export_symbols_CXX=no
+	  enable_shared_with_static_runtimes_CXX=yes
+
+	  if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+	    archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+	    # If the export-symbols file already is a .def file, use it as
+	    # is; otherwise, prepend EXPORTS...
+	    archive_expsym_cmds_CXX='if   test DEF = "`$SED -n     -e '\''s/^[	 ]*//'\''     -e '\''/^\(;.*\)*$/d'\''     -e '\''s/^\(EXPORTS\|LIBRARY\)\([	 ].*\)*$/DEF/p'\''     -e q     $export_symbols`" ; then
+              cp $export_symbols $output_objdir/$soname.def;
+            else
+              echo EXPORTS > $output_objdir/$soname.def;
+              cat $export_symbols >> $output_objdir/$soname.def;
+            fi~
+            $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+	  else
+	    ld_shlibs_CXX=no
+	  fi
+	  ;;
+	esac
+	;;
+      darwin* | rhapsody*)
+
+
+  archive_cmds_need_lc_CXX=no
+  hardcode_direct_CXX=no
+  hardcode_automatic_CXX=yes
+  hardcode_shlibpath_var_CXX=unsupported
+  if test yes = "$lt_cv_ld_force_load"; then
+    whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+
+  else
+    whole_archive_flag_spec_CXX=''
+  fi
+  link_all_deplibs_CXX=yes
+  allow_undefined_flag_CXX=$_lt_dar_allow_undefined
+  case $cc_basename in
+     ifort*|nagfor*) _lt_dar_can_shared=yes ;;
+     *) _lt_dar_can_shared=$GCC ;;
+  esac
+  if test yes = "$_lt_dar_can_shared"; then
+    output_verbose_link_cmd=func_echo_all
+    archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil"
+    module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil"
+    archive_expsym_cmds_CXX="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil"
+    module_expsym_cmds_CXX="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil"
+       if test yes != "$lt_cv_apple_cc_single_mod"; then
+      archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil"
+      archive_expsym_cmds_CXX="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil"
+    fi
+
+  else
+  ld_shlibs_CXX=no
+  fi
+
+	;;
+
+      os2*)
+	hardcode_libdir_flag_spec_CXX='-L$libdir'
+	hardcode_minus_L_CXX=yes
+	allow_undefined_flag_CXX=unsupported
+	shrext_cmds=.dll
+	archive_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+	  $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+	  $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+	  $ECHO EXPORTS >> $output_objdir/$libname.def~
+	  emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
+	  $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+	  emximp -o $lib $output_objdir/$libname.def'
+	archive_expsym_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+	  $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+	  $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+	  $ECHO EXPORTS >> $output_objdir/$libname.def~
+	  prefix_cmds="$SED"~
+	  if test EXPORTS = "`$SED 1q $export_symbols`"; then
+	    prefix_cmds="$prefix_cmds -e 1d";
+	  fi~
+	  prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
+	  cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
+	  $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+	  emximp -o $lib $output_objdir/$libname.def'
+	old_archive_From_new_cmds_CXX='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
+	enable_shared_with_static_runtimes_CXX=yes
+	;;
+
+      dgux*)
+        case $cc_basename in
+          ec++*)
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+          ghcx*)
+	    # Green Hills C++ Compiler
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+          *)
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+        esac
+        ;;
+
+      freebsd2.*)
+        # C++ shared libraries reported to be fairly broken before
+	# switch to ELF
+        ld_shlibs_CXX=no
+        ;;
+
+      freebsd-elf*)
+        archive_cmds_need_lc_CXX=no
+        ;;
+
+      freebsd* | dragonfly*)
+        # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF
+        # conventions
+        ld_shlibs_CXX=yes
+        ;;
+
+      haiku*)
+        archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+        link_all_deplibs_CXX=yes
+        ;;
+
+      hpux9*)
+        hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir'
+        hardcode_libdir_separator_CXX=:
+        export_dynamic_flag_spec_CXX='$wl-E'
+        hardcode_direct_CXX=yes
+        hardcode_minus_L_CXX=yes # Not in the search PATH,
+				             # but as the default
+				             # location of the library.
+
+        case $cc_basename in
+          CC*)
+            # FIXME: insert proper C++ library support
+            ld_shlibs_CXX=no
+            ;;
+          aCC*)
+            archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
+            # Commands to make compiler produce verbose output that lists
+            # what "hidden" libraries, object files and flags are used when
+            # linking a shared library.
+            #
+            # There doesn't appear to be a way to prevent this compiler from
+            # explicitly linking system object files so we need to strip them
+            # from the output so that they don't get included in the library
+            # dependencies.
+            output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+            ;;
+          *)
+            if test yes = "$GXX"; then
+              archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
+            else
+              # FIXME: insert proper C++ library support
+              ld_shlibs_CXX=no
+            fi
+            ;;
+        esac
+        ;;
+
+      hpux10*|hpux11*)
+        if test no = "$with_gnu_ld"; then
+	  hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir'
+	  hardcode_libdir_separator_CXX=:
+
+          case $host_cpu in
+            hppa*64*|ia64*)
+              ;;
+            *)
+	      export_dynamic_flag_spec_CXX='$wl-E'
+              ;;
+          esac
+        fi
+        case $host_cpu in
+          hppa*64*|ia64*)
+            hardcode_direct_CXX=no
+            hardcode_shlibpath_var_CXX=no
+            ;;
+          *)
+            hardcode_direct_CXX=yes
+            hardcode_direct_absolute_CXX=yes
+            hardcode_minus_L_CXX=yes # Not in the search PATH,
+					         # but as the default
+					         # location of the library.
+            ;;
+        esac
+
+        case $cc_basename in
+          CC*)
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+          aCC*)
+	    case $host_cpu in
+	      hppa*64*)
+	        archive_cmds_CXX='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	        ;;
+	      ia64*)
+	        archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	        ;;
+	      *)
+	        archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	        ;;
+	    esac
+	    # Commands to make compiler produce verbose output that lists
+	    # what "hidden" libraries, object files and flags are used when
+	    # linking a shared library.
+	    #
+	    # There doesn't appear to be a way to prevent this compiler from
+	    # explicitly linking system object files so we need to strip them
+	    # from the output so that they don't get included in the library
+	    # dependencies.
+	    output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+	    ;;
+          *)
+	    if test yes = "$GXX"; then
+	      if test no = "$with_gnu_ld"; then
+	        case $host_cpu in
+	          hppa*64*)
+	            archive_cmds_CXX='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	            ;;
+	          ia64*)
+	            archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	            ;;
+	          *)
+	            archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	            ;;
+	        esac
+	      fi
+	    else
+	      # FIXME: insert proper C++ library support
+	      ld_shlibs_CXX=no
+	    fi
+	    ;;
+        esac
+        ;;
+
+      interix[3-9]*)
+	hardcode_direct_CXX=no
+	hardcode_shlibpath_var_CXX=no
+	hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir'
+	export_dynamic_flag_spec_CXX='$wl-E'
+	# Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+	# Instead, shared libraries are loaded at an image base (0x10000000 by
+	# default) and relocated if they conflict, which is a slow, very memory-
+	# consuming and fragmenting process.  To avoid this, we pick a random,
+	# 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+	# time.  Moving up from 0x10000000 also allows more sbrk(2) space.
+	archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+	archive_expsym_cmds_CXX='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+	;;
+      irix5* | irix6*)
+        case $cc_basename in
+          CC*)
+	    # SGI C++
+	    archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+
+	    # Archives containing C++ object files must be created using
+	    # "CC -ar", where "CC" is the IRIX C++ compiler.  This is
+	    # necessary to make sure instantiated templates are included
+	    # in the archive.
+	    old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs'
+	    ;;
+          *)
+	    if test yes = "$GXX"; then
+	      if test no = "$with_gnu_ld"; then
+	        archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+	      else
+	        archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib'
+	      fi
+	    fi
+	    link_all_deplibs_CXX=yes
+	    ;;
+        esac
+        hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
+        hardcode_libdir_separator_CXX=:
+        inherit_rpath_CXX=yes
+        ;;
+
+      linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+        case $cc_basename in
+          KCC*)
+	    # Kuck and Associates, Inc. (KAI) C++ Compiler
+
+	    # KCC will only create a shared library if the output file
+	    # ends with ".so" (or ".sl" for HP-UX), so rename the library
+	    # to its proper name (with version) after linking.
+	    archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+	    archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib'
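+	    # For illustration only: with lib=libfoo.so.1.2.3 the commands above
+	    # first link to templib=libfoo.so and then rename the result, roughly
+	    #   mv libfoo.so libfoo.so.1.2.3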
+	    # Commands to make compiler produce verbose output that lists
+	    # what "hidden" libraries, object files and flags are used when
+	    # linking a shared library.
+	    #
+	    # There doesn't appear to be a way to prevent this compiler from
+	    # explicitly linking system object files so we need to strip them
+	    # from the output so that they don't get included in the library
+	    # dependencies.
+	    output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+
+	    hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir'
+	    export_dynamic_flag_spec_CXX='$wl--export-dynamic'
+
+	    # Archives containing C++ object files must be created using
+	    # "CC -Bstatic", where "CC" is the KAI C++ compiler.
+	    old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs'
+	    ;;
+	  icpc* | ecpc* )
+	    # Intel C++
+	    with_gnu_ld=yes
+	    # versions 8.0 and above of icpc choke on multiply defined symbols
+	    # if we add $predep_objects and $postdep_objects; however, 7.1 and
+	    # earlier do not add the objects themselves.
+	    case `$CC -V 2>&1` in
+	      *"Version 7."*)
+	        archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+		archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+		;;
+	      *)  # Version 8.0 or newer
+	        tmp_idyn=
+	        case $host_cpu in
+		  ia64*) tmp_idyn=' -i_dynamic';;
+		esac
+	        archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+		archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+		;;
+	    esac
+	    archive_cmds_need_lc_CXX=no
+	    hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir'
+	    export_dynamic_flag_spec_CXX='$wl--export-dynamic'
+	    whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive'
+	    ;;
+          pgCC* | pgcpp*)
+            # Portland Group C++ compiler
+	    case `$CC -V` in
+	    *pgCC\ [1-5].* | *pgcpp\ [1-5].*)
+	      prelink_cmds_CXX='tpldir=Template.dir~
+               rm -rf $tpldir~
+               $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~
+               compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"'
+	      old_archive_cmds_CXX='tpldir=Template.dir~
+                rm -rf $tpldir~
+                $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~
+                $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~
+                $RANLIB $oldlib'
+	      archive_cmds_CXX='tpldir=Template.dir~
+                rm -rf $tpldir~
+                $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+                $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+	      archive_expsym_cmds_CXX='tpldir=Template.dir~
+                rm -rf $tpldir~
+                $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+                $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+	      ;;
+	    *) # Version 6 and above use weak symbols
+	      archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+	      archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+	      ;;
+	    esac
+
+	    hardcode_libdir_flag_spec_CXX='$wl--rpath $wl$libdir'
+	    export_dynamic_flag_spec_CXX='$wl--export-dynamic'
+	    whole_archive_flag_spec_CXX='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+            ;;
+	  cxx*)
+	    # Compaq C++
+	    archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+	    archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname  -o $lib $wl-retain-symbols-file $wl$export_symbols'
+
+	    runpath_var=LD_RUN_PATH
+	    hardcode_libdir_flag_spec_CXX='-rpath $libdir'
+	    hardcode_libdir_separator_CXX=:
+
+	    # Commands to make compiler produce verbose output that lists
+	    # what "hidden" libraries, object files and flags are used when
+	    # linking a shared library.
+	    #
+	    # There doesn't appear to be a way to prevent this compiler from
+	    # explicitly linking system object files so we need to strip them
+	    # from the output so that they don't get included in the library
+	    # dependencies.
+	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed'
+	    ;;
+	  xl* | mpixl* | bgxl*)
+	    # IBM XL 8.0 on PPC, with GNU ld
+	    hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
+	    export_dynamic_flag_spec_CXX='$wl--export-dynamic'
+	    archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+	    if test yes = "$supports_anon_versioning"; then
+	      archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~
+                cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+                echo "local: *; };" >> $output_objdir/$libname.ver~
+                $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib'
+	    fi
+	    ;;
+	  *)
+	    case `$CC -V 2>&1 | sed 5q` in
+	    *Sun\ C*)
+	      # Sun C++ 5.9
+	      no_undefined_flag_CXX=' -zdefs'
+	      archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	      archive_expsym_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols'
+	      hardcode_libdir_flag_spec_CXX='-R$libdir'
+	      whole_archive_flag_spec_CXX='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+	      compiler_needs_object_CXX=yes
+
+	      # Not sure whether something based on
+	      # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1
+	      # would be better.
+	      output_verbose_link_cmd='func_echo_all'
+
+	      # Archives containing C++ object files must be created using
+	      # "CC -xar", where "CC" is the Sun C++ compiler.  This is
+	      # necessary to make sure instantiated templates are included
+	      # in the archive.
+	      old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs'
+	      ;;
+	    esac
+	    ;;
+	esac
+	;;
+
+      lynxos*)
+        # FIXME: insert proper C++ library support
+	ld_shlibs_CXX=no
+	;;
+
+      m88k*)
+        # FIXME: insert proper C++ library support
+        ld_shlibs_CXX=no
+	;;
+
+      mvs*)
+        case $cc_basename in
+          cxx*)
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+	  *)
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+	esac
+	;;
+
+      netbsd*)
+        if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+	  archive_cmds_CXX='$LD -Bshareable  -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags'
+	  wlarc=
+	  hardcode_libdir_flag_spec_CXX='-R$libdir'
+	  hardcode_direct_CXX=yes
+	  hardcode_shlibpath_var_CXX=no
+	fi
+	# Work around some broken pre-1.5 toolchains
+	output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"'
+	;;
+
+      *nto* | *qnx*)
+        ld_shlibs_CXX=yes
+	;;
+
+      openbsd* | bitrig*)
+	if test -f /usr/libexec/ld.so; then
+	  hardcode_direct_CXX=yes
+	  hardcode_shlibpath_var_CXX=no
+	  hardcode_direct_absolute_CXX=yes
+	  archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+	  hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir'
+	  if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then
+	    archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib'
+	    export_dynamic_flag_spec_CXX='$wl-E'
+	    whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
+	  fi
+	  output_verbose_link_cmd=func_echo_all
+	else
+	  ld_shlibs_CXX=no
+	fi
+	;;
+
+      osf3* | osf4* | osf5*)
+        case $cc_basename in
+          KCC*)
+	    # Kuck and Associates, Inc. (KAI) C++ Compiler
+
+	    # KCC will only create a shared library if the output file
+	    # ends with ".so" (or ".sl" for HP-UX), so rename the library
+	    # to its proper name (with version) after linking.
+	    archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+
+	    hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir'
+	    hardcode_libdir_separator_CXX=:
+
+	    # Archives containing C++ object files must be created using
+	    # the KAI C++ compiler.
+	    case $host in
+	      osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;;
+	      *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;;
+	    esac
+	    ;;
+          RCC*)
+	    # Rational C++ 2.4.1
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+          cxx*)
+	    case $host in
+	      osf3*)
+	        allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*'
+	        archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+	        hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
+		;;
+	      *)
+	        allow_undefined_flag_CXX=' -expect_unresolved \*'
+	        archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+	        archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~
+                  echo "-hidden">> $lib.exp~
+                  $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp  `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~
+                  $RM $lib.exp'
+	        hardcode_libdir_flag_spec_CXX='-rpath $libdir'
+		;;
+	    esac
+
+	    hardcode_libdir_separator_CXX=:
+
+	    # Commands to make compiler produce verbose output that lists
+	    # what "hidden" libraries, object files and flags are used when
+	    # linking a shared library.
+	    #
+	    # There doesn't appear to be a way to prevent this compiler from
+	    # explicitly linking system object files so we need to strip them
+	    # from the output so that they don't get included in the library
+	    # dependencies.
+	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+	    ;;
+	  *)
+	    if test yes,no = "$GXX,$with_gnu_ld"; then
+	      allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*'
+	      case $host in
+	        osf3*)
+	          archive_cmds_CXX='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+		  ;;
+	        *)
+	          archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+		  ;;
+	      esac
+
+	      hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
+	      hardcode_libdir_separator_CXX=:
+
+	      # Commands to make compiler produce verbose output that lists
+	      # what "hidden" libraries, object files and flags are used when
+	      # linking a shared library.
+	      output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+
+	    else
+	      # FIXME: insert proper C++ library support
+	      ld_shlibs_CXX=no
+	    fi
+	    ;;
+        esac
+        ;;
+
+      psos*)
+        # FIXME: insert proper C++ library support
+        ld_shlibs_CXX=no
+        ;;
+
+      sunos4*)
+        case $cc_basename in
+          CC*)
+	    # Sun C++ 4.x
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+          lcc*)
+	    # Lucid
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+          *)
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+        esac
+        ;;
+
+      solaris*)
+        case $cc_basename in
+          CC* | sunCC*)
+	    # Sun C++ 4.2, 5.x and Centerline C++
+            archive_cmds_need_lc_CXX=yes
+	    no_undefined_flag_CXX=' -zdefs'
+	    archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	    archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+              $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+	    hardcode_libdir_flag_spec_CXX='-R$libdir'
+	    hardcode_shlibpath_var_CXX=no
+	    case $host_os in
+	      solaris2.[0-5] | solaris2.[0-5].*) ;;
+	      *)
+		# The compiler driver will combine and reorder linker options,
+		# but understands '-z linker_flag'.
+	        # Supported since Solaris 2.6 (maybe 2.5.1?)
+		whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract'
+	        ;;
+	    esac
+	    link_all_deplibs_CXX=yes
+
+	    output_verbose_link_cmd='func_echo_all'
+
+	    # Archives containing C++ object files must be created using
+	    # "CC -xar", where "CC" is the Sun C++ compiler.  This is
+	    # necessary to make sure instantiated templates are included
+	    # in the archive.
+	    old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs'
+	    ;;
+          gcx*)
+	    # Green Hills C++ Compiler
+	    archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
+
+	    # The C++ compiler must be used to create the archive.
+	    old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs'
+	    ;;
+          *)
+	    # GNU C++ compiler with Solaris linker
+	    if test yes,no = "$GXX,$with_gnu_ld"; then
+	      no_undefined_flag_CXX=' $wl-z ${wl}defs'
+	      if $CC --version | $GREP -v '^2\.7' > /dev/null; then
+	        archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
+	        archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+                  $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+	        # Commands to make compiler produce verbose output that lists
+	        # what "hidden" libraries, object files and flags are used when
+	        # linking a shared library.
+	        output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+	      else
+	        # g++ 2.7 appears to require '-G' NOT '-shared' on this
+	        # platform.
+	        archive_cmds_CXX='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
+	        archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+                  $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+	        # Commands to make compiler produce verbose output that lists
+	        # what "hidden" libraries, object files and flags are used when
+	        # linking a shared library.
+	        output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+	      fi
+
+	      hardcode_libdir_flag_spec_CXX='$wl-R $wl$libdir'
+	      case $host_os in
+		solaris2.[0-5] | solaris2.[0-5].*) ;;
+		*)
+		  whole_archive_flag_spec_CXX='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract'
+		  ;;
+	      esac
+	    fi
+	    ;;
+        esac
+        ;;
+
+    sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*)
+      no_undefined_flag_CXX='$wl-z,text'
+      archive_cmds_need_lc_CXX=no
+      hardcode_shlibpath_var_CXX=no
+      runpath_var='LD_RUN_PATH'
+
+      case $cc_basename in
+        CC*)
+	  archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	*)
+	  archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+      esac
+      ;;
+
+      sysv5* | sco3.2v5* | sco5v6*)
+	# Note: We CANNOT use -z defs as we might desire, because we do not
+	# link with -lc, and that would cause any symbols used from libc to
+	# always be unresolved, which means just about no library would
+	# ever link correctly.  If we're not using GNU ld we use -z text
+	# though, which does catch some bad symbols but isn't as heavy-handed
+	# as -z defs.
+	no_undefined_flag_CXX='$wl-z,text'
+	allow_undefined_flag_CXX='$wl-z,nodefs'
+	archive_cmds_need_lc_CXX=no
+	hardcode_shlibpath_var_CXX=no
+	hardcode_libdir_flag_spec_CXX='$wl-R,$libdir'
+	hardcode_libdir_separator_CXX=':'
+	link_all_deplibs_CXX=yes
+	export_dynamic_flag_spec_CXX='$wl-Bexport'
+	runpath_var='LD_RUN_PATH'
+
+	case $cc_basename in
+          CC*)
+	    archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~
+              '"$old_archive_cmds_CXX"
+	    reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~
+              '"$reload_cmds_CXX"
+	    ;;
+	  *)
+	    archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    ;;
+	esac
+      ;;
+
+      tandem*)
+        case $cc_basename in
+          NCC*)
+	    # NonStop-UX NCC 3.20
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+          *)
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+        esac
+        ;;
+
+      vxworks*)
+        # FIXME: insert proper C++ library support
+        ld_shlibs_CXX=no
+        ;;
+
+      *)
+        # FIXME: insert proper C++ library support
+        ld_shlibs_CXX=no
+        ;;
+    esac
+
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5
+printf "%s\n" "$ld_shlibs_CXX" >&6; }
+    test no = "$ld_shlibs_CXX" && can_build_shared=no
+
+    GCC_CXX=$GXX
+    LD_CXX=$LD
+
+    ## CAVEAT EMPTOR:
+    ## There is no encapsulation within the following macros, do not change
+    ## the running order or otherwise move them around unless you know exactly
+    ## what you are doing...
+    # Dependencies to place before and after the object being linked:
+predep_objects_CXX=
+postdep_objects_CXX=
+predeps_CXX=
+postdeps_CXX=
+compiler_lib_search_path_CXX=
+
+cat > conftest.$ac_ext <<_LT_EOF
+class Foo
+{
+public:
+  Foo (void) { a = 0; }
+private:
+  int a;
+};
+_LT_EOF
+
+
+_lt_libdeps_save_CFLAGS=$CFLAGS
+case "$CC $CFLAGS " in #(
+*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;;
+*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;;
+*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;;
+esac
+
+if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then
+  # Parse the compiler output and extract the necessary
+  # objects, libraries and library flags.
+
+  # Sentinel used to keep track of whether or not we are before
+  # the conftest object file.
+  pre_test_object_deps_done=no
+
+  for p in `eval "$output_verbose_link_cmd"`; do
+    case $prev$p in
+
+    -L* | -R* | -l*)
+       # Some compilers place space between "-{L,R}" and the path.
+       # Remove the space.
+       if test x-L = "$p" ||
+          test x-R = "$p"; then
+	 prev=$p
+	 continue
+       fi
+
+       # Expand the sysroot to ease extracting the directories later.
+       if test -z "$prev"; then
+         case $p in
+         -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;;
+         -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;;
+         -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;;
+         esac
+       fi
+       case $p in
+       =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;;
+       esac
+       if test no = "$pre_test_object_deps_done"; then
+	 case $prev in
+	 -L | -R)
+	   # Internal compiler library paths should come after those
+	   # provided by the user.  The postdeps already come after the
+	   # user supplied libs so there is no need to process them.
+	   if test -z "$compiler_lib_search_path_CXX"; then
+	     compiler_lib_search_path_CXX=$prev$p
+	   else
+	     compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} $prev$p"
+	   fi
+	   ;;
+	 # The "-l" case would never come before the object being
+	 # linked, so don't bother handling this case.
+	 esac
+       else
+	 if test -z "$postdeps_CXX"; then
+	   postdeps_CXX=$prev$p
+	 else
+	   postdeps_CXX="${postdeps_CXX} $prev$p"
+	 fi
+       fi
+       prev=
+       ;;
+
+    *.lto.$objext) ;; # Ignore GCC LTO objects
+    *.$objext)
+       # This assumes that the test object file only shows up
+       # once in the compiler output.
+       if test "$p" = "conftest.$objext"; then
+	 pre_test_object_deps_done=yes
+	 continue
+       fi
+
+       if test no = "$pre_test_object_deps_done"; then
+	 if test -z "$predep_objects_CXX"; then
+	   predep_objects_CXX=$p
+	 else
+	   predep_objects_CXX="$predep_objects_CXX $p"
+	 fi
+       else
+	 if test -z "$postdep_objects_CXX"; then
+	   postdep_objects_CXX=$p
+	 else
+	   postdep_objects_CXX="$postdep_objects_CXX $p"
+	 fi
+       fi
+       ;;
+
+    *) ;; # Ignore the rest.
+
+    esac
+  done
+
+  # Clean up.
+  rm -f a.out a.exe
+else
+  echo "libtool.m4: error: problem compiling CXX test program"
+fi
+
+$RM -f conftest.$objext
+CFLAGS=$_lt_libdeps_save_CFLAGS
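+# For illustration only: on a typical GNU toolchain the verbose link line parsed
+# above looks roughly like
+#   ... crtbegin.o conftest.o -lstdc++ -lm -lgcc_s -lc ... crtend.o
+# Objects seen before conftest.o become predep_objects_CXX, objects after it
+# become postdep_objects_CXX, -L/-R paths seen before it feed
+# compiler_lib_search_path_CXX, and libraries after it end up in postdeps_CXX.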
+
+# PORTME: override above test on systems where it is broken
+case $host_os in
+interix[3-9]*)
+  # Interix 3.5 installs completely hosed .la files for C++, so rather than
+  # hack all around it, let's just trust "g++" to DTRT.
+  predep_objects_CXX=
+  postdep_objects_CXX=
+  postdeps_CXX=
+  ;;
+esac
+
+
+case " $postdeps_CXX " in
+*" -lc "*) archive_cmds_need_lc_CXX=no ;;
+esac
+ compiler_lib_search_dirs_CXX=
+if test -n "${compiler_lib_search_path_CXX}"; then
+ compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | $SED -e 's! -L! !g' -e 's!^ !!'`
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+    lt_prog_compiler_wl_CXX=
+lt_prog_compiler_pic_CXX=
+lt_prog_compiler_static_CXX=
+
+
+  # C++ specific cases for pic, static, wl, etc.
+  if test yes = "$GXX"; then
+    lt_prog_compiler_wl_CXX='-Wl,'
+    lt_prog_compiler_static_CXX='-static'
+
+    case $host_os in
+    aix*)
+      # All AIX code is PIC.
+      if test ia64 = "$host_cpu"; then
+	# AIX 5 now supports IA64 processor
+	lt_prog_compiler_static_CXX='-Bstatic'
+      fi
+      lt_prog_compiler_pic_CXX='-fPIC'
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            lt_prog_compiler_pic_CXX='-fPIC'
+        ;;
+      m68k)
+            # FIXME: we need at least 68020 code to build shared libraries, but
+            # adding the '-m68020' flag to GCC prevents building anything better,
+            # like '-m68040'.
+            lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4'
+        ;;
+      esac
+      ;;
+
+    beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+      # PIC is the default for these OSes.
+      ;;
+    mingw* | cygwin* | os2* | pw32* | cegcc*)
+      # This hack is so that the source file can tell whether it is being
+      # built for inclusion in a dll (and should export symbols for example).
+      # Although the cygwin gcc ignores -fPIC, we still need this for old-style
+      # (--disable-auto-import) libraries
+      lt_prog_compiler_pic_CXX='-DDLL_EXPORT'
+      case $host_os in
+      os2*)
+	lt_prog_compiler_static_CXX='$wl-static'
+	;;
+      esac
+      ;;
+    darwin* | rhapsody*)
+      # PIC is the default on this platform
+      # Common symbols not allowed in MH_DYLIB files
+      lt_prog_compiler_pic_CXX='-fno-common'
+      ;;
+    *djgpp*)
+      # DJGPP does not support shared libraries at all
+      lt_prog_compiler_pic_CXX=
+      ;;
+    haiku*)
+      # PIC is the default for Haiku.
+      # The "-static" flag exists, but is broken.
+      lt_prog_compiler_static_CXX=
+      ;;
+    interix[3-9]*)
+      # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+      # Instead, we relocate shared libraries at runtime.
+      ;;
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	lt_prog_compiler_pic_CXX=-Kconform_pic
+      fi
+      ;;
+    hpux*)
+      # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+      # PA HP-UX.  On IA64 HP-UX, PIC is the default but the pic flag
+      # sets the default TLS model and affects inlining.
+      case $host_cpu in
+      hppa*64*)
+	;;
+      *)
+	lt_prog_compiler_pic_CXX='-fPIC'
+	;;
+      esac
+      ;;
+    *qnx* | *nto*)
+      # QNX uses GNU C++, but we need to pass the -shared option too, otherwise
+      # it will coredump.
+      lt_prog_compiler_pic_CXX='-fPIC -shared'
+      ;;
+    *)
+      lt_prog_compiler_pic_CXX='-fPIC'
+      ;;
+    esac
+  else
+    case $host_os in
+      aix[4-9]*)
+	# All AIX code is PIC.
+	if test ia64 = "$host_cpu"; then
+	  # AIX 5 now supports IA64 processor
+	  lt_prog_compiler_static_CXX='-Bstatic'
+	else
+	  lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp'
+	fi
+	;;
+      chorus*)
+	case $cc_basename in
+	cxch68*)
+	  # Green Hills C++ Compiler
+	  # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a"
+	  ;;
+	esac
+	;;
+      mingw* | cygwin* | os2* | pw32* | cegcc*)
+	# This hack is so that the source file can tell whether it is being
+	# built for inclusion in a dll (and should export symbols for example).
+	lt_prog_compiler_pic_CXX='-DDLL_EXPORT'
+	;;
+      dgux*)
+	case $cc_basename in
+	  ec++*)
+	    lt_prog_compiler_pic_CXX='-KPIC'
+	    ;;
+	  ghcx*)
+	    # Green Hills C++ Compiler
+	    lt_prog_compiler_pic_CXX='-pic'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      freebsd* | dragonfly*)
+	# FreeBSD uses GNU C++
+	;;
+      hpux9* | hpux10* | hpux11*)
+	case $cc_basename in
+	  CC*)
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    lt_prog_compiler_static_CXX='$wl-a ${wl}archive'
+	    if test ia64 != "$host_cpu"; then
+	      lt_prog_compiler_pic_CXX='+Z'
+	    fi
+	    ;;
+	  aCC*)
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    lt_prog_compiler_static_CXX='$wl-a ${wl}archive'
+	    case $host_cpu in
+	    hppa*64*|ia64*)
+	      # +Z the default
+	      ;;
+	    *)
+	      lt_prog_compiler_pic_CXX='+Z'
+	      ;;
+	    esac
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      interix*)
+	# This is c89, which is MS Visual C++ (no shared libs)
+	# Does anyone want to do a port?
+	;;
+      irix5* | irix6* | nonstopux*)
+	case $cc_basename in
+	  CC*)
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    lt_prog_compiler_static_CXX='-non_shared'
+	    # CC pic flag -KPIC is the default.
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+	case $cc_basename in
+	  KCC*)
+	    # KAI C++ Compiler
+	    lt_prog_compiler_wl_CXX='--backend -Wl,'
+	    lt_prog_compiler_pic_CXX='-fPIC'
+	    ;;
+	  ecpc* )
+	    # old Intel C++ for x86_64, which still supported -KPIC.
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    lt_prog_compiler_pic_CXX='-KPIC'
+	    lt_prog_compiler_static_CXX='-static'
+	    ;;
+	  icpc* )
+	    # Intel C++, used to be incompatible with GCC.
+	    # ICC 10 doesn't accept -KPIC any more.
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    lt_prog_compiler_pic_CXX='-fPIC'
+	    lt_prog_compiler_static_CXX='-static'
+	    ;;
+	  pgCC* | pgcpp*)
+	    # Portland Group C++ compiler
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    lt_prog_compiler_pic_CXX='-fpic'
+	    lt_prog_compiler_static_CXX='-Bstatic'
+	    ;;
+	  cxx*)
+	    # Compaq C++
+	    # Make sure the PIC flag is empty.  It appears that all Alpha
+	    # Linux and Compaq Tru64 Unix objects are PIC.
+	    lt_prog_compiler_pic_CXX=
+	    lt_prog_compiler_static_CXX='-non_shared'
+	    ;;
+	  xlc* | xlC* | bgxl[cC]* | mpixl[cC]*)
+	    # IBM XL 8.0, 9.0 on PPC and BlueGene
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    lt_prog_compiler_pic_CXX='-qpic'
+	    lt_prog_compiler_static_CXX='-qstaticlink'
+	    ;;
+	  *)
+	    case `$CC -V 2>&1 | sed 5q` in
+	    *Sun\ C*)
+	      # Sun C++ 5.9
+	      lt_prog_compiler_pic_CXX='-KPIC'
+	      lt_prog_compiler_static_CXX='-Bstatic'
+	      lt_prog_compiler_wl_CXX='-Qoption ld '
+	      ;;
+	    esac
+	    ;;
+	esac
+	;;
+      lynxos*)
+	;;
+      m88k*)
+	;;
+      mvs*)
+	case $cc_basename in
+	  cxx*)
+	    lt_prog_compiler_pic_CXX='-W c,exportall'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      netbsd*)
+	;;
+      *qnx* | *nto*)
+        # QNX uses GNU C++, but we need to pass the -shared option too, otherwise
+        # it will coredump.
+        lt_prog_compiler_pic_CXX='-fPIC -shared'
+        ;;
+      osf3* | osf4* | osf5*)
+	case $cc_basename in
+	  KCC*)
+	    lt_prog_compiler_wl_CXX='--backend -Wl,'
+	    ;;
+	  RCC*)
+	    # Rational C++ 2.4.1
+	    lt_prog_compiler_pic_CXX='-pic'
+	    ;;
+	  cxx*)
+	    # Digital/Compaq C++
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    # Make sure the PIC flag is empty.  It appears that all Alpha
+	    # Linux and Compaq Tru64 Unix objects are PIC.
+	    lt_prog_compiler_pic_CXX=
+	    lt_prog_compiler_static_CXX='-non_shared'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      psos*)
+	;;
+      solaris*)
+	case $cc_basename in
+	  CC* | sunCC*)
+	    # Sun C++ 4.2, 5.x and Centerline C++
+	    lt_prog_compiler_pic_CXX='-KPIC'
+	    lt_prog_compiler_static_CXX='-Bstatic'
+	    lt_prog_compiler_wl_CXX='-Qoption ld '
+	    ;;
+	  gcx*)
+	    # Green Hills C++ Compiler
+	    lt_prog_compiler_pic_CXX='-PIC'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      sunos4*)
+	case $cc_basename in
+	  CC*)
+	    # Sun C++ 4.x
+	    lt_prog_compiler_pic_CXX='-pic'
+	    lt_prog_compiler_static_CXX='-Bstatic'
+	    ;;
+	  lcc*)
+	    # Lucid
+	    lt_prog_compiler_pic_CXX='-pic'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+	case $cc_basename in
+	  CC*)
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    lt_prog_compiler_pic_CXX='-KPIC'
+	    lt_prog_compiler_static_CXX='-Bstatic'
+	    ;;
+	esac
+	;;
+      tandem*)
+	case $cc_basename in
+	  NCC*)
+	    # NonStop-UX NCC 3.20
+	    lt_prog_compiler_pic_CXX='-KPIC'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      vxworks*)
+	;;
+      *)
+	lt_prog_compiler_can_build_shared_CXX=no
+	;;
+    esac
+  fi
+
+case $host_os in
+  # For platforms that do not support PIC, -DPIC is meaningless:
+  *djgpp*)
+    lt_prog_compiler_pic_CXX=
+    ;;
+  *)
+    lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC"
+    ;;
+esac
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5
+printf %s "checking for $compiler option to produce PIC... " >&6; }
+if test ${lt_cv_prog_compiler_pic_CXX+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5
+printf "%s\n" "$lt_cv_prog_compiler_pic_CXX" >&6; }
+lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX
+
+#
+# Check to make sure the PIC flag actually works.
+#
+if test -n "$lt_prog_compiler_pic_CXX"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5
+printf %s "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; }
+if test ${lt_cv_prog_compiler_pic_works_CXX+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_prog_compiler_pic_works_CXX=no
+   ac_outfile=conftest.$ac_objext
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+   lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC"  ## exclude from sc_useless_quotes_in_assignment
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   # The option is referenced via a variable to avoid confusing sed.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
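+   # For illustration only: if ac_compile is, e.g.,
+   #   $CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5
+   # the $SED program above turns it into
+   #   $CXX -c $CXXFLAGS $CPPFLAGS $lt_compiler_flag conftest.$ac_ext >&5
+   # so the PIC probe flag is only expanded when the command is eval'd below.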
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+   (eval "$lt_compile" 2>conftest.err)
+   ac_status=$?
+   cat conftest.err >&5
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   if (exit $ac_status) && test -s "$ac_outfile"; then
+     # The compiler can only warn and ignore the option if not recognized
+     # So say no if there are warnings other than the usual output.
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
+     $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+     if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+       lt_cv_prog_compiler_pic_works_CXX=yes
+     fi
+   fi
+   $RM conftest*
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5
+printf "%s\n" "$lt_cv_prog_compiler_pic_works_CXX" >&6; }
+
+if test yes = "$lt_cv_prog_compiler_pic_works_CXX"; then
+    case $lt_prog_compiler_pic_CXX in
+     "" | " "*) ;;
+     *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;;
+     esac
+else
+    lt_prog_compiler_pic_CXX=
+     lt_prog_compiler_can_build_shared_CXX=no
+fi
+
+fi
+
+
+
+
+
+#
+# Check to make sure the static flag actually works.
+#
+wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\"
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5
+printf %s "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; }
+if test ${lt_cv_prog_compiler_static_works_CXX+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_prog_compiler_static_works_CXX=no
+   save_LDFLAGS=$LDFLAGS
+   LDFLAGS="$LDFLAGS $lt_tmp_static_flag"
+   echo "$lt_simple_link_test_code" > conftest.$ac_ext
+   if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+     # The linker can only warn and ignore the option if not recognized
+     # So say no if there are warnings
+     if test -s conftest.err; then
+       # Append any errors to the config.log.
+       cat conftest.err 1>&5
+       $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
+       $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+       if diff conftest.exp conftest.er2 >/dev/null; then
+         lt_cv_prog_compiler_static_works_CXX=yes
+       fi
+     else
+       lt_cv_prog_compiler_static_works_CXX=yes
+     fi
+   fi
+   $RM -r conftest*
+   LDFLAGS=$save_LDFLAGS
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5
+printf "%s\n" "$lt_cv_prog_compiler_static_works_CXX" >&6; }
+
+if test yes = "$lt_cv_prog_compiler_static_works_CXX"; then
+    :
+else
+    lt_prog_compiler_static_CXX=
+fi
+
+
+
+
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
+printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+if test ${lt_cv_prog_compiler_c_o_CXX+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_prog_compiler_c_o_CXX=no
+   $RM -r conftest 2>/dev/null
+   mkdir conftest
+   cd conftest
+   mkdir out
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+   lt_compiler_flag="-o out/conftest2.$ac_objext"
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+   (eval "$lt_compile" 2>out/conftest.err)
+   ac_status=$?
+   cat out/conftest.err >&5
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   if (exit $ac_status) && test -s out/conftest2.$ac_objext
+   then
+     # The compiler can only warn and ignore the option if not recognized
+     # So say no if there are warnings
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
+     $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+     if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+       lt_cv_prog_compiler_c_o_CXX=yes
+     fi
+   fi
+   chmod u+w . 2>&5
+   $RM conftest*
+   # SGI C++ compiler will create directory out/ii_files/ for
+   # template instantiation
+   test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+   $RM out/* && rmdir out
+   cd ..
+   $RM -r conftest
+   $RM conftest*
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5
+printf "%s\n" "$lt_cv_prog_compiler_c_o_CXX" >&6; }
+
+
+
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
+printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+if test ${lt_cv_prog_compiler_c_o_CXX+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_prog_compiler_c_o_CXX=no
+   $RM -r conftest 2>/dev/null
+   mkdir conftest
+   cd conftest
+   mkdir out
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+   lt_compiler_flag="-o out/conftest2.$ac_objext"
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+   (eval "$lt_compile" 2>out/conftest.err)
+   ac_status=$?
+   cat out/conftest.err >&5
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   if (exit $ac_status) && test -s out/conftest2.$ac_objext
+   then
+     # The compiler can only warn and ignore the option if not recognized
+     # So say no if there are warnings
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
+     $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+     if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+       lt_cv_prog_compiler_c_o_CXX=yes
+     fi
+   fi
+   chmod u+w . 2>&5
+   $RM conftest*
+   # SGI C++ compiler will create directory out/ii_files/ for
+   # template instantiation
+   test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+   $RM out/* && rmdir out
+   cd ..
+   $RM -r conftest
+   $RM conftest*
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5
+printf "%s\n" "$lt_cv_prog_compiler_c_o_CXX" >&6; }
+
+
+
+
+hard_links=nottested
+if test no = "$lt_cv_prog_compiler_c_o_CXX" && test no != "$need_locks"; then
+  # do not overwrite the value of need_locks provided by the user
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5
+printf %s "checking if we can lock with hard links... " >&6; }
+  hard_links=yes
+  $RM conftest*
+  ln conftest.a conftest.b 2>/dev/null && hard_links=no
+  touch conftest.a
+  ln conftest.a conftest.b 2>&5 || hard_links=no
+  ln conftest.a conftest.b 2>/dev/null && hard_links=no
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5
+printf "%s\n" "$hard_links" >&6; }
+  if test no = "$hard_links"; then
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5
+printf "%s\n" "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;}
+    need_locks=warn
+  fi
+else
+  need_locks=no
+fi
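+# For illustration only: the probe above boils down to
+#   touch conftest.a && ln conftest.a conftest.b
+# which succeeds only when the build directory supports hard links; without
+# them (and without -c -o support) parallel "make -j" may be unsafe.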
+
+
+
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+
+  export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+  exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'
+  case $host_os in
+  aix[4-9]*)
+    # If we're using GNU nm, then we don't want the "-C" option.
+    # -C means demangle to GNU nm, but means don't demangle to AIX nm.
+    # Without the "-l" option, or with the "-B" option, AIX nm treats
+    # weak defined symbols like other global defined symbols, whereas
+    # GNU nm marks them as "W".
+    # While the 'weak' keyword is ignored in the Export File, we need
+    # it in the Import File for the 'aix-soname' feature, so we have
+    # to replace the "-B" option with "-P" for AIX nm.
+    if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+      export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols'
+    else
+      export_symbols_cmds_CXX='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols'
+    fi
+    ;;
+  pw32*)
+    export_symbols_cmds_CXX=$ltdll_cmds
+    ;;
+  cygwin* | mingw* | cegcc*)
+    case $cc_basename in
+    cl*)
+      exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+      ;;
+    *)
+      export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols'
+      exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'
+      ;;
+    esac
+    ;;
+  *)
+    export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+    ;;
+  esac
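+# For illustration only: on a generic ELF host the default spec above expands to
+# roughly
+#   $NM $libobjs $convenience | $global_symbol_pipe | $SED 's/.* //' | sort | uniq
+# i.e. list the global symbols of the objects, keep just the symbol names, and
+# write the sorted, de-duplicated list to $export_symbols.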
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5
+printf "%s\n" "$ld_shlibs_CXX" >&6; }
+test no = "$ld_shlibs_CXX" && can_build_shared=no
+
+with_gnu_ld_CXX=$with_gnu_ld
+
+
+
+
+
+
+#
+# Do we need to explicitly link libc?
+#
+case "x$archive_cmds_need_lc_CXX" in
+x|xyes)
+  # Assume -lc should be added
+  archive_cmds_need_lc_CXX=yes
+
+  if test yes,yes = "$GCC,$enable_shared"; then
+    case $archive_cmds_CXX in
+    *'~'*)
+      # FIXME: we may have to deal with multi-command sequences.
+      ;;
+    '$CC '*)
+      # Test whether the compiler implicitly links with -lc since on some
+      # systems, -lgcc has to come before -lc. If gcc already passes -lc
+      # to ld, don't add -lc before -lgcc.
+      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5
+printf %s "checking whether -lc should be explicitly linked in... " >&6; }
+if test ${lt_cv_archive_cmds_need_lc_CXX+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  $RM conftest*
+	echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+	if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } 2>conftest.err; then
+	  soname=conftest
+	  lib=conftest
+	  libobjs=conftest.$ac_objext
+	  deplibs=
+	  wl=$lt_prog_compiler_wl_CXX
+	  pic_flag=$lt_prog_compiler_pic_CXX
+	  compiler_flags=-v
+	  linker_flags=-v
+	  verstring=
+	  output_objdir=.
+	  libname=conftest
+	  lt_save_allow_undefined_flag=$allow_undefined_flag_CXX
+	  allow_undefined_flag_CXX=
+	  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5
+  (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5
+  ac_status=$?
+  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+	  then
+	    lt_cv_archive_cmds_need_lc_CXX=no
+	  else
+	    lt_cv_archive_cmds_need_lc_CXX=yes
+	  fi
+	  allow_undefined_flag_CXX=$lt_save_allow_undefined_flag
+	else
+	  cat conftest.err 1>&5
+	fi
+	$RM conftest*
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5
+printf "%s\n" "$lt_cv_archive_cmds_need_lc_CXX" >&6; }
+      archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX
+      ;;
+    esac
+  fi
+  ;;
+esac
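+# For illustration only: the cached check above re-runs $archive_cmds_CXX with
+# compiler_flags=-v and greps the verbose output for " -lc ", roughly
+#   eval $archive_cmds_CXX 2>&1 | $GREP ' -lc ' >/dev/null
+# If the compiler already passes -lc to the linker, libtool does not add it again.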
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5
+printf %s "checking dynamic linker characteristics... " >&6; }
+
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+shrext_cmds=.so
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+need_lib_prefix=unknown
+hardcode_into_libs=no
+
+# when you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments
+need_version=unknown
+
+
+
+case $host_os in
+aix3*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='$libname$release$shared_ext$versuffix $libname.a'
+  shlibpath_var=LIBPATH
+
+  # AIX 3 has no versioning support, so we append a major version to the name.
+  soname_spec='$libname$release$shared_ext$major'
+  ;;
+
+aix[4-9]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  hardcode_into_libs=yes
+  if test ia64 = "$host_cpu"; then
+    # AIX 5 supports IA64
+    library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext'
+    shlibpath_var=LD_LIBRARY_PATH
+  else
+    # With GCC up to 2.95.x, collect2 would create an import file
+    # for dependence libraries.  The import file would start with
+    # the line '#! .'.  This would cause the generated library to
+    # depend on '.', always an invalid library.  This was fixed in
+    # development snapshots of GCC prior to 3.0.
+    case $host_os in
+      aix4 | aix4.[01] | aix4.[01].*)
+      if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+	   echo ' yes '
+	   echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then
+	:
+      else
+	can_build_shared=no
+      fi
+      ;;
+    esac
+    # Using Import Files as archive members, it is possible to support
+    # filename-based versioning of shared library archives on AIX. While
+    # this would work for both with and without runtime linking, it will
+    # prevent static linking of such archives. So we do filename-based
+    # shared library versioning with .so extension only, which is used
+    # when both runtime linking and shared linking is enabled.
+    # Unfortunately, runtime linking may impact performance, so we do
+    # not want this to be the default eventually. Also, we use the
+    # versioned .so libs for executables only if there is the -brtl
+    # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only.
+    # To allow for filename-based versioning support, we need to create
+    # libNAME.so.V as an archive file, containing:
+    # *) an Import File, referring to the versioned filename of the
+    #    archive as well as the shared archive member, telling the
+    #    bitwidth (32 or 64) of that shared object, and providing the
+    #    list of exported symbols of that shared object, eventually
+    #    decorated with the 'weak' keyword
+    # *) the shared object with the F_LOADONLY flag set, to really avoid
+    #    it being seen by the linker.
+    # At run time we better use the real file rather than another symlink,
+    # but for link time we create the symlink libNAME.so -> libNAME.so.V
+
+    case $with_aix_soname,$aix_use_runtimelinking in
+    # AIX (on Power*) has no versioning support, so currently we cannot hardcode a
+    # correct soname into the executable. Probably we can add versioning support to
+    # collect2, so additional links can be useful in the future.
+    aix,yes) # traditional libtool
+      dynamic_linker='AIX unversionable lib.so'
+      # If using run time linking (on AIX 4.2 or later) use lib<name>.so
+      # instead of lib<name>.a to let people know that these are not
+      # typical AIX shared libraries.
+      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+      ;;
+    aix,no) # traditional AIX only
+      dynamic_linker='AIX lib.a(lib.so.V)'
+      # We preserve .a as extension for shared libraries through AIX4.2
+      # and later when we are not doing run time linking.
+      library_names_spec='$libname$release.a $libname.a'
+      soname_spec='$libname$release$shared_ext$major'
+      ;;
+    svr4,*) # full svr4 only
+      dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)"
+      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
+      # We do not specify a path in Import Files, so LIBPATH fires.
+      shlibpath_overrides_runpath=yes
+      ;;
+    *,yes) # both, prefer svr4
+      dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)"
+      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
+      # unpreferred sharedlib libNAME.a needs extra handling
+      postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"'
+      postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"'
+      # We do not specify a path in Import Files, so LIBPATH fires.
+      shlibpath_overrides_runpath=yes
+      ;;
+    *,no) # both, prefer aix
+      dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)"
+      library_names_spec='$libname$release.a $libname.a'
+      soname_spec='$libname$release$shared_ext$major'
+      # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling
+      postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)'
+      postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"'
+      ;;
+    esac
+    shlibpath_var=LIBPATH
+  fi
+  ;;
+
+amigaos*)
+  case $host_cpu in
+  powerpc)
+    # Since July 2007 AmigaOS4 officially supports .so libraries.
+    # When compiling the executable, add -use-dynld -Lsobjs: to the compileline.
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+    ;;
+  m68k)
+    library_names_spec='$libname.ixlibrary $libname.a'
+    # Create ${libname}_ixlibrary.a entries in /sys/libs.
+    finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+    ;;
+  esac
+  ;;
+
+beos*)
+  library_names_spec='$libname$shared_ext'
+  dynamic_linker="$host_os ld.so"
+  shlibpath_var=LIBRARY_PATH
+  ;;
+
+bsdi[45]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+  sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+  # the default ld.so.conf also contains /usr/contrib/lib and
+  # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+  # libtool to hard-code these into programs
+  ;;
+
+cygwin* | mingw* | pw32* | cegcc*)
+  version_type=windows
+  shrext_cmds=.dll
+  need_version=no
+  need_lib_prefix=no
+
+  case $GCC,$cc_basename in
+  yes,*)
+    # gcc
+    library_names_spec='$libname.dll.a'
+    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+    postinstall_cmds='base_file=`basename \$file`~
+      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
+      dldir=$destdir/`dirname \$dlpath`~
+      test -d \$dldir || mkdir -p \$dldir~
+      $install_prog $dir/$dlname \$dldir/$dlname~
+      chmod a+x \$dldir/$dlname~
+      if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+        eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+      fi'
+    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+      dlpath=$dir/\$dldll~
+       $RM \$dlpath'
+    shlibpath_overrides_runpath=yes
+
+    case $host_os in
+    cygwin*)
+      # Cygwin DLLs use 'cyg' prefix rather than 'lib'
+      soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
+
+      ;;
+    mingw* | cegcc*)
+      # MinGW DLLs use traditional 'lib' prefix
+      soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
+      ;;
+    pw32*)
+      # pw32 DLLs use 'pw' prefix rather than 'lib'
+      library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
+      ;;
+    esac
+    dynamic_linker='Win32 ld.exe'
+    ;;
+
+  *,cl*)
+    # Native MSVC
+    libname_spec='$name'
+    soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
+    library_names_spec='$libname.dll.lib'
+
+    case $build_os in
+    mingw*)
+      sys_lib_search_path_spec=
+      lt_save_ifs=$IFS
+      IFS=';'
+      for lt_path in $LIB
+      do
+        IFS=$lt_save_ifs
+        # Let DOS variable expansion print the short 8.3 style file name.
+        lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
+        sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
+      done
+      IFS=$lt_save_ifs
+      # Convert to MSYS style.
+      sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'`
+      ;;
+    cygwin*)
+      # Convert to unix form, then to dos form, then back to unix form
+      # but this time dos style (no spaces!) so that the unix form looks
+      # like /cygdrive/c/PROGRA~1:/cygdr...
+      sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
+      sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
+      sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+      ;;
+    *)
+      sys_lib_search_path_spec=$LIB
+      if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then
+        # It is most probably a Windows format PATH.
+        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+      else
+        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+      fi
+      # FIXME: find the short name or the path components, as spaces are
+      # common. (e.g. "Program Files" -> "PROGRA~1")
+      ;;
+    esac
+
+    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+    postinstall_cmds='base_file=`basename \$file`~
+      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
+      dldir=$destdir/`dirname \$dlpath`~
+      test -d \$dldir || mkdir -p \$dldir~
+      $install_prog $dir/$dlname \$dldir/$dlname'
+    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+      dlpath=$dir/\$dldll~
+       $RM \$dlpath'
+    shlibpath_overrides_runpath=yes
+    dynamic_linker='Win32 link.exe'
+    ;;
+
+  *)
+    # Assume MSVC wrapper
+    library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib'
+    dynamic_linker='Win32 ld.exe'
+    ;;
+  esac
+  # FIXME: first we should search . and the directory the executable is in
+  shlibpath_var=PATH
+  ;;
+
+darwin* | rhapsody*)
+  dynamic_linker="$host_os dyld"
+  version_type=darwin
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$major$shared_ext $libname$shared_ext'
+  soname_spec='$libname$release$major$shared_ext'
+  shlibpath_overrides_runpath=yes
+  shlibpath_var=DYLD_LIBRARY_PATH
+  shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
+
+  sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib'
+  ;;
+
+dgux*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  ;;
+
+freebsd* | dragonfly*)
+  # DragonFly does not have aout.  When/if they implement a new
+  # versioning mechanism, adjust this.
+  if test -x /usr/bin/objformat; then
+    objformat=`/usr/bin/objformat`
+  else
+    case $host_os in
+    freebsd[23].*) objformat=aout ;;
+    *) objformat=elf ;;
+    esac
+  fi
+  version_type=freebsd-$objformat
+  case $version_type in
+    freebsd-elf*)
+      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+      soname_spec='$libname$release$shared_ext$major'
+      need_version=no
+      need_lib_prefix=no
+      ;;
+    freebsd-*)
+      library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+      need_version=yes
+      ;;
+  esac
+  shlibpath_var=LD_LIBRARY_PATH
+  case $host_os in
+  freebsd2.*)
+    shlibpath_overrides_runpath=yes
+    ;;
+  freebsd3.[01]* | freebsdelf3.[01]*)
+    shlibpath_overrides_runpath=yes
+    hardcode_into_libs=yes
+    ;;
+  freebsd3.[2-9]* | freebsdelf3.[2-9]* | \
+  freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1)
+    shlibpath_overrides_runpath=no
+    hardcode_into_libs=yes
+    ;;
+  *) # from 4.6 on, and DragonFly
+    shlibpath_overrides_runpath=yes
+    hardcode_into_libs=yes
+    ;;
+  esac
+  ;;
+
+haiku*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  dynamic_linker="$host_os runtime_loader"
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
+  hardcode_into_libs=yes
+  ;;
+
+hpux9* | hpux10* | hpux11*)
+  # Give a soname corresponding to the major version so that dld.sl refuses to
+  # link against other versions.
+  version_type=sunos
+  need_lib_prefix=no
+  need_version=no
+  case $host_cpu in
+  ia64*)
+    shrext_cmds='.so'
+    hardcode_into_libs=yes
+    dynamic_linker="$host_os dld.so"
+    shlibpath_var=LD_LIBRARY_PATH
+    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+    soname_spec='$libname$release$shared_ext$major'
+    if test 32 = "$HPUX_IA64_MODE"; then
+      sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
+      sys_lib_dlsearch_path_spec=/usr/lib/hpux32
+    else
+      sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
+      sys_lib_dlsearch_path_spec=/usr/lib/hpux64
+    fi
+    ;;
+  hppa*64*)
+    shrext_cmds='.sl'
+    hardcode_into_libs=yes
+    dynamic_linker="$host_os dld.sl"
+    shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
+    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+    soname_spec='$libname$release$shared_ext$major'
+    sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
+    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+    ;;
+  *)
+    shrext_cmds='.sl'
+    dynamic_linker="$host_os dld.sl"
+    shlibpath_var=SHLIB_PATH
+    shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+    soname_spec='$libname$release$shared_ext$major'
+    ;;
+  esac
+  # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
+  postinstall_cmds='chmod 555 $lib'
+  # or fails outright, so override atomically:
+  install_override_mode=555
+  ;;
+
+interix[3-9]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  ;;
+
+irix5* | irix6* | nonstopux*)
+  case $host_os in
+    nonstopux*) version_type=nonstopux ;;
+    *)
+	if test yes = "$lt_cv_prog_gnu_ld"; then
+		version_type=linux # correct to gnu/linux during the next big refactor
+	else
+		version_type=irix
+	fi ;;
+  esac
+  need_lib_prefix=no
+  need_version=no
+  soname_spec='$libname$release$shared_ext$major'
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext'
+  case $host_os in
+  irix5* | nonstopux*)
+    libsuff= shlibsuff=
+    ;;
+  *)
+    case $LD in # libtool.m4 will add one of these switches to LD
+    *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ")
+      libsuff= shlibsuff= libmagic=32-bit;;
+    *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ")
+      libsuff=32 shlibsuff=N32 libmagic=N32;;
+    *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ")
+      libsuff=64 shlibsuff=64 libmagic=64-bit;;
+    *) libsuff= shlibsuff= libmagic=never-match;;
+    esac
+    ;;
+  esac
+  shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+  shlibpath_overrides_runpath=no
+  sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff"
+  sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff"
+  hardcode_into_libs=yes
+  ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux*oldld* | linux*aout* | linux*coff*)
+  dynamic_linker=no
+  ;;
+
+linux*android*)
+  version_type=none # Android doesn't support versioned libraries.
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext'
+  soname_spec='$libname$release$shared_ext'
+  finish_cmds=
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+
+  # This implies no fast_install, which is unacceptable.
+  # Some rework will be needed to allow for fast_install
+  # before this can be enabled.
+  hardcode_into_libs=yes
+
+  dynamic_linker='Android linker'
+  # Don't embed -rpath directories since the linker doesn't support them.
+  hardcode_libdir_flag_spec_CXX='-L$libdir'
+  ;;
+
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+
+  # Some binutils ld are patched to set DT_RUNPATH
+  if test ${lt_cv_shlibpath_overrides_runpath+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  lt_cv_shlibpath_overrides_runpath=no
+    save_LDFLAGS=$LDFLAGS
+    save_libdir=$libdir
+    eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \
+	 LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\""
+    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main (void)
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"
+then :
+  if  ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null
+then :
+  lt_cv_shlibpath_overrides_runpath=yes
+fi
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext conftest.$ac_ext
+    LDFLAGS=$save_LDFLAGS
+    libdir=$save_libdir
+
+fi
+
+  shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
+
+  # This implies no fast_install, which is unacceptable.
+  # Some rework will be needed to allow for fast_install
+  # before this can be enabled.
+  hardcode_into_libs=yes
+
+  # Ideally, we could use ldconfig to report *all* directories that are
+  # searched for libraries; however, this is still not possible.  Aside from
+  # not being certain that /sbin/ldconfig is available, the command
+  # 'ldconfig -N -X -v | grep ^/' on 64-bit Fedora does not report /usr/lib64,
+  # even though it is searched at run time.  Make the best guess by
+  # appending the ld.so.conf contents (and its includes) to the search path.
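+  # (Illustrative example only: a typical /etc/ld.so.conf might contain a
+  #  line such as "include /etc/ld.so.conf.d/*.conf" plus plain directory
+  #  paths; the awk/sed pipeline below expands such include lines and strips
+  #  comments, hwcap lines and separators before the result is appended.)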
+  if test -f /etc/ld.so.conf; then
+    lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[	 ]*hwcap[	 ]/d;s/[:,	]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
+    sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra"
+  fi
+
+  # We used to test for /lib/ld.so.1 and disable shared libraries on
+  # powerpc, because MkLinux only supported shared libraries with the
+  # GNU dynamic linker.  Since this was broken with cross compilers,
+  # most powerpc-linux boxes support dynamic linking these days and
+  # people can always --disable-shared, the test was removed, and we
+  # assume the GNU/Linux dynamic linker is in use.
+  dynamic_linker='GNU/Linux ld.so'
+  ;;
+
+netbsd*)
+  version_type=sunos
+  need_lib_prefix=no
+  need_version=no
+  if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+    finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+    dynamic_linker='NetBSD (a.out) ld.so'
+  else
+    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+    soname_spec='$libname$release$shared_ext$major'
+    dynamic_linker='NetBSD ld.elf_so'
+  fi
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  ;;
+
+newsos6)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  ;;
+
+*nto* | *qnx*)
+  version_type=qnx
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  dynamic_linker='ldqnx.so'
+  ;;
+
+openbsd* | bitrig*)
+  version_type=sunos
+  sys_lib_dlsearch_path_spec=/usr/lib
+  need_lib_prefix=no
+  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
+    need_version=no
+  else
+    need_version=yes
+  fi
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  ;;
+
+os2*)
+  libname_spec='$name'
+  version_type=windows
+  shrext_cmds=.dll
+  need_version=no
+  need_lib_prefix=no
+  # OS/2 can only load a DLL with a base name of 8 characters or less.
+  soname_spec='`test -n "$os2dllname" && libname="$os2dllname";
+    v=$($ECHO $release$versuffix | tr -d .-);
+    n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _);
+    $ECHO $n$v`$shared_ext'
+  library_names_spec='${libname}_dll.$libext'
+  dynamic_linker='OS/2 ld.exe'
+  shlibpath_var=BEGINLIBPATH
+  sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+  postinstall_cmds='base_file=`basename \$file`~
+    dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~
+    dldir=$destdir/`dirname \$dlpath`~
+    test -d \$dldir || mkdir -p \$dldir~
+    $install_prog $dir/$dlname \$dldir/$dlname~
+    chmod a+x \$dldir/$dlname~
+    if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+      eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+    fi'
+  postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~
+    dlpath=$dir/\$dldll~
+    $RM \$dlpath'
+  ;;
+
+osf3* | osf4* | osf5*)
+  version_type=osf
+  need_lib_prefix=no
+  need_version=no
+  soname_spec='$libname$release$shared_ext$major'
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  shlibpath_var=LD_LIBRARY_PATH
+  sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+  ;;
+
+rdos*)
+  dynamic_linker=no
+  ;;
+
+solaris*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  # ldd complains unless libraries are executable
+  postinstall_cmds='chmod +x $lib'
+  ;;
+
+sunos4*)
+  version_type=sunos
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+  finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  if test yes = "$with_gnu_ld"; then
+    need_lib_prefix=no
+  fi
+  need_version=yes
+  ;;
+
+sysv4 | sysv4.3*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  case $host_vendor in
+    sni)
+      shlibpath_overrides_runpath=no
+      need_lib_prefix=no
+      runpath_var=LD_RUN_PATH
+      ;;
+    siemens)
+      need_lib_prefix=no
+      ;;
+    motorola)
+      need_lib_prefix=no
+      need_version=no
+      shlibpath_overrides_runpath=no
+      sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+      ;;
+  esac
+  ;;
+
+sysv4*MP*)
+  if test -d /usr/nec; then
+    version_type=linux # correct to gnu/linux during the next big refactor
+    library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext'
+    soname_spec='$libname$shared_ext.$major'
+    shlibpath_var=LD_LIBRARY_PATH
+  fi
+  ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+  version_type=sco
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  if test yes = "$with_gnu_ld"; then
+    sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
+  else
+    sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
+    case $host_os in
+      sco3.2v5*)
+        sys_lib_search_path_spec="$sys_lib_search_path_spec /lib"
+	;;
+    esac
+  fi
+  sys_lib_dlsearch_path_spec='/usr/lib'
+  ;;
+
+tpf*)
+  # TPF is a cross-target only.  Preferred cross-host = GNU/Linux.
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  ;;
+
+uts4*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+  soname_spec='$libname$release$shared_ext$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  ;;
+
+*)
+  dynamic_linker=no
+  ;;
+esac
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5
+printf "%s\n" "$dynamic_linker" >&6; }
+test no = "$dynamic_linker" && can_build_shared=no
+
+variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+if test yes = "$GCC"; then
+  variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+fi
+
+if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then
+  sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec
+fi
+
+if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then
+  sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec
+fi
+
+# remember unaugmented sys_lib_dlsearch_path content for libtool script decls...
+configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec
+
+# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code
+func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH"
+
+# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool
+configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5
+printf %s "checking how to hardcode library paths into programs... " >&6; }
+hardcode_action_CXX=
+if test -n "$hardcode_libdir_flag_spec_CXX" ||
+   test -n "$runpath_var_CXX" ||
+   test yes = "$hardcode_automatic_CXX"; then
+
+  # We can hardcode non-existent directories.
+  if test no != "$hardcode_direct_CXX" &&
+     # If the only mechanism to avoid hardcoding is shlibpath_var, we
+     # have to relink, otherwise we might link with an installed library
+     # when we should be linking with a yet-to-be-installed one
+     ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" &&
+     test no != "$hardcode_minus_L_CXX"; then
+    # Linking always hardcodes the temporary library directory.
+    hardcode_action_CXX=relink
+  else
+    # We can link without hardcoding, and we can hardcode nonexisting dirs.
+    hardcode_action_CXX=immediate
+  fi
+else
+  # We cannot hardcode anything, or else we can only hardcode existing
+  # directories.
+  hardcode_action_CXX=unsupported
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5
+printf "%s\n" "$hardcode_action_CXX" >&6; }
+
+if test relink = "$hardcode_action_CXX" ||
+   test yes = "$inherit_rpath_CXX"; then
+  # Fast installation is not supported
+  enable_fast_install=no
+elif test yes = "$shlibpath_overrides_runpath" ||
+     test no = "$enable_shared"; then
+  # Fast installation is not necessary
+  enable_fast_install=needless
+fi
+
+
+
+
+
+
+
+  fi # test -n "$compiler"
+
+  CC=$lt_save_CC
+  CFLAGS=$lt_save_CFLAGS
+  LDCXX=$LD
+  LD=$lt_save_LD
+  GCC=$lt_save_GCC
+  with_gnu_ld=$lt_save_with_gnu_ld
+  lt_cv_path_LDCXX=$lt_cv_path_LD
+  lt_cv_path_LD=$lt_save_path_LD
+  lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld
+  lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld
+fi # test yes != "$_lt_caught_CXX_error"
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+        ac_config_commands="$ac_config_commands libtool"
+
+
+
+
+# Only expand once:
+
+
+# Check whether --enable-silent-rules was given.
+if test ${enable_silent_rules+y}
+then :
+  enableval=$enable_silent_rules;
+fi
+
+case $enable_silent_rules in # (((
+  yes) AM_DEFAULT_VERBOSITY=0;;
+   no) AM_DEFAULT_VERBOSITY=1;;
+    *) AM_DEFAULT_VERBOSITY=1;;
+esac
+am_make=${MAKE-make}
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5
+printf %s "checking whether $am_make supports nested variables... " >&6; }
+if test ${am_cv_make_support_nested_variables+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if printf "%s\n" 'TRUE=$(BAR$(V))
+BAR0=false
+BAR1=true
+V=1
+am__doit:
+	@$(TRUE)
+.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then
+  am_cv_make_support_nested_variables=yes
+else
+  am_cv_make_support_nested_variables=no
+fi
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5
+printf "%s\n" "$am_cv_make_support_nested_variables" >&6; }
+if test $am_cv_make_support_nested_variables = yes; then
+    AM_V='$(V)'
+  AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
+else
+  AM_V=$AM_DEFAULT_VERBOSITY
+  AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY
+fi
+AM_BACKSLASH='\'
+
+# Autoupdate added the next two lines to ensure that your configure
+# script's behavior did not change.  They are probably safe to remove.
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
+printf %s "checking for egrep... " >&6; }
+if test ${ac_cv_path_EGREP+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
+   then ac_cv_path_EGREP="$GREP -E"
+   else
+     if test -z "$EGREP"; then
+  ac_path_EGREP_found=false
+  # Loop through the user's path and test for each of PROGNAME-LIST
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_prog in egrep
+   do
+    for ac_exec_ext in '' $ac_executable_extensions; do
+      ac_path_EGREP="$as_dir$ac_prog$ac_exec_ext"
+      as_fn_executable_p "$ac_path_EGREP" || continue
+# Check for GNU ac_path_EGREP and select it if it is found.
+  # Check for GNU $ac_path_EGREP
+case `"$ac_path_EGREP" --version 2>&1` in
+*GNU*)
+  ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
+*)
+  ac_count=0
+  printf %s 0123456789 >"conftest.in"
+  while :
+  do
+    cat "conftest.in" "conftest.in" >"conftest.tmp"
+    mv "conftest.tmp" "conftest.in"
+    cp "conftest.in" "conftest.nl"
+    printf "%s\n" 'EGREP' >> "conftest.nl"
+    "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+    as_fn_arith $ac_count + 1 && ac_count=$as_val
+    if test $ac_count -gt ${ac_path_EGREP_max-0}; then
+      # Best one so far, save it but keep looking for a better one
+      ac_cv_path_EGREP="$ac_path_EGREP"
+      ac_path_EGREP_max=$ac_count
+    fi
+    # 10*(2^10) chars as input seems more than enough
+    test $ac_count -gt 10 && break
+  done
+  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+      $ac_path_EGREP_found && break 3
+    done
+  done
+  done
+IFS=$as_save_IFS
+  if test -z "$ac_cv_path_EGREP"; then
+    as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+  fi
+else
+  ac_cv_path_EGREP=$EGREP
+fi
+
+   fi
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
+printf "%s\n" "$ac_cv_path_EGREP" >&6; }
+ EGREP="$ac_cv_path_EGREP"
+
+
+
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether utime accepts a null argument" >&5
+printf %s "checking whether utime accepts a null argument... " >&6; }
+if test ${ac_cv_func_utime_null+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  rm -f conftest.data; >conftest.data
+# Sequent interprets utime(file, 0) to mean use start of epoch.  Wrong.
+if test "$cross_compiling" = yes
+then :
+  ac_cv_func_utime_null='guessing yes'
+else $as_nop
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$ac_includes_default
+	       #ifdef HAVE_UTIME_H
+	       # include <utime.h>
+	       #endif
+int
+main (void)
+{
+struct stat s, t;
+  return ! (stat ("conftest.data", &s) == 0
+	    && utime ("conftest.data", 0) == 0
+	    && stat ("conftest.data", &t) == 0
+	    && t.st_mtime >= s.st_mtime
+	    && t.st_mtime - s.st_mtime < 120);
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"
+then :
+  ac_cv_func_utime_null=yes
+else $as_nop
+  ac_cv_func_utime_null=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+  conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_utime_null" >&5
+printf "%s\n" "$ac_cv_func_utime_null" >&6; }
+if test "x$ac_cv_func_utime_null" != xno; then
+  ac_cv_func_utime_null=yes
+
+printf "%s\n" "#define HAVE_UTIME_NULL 1" >>confdefs.h
+
+fi
+rm -f conftest.data
+
+
+## Set Fortran compiler behaviour
+#if test "x$FCFLAGS" == "x"; then
+#  FCFLAGS="-O2"
+#fi
+## Try to respect users' Fortran compiler variables
+#if test "x$FC" == "x"; then
+#  if test "x$F77" == "x"; then
+#FC="$GFORTRAN"
+#FC=gfortran
+#  else
+#    FC="$F77"
+#  fi
+#fi
+
+
+CFLAGS=" -I/opt/local/include $CFLAGS "
+CXXFLAGS=" -I/opt/local/include $CXXFLAGS "
+LDFLAGS=" -L/opt/local/lib $LDFLAGS -lpthread "
+
+# check for zlib
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for library containing gzopen" >&5
+printf %s "checking for library containing gzopen... " >&6; }
+if test ${ac_cv_search_gzopen+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  ac_func_search_save_LIBS=$LIBS
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+char gzopen ();
+int
+main (void)
+{
+return gzopen ();
+  ;
+  return 0;
+}
+_ACEOF
+for ac_lib in '' z
+do
+  if test -z "$ac_lib"; then
+    ac_res="none required"
+  else
+    ac_res=-l$ac_lib
+    LIBS="-l$ac_lib  $ac_func_search_save_LIBS"
+  fi
+  if ac_fn_c_try_link "$LINENO"
+then :
+  ac_cv_search_gzopen=$ac_res
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.beam \
+    conftest$ac_exeext
+  if test ${ac_cv_search_gzopen+y}
+then :
+  break
+fi
+done
+if test ${ac_cv_search_gzopen+y}
+then :
+
+else $as_nop
+  ac_cv_search_gzopen=no
+fi
+rm conftest.$ac_ext
+LIBS=$ac_func_search_save_LIBS
+fi
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_gzopen" >&5
+printf "%s\n" "$ac_cv_search_gzopen" >&6; }
+ac_res=$ac_cv_search_gzopen
+if test "$ac_res" != no
+then :
+  test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
+
+fi
+
+
+# AX_CHECK_ZLIB([ZLIBTEST=true], ZLIBTEST="")
+# AX_CHECK_ZLIB()
+# AC_MSG_NOTICE([ZLIBTEST is set to $ZLIBTEST])
+# AM_CONDITIONAL(HAVE_LIBZ, test "x$ZLIBTEST" != "x")
+
+
+
+# Check whether --with-root was given.
+if test ${with_root+y}
+then :
+  withval=$with_root;
+else $as_nop
+  with_root=yes
+fi
+
+
+USEROOT=
+if test "x$with_root" != xno
+then :
+  USEROOT=1
+
+else $as_nop
+  USEROOT=0
+
+fi
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to compile with root" >&5
+printf %s "checking whether to compile with root... " >&6; }
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $with_root" >&5
+printf "%s\n" "$with_root" >&6; }
+
+
+ROOT_CXXFLAGS=
+ROOT_LDFLAGS=
+
+if test "$USEROOT" == "1"; then
+
+# Extract the first word of "root", so it can be a program name with args.
+set dummy root; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_path_ROOTPATH+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  case $ROOTPATH in
+  [\\/]* | ?:[\\/]*)
+  ac_cv_path_ROOTPATH="$ROOTPATH" # Let the user override the test with a path.
+  ;;
+  *)
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_path_ROOTPATH="$as_dir$ac_word$ac_exec_ext"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+  test -z "$ac_cv_path_ROOTPATH" && ac_cv_path_ROOTPATH="no"
+  ;;
+esac
+fi
+ROOTPATH=$ac_cv_path_ROOTPATH
+if test -n "$ROOTPATH"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ROOTPATH" >&5
+printf "%s\n" "$ROOTPATH" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+if test "$ROOTPATH" = "no"; then
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: **************************" >&5
+printf "%s\n" "$as_me: WARNING: **************************" >&2;}
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: " >&5
+printf "%s\n" "$as_me: WARNING: " >&2;}
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: root was not found " >&5
+printf "%s\n" "$as_me: WARNING: root was not found " >&2;}
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: " >&5
+printf "%s\n" "$as_me: WARNING: " >&2;}
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: **************************" >&5
+printf "%s\n" "$as_me: WARNING: **************************" >&2;}
+    USEROOT=0
+
+    echo "" > appl_grid/appl_root.h
+else
+    echo "#define USEROOT" > appl_grid/appl_root.h
+    ROOT_CXXFLAGS=$(root-config --cflags)
+    ROOT_LDFLAGS=$(root-config --ldflags)
+    ROOT_LDFLAGS+=" "
+    ROOT_LDFLAGS+=$(root-config --libs)
+
+printf "%s\n" "#define HAVE_CERNROOT 1" >>confdefs.h
+
+fi
+else
+    echo "" > appl_grid/appl_root.h
+fi
+
+if test "$USEROOT" != "1"; then
+    echo "" > appl_grid/appl_root.h
+
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: ****************************************************************" >&5
+printf "%s\n" "$as_me: WARNING: ****************************************************************" >&2;}
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING:  compiling without root" >&5
+printf "%s\n" "$as_me: WARNING:  compiling without root" >&2;}
+    if ( echo $CXXFLAGS | grep -q "std=c++" ); then
+       { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING:  using $CXXFLAGS " >&5
+printf "%s\n" "$as_me: WARNING:  using $CXXFLAGS " >&2;}
+    else
+       touch /tmp/t.cxx
+       ( g++ -std=c++11 -c /tmp/t.cxx ) && COMP=" -std=c++11 "
+       ( g++ -std=c++1y -c /tmp/t.cxx ) && COMP=" -std=c++1y "
+       ( g++ -std=c++1z -c /tmp/t.cxx ) && COMP=" -std=c++1z "
+       CXXFLAGS+=$COMP
+       { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING:  defaulting to $COMP : for a different C++ version " >&5
+printf "%s\n" "$as_me: WARNING:  defaulting to $COMP : for a different C++ version " >&2;}
+       { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING:  you should set CXXFLAGS+=\" -std=<c++_version> \" and run " >&5
+printf "%s\n" "$as_me: WARNING:  you should set CXXFLAGS+=\" -std=<c++_version> \" and run " >&2;}
+       { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING:  configure again " >&5
+printf "%s\n" "$as_me: WARNING:  configure again " >&2;}
+    fi
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: ****************************************************************" >&5
+printf "%s\n" "$as_me: WARNING: ****************************************************************" >&2;}
+fi
+
+( echo $CXXFLAGS | grep -q "m64" ) || CXXFLAGS+=" -m64 "
+
+
+
+
+
+
+ if test "$USEROOT" == "1" ; then
+  USE_ROOT_TRUE=
+  USE_ROOT_FALSE='#'
+else
+  USE_ROOT_TRUE='#'
+  USE_ROOT_FALSE=
+fi
+
+
+
+# lhapdf isn't technically needed
+# AH_TEMPLATE([HAVE_LHAPDF],[Define 1 if lhapdf is installed])
+# AC_PATH_PROG(LHAPDFPATH,lhapdf-config,no)
+
+HOPPET_LDFLAGS=
+HOPPET_CXXFLAGS=
+
+# Extract the first word of "hoppet-config", so it can be a program name with args.
+set dummy hoppet-config; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_path_HOPPETPATH+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  case $HOPPETPATH in
+  [\\/]* | ?:[\\/]*)
+  ac_cv_path_HOPPETPATH="$HOPPETPATH" # Let the user override the test with a path.
+  ;;
+  *)
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_path_HOPPETPATH="$as_dir$ac_word$ac_exec_ext"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+  test -z "$ac_cv_path_HOPPETPATH" && ac_cv_path_HOPPETPATH="no"
+  ;;
+esac
+fi
+HOPPETPATH=$ac_cv_path_HOPPETPATH
+if test -n "$HOPPETPATH"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $HOPPETPATH" >&5
+printf "%s\n" "$HOPPETPATH" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+if test "$HOPPETPATH" = "no"; then
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: ********************************************************************" >&5
+printf "%s\n" "$as_me: WARNING: ********************************************************************" >&2;}
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: " >&5
+printf "%s\n" "$as_me: WARNING: " >&2;}
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING:    HOPPET was not found - you will not be able to arbitrarily" >&5
+printf "%s\n" "$as_me: WARNING:    HOPPET was not found - you will not be able to arbitrarily" >&2;}
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING:    change the factorisation scale in the convolution" >&5
+printf "%s\n" "$as_me: WARNING:    change the factorisation scale in the convolution" >&2;}
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: " >&5
+printf "%s\n" "$as_me: WARNING: " >&2;}
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: ********************************************************************" >&5
+printf "%s\n" "$as_me: WARNING: ********************************************************************" >&2;}
+else
+     HOPPET_LDFLAGS=$(hoppet-config --libs)
+     HOPPET_CXXFLAGS=$(hoppet-config --cxxflags)
+
+printf "%s\n" "#define HAVE_HOPPET 1" >>confdefs.h
+
+fi
+
+
+
+ if test "$HOPPETPATH" != "no" ; then
+  USE_HOPPET_TRUE=
+  USE_HOPPET_FALSE='#'
+else
+  USE_HOPPET_TRUE='#'
+  USE_HOPPET_FALSE=
+fi
+
+
+
+# check for the location of the libgfortran.{so,a} libraries
+# This would not be needed, but some installations have only libgfortran.a
+# and some only libgfortran.so, so we can't simply use -print-file-name
+# with a single name to get the path.
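+# (Illustrative example, exact paths vary by system: on a typical Linux
+#  installation "gfortran --print-file-name=libgfortran.so" might print
+#  something like /usr/lib/gcc/x86_64-linux-gnu/12/libgfortran.so, and
+#  stripping the trailing "libgfortran.so" leaves the directory that is
+#  passed to the linker via -L below.)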
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: Checking for gfortran library " >&5
+printf "%s\n" "$as_me: Checking for gfortran library " >&6;}
+
+FRTLIB=$(gfortran --print-file-name=libgfortran.so )
+FRTLPATH=$( echo $FRTLIB | sed 's|libgfortran.so||')
+
+if test "$FRTLPATH" != ""; then
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: found libgfortran.so" >&5
+printf "%s\n" "$as_me: found libgfortran.so" >&6;}
+     if test "$FRTLPATH" != ""; then
+  USE_FRTLSO_TRUE=
+  USE_FRTLSO_FALSE='#'
+else
+  USE_FRTLSO_TRUE='#'
+  USE_FRTLSO_FALSE=
+fi
+
+    FRTLLIB=" -L$(\cd $FRTLPATH && \pwd) -lgfortran "
+
+else
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: libgfortran.so not found - checking for libgfortran.a" >&5
+printf "%s\n" "$as_me: WARNING: libgfortran.so not found - checking for libgfortran.a" >&2;}
+    FRTLIB=$(gfortran --print-file-name=libgfortran.a)
+    FRTLPATH=$(echo $FRTLIB | sed 's|libgfortran.a||')
+    if test "$FRTLPATH" != ""; then
+         if ! test "$FRTLPATH" != ""; then
+  USE_FRTLSO_TRUE=
+  USE_FRTLSO_FALSE='#'
+else
+  USE_FRTLSO_TRUE='#'
+  USE_FRTLSO_FALSE=
+fi
+
+        { printf "%s\n" "$as_me:${as_lineno-$LINENO}: found libgfortran.a" >&5
+printf "%s\n" "$as_me: found libgfortran.a" >&6;}
+        FRTLLIB=" -L$(\cd $FRTLPATH && \pwd) -lgfortran "
+
+    else
+         if test "x$FRTLPATH" != "x"; then
+  USE_FRTLSO_TRUE=
+  USE_FRTLSO_FALSE='#'
+else
+  USE_FRTLSO_TRUE='#'
+  USE_FRTLSO_FALSE=
+fi
+
+        FRTLLIB=""
+
+        { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: gfortran library not found - may not be able to link against Fortran code or HOPPET" >&5
+printf "%s\n" "$as_me: WARNING: gfortran library not found - may not be able to link against Fortran code or HOPPET" >&2;}
+    fi
+fi
+
+
+
+
+
+LHAPDF_MAJOR_VERSION=0
+LHAPDF_CXXFLAGS=
+
+# Extract the first word of "lhapdf-config", so it can be a program name with args.
+set dummy lhapdf-config; ac_word=$2
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+printf %s "checking for $ac_word... " >&6; }
+if test ${ac_cv_path_LHAPDFPATH+y}
+then :
+  printf %s "(cached) " >&6
+else $as_nop
+  case $LHAPDFPATH in
+  [\\/]* | ?:[\\/]*)
+  ac_cv_path_LHAPDFPATH="$LHAPDFPATH" # Let the user override the test with a path.
+  ;;
+  *)
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+    ac_cv_path_LHAPDFPATH="$as_dir$ac_word$ac_exec_ext"
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+  test -z "$ac_cv_path_LHAPDFPATH" && ac_cv_path_LHAPDFPATH="no"
+  ;;
+esac
+fi
+LHAPDFPATH=$ac_cv_path_LHAPDFPATH
+if test -n "$LHAPDFPATH"; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LHAPDFPATH" >&5
+printf "%s\n" "$LHAPDFPATH" >&6; }
+else
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+printf "%s\n" "no" >&6; }
+fi
+
+
+if test "$LHAPDFPATH" != "no"; then
+
+    # get LHAPDF major version - do nothing if version 6 is not installed
+    LHAPDF_MAJOR_VERSION=$(lhapdf-config --version | sed 's/\..*//')
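+    # (for illustration: a reported version string such as "6.5.4" is
+    #  reduced by the sed expression above to its major number "6")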
+
+    if test $LHAPDF_MAJOR_VERSION -gt 5; then
+
+       # get the LHAPDF flags (taken from lhapdf-config --libs here)
+       LHAPDF_CXXFLAGS=$(lhapdf-config --libs)
+
+
+printf "%s\n" "#define HAVE_LHAPDF 1" >>confdefs.h
+
+
+       LHAPDF_VERSION=$(lhapdf-config --version)
+
+
+
+printf "%s\n" "#define LHAPDF_VERSION \"$LHAPDF_VERSION\"" >>confdefs.h
+
+
+    fi
+fi
+
+
+ if test $LHAPDF_MAJOR_VERSION -gt 5  ; then
+  USE_LHAPDF_TRUE=
+  USE_LHAPDF_FALSE='#'
+else
+  USE_LHAPDF_TRUE='#'
+  USE_LHAPDF_FALSE=
+fi
+
+
+
+ac_config_files="$ac_config_files bin/Makefile bin/applgrid-config"
+
+
+ac_config_files="$ac_config_files Makefile src/Makefile"
+
+cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems.  If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, we kill variables containing newlines.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(
+  for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
+    eval ac_val=\$$ac_var
+    case $ac_val in #(
+    *${as_nl}*)
+      case $ac_var in #(
+      *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+      esac
+      case $ac_var in #(
+      _ | IFS | as_nl) ;; #(
+      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+      *) { eval $ac_var=; unset $ac_var;} ;;
+      esac ;;
+    esac
+  done
+
+  (set) 2>&1 |
+    case $as_nl`(ac_space=' '; set) 2>&1` in #(
+    *${as_nl}ac_space=\ *)
+      # `set' does not quote correctly, so add quotes: double-quote
+      # substitution turns \\\\ into \\, and sed turns \\ into \.
+      sed -n \
+	"s/'/'\\\\''/g;
+	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+      ;; #(
+    *)
+      # `set' quotes correctly as required by POSIX, so do not add quotes.
+      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+      ;;
+    esac |
+    sort
+) |
+  sed '
+     /^ac_cv_env_/b end
+     t clear
+     :clear
+     s/^\([^=]*\)=\(.*[{}].*\)$/test ${\1+y} || &/
+     t end
+     s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+     :end' >>confcache
+if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
+  if test -w "$cache_file"; then
+    if test "x$cache_file" != "x/dev/null"; then
+      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
+printf "%s\n" "$as_me: updating cache $cache_file" >&6;}
+      if test ! -f "$cache_file" || test -h "$cache_file"; then
+	cat confcache >"$cache_file"
+      else
+        case $cache_file in #(
+        */* | ?:*)
+	  mv -f confcache "$cache_file"$$ &&
+	  mv -f "$cache_file"$$ "$cache_file" ;; #(
+        *)
+	  mv -f confcache "$cache_file" ;;
+	esac
+      fi
+    fi
+  else
+    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
+printf "%s\n" "$as_me: not updating unwritable cache $cache_file" >&6;}
+  fi
+fi
+rm -f confcache
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+DEFS=-DHAVE_CONFIG_H
+
+ac_libobjs=
+ac_ltlibobjs=
+for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+  # 1. Remove the extension, and $U if already installed.
+  ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
+  ac_i=`printf "%s\n" "$ac_i" | sed "$ac_script"`
+  # 2. Prepend LIBOBJDIR.  When used with automake>=1.10 LIBOBJDIR
+  #    will be set to the directory where LIBOBJS objects are built.
+  as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
+  as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo'
+done
+LIBOBJS=$ac_libobjs
+
+LTLIBOBJS=$ac_ltlibobjs
+
+
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5
+printf %s "checking that generated files are newer than configure... " >&6; }
+   if test -n "$am_sleep_pid"; then
+     # Hide warnings about reused PIDs.
+     wait $am_sleep_pid 2>/dev/null
+   fi
+   { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: done" >&5
+printf "%s\n" "done" >&6; }
+ if test -n "$EXEEXT"; then
+  am__EXEEXT_TRUE=
+  am__EXEEXT_FALSE='#'
+else
+  am__EXEEXT_TRUE='#'
+  am__EXEEXT_FALSE=
+fi
+
+if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then
+  as_fn_error $? "conditional \"AMDEP\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then
+  as_fn_error $? "conditional \"am__fastdepCC\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then
+  as_fn_error $? "conditional \"am__fastdepCXX\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${USE_ROOT_TRUE}" && test -z "${USE_ROOT_FALSE}"; then
+  as_fn_error $? "conditional \"USE_ROOT\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${USE_HOPPET_TRUE}" && test -z "${USE_HOPPET_FALSE}"; then
+  as_fn_error $? "conditional \"USE_HOPPET\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${USE_FRTLSO_TRUE}" && test -z "${USE_FRTLSO_FALSE}"; then
+  as_fn_error $? "conditional \"USE_FRTLSO\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${USE_FRTLSO_TRUE}" && test -z "${USE_FRTLSO_FALSE}"; then
+  as_fn_error $? "conditional \"USE_FRTLSO\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${USE_FRTLSO_TRUE}" && test -z "${USE_FRTLSO_FALSE}"; then
+  as_fn_error $? "conditional \"USE_FRTLSO\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${USE_LHAPDF_TRUE}" && test -z "${USE_LHAPDF_FALSE}"; then
+  as_fn_error $? "conditional \"USE_LHAPDF\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+
+: "${CONFIG_STATUS=./config.status}"
+ac_write_fail=0
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
+printf "%s\n" "$as_me: creating $CONFIG_STATUS" >&6;}
+as_write_fail=0
+cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+
+SHELL=\${CONFIG_SHELL-$SHELL}
+export SHELL
+_ASEOF
+cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+as_nop=:
+if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1
+then :
+  emulate sh
+  NULLCMD=:
+  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+  # is contrary to our usage.  Disable this feature.
+  alias -g '${1+"$@"}'='"$@"'
+  setopt NO_GLOB_SUBST
+else $as_nop
+  case `(set -o) 2>/dev/null` in #(
+  *posix*) :
+    set -o posix ;; #(
+  *) :
+     ;;
+esac
+fi
+
+
+
+# Reset variables that may have inherited troublesome values from
+# the environment.
+
+# IFS needs to be set, to space, tab, and newline, in precisely that order.
+# (If _AS_PATH_WALK were called with IFS unset, it would have the
+# side effect of setting IFS to empty, thus disabling word splitting.)
+# Quoting is to prevent editors from complaining about space-tab.
+as_nl='
+'
+export as_nl
+IFS=" ""	$as_nl"
+
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# Ensure predictable behavior from utilities with locale-dependent output.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# We cannot yet rely on "unset" to work, but we need these variables
+# to be unset--not just set to an empty or harmless value--now, to
+# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh).  This construct
+# also avoids known problems related to "unset" and subshell syntax
+# in other old shells (e.g. bash 2.01 and pdksh 5.2.14).
+for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH
+do eval test \${$as_var+y} \
+  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+
+# Ensure that fds 0, 1, and 2 are open.
+if (exec 3>&0) 2>/dev/null; then :; else exec 0</dev/null; fi
+if (exec 3>&1) 2>/dev/null; then :; else exec 1>/dev/null; fi
+if (exec 3>&2)            ; then :; else exec 2>/dev/null; fi
+
+# The user is always right.
+if ${PATH_SEPARATOR+false} :; then
+  PATH_SEPARATOR=:
+  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+      PATH_SEPARATOR=';'
+  }
+fi
+
+
+# Find who we are.  Look in the path if we contain no directory separator.
+as_myself=
+case $0 in #((
+  *[\\/]* ) as_myself=$0 ;;
+  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  case $as_dir in #(((
+    '') as_dir=./ ;;
+    */) ;;
+    *) as_dir=$as_dir/ ;;
+  esac
+    test -r "$as_dir$0" && as_myself=$as_dir$0 && break
+  done
+IFS=$as_save_IFS
+
+     ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+  as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+  printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+  exit 1
+fi
+
+
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+  as_status=$1; test $as_status -eq 0 && as_status=1
+  if test "$4"; then
+    as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+    printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+  fi
+  printf "%s\n" "$as_me: error: $2" >&2
+  as_fn_exit $as_status
+} # as_fn_error
+
+
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+  return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+  set +e
+  as_fn_set_status $1
+  exit $1
+} # as_fn_exit
+
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+  { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null
+then :
+  eval 'as_fn_append ()
+  {
+    eval $1+=\$2
+  }'
+else $as_nop
+  as_fn_append ()
+  {
+    eval $1=\$$1\$2
+  }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null
+then :
+  eval 'as_fn_arith ()
+  {
+    as_val=$(( $* ))
+  }'
+else $as_nop
+  as_fn_arith ()
+  {
+    as_val=`expr "$@" || test $? -eq 1`
+  }
+fi # as_fn_arith
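+# Illustrative use of the helpers above (example values only):
+#   as_fn_append as_dirs " '/tmp'"    # extend $as_dirs without forking
+#   as_fn_arith 2 + 3                 # leaves the result, 5, in $as_val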
+
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+   test "X`expr 00001 : '.*\(...\)'`" = X001; then
+  as_expr=expr
+else
+  as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+  as_basename=basename
+else
+  as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+  as_dirname=dirname
+else
+  as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+	 X"$0" : 'X\(//\)$' \| \
+	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+printf "%s\n" X/"$0" |
+    sed '/^.*\/\([^/][^/]*\)\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+
+# Determine whether it's possible to make 'echo' print without a newline.
+# These variables are no longer used directly by Autoconf, but are AC_SUBSTed
+# for compatibility with existing Makefiles.
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+  case `echo 'xy\c'` in
+  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
+  xy)  ECHO_C='\c';;
+  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
+       ECHO_T='	';;
+  esac;;
+*)
+  ECHO_N='-n';;
+esac
+
+# For backward compatibility with old third-party macros, we provide
+# the shell variables $as_echo and $as_echo_n.  New code should use
+# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively.
+as_echo='printf %s\n'
+as_echo_n='printf %s'
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+  rm -f conf$$.dir/conf$$.file
+else
+  rm -f conf$$.dir
+  mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+  if ln -s conf$$.file conf$$ 2>/dev/null; then
+    as_ln_s='ln -s'
+    # ... but there are two gotchas:
+    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+    # In both cases, we have to default to `cp -pR'.
+    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+      as_ln_s='cp -pR'
+  elif ln conf$$.file conf$$ 2>/dev/null; then
+    as_ln_s=ln
+  else
+    as_ln_s='cp -pR'
+  fi
+else
+  as_ln_s='cp -pR'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
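+# $as_ln_s now holds the best linking command available here: 'ln -s' where
+# symlinks work, plain 'ln' where only hard links do, or 'cp -pR' otherwise.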
+
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+  case $as_dir in #(
+  -*) as_dir=./$as_dir;;
+  esac
+  test -d "$as_dir" || eval $as_mkdir_p || {
+    as_dirs=
+    while :; do
+      case $as_dir in #(
+      *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+      *) as_qdir=$as_dir;;
+      esac
+      as_dirs="'$as_qdir' $as_dirs"
+      as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$as_dir" : 'X\(//\)[^/]' \| \
+	 X"$as_dir" : 'X\(//\)$' \| \
+	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+printf "%s\n" X"$as_dir" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+      test -d "$as_dir" && break
+    done
+    test -z "$as_dirs" || eval "mkdir $as_dirs"
+  } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+if mkdir -p . 2>/dev/null; then
+  as_mkdir_p='mkdir -p "$as_dir"'
+else
+  test -d ./-p && rmdir ./-p
+  as_mkdir_p=false
+fi
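+# as_fn_mkdir_p above falls back to creating each missing parent by hand
+# when 'mkdir -p' is unusable, quoting every component so directory names
+# containing single quotes survive the final eval'd mkdir.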
+
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+  test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+exec 6>&1
+## ----------------------------------- ##
+## Main body of $CONFIG_STATUS script. ##
+## ----------------------------------- ##
+_ASEOF
+test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# Save the log message, to keep $0 and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling.
+ac_log="
+This file was extended by APPLgrid $as_me 1.6.36, which was
+generated by GNU Autoconf 2.71.  Invocation command line was
+
+  CONFIG_FILES    = $CONFIG_FILES
+  CONFIG_HEADERS  = $CONFIG_HEADERS
+  CONFIG_LINKS    = $CONFIG_LINKS
+  CONFIG_COMMANDS = $CONFIG_COMMANDS
+  $ $0 $@
+
+on `(hostname || uname -n) 2>/dev/null | sed 1q`
+"
+
+_ACEOF
+
+case $ac_config_files in *"
+"*) set x $ac_config_files; shift; ac_config_files=$*;;
+esac
+
+case $ac_config_headers in *"
+"*) set x $ac_config_headers; shift; ac_config_headers=$*;;
+esac
+
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+# Files that config.status was made for.
+config_files="$ac_config_files"
+config_headers="$ac_config_headers"
+config_commands="$ac_config_commands"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ac_cs_usage="\
+\`$as_me' instantiates files and other configuration actions
+from templates according to the current configuration.  Unless the files
+and actions are specified as TAGs, all are instantiated by default.
+
+Usage: $0 [OPTION]... [TAG]...
+
+  -h, --help       print this help, then exit
+  -V, --version    print version number and configuration settings, then exit
+      --config     print configuration, then exit
+  -q, --quiet, --silent
+                   do not print progress messages
+  -d, --debug      don't remove temporary files
+      --recheck    update $as_me by reconfiguring in the same conditions
+      --file=FILE[:TEMPLATE]
+                   instantiate the configuration file FILE
+      --header=FILE[:TEMPLATE]
+                   instantiate the configuration header FILE
+
+Configuration files:
+$config_files
+
+Configuration headers:
+$config_headers
+
+Configuration commands:
+$config_commands
+
+Report bugs to <sutt@cern.ch>.
+APPLgrid home page: <https://applgrid.hepforge.org>."
+
+_ACEOF
+ac_cs_config=`printf "%s\n" "$ac_configure_args" | sed "$ac_safe_unquote"`
+ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\''/g"`
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_cs_config='$ac_cs_config_escaped'
+ac_cs_version="\\
+APPLgrid config.status 1.6.36
+configured by $0, generated by GNU Autoconf 2.71,
+  with options \\"\$ac_cs_config\\"
+
+Copyright (C) 2021 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+ac_pwd='$ac_pwd'
+srcdir='$srcdir'
+INSTALL='$INSTALL'
+MKDIR_P='$MKDIR_P'
+AWK='$AWK'
+test -n "\$AWK" || AWK=awk
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# The default lists apply if the user does not specify any file.
+ac_need_defaults=:
+while test $# != 0
+do
+  case $1 in
+  --*=?*)
+    ac_option=`expr "X$1" : 'X\([^=]*\)='`
+    ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
+    ac_shift=:
+    ;;
+  --*=)
+    ac_option=`expr "X$1" : 'X\([^=]*\)='`
+    ac_optarg=
+    ac_shift=:
+    ;;
+  *)
+    ac_option=$1
+    ac_optarg=$2
+    ac_shift=shift
+    ;;
+  esac
+
+  case $ac_option in
+  # Handling of the options.
+  -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+    ac_cs_recheck=: ;;
+  --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+    printf "%s\n" "$ac_cs_version"; exit ;;
+  --config | --confi | --conf | --con | --co | --c )
+    printf "%s\n" "$ac_cs_config"; exit ;;
+  --debug | --debu | --deb | --de | --d | -d )
+    debug=: ;;
+  --file | --fil | --fi | --f )
+    $ac_shift
+    case $ac_optarg in
+    *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+    '') as_fn_error $? "missing file argument" ;;
+    esac
+    as_fn_append CONFIG_FILES " '$ac_optarg'"
+    ac_need_defaults=false;;
+  --header | --heade | --head | --hea )
+    $ac_shift
+    case $ac_optarg in
+    *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+    esac
+    as_fn_append CONFIG_HEADERS " '$ac_optarg'"
+    ac_need_defaults=false;;
+  --he | --h)
+    # Conflict between --help and --header
+    as_fn_error $? "ambiguous option: \`$1'
+Try \`$0 --help' for more information.";;
+  --help | --hel | -h )
+    printf "%s\n" "$ac_cs_usage"; exit ;;
+  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+  | -silent | --silent | --silen | --sile | --sil | --si | --s)
+    ac_cs_silent=: ;;
+
+  # This is an error.
+  -*) as_fn_error $? "unrecognized option: \`$1'
+Try \`$0 --help' for more information." ;;
+
+  *) as_fn_append ac_config_targets " $1"
+     ac_need_defaults=false ;;
+
+  esac
+  shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+  exec 6>/dev/null
+  ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+if \$ac_cs_recheck; then
+  set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+  shift
+  \printf "%s\n" "running CONFIG_SHELL=$SHELL \$*" >&6
+  CONFIG_SHELL='$SHELL'
+  export CONFIG_SHELL
+  exec "\$@"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+exec 5>>config.log
+{
+  echo
+  sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+  printf "%s\n" "$ac_log"
+} >&5
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+#
+# INIT-COMMANDS
+#
+AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}"
+
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+sed_quote_subst='$sed_quote_subst'
+double_quote_subst='$double_quote_subst'
+delay_variable_subst='$delay_variable_subst'
+macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`'
+macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`'
+enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`'
+enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`'
+pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`'
+enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`'
+shared_archive_member_spec='`$ECHO "$shared_archive_member_spec" | $SED "$delay_single_quote_subst"`'
+SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`'
+ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`'
+PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`'
+host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`'
+host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`'
+host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`'
+build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`'
+build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`'
+build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`'
+SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`'
+Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`'
+GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`'
+EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`'
+FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`'
+LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`'
+NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`'
+LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`'
+max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`'
+ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`'
+exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`'
+lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`'
+lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`'
+lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`'
+lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`'
+lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`'
+reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`'
+reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`'
+OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`'
+deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`'
+file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`'
+file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`'
+want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`'
+DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`'
+sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`'
+AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`'
+AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`'
+archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`'
+STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`'
+RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`'
+old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`'
+old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`'
+old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`'
+lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`'
+CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`'
+CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`'
+compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`'
+GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_to_import='`$ECHO "$lt_cv_sys_global_symbol_to_import" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`'
+lt_cv_nm_interface='`$ECHO "$lt_cv_nm_interface" | $SED "$delay_single_quote_subst"`'
+nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`'
+lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`'
+lt_cv_truncate_bin='`$ECHO "$lt_cv_truncate_bin" | $SED "$delay_single_quote_subst"`'
+objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`'
+MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`'
+lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`'
+need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`'
+MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`'
+DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`'
+NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`'
+LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`'
+OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`'
+OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`'
+libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`'
+shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`'
+extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`'
+archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`'
+enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`'
+export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`'
+whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`'
+compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`'
+old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`'
+old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`'
+archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`'
+archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`'
+module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`'
+module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`'
+with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`'
+allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`'
+no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`'
+hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`'
+hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`'
+hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`'
+hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`'
+hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`'
+hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`'
+hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`'
+inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`'
+link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`'
+always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`'
+export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`'
+exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`'
+include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`'
+prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`'
+postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`'
+file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`'
+variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`'
+need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`'
+need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`'
+version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`'
+runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`'
+shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`'
+shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`'
+libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`'
+library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`'
+soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`'
+install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`'
+postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`'
+postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`'
+finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`'
+finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`'
+hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`'
+sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`'
+configure_time_dlsearch_path='`$ECHO "$configure_time_dlsearch_path" | $SED "$delay_single_quote_subst"`'
+configure_time_lt_sys_library_path='`$ECHO "$configure_time_lt_sys_library_path" | $SED "$delay_single_quote_subst"`'
+hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`'
+enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`'
+enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`'
+enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`'
+old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`'
+striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`'
+compiler_lib_search_dirs='`$ECHO "$compiler_lib_search_dirs" | $SED "$delay_single_quote_subst"`'
+predep_objects='`$ECHO "$predep_objects" | $SED "$delay_single_quote_subst"`'
+postdep_objects='`$ECHO "$postdep_objects" | $SED "$delay_single_quote_subst"`'
+predeps='`$ECHO "$predeps" | $SED "$delay_single_quote_subst"`'
+postdeps='`$ECHO "$postdeps" | $SED "$delay_single_quote_subst"`'
+compiler_lib_search_path='`$ECHO "$compiler_lib_search_path" | $SED "$delay_single_quote_subst"`'
+LD_CXX='`$ECHO "$LD_CXX" | $SED "$delay_single_quote_subst"`'
+reload_flag_CXX='`$ECHO "$reload_flag_CXX" | $SED "$delay_single_quote_subst"`'
+reload_cmds_CXX='`$ECHO "$reload_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`'
+GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`'
+lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`'
+archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`'
+enable_shared_with_static_runtimes_CXX='`$ECHO "$enable_shared_with_static_runtimes_CXX" | $SED "$delay_single_quote_subst"`'
+export_dynamic_flag_spec_CXX='`$ECHO "$export_dynamic_flag_spec_CXX" | $SED "$delay_single_quote_subst"`'
+whole_archive_flag_spec_CXX='`$ECHO "$whole_archive_flag_spec_CXX" | $SED "$delay_single_quote_subst"`'
+compiler_needs_object_CXX='`$ECHO "$compiler_needs_object_CXX" | $SED "$delay_single_quote_subst"`'
+old_archive_from_new_cmds_CXX='`$ECHO "$old_archive_from_new_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+old_archive_from_expsyms_cmds_CXX='`$ECHO "$old_archive_from_expsyms_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+archive_cmds_CXX='`$ECHO "$archive_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+archive_expsym_cmds_CXX='`$ECHO "$archive_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+module_cmds_CXX='`$ECHO "$module_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+module_expsym_cmds_CXX='`$ECHO "$module_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`'
+allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`'
+no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_minus_L_CXX='`$ECHO "$hardcode_minus_L_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`'
+inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`'
+link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`'
+always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`'
+export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`'
+include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`'
+prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`'
+compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`'
+predep_objects_CXX='`$ECHO "$predep_objects_CXX" | $SED "$delay_single_quote_subst"`'
+postdep_objects_CXX='`$ECHO "$postdep_objects_CXX" | $SED "$delay_single_quote_subst"`'
+predeps_CXX='`$ECHO "$predeps_CXX" | $SED "$delay_single_quote_subst"`'
+postdeps_CXX='`$ECHO "$postdeps_CXX" | $SED "$delay_single_quote_subst"`'
+compiler_lib_search_path_CXX='`$ECHO "$compiler_lib_search_path_CXX" | $SED "$delay_single_quote_subst"`'
+
+LTCC='$LTCC'
+LTCFLAGS='$LTCFLAGS'
+compiler='$compiler_DEFAULT'
+
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+  eval 'cat <<_LTECHO_EOF
+\$1
+_LTECHO_EOF'
+}
+
+# Quote evaled strings.
+for var in SHELL \
+ECHO \
+PATH_SEPARATOR \
+SED \
+GREP \
+EGREP \
+FGREP \
+LD \
+NM \
+LN_S \
+lt_SP2NL \
+lt_NL2SP \
+reload_flag \
+OBJDUMP \
+deplibs_check_method \
+file_magic_cmd \
+file_magic_glob \
+want_nocaseglob \
+DLLTOOL \
+sharedlib_from_linklib_cmd \
+AR \
+AR_FLAGS \
+archiver_list_spec \
+STRIP \
+RANLIB \
+CC \
+CFLAGS \
+compiler \
+lt_cv_sys_global_symbol_pipe \
+lt_cv_sys_global_symbol_to_cdecl \
+lt_cv_sys_global_symbol_to_import \
+lt_cv_sys_global_symbol_to_c_name_address \
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \
+lt_cv_nm_interface \
+nm_file_list_spec \
+lt_cv_truncate_bin \
+lt_prog_compiler_no_builtin_flag \
+lt_prog_compiler_pic \
+lt_prog_compiler_wl \
+lt_prog_compiler_static \
+lt_cv_prog_compiler_c_o \
+need_locks \
+MANIFEST_TOOL \
+DSYMUTIL \
+NMEDIT \
+LIPO \
+OTOOL \
+OTOOL64 \
+shrext_cmds \
+export_dynamic_flag_spec \
+whole_archive_flag_spec \
+compiler_needs_object \
+with_gnu_ld \
+allow_undefined_flag \
+no_undefined_flag \
+hardcode_libdir_flag_spec \
+hardcode_libdir_separator \
+exclude_expsyms \
+include_expsyms \
+file_list_spec \
+variables_saved_for_relink \
+libname_spec \
+library_names_spec \
+soname_spec \
+install_override_mode \
+finish_eval \
+old_striplib \
+striplib \
+compiler_lib_search_dirs \
+predep_objects \
+postdep_objects \
+predeps \
+postdeps \
+compiler_lib_search_path \
+LD_CXX \
+reload_flag_CXX \
+compiler_CXX \
+lt_prog_compiler_no_builtin_flag_CXX \
+lt_prog_compiler_pic_CXX \
+lt_prog_compiler_wl_CXX \
+lt_prog_compiler_static_CXX \
+lt_cv_prog_compiler_c_o_CXX \
+export_dynamic_flag_spec_CXX \
+whole_archive_flag_spec_CXX \
+compiler_needs_object_CXX \
+with_gnu_ld_CXX \
+allow_undefined_flag_CXX \
+no_undefined_flag_CXX \
+hardcode_libdir_flag_spec_CXX \
+hardcode_libdir_separator_CXX \
+exclude_expsyms_CXX \
+include_expsyms_CXX \
+file_list_spec_CXX \
+compiler_lib_search_dirs_CXX \
+predep_objects_CXX \
+postdep_objects_CXX \
+predeps_CXX \
+postdeps_CXX \
+compiler_lib_search_path_CXX; do
+    case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+    *[\\\\\\\`\\"\\\$]*)
+      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes
+      ;;
+    *)
+      eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+      ;;
+    esac
+done
+
+# Double-quote double-evaled strings.
+for var in reload_cmds \
+old_postinstall_cmds \
+old_postuninstall_cmds \
+old_archive_cmds \
+extract_expsyms_cmds \
+old_archive_from_new_cmds \
+old_archive_from_expsyms_cmds \
+archive_cmds \
+archive_expsym_cmds \
+module_cmds \
+module_expsym_cmds \
+export_symbols_cmds \
+prelink_cmds \
+postlink_cmds \
+postinstall_cmds \
+postuninstall_cmds \
+finish_cmds \
+sys_lib_search_path_spec \
+configure_time_dlsearch_path \
+configure_time_lt_sys_library_path \
+reload_cmds_CXX \
+old_archive_cmds_CXX \
+old_archive_from_new_cmds_CXX \
+old_archive_from_expsyms_cmds_CXX \
+archive_cmds_CXX \
+archive_expsym_cmds_CXX \
+module_cmds_CXX \
+module_expsym_cmds_CXX \
+export_symbols_cmds_CXX \
+prelink_cmds_CXX \
+postlink_cmds_CXX; do
+    case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+    *[\\\\\\\`\\"\\\$]*)
+      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes
+      ;;
+    *)
+      eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+      ;;
+    esac
+done
+
+ac_aux_dir='$ac_aux_dir'
+
+# See if we are running on zsh, and set the options that allow our
+# commands through without removal of \ escapes.
+if test -n "\${ZSH_VERSION+set}"; then
+   setopt NO_GLOB_SUBST
+fi
+
+
+    PACKAGE='$PACKAGE'
+    VERSION='$VERSION'
+    RM='$RM'
+    ofile='$ofile'
+
+
+
+
+
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+
+# Handling of arguments.
+for ac_config_target in $ac_config_targets
+do
+  case $ac_config_target in
+    "amconfig.h") CONFIG_HEADERS="$CONFIG_HEADERS amconfig.h:amconfig.in" ;;
+    "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;;
+    "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;;
+    "bin/Makefile") CONFIG_FILES="$CONFIG_FILES bin/Makefile" ;;
+    "bin/applgrid-config") CONFIG_FILES="$CONFIG_FILES bin/applgrid-config" ;;
+    "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+    "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;;
+
+  *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
+  esac
+done
+
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used.  Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+  test ${CONFIG_FILES+y} || CONFIG_FILES=$config_files
+  test ${CONFIG_HEADERS+y} || CONFIG_HEADERS=$config_headers
+  test ${CONFIG_COMMANDS+y} || CONFIG_COMMANDS=$config_commands
+fi
+
+# Have a temporary directory for convenience.  Make it in the build tree
+# simply because there is no reason against having it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Hook for its removal unless debugging.
+# Note that there is a small window in which the directory will not be cleaned:
+# after its creation but before its name has been assigned to `$tmp'.
+$debug ||
+{
+  tmp= ac_tmp=
+  trap 'exit_status=$?
+  : "${ac_tmp:=$tmp}"
+  { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status
+' 0
+  trap 'as_fn_exit 1' 1 2 13 15
+}
+# Create a (secure) tmp directory for tmp files.
+
+{
+  tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
+  test -d "$tmp"
+}  ||
+{
+  tmp=./conf$$-$RANDOM
+  (umask 077 && mkdir "$tmp")
+} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5
+ac_tmp=$tmp
+
+# Set up the scripts for CONFIG_FILES section.
+# No need to generate them if there are no CONFIG_FILES.
+# This happens for instance with `./config.status config.h'.
+if test -n "$CONFIG_FILES"; then
+
+
+ac_cr=`echo X | tr X '\015'`
+# On cygwin, bash can eat \r inside `` if the user requested igncr.
+# But we know of no other shell where ac_cr would be empty at this
+# point, so we can use a bashism as a fallback.
+if test "x$ac_cr" = x; then
+  eval ac_cr=\$\'\\r\'
+fi
+ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
+if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
+  ac_cs_awk_cr='\\r'
+else
+  ac_cs_awk_cr=$ac_cr
+fi
+
+echo 'BEGIN {' >"$ac_tmp/subs1.awk" &&
+_ACEOF
+
+
+{
+  echo "cat >conf$$subs.awk <<_ACEOF" &&
+  echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
+  echo "_ACEOF"
+} >conf$$subs.sh ||
+  as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'`
+ac_delim='%!_!# '
+for ac_last_try in false false false false false :; do
+  . ./conf$$subs.sh ||
+    as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+
+  ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
+  if test $ac_delim_n = $ac_delim_num; then
+    break
+  elif $ac_last_try; then
+    as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+  else
+    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+  fi
+done
+rm -f conf$$subs.sh
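+# The loop above keeps lengthening $ac_delim until none of the substituted
+# values contains it, so every "VAR!VALUE$ac_delim" record written to
+# conf$$subs.awk can be split unambiguously by the sed script below.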
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK &&
+_ACEOF
+sed -n '
+h
+s/^/S["/; s/!.*/"]=/
+p
+g
+s/^[^!]*!//
+:repl
+t repl
+s/'"$ac_delim"'$//
+t delim
+:nl
+h
+s/\(.\{148\}\)..*/\1/
+t more1
+s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
+p
+n
+b repl
+:more1
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t nl
+:delim
+h
+s/\(.\{148\}\)..*/\1/
+t more2
+s/["\\]/\\&/g; s/^/"/; s/$/"/
+p
+b
+:more2
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t delim
+' <conf$$subs.awk | sed '
+/^[^""]/{
+  N
+  s/\n//
+}
+' >>$CONFIG_STATUS || ac_write_fail=1
+rm -f conf$$subs.awk
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACAWK
+cat >>"\$ac_tmp/subs1.awk" <<_ACAWK &&
+  for (key in S) S_is_set[key] = 1
+  FS = ""
+
+}
+{
+  line = $ 0
+  nfields = split(line, field, "@")
+  substed = 0
+  len = length(field[1])
+  for (i = 2; i < nfields; i++) {
+    key = field[i]
+    keylen = length(key)
+    if (S_is_set[key]) {
+      value = S[key]
+      line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
+      len += length(value) + length(field[++i])
+      substed = 1
+    } else
+      len += 1 + keylen
+  }
+
+  print line
+}
+
+_ACAWK
+_ACEOF
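+# Once inside config.status, the awk program above splits each template line
+# on '@', looks the enclosed name up in S[], and splices in the substituted
+# text.  For illustration only: a line such as "prefix = @prefix@" comes out
+# as "prefix = /usr/local" when S["prefix"] holds "/usr/local".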
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
+  sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
+else
+  cat
+fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \
+  || as_fn_error $? "could not setup config files machinery" "$LINENO" 5
+_ACEOF
+
+# VPATH may cause trouble with some makes, so we remove sole $(srcdir),
+# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+  ac_vpsub='/^[	 ]*VPATH[	 ]*=[	 ]*/{
+h
+s///
+s/^/:/
+s/[	 ]*$/:/
+s/:\$(srcdir):/:/g
+s/:\${srcdir}:/:/g
+s/:@srcdir@:/:/g
+s/^:*//
+s/:*$//
+x
+s/\(=[	 ]*\).*/\1/
+G
+s/\n//
+s/^[^=]*=[	 ]*$//
+}'
+fi
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+fi # test -n "$CONFIG_FILES"
+
+# Set up the scripts for CONFIG_HEADERS section.
+# No need to generate them if there are no CONFIG_HEADERS.
+# This happens for instance with `./config.status Makefile'.
+if test -n "$CONFIG_HEADERS"; then
+cat >"$ac_tmp/defines.awk" <<\_ACAWK ||
+BEGIN {
+_ACEOF
+
+# Transform confdefs.h into an awk script `defines.awk', embedded as
+# here-document in config.status, that substitutes the proper values into
+# config.h.in to produce config.h.
+
+# Create a delimiter string that does not exist in confdefs.h, to ease
+# handling of long lines.
+ac_delim='%!_!# '
+for ac_last_try in false false :; do
+  ac_tt=`sed -n "/$ac_delim/p" confdefs.h`
+  if test -z "$ac_tt"; then
+    break
+  elif $ac_last_try; then
+    as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5
+  else
+    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+  fi
+done
+
+# For the awk script, D is an array of macro values keyed by name,
+# likewise P contains macro parameters if any.  Preserve backslash
+# newline sequences.
+
+ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
+sed -n '
+s/.\{148\}/&'"$ac_delim"'/g
+t rset
+:rset
+s/^[	 ]*#[	 ]*define[	 ][	 ]*/ /
+t def
+d
+:def
+s/\\$//
+t bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3"/p
+s/^ \('"$ac_word_re"'\)[	 ]*\(.*\)/D["\1"]=" \2"/p
+d
+:bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3\\\\\\n"\\/p
+t cont
+s/^ \('"$ac_word_re"'\)[	 ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p
+t cont
+d
+:cont
+n
+s/.\{148\}/&'"$ac_delim"'/g
+t clear
+:clear
+s/\\$//
+t bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/"/p
+d
+:bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p
+b cont
+' <confdefs.h | sed '
+s/'"$ac_delim"'/"\\\
+"/g' >>$CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+  for (key in D) D_is_set[key] = 1
+  FS = ""
+}
+/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ {
+  line = \$ 0
+  split(line, arg, " ")
+  if (arg[1] == "#") {
+    defundef = arg[2]
+    mac1 = arg[3]
+  } else {
+    defundef = substr(arg[1], 2)
+    mac1 = arg[2]
+  }
+  split(mac1, mac2, "(") #)
+  macro = mac2[1]
+  prefix = substr(line, 1, index(line, defundef) - 1)
+  if (D_is_set[macro]) {
+    # Preserve the white space surrounding the "#".
+    print prefix "define", macro P[macro] D[macro]
+    next
+  } else {
+    # Replace #undef with comments.  This is necessary, for example,
+    # in the case of _POSIX_SOURCE, which is predefined and required
+    # on some systems where configure will not decide to define it.
+    if (defundef == "undef") {
+      print "/*", prefix defundef, macro, "*/"
+      next
+    }
+  }
+}
+{ print }
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+  as_fn_error $? "could not setup config headers machinery" "$LINENO" 5
+fi # test -n "$CONFIG_HEADERS"
+
+
+eval set X "  :F $CONFIG_FILES  :H $CONFIG_HEADERS    :C $CONFIG_COMMANDS"
+shift
+for ac_tag
+do
+  case $ac_tag in
+  :[FHLC]) ac_mode=$ac_tag; continue;;
+  esac
+  case $ac_mode$ac_tag in
+  :[FHL]*:*);;
+  :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;;
+  :[FH]-) ac_tag=-:-;;
+  :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
+  esac
+  ac_save_IFS=$IFS
+  IFS=:
+  set x $ac_tag
+  IFS=$ac_save_IFS
+  shift
+  ac_file=$1
+  shift
+
+  case $ac_mode in
+  :L) ac_source=$1;;
+  :[FH])
+    ac_file_inputs=
+    for ac_f
+    do
+      case $ac_f in
+      -) ac_f="$ac_tmp/stdin";;
+      *) # Look for the file first in the build tree, then in the source tree
+	 # (if the path is not absolute).  The absolute path cannot be DOS-style,
+	 # because $ac_f cannot contain `:'.
+	 test -f "$ac_f" ||
+	   case $ac_f in
+	   [\\/$]*) false;;
+	   *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
+	   esac ||
+	   as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;;
+      esac
+      case $ac_f in *\'*) ac_f=`printf "%s\n" "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
+      as_fn_append ac_file_inputs " '$ac_f'"
+    done
+
+    # Let's still pretend it is `configure' which instantiates (i.e., don't
+    # use $as_me); people would be surprised to read:
+    #    /* config.h.  Generated by config.status.  */
+    configure_input='Generated from '`
+	  printf "%s\n" "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
+	`' by configure.'
+    if test x"$ac_file" != x-; then
+      configure_input="$ac_file.  $configure_input"
+      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
+printf "%s\n" "$as_me: creating $ac_file" >&6;}
+    fi
+    # Neutralize special characters interpreted by sed in replacement strings.
+    case $configure_input in #(
+    *\&* | *\|* | *\\* )
+       ac_sed_conf_input=`printf "%s\n" "$configure_input" |
+       sed 's/[\\\\&|]/\\\\&/g'`;; #(
+    *) ac_sed_conf_input=$configure_input;;
+    esac
+
+    case $ac_tag in
+    *:-:* | *:-) cat >"$ac_tmp/stdin" \
+      || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;;
+    esac
+    ;;
+  esac
+
+  ac_dir=`$as_dirname -- "$ac_file" ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$ac_file" : 'X\(//\)[^/]' \| \
+	 X"$ac_file" : 'X\(//\)$' \| \
+	 X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+printf "%s\n" X"$ac_file" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+  as_dir="$ac_dir"; as_fn_mkdir_p
+  ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+  ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'`
+  # A ".." for each directory in $ac_dir_suffix.
+  ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+  case $ac_top_builddir_sub in
+  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+  esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+  .)  # We are building in place.
+    ac_srcdir=.
+    ac_top_srcdir=$ac_top_builddir_sub
+    ac_abs_top_srcdir=$ac_pwd ;;
+  [\\/]* | ?:[\\/]* )  # Absolute name.
+    ac_srcdir=$srcdir$ac_dir_suffix;
+    ac_top_srcdir=$srcdir
+    ac_abs_top_srcdir=$srcdir ;;
+  *) # Relative name.
+    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+    ac_top_srcdir=$ac_top_build_prefix$srcdir
+    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+
+  case $ac_mode in
+  :F)
+  #
+  # CONFIG_FILE
+  #
+
+  case $INSTALL in
+  [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+  *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
+  esac
+  ac_MKDIR_P=$MKDIR_P
+  case $MKDIR_P in
+  [\\/$]* | ?:[\\/]* ) ;;
+  */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;;
+  esac
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# If the template does not know about datarootdir, expand it.
+# FIXME: This hack should be removed a few years after 2.60.
+ac_datarootdir_hack=; ac_datarootdir_seen=
+ac_sed_dataroot='
+/datarootdir/ {
+  p
+  q
+}
+/@datadir@/p
+/@docdir@/p
+/@infodir@/p
+/@localedir@/p
+/@mandir@/p'
+case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
+*datarootdir*) ac_datarootdir_seen=yes;;
+*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+printf "%s\n" "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+  ac_datarootdir_hack='
+  s&@datadir@&$datadir&g
+  s&@docdir@&$docdir&g
+  s&@infodir@&$infodir&g
+  s&@localedir@&$localedir&g
+  s&@mandir@&$mandir&g
+  s&\\\${datarootdir}&$datarootdir&g' ;;
+esac
+_ACEOF
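+# With ac_datarootdir_hack active, templates that still use @datadir@,
+# @docdir@, @infodir@, @localedir@ or @mandir@ directly (rather than via
+# @datarootdir@) have those values expanded here anyway, after the warning
+# above has been issued.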
+
+# Neutralize VPATH when `$srcdir' = `.'.
+# Shell code in configure.ac might set extrasub.
+# FIXME: do we really want to maintain this feature?
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_sed_extra="$ac_vpsub
+$extrasub
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s|@configure_input@|$ac_sed_conf_input|;t t
+s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@top_build_prefix@&$ac_top_build_prefix&;t t
+s&@srcdir@&$ac_srcdir&;t t
+s&@abs_srcdir@&$ac_abs_srcdir&;t t
+s&@top_srcdir@&$ac_top_srcdir&;t t
+s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
+s&@builddir@&$ac_builddir&;t t
+s&@abs_builddir@&$ac_abs_builddir&;t t
+s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
+s&@INSTALL@&$ac_INSTALL&;t t
+s&@MKDIR_P@&$ac_MKDIR_P&;t t
+$ac_datarootdir_hack
+"
+eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \
+  >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+
+test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+  { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } &&
+  { ac_out=`sed -n '/^[	 ]*datarootdir[	 ]*:*=/p' \
+      "$ac_tmp/out"`; test -z "$ac_out"; } &&
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined.  Please make sure it is defined" >&5
+printf "%s\n" "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined.  Please make sure it is defined" >&2;}
+
+  rm -f "$ac_tmp/stdin"
+  case $ac_file in
+  -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";;
+  *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";;
+  esac \
+  || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ ;;
+  :H)
+  #
+  # CONFIG_HEADER
+  #
+  if test x"$ac_file" != x-; then
+    {
+      printf "%s\n" "/* $configure_input  */" >&1 \
+      && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs"
+    } >"$ac_tmp/config.h" \
+      || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+    if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then
+      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
+printf "%s\n" "$as_me: $ac_file is unchanged" >&6;}
+    else
+      rm -f "$ac_file"
+      mv "$ac_tmp/config.h" "$ac_file" \
+	|| as_fn_error $? "could not create $ac_file" "$LINENO" 5
+    fi
+  else
+    printf "%s\n" "/* $configure_input  */" >&1 \
+      && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \
+      || as_fn_error $? "could not create -" "$LINENO" 5
+  fi
+# Compute "$ac_file"'s index in $config_headers.
+_am_arg="$ac_file"
+_am_stamp_count=1
+for _am_header in $config_headers :; do
+  case $_am_header in
+    $_am_arg | $_am_arg:* )
+      break ;;
+    * )
+      _am_stamp_count=`expr $_am_stamp_count + 1` ;;
+  esac
+done
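+# $_am_stamp_count is now the 1-based position of "$ac_file" within
+# $config_headers; it names the matching stamp file (stamp-h1, stamp-h2, ...)
+# written just below.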
+echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" ||
+$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$_am_arg" : 'X\(//\)[^/]' \| \
+	 X"$_am_arg" : 'X\(//\)$' \| \
+	 X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null ||
+printf "%s\n" X"$_am_arg" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`/stamp-h$_am_stamp_count
+ ;;
+
+  :C)  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5
+printf "%s\n" "$as_me: executing $ac_file commands" >&6;}
+ ;;
+  esac
+
+
+  case $ac_file$ac_mode in
+    "depfiles":C) test x"$AMDEP_TRUE" != x"" || {
+  # Older Autoconf quotes --file arguments for eval, but not when files
+  # are listed without --file.  Let's play safe and only enable the eval
+  # if we detect the quoting.
+  # TODO: see whether this extra hack can be removed once we start
+  # requiring Autoconf 2.70 or later.
+  case $CONFIG_FILES in #(
+  *\'*) :
+    eval set x "$CONFIG_FILES" ;; #(
+  *) :
+    set x $CONFIG_FILES ;; #(
+  *) :
+     ;;
+esac
+  shift
+  # Used to flag and report bootstrapping failures.
+  am_rc=0
+  for am_mf
+  do
+    # Strip MF so we end up with the name of the file.
+    am_mf=`printf "%s\n" "$am_mf" | sed -e 's/:.*$//'`
+    # Check whether this is an Automake generated Makefile which includes
+    # dependency-tracking related rules and includes.
+    # Grep'ing the whole file directly is not great: AIX grep has a line
+    # limit of 2048, but all seds we know understand at least 4000.
+    sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \
+      || continue
+    am_dirpart=`$as_dirname -- "$am_mf" ||
+$as_expr X"$am_mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$am_mf" : 'X\(//\)[^/]' \| \
+	 X"$am_mf" : 'X\(//\)$' \| \
+	 X"$am_mf" : 'X\(/\)' \| . 2>/dev/null ||
+printf "%s\n" X"$am_mf" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+    am_filepart=`$as_basename -- "$am_mf" ||
+$as_expr X/"$am_mf" : '.*/\([^/][^/]*\)/*$' \| \
+	 X"$am_mf" : 'X\(//\)$' \| \
+	 X"$am_mf" : 'X\(/\)' \| . 2>/dev/null ||
+printf "%s\n" X/"$am_mf" |
+    sed '/^.*\/\([^/][^/]*\)\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+    { echo "$as_me:$LINENO: cd "$am_dirpart" \
+      && sed -e '/# am--include-marker/d' "$am_filepart" \
+        | $MAKE -f - am--depfiles" >&5
+   (cd "$am_dirpart" \
+      && sed -e '/# am--include-marker/d' "$am_filepart" \
+        | $MAKE -f - am--depfiles) >&5 2>&5
+   ac_status=$?
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   (exit $ac_status); } || am_rc=$?
+  done
+  if test $am_rc -ne 0; then
+    { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "Something went wrong bootstrapping makefile fragments
+    for automatic dependency tracking.  If GNU make was not used, consider
+    re-running the configure script with MAKE=\"gmake\" (or whatever is
+    necessary).  You can also try re-running configure with the
+    '--disable-dependency-tracking' option to at least be able to build
+    the package (albeit without support for automatic dependency tracking).
+See \`config.log' for more details" "$LINENO" 5; }
+  fi
+  { am_dirpart=; unset am_dirpart;}
+  { am_filepart=; unset am_filepart;}
+  { am_mf=; unset am_mf;}
+  { am_rc=; unset am_rc;}
+  rm -f conftest-deps.mk
+}
+ ;;
+    "libtool":C)
+
+    # See if we are running on zsh, and set the options that allow our
+    # commands through without removal of \ escapes.
+    if test -n "${ZSH_VERSION+set}"; then
+      setopt NO_GLOB_SUBST
+    fi
+
+    cfgfile=${ofile}T
+    trap "$RM \"$cfgfile\"; exit 1" 1 2 15
+    $RM "$cfgfile"
+
+    cat <<_LT_EOF >> "$cfgfile"
+#! $SHELL
+# Generated automatically by $as_me ($PACKAGE) $VERSION
+# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+# NOTE: Changes made to this file will be lost: look at ltmain.sh.
+
+# Provide generalized library-building support services.
+# Written by Gordon Matzigkeit, 1996
+
+# Copyright (C) 2014 Free Software Foundation, Inc.
+# This is free software; see the source for copying conditions.  There is NO
+# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+# GNU Libtool is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program or library that is built
+# using GNU Libtool, you may include this file under the same
+# distribution terms that you use for the rest of that program.
+#
+# GNU Libtool is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+
+# The names of the tagged configurations supported by this script.
+available_tags='CXX '
+
+# Configured defaults for sys_lib_dlsearch_path munging.
+: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"}
+
+# ### BEGIN LIBTOOL CONFIG
+
+# Which release of libtool.m4 was used?
+macro_version=$macro_version
+macro_revision=$macro_revision
+
+# Whether or not to build shared libraries.
+build_libtool_libs=$enable_shared
+
+# Whether or not to build static libraries.
+build_old_libs=$enable_static
+
+# What type of objects to build.
+pic_mode=$pic_mode
+
+# Whether or not to optimize for fast installation.
+fast_install=$enable_fast_install
+
+# Shared archive member basename, for filename-based shared library versioning on AIX.
+shared_archive_member_spec=$shared_archive_member_spec
+
+# Shell to use when invoking shell scripts.
+SHELL=$lt_SHELL
+
+# An echo program that protects backslashes.
+ECHO=$lt_ECHO
+
+# The PATH separator for the build system.
+PATH_SEPARATOR=$lt_PATH_SEPARATOR
+
+# The host system.
+host_alias=$host_alias
+host=$host
+host_os=$host_os
+
+# The build system.
+build_alias=$build_alias
+build=$build
+build_os=$build_os
+
+# A sed program that does not truncate output.
+SED=$lt_SED
+
+# Sed that helps us avoid accidentally triggering echo(1) options like -n.
+Xsed="\$SED -e 1s/^X//"
+
+# A grep program that handles long lines.
+GREP=$lt_GREP
+
+# An ERE matcher.
+EGREP=$lt_EGREP
+
+# A literal string matcher.
+FGREP=$lt_FGREP
+
+# A BSD- or MS-compatible name lister.
+NM=$lt_NM
+
+# Whether we need soft or hard links.
+LN_S=$lt_LN_S
+
+# What is the maximum length of a command?
+max_cmd_len=$max_cmd_len
+
+# Object file suffix (normally "o").
+objext=$ac_objext
+
+# Executable file suffix (normally "").
+exeext=$exeext
+
+# whether the shell understands "unset".
+lt_unset=$lt_unset
+
+# turn spaces into newlines.
+SP2NL=$lt_lt_SP2NL
+
+# turn newlines into spaces.
+NL2SP=$lt_lt_NL2SP
+
+# convert \$build file names to \$host format.
+to_host_file_cmd=$lt_cv_to_host_file_cmd
+
+# convert \$build files to toolchain format.
+to_tool_file_cmd=$lt_cv_to_tool_file_cmd
+
+# An object symbol dumper.
+OBJDUMP=$lt_OBJDUMP
+
+# Method to check whether dependent libraries are shared objects.
+deplibs_check_method=$lt_deplibs_check_method
+
+# Command to use when deplibs_check_method = "file_magic".
+file_magic_cmd=$lt_file_magic_cmd
+
+# How to find potential files when deplibs_check_method = "file_magic".
+file_magic_glob=$lt_file_magic_glob
+
+# Find potential files using nocaseglob when deplibs_check_method = "file_magic".
+want_nocaseglob=$lt_want_nocaseglob
+
+# DLL creation program.
+DLLTOOL=$lt_DLLTOOL
+
+# Command to associate shared and link libraries.
+sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd
+
+# The archiver.
+AR=$lt_AR
+
+# Flags to create an archive.
+AR_FLAGS=$lt_AR_FLAGS
+
+# How to feed a file listing to the archiver.
+archiver_list_spec=$lt_archiver_list_spec
+
+# A symbol stripping program.
+STRIP=$lt_STRIP
+
+# Commands used to install an old-style archive.
+RANLIB=$lt_RANLIB
+old_postinstall_cmds=$lt_old_postinstall_cmds
+old_postuninstall_cmds=$lt_old_postuninstall_cmds
+
+# Whether to use a lock for old archive extraction.
+lock_old_archive_extraction=$lock_old_archive_extraction
+
+# A C compiler.
+LTCC=$lt_CC
+
+# LTCC compiler flags.
+LTCFLAGS=$lt_CFLAGS
+
+# Take the output of nm and produce a listing of raw symbols and C names.
+global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe
+
+# Transform the output of nm in a proper C declaration.
+global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl
+
+# Transform the output of nm into a list of symbols to manually relocate.
+global_symbol_to_import=$lt_lt_cv_sys_global_symbol_to_import
+
+# Transform the output of nm in a C name address pair.
+global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address
+
+# Transform the output of nm in a C name address pair when lib prefix is needed.
+global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix
+
+# The name lister interface.
+nm_interface=$lt_lt_cv_nm_interface
+
+# Specify filename containing input files for \$NM.
+nm_file_list_spec=$lt_nm_file_list_spec
+
+# The root where to search for dependent libraries, and where our libraries should be installed.
+lt_sysroot=$lt_sysroot
+
+# Command to truncate a binary pipe.
+lt_truncate_bin=$lt_lt_cv_truncate_bin
+
+# The name of the directory that contains temporary libtool files.
+objdir=$objdir
+
+# Used to examine libraries when file_magic_cmd begins with "file".
+MAGIC_CMD=$MAGIC_CMD
+
+# Must we lock files when doing compilation?
+need_locks=$lt_need_locks
+
+# Manifest tool.
+MANIFEST_TOOL=$lt_MANIFEST_TOOL
+
+# Tool to manipulate archived DWARF debug symbol files on Mac OS X.
+DSYMUTIL=$lt_DSYMUTIL
+
+# Tool to change global to local symbols on Mac OS X.
+NMEDIT=$lt_NMEDIT
+
+# Tool to manipulate fat objects and archives on Mac OS X.
+LIPO=$lt_LIPO
+
+# ldd/readelf like tool for Mach-O binaries on Mac OS X.
+OTOOL=$lt_OTOOL
+
+# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4.
+OTOOL64=$lt_OTOOL64
+
+# Old archive suffix (normally "a").
+libext=$libext
+
+# Shared library suffix (normally ".so").
+shrext_cmds=$lt_shrext_cmds
+
+# The commands to extract the exported symbol list from a shared archive.
+extract_expsyms_cmds=$lt_extract_expsyms_cmds
+
+# Variables whose values should be saved in libtool wrapper scripts and
+# restored at link time.
+variables_saved_for_relink=$lt_variables_saved_for_relink
+
+# Do we need the "lib" prefix for modules?
+need_lib_prefix=$need_lib_prefix
+
+# Do we need a version for libraries?
+need_version=$need_version
+
+# Library versioning type.
+version_type=$version_type
+
+# Shared library runtime path variable.
+runpath_var=$runpath_var
+
+# Shared library path variable.
+shlibpath_var=$shlibpath_var
+
+# Is shlibpath searched before the hard-coded library search path?
+shlibpath_overrides_runpath=$shlibpath_overrides_runpath
+
+# Format of library name prefix.
+libname_spec=$lt_libname_spec
+
+# List of archive names.  First name is the real one, the rest are links.
+# The last name is the one that the linker finds with -lNAME
+library_names_spec=$lt_library_names_spec
+
+# The coded name of the library, if different from the real name.
+soname_spec=$lt_soname_spec
+
+# Permission mode override for installation of shared libraries.
+install_override_mode=$lt_install_override_mode
+
+# Command to use after installation of a shared archive.
+postinstall_cmds=$lt_postinstall_cmds
+
+# Command to use after uninstallation of a shared archive.
+postuninstall_cmds=$lt_postuninstall_cmds
+
+# Commands used to finish a libtool library installation in a directory.
+finish_cmds=$lt_finish_cmds
+
+# As "finish_cmds", except a single script fragment to be evaled but
+# not shown.
+finish_eval=$lt_finish_eval
+
+# Whether we should hardcode library paths into libraries.
+hardcode_into_libs=$hardcode_into_libs
+
+# Compile-time system search path for libraries.
+sys_lib_search_path_spec=$lt_sys_lib_search_path_spec
+
+# Detected run-time system search path for libraries.
+sys_lib_dlsearch_path_spec=$lt_configure_time_dlsearch_path
+
+# Explicit LT_SYS_LIBRARY_PATH set during ./configure time.
+configure_time_lt_sys_library_path=$lt_configure_time_lt_sys_library_path
+
+# Whether dlopen is supported.
+dlopen_support=$enable_dlopen
+
+# Whether dlopen of programs is supported.
+dlopen_self=$enable_dlopen_self
+
+# Whether dlopen of statically linked programs is supported.
+dlopen_self_static=$enable_dlopen_self_static
+
+# Commands to strip libraries.
+old_striplib=$lt_old_striplib
+striplib=$lt_striplib
+
+
+# The linker used to build libraries.
+LD=$lt_LD
+
+# How to create reloadable object files.
+reload_flag=$lt_reload_flag
+reload_cmds=$lt_reload_cmds
+
+# Commands used to build an old-style archive.
+old_archive_cmds=$lt_old_archive_cmds
+
+# A language specific compiler.
+CC=$lt_compiler
+
+# Is the compiler the GNU compiler?
+with_gcc=$GCC
+
+# Compiler flag to turn off builtin functions.
+no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag
+
+# Additional compiler flags for building library objects.
+pic_flag=$lt_lt_prog_compiler_pic
+
+# How to pass a linker flag through the compiler.
+wl=$lt_lt_prog_compiler_wl
+
+# Compiler flag to prevent dynamic linking.
+link_static_flag=$lt_lt_prog_compiler_static
+
+# Does compiler simultaneously support -c and -o options?
+compiler_c_o=$lt_lt_cv_prog_compiler_c_o
+
+# Whether or not to add -lc for building shared libraries.
+build_libtool_need_lc=$archive_cmds_need_lc
+
+# Whether or not to disallow shared libs when runtime libs are static.
+allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes
+
+# Compiler flag to allow reflexive dlopens.
+export_dynamic_flag_spec=$lt_export_dynamic_flag_spec
+
+# Compiler flag to generate shared objects directly from archives.
+whole_archive_flag_spec=$lt_whole_archive_flag_spec
+
+# Whether the compiler copes with passing no objects directly.
+compiler_needs_object=$lt_compiler_needs_object
+
+# Create an old-style archive from a shared archive.
+old_archive_from_new_cmds=$lt_old_archive_from_new_cmds
+
+# Create a temporary old-style archive to link instead of a shared archive.
+old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds
+
+# Commands used to build a shared archive.
+archive_cmds=$lt_archive_cmds
+archive_expsym_cmds=$lt_archive_expsym_cmds
+
+# Commands used to build a loadable module if different from building
+# a shared archive.
+module_cmds=$lt_module_cmds
+module_expsym_cmds=$lt_module_expsym_cmds
+
+# Whether we are building with GNU ld or not.
+with_gnu_ld=$lt_with_gnu_ld
+
+# Flag that allows shared libraries with undefined symbols to be built.
+allow_undefined_flag=$lt_allow_undefined_flag
+
+# Flag that enforces no undefined symbols.
+no_undefined_flag=$lt_no_undefined_flag
+
+# Flag to hardcode \$libdir into a binary during linking.
+# This must work even if \$libdir does not exist
+hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec
+
+# Whether we need a single "-rpath" flag with a separated argument.
+hardcode_libdir_separator=$lt_hardcode_libdir_separator
+
+# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes
+# DIR into the resulting binary.
+hardcode_direct=$hardcode_direct
+
+# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes
+# DIR into the resulting binary and the resulting library dependency is
+# "absolute",i.e impossible to change by setting \$shlibpath_var if the
+# library is relocated.
+hardcode_direct_absolute=$hardcode_direct_absolute
+
+# Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+# into the resulting binary.
+hardcode_minus_L=$hardcode_minus_L
+
+# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+# into the resulting binary.
+hardcode_shlibpath_var=$hardcode_shlibpath_var
+
+# Set to "yes" if building a shared library automatically hardcodes DIR
+# into the library and all subsequent libraries and executables linked
+# against it.
+hardcode_automatic=$hardcode_automatic
+
+# Set to yes if linker adds runtime paths of dependent libraries
+# to runtime path list.
+inherit_rpath=$inherit_rpath
+
+# Whether libtool must link a program against all its dependency libraries.
+link_all_deplibs=$link_all_deplibs
+
+# Set to "yes" if exported symbols are required.
+always_export_symbols=$always_export_symbols
+
+# The commands to list exported symbols.
+export_symbols_cmds=$lt_export_symbols_cmds
+
+# Symbols that should not be listed in the preloaded symbols.
+exclude_expsyms=$lt_exclude_expsyms
+
+# Symbols that must always be exported.
+include_expsyms=$lt_include_expsyms
+
+# Commands necessary for linking programs (against libraries) with templates.
+prelink_cmds=$lt_prelink_cmds
+
+# Commands necessary for finishing linking programs.
+postlink_cmds=$lt_postlink_cmds
+
+# Specify filename containing input files.
+file_list_spec=$lt_file_list_spec
+
+# How to hardcode a shared library path into an executable.
+hardcode_action=$hardcode_action
+
+# The directories searched by this compiler when creating a shared library.
+compiler_lib_search_dirs=$lt_compiler_lib_search_dirs
+
+# Dependencies to place before and after the objects being linked to
+# create a shared library.
+predep_objects=$lt_predep_objects
+postdep_objects=$lt_postdep_objects
+predeps=$lt_predeps
+postdeps=$lt_postdeps
+
+# The library search path used internally by the compiler when linking
+# a shared library.
+compiler_lib_search_path=$lt_compiler_lib_search_path
+
+# ### END LIBTOOL CONFIG
+
+_LT_EOF
+
+    cat <<'_LT_EOF' >> "$cfgfile"
+
+# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE
+
+# func_munge_path_list VARIABLE PATH
+# -----------------------------------
+# VARIABLE is name of variable containing _space_ separated list of
+# directories to be munged by the contents of PATH, which is string
+# having a format:
+# "DIR[:DIR]:"
+#       string "DIR[ DIR]" will be prepended to VARIABLE
+# ":DIR[:DIR]"
+#       string "DIR[ DIR]" will be appended to VARIABLE
+# "DIRP[:DIRP]::[DIRA:]DIRA"
+#       string "DIRP[ DIRP]" will be prepended to VARIABLE and string
+#       "DIRA[ DIRA]" will be appended to VARIABLE
+# "DIR[:DIR]"
+#       VARIABLE will be replaced by "DIR[ DIR]"
+func_munge_path_list ()
+{
+    case x$2 in
+    x)
+        ;;
+    *:)
+        eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\"
+        ;;
+    x:*)
+        eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\"
+        ;;
+    *::*)
+        eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\"
+        eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\"
+        ;;
+    *)
+        eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\"
+        ;;
+    esac
+}
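For illustration only (not part of the generated script): the four PATH formats described above can be exercised directly, assuming the func_munge_path_list definition above is loaded in the current shell, that ECHO and SED point at plain echo and sed, and using a hypothetical variable "demo" with made-up directories.

    # Sketch only: each call starts from a fresh value of the hypothetical "demo".
    ECHO=echo
    SED=sed
    demo=/usr/lib
    func_munge_path_list demo "/opt/a:"    # trailing ':'   -> prepends /opt/a
    demo=/usr/lib
    func_munge_path_list demo ":/opt/a"    # leading ':'    -> appends /opt/a
    demo=/usr/lib
    func_munge_path_list demo "/p::/q"     # '::' separator -> prepends /p, appends /q
    demo=/usr/lib
    func_munge_path_list demo "/a:/b"      # no marker      -> replaces demo with "/a /b"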
+
+
+# Calculate cc_basename.  Skip known compiler wrappers and cross-prefix.
+func_cc_basename ()
+{
+    for cc_temp in $*""; do
+      case $cc_temp in
+        compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+        distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+        \-*) ;;
+        *) break;;
+      esac
+    done
+    func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+}
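As a hedged illustration of the wrapper/prefix stripping above (the cross prefix and compiler path are hypothetical; the func_cc_basename definition above is assumed to be loaded, with ECHO=echo and SED=sed):

    # Sketch only: hypothetical host_alias and compiler command.
    host_alias=x86_64-linux-gnu
    func_cc_basename "ccache /usr/bin/x86_64-linux-gnu-g++ -O2"
    # The "ccache" wrapper is skipped, then the leading directory and the
    # "$host_alias-" prefix are stripped, so func_cc_basename_result is "g++".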
+
+
+# ### END FUNCTIONS SHARED WITH CONFIGURE
+
+_LT_EOF
+
+  case $host_os in
+  aix3*)
+    cat <<\_LT_EOF >> "$cfgfile"
+# AIX sometimes has problems with the GCC collect2 program.  For some
+# reason, if we set the COLLECT_NAMES environment variable, the problems
+# vanish in a puff of smoke.
+if test set != "${COLLECT_NAMES+set}"; then
+  COLLECT_NAMES=
+  export COLLECT_NAMES
+fi
+_LT_EOF
+    ;;
+  esac
+
+
+
+ltmain=$ac_aux_dir/ltmain.sh
+
+
+  # We use sed instead of cat because bash on DJGPP gets confused if
+  # it finds mixed CR/LF and LF-only lines.  Since sed operates in
+  # text mode, it properly converts lines to CR/LF.  This bash problem
+  # is reportedly fixed, but why not run on old versions too?
+  sed '$q' "$ltmain" >> "$cfgfile" \
+     || (rm -f "$cfgfile"; exit 1)
+
+   mv -f "$cfgfile" "$ofile" ||
+    (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile")
+  chmod +x "$ofile"
+
+
+    cat <<_LT_EOF >> "$ofile"
+
+# ### BEGIN LIBTOOL TAG CONFIG: CXX
+
+# The linker used to build libraries.
+LD=$lt_LD_CXX
+
+# How to create reloadable object files.
+reload_flag=$lt_reload_flag_CXX
+reload_cmds=$lt_reload_cmds_CXX
+
+# Commands used to build an old-style archive.
+old_archive_cmds=$lt_old_archive_cmds_CXX
+
+# A language specific compiler.
+CC=$lt_compiler_CXX
+
+# Is the compiler the GNU compiler?
+with_gcc=$GCC_CXX
+
+# Compiler flag to turn off builtin functions.
+no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX
+
+# Additional compiler flags for building library objects.
+pic_flag=$lt_lt_prog_compiler_pic_CXX
+
+# How to pass a linker flag through the compiler.
+wl=$lt_lt_prog_compiler_wl_CXX
+
+# Compiler flag to prevent dynamic linking.
+link_static_flag=$lt_lt_prog_compiler_static_CXX
+
+# Does compiler simultaneously support -c and -o options?
+compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX
+
+# Whether or not to add -lc for building shared libraries.
+build_libtool_need_lc=$archive_cmds_need_lc_CXX
+
+# Whether or not to disallow shared libs when runtime libs are static.
+allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX
+
+# Compiler flag to allow reflexive dlopens.
+export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX
+
+# Compiler flag to generate shared objects directly from archives.
+whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX
+
+# Whether the compiler copes with passing no objects directly.
+compiler_needs_object=$lt_compiler_needs_object_CXX
+
+# Create an old-style archive from a shared archive.
+old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX
+
+# Create a temporary old-style archive to link instead of a shared archive.
+old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX
+
+# Commands used to build a shared archive.
+archive_cmds=$lt_archive_cmds_CXX
+archive_expsym_cmds=$lt_archive_expsym_cmds_CXX
+
+# Commands used to build a loadable module if different from building
+# a shared archive.
+module_cmds=$lt_module_cmds_CXX
+module_expsym_cmds=$lt_module_expsym_cmds_CXX
+
+# Whether we are building with GNU ld or not.
+with_gnu_ld=$lt_with_gnu_ld_CXX
+
+# Flag that allows shared libraries with undefined symbols to be built.
+allow_undefined_flag=$lt_allow_undefined_flag_CXX
+
+# Flag that enforces no undefined symbols.
+no_undefined_flag=$lt_no_undefined_flag_CXX
+
+# Flag to hardcode \$libdir into a binary during linking.
+# This must work even if \$libdir does not exist
+hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX
+
+# Whether we need a single "-rpath" flag with a separated argument.
+hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX
+
+# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes
+# DIR into the resulting binary.
+hardcode_direct=$hardcode_direct_CXX
+
+# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes
+# DIR into the resulting binary and the resulting library dependency is
+# "absolute",i.e impossible to change by setting \$shlibpath_var if the
+# library is relocated.
+hardcode_direct_absolute=$hardcode_direct_absolute_CXX
+
+# Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+# into the resulting binary.
+hardcode_minus_L=$hardcode_minus_L_CXX
+
+# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+# into the resulting binary.
+hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX
+
+# Set to "yes" if building a shared library automatically hardcodes DIR
+# into the library and all subsequent libraries and executables linked
+# against it.
+hardcode_automatic=$hardcode_automatic_CXX
+
+# Set to yes if linker adds runtime paths of dependent libraries
+# to runtime path list.
+inherit_rpath=$inherit_rpath_CXX
+
+# Whether libtool must link a program against all its dependency libraries.
+link_all_deplibs=$link_all_deplibs_CXX
+
+# Set to "yes" if exported symbols are required.
+always_export_symbols=$always_export_symbols_CXX
+
+# The commands to list exported symbols.
+export_symbols_cmds=$lt_export_symbols_cmds_CXX
+
+# Symbols that should not be listed in the preloaded symbols.
+exclude_expsyms=$lt_exclude_expsyms_CXX
+
+# Symbols that must always be exported.
+include_expsyms=$lt_include_expsyms_CXX
+
+# Commands necessary for linking programs (against libraries) with templates.
+prelink_cmds=$lt_prelink_cmds_CXX
+
+# Commands necessary for finishing linking programs.
+postlink_cmds=$lt_postlink_cmds_CXX
+
+# Specify filename containing input files.
+file_list_spec=$lt_file_list_spec_CXX
+
+# How to hardcode a shared library path into an executable.
+hardcode_action=$hardcode_action_CXX
+
+# The directories searched by this compiler when creating a shared library.
+compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX
+
+# Dependencies to place before and after the objects being linked to
+# create a shared library.
+predep_objects=$lt_predep_objects_CXX
+postdep_objects=$lt_postdep_objects_CXX
+predeps=$lt_predeps_CXX
+postdeps=$lt_postdeps_CXX
+
+# The library search path used internally by the compiler when linking
+# a shared library.
+compiler_lib_search_path=$lt_compiler_lib_search_path_CXX
+
+# ### END LIBTOOL TAG CONFIG: CXX
+_LT_EOF
+
+ ;;
+
+  esac
+done # for ac_tag
+
+
+as_fn_exit 0
+_ACEOF
+ac_clean_files=$ac_clean_files_save
+
+test $ac_write_fail = 0 ||
+  as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5
+
+
+# configure is writing to config.log, and then calls config.status.
+# config.status does its own redirection, appending to config.log.
+# Unfortunately, on DOS this fails, as config.log is still kept open
+# by configure, so config.status won't be able to write to it; its
+# output is simply discarded.  So we exec the FD to /dev/null,
+# effectively closing config.log, so it can be properly (re)opened and
+# appended to by config.status.  When coming back to configure, we
+# need to make the FD available again.
+if test "$no_create" != yes; then
+  ac_cs_success=:
+  ac_config_status_args=
+  test "$silent" = yes &&
+    ac_config_status_args="$ac_config_status_args --quiet"
+  exec 5>/dev/null
+  $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
+  exec 5>>config.log
+  # Use ||, not &&, to avoid exiting from the if with $? = 1, which
+  # would make configure fail if this is the last instruction.
+  $ac_cs_success || as_fn_exit 1
+fi
+if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
+  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
+printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
+fi
+
+

Property changes on: applgrid/tags/applgrid-01-06-36/configure
___________________________________________________________________
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Index: applgrid/tags/applgrid-01-06-36/Makefile.in
===================================================================
--- applgrid/tags/applgrid-01-06-36/Makefile.in	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/Makefile.in	(revision 2010)
@@ -0,0 +1,925 @@
+# Makefile.in generated by automake 1.16.5 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2021 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+  if test -z '$(MAKELEVEL)'; then \
+    false; \
+  elif test -n '$(MAKE_HOST)'; then \
+    true; \
+  elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+    true; \
+  else \
+    false; \
+  fi; \
+}
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = .
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \
+	$(am__configure_deps) $(dist_doc_DATA) $(dist_share_DATA) \
+	$(am__DIST_COMMON)
+am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
+ configure.lineno config.status.lineno
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = amconfig.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+	ctags-recursive dvi-recursive html-recursive info-recursive \
+	install-data-recursive install-dvi-recursive \
+	install-exec-recursive install-html-recursive \
+	install-info-recursive install-pdf-recursive \
+	install-ps-recursive install-recursive installcheck-recursive \
+	installdirs-recursive pdf-recursive ps-recursive \
+	tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+  for p in $$list; do echo "$$p $$p"; done | \
+  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+    if (++n[$$2] == $(am__install_max)) \
+      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+    END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+  test -z "$$files" \
+    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+         $(am__cd) "$$dir" && rm -f $$files; }; \
+  }
+am__installdirs = "$(DESTDIR)$(docdir)" "$(DESTDIR)$(sharedir)"
+DATA = $(dist_doc_DATA) $(dist_share_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive	\
+  distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+  $(RECURSIVE_TARGETS) \
+  $(RECURSIVE_CLEAN_TARGETS) \
+  $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+	cscope distdir distdir-am dist dist-all distcheck
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) \
+	amconfig.in
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/amconfig.in COPYING \
+	INSTALL README compile config.guess config.sub depcomp \
+	install-sh ltmain.sh missing
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+distdir = $(PACKAGE)-$(VERSION)
+top_distdir = $(distdir)
+am__remove_distdir = \
+  if test -d "$(distdir)"; then \
+    find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \
+      && rm -rf "$(distdir)" \
+      || { sleep 5 && rm -rf "$(distdir)"; }; \
+  else :; fi
+am__post_remove_distdir = $(am__remove_distdir)
+am__relativize = \
+  dir0=`pwd`; \
+  sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+  sed_rest='s,^[^/]*/*,,'; \
+  sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+  sed_butlast='s,/*[^/]*$$,,'; \
+  while test -n "$$dir1"; do \
+    first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+    if test "$$first" != "."; then \
+      if test "$$first" = ".."; then \
+        dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+        dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+      else \
+        first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+        if test "$$first2" = "$$first"; then \
+          dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+        else \
+          dir2="../$$dir2"; \
+        fi; \
+        dir0="$$dir0"/"$$first"; \
+      fi; \
+    fi; \
+    dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+  done; \
+  reldir="$$dir2"
+DIST_ARCHIVES = $(distdir).tar.gz
+GZIP_ENV = --best
+DIST_TARGETS = dist-gzip
+# Exists only to be overridden by the user if desired.
+AM_DISTCHECK_DVI_TARGET = dvi
+distuninstallcheck_listfiles = find . -type f -print
+am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \
+  | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$'
+distcleancheck_listfiles = find . -type f -print
+ACLOCAL = aclocal
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = automake
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPPFLAGS = @CPPFLAGS@
+CSCOPE = @CSCOPE@
+CTAGS = @CTAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ETAGS = @ETAGS@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FRTLLIB = @FRTLLIB@
+GREP = @GREP@
+HOPPETPATH = @HOPPETPATH@
+HOPPET_CXXFLAGS = @HOPPET_CXXFLAGS@
+HOPPET_LDFLAGS = @HOPPET_LDFLAGS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LHAPDFPATH = @LHAPDFPATH@
+LHAPDF_CXXFLAGS = @LHAPDF_CXXFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+ROOTPATH = @ROOTPATH@
+ROOT_CXXFLAGS = @ROOT_CXXFLAGS@
+ROOT_LDFLAGS = @ROOT_LDFLAGS@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+USEROOT = @USEROOT@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = ${datadir}/doc/${PACKAGE}
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign 
+dist_doc_DATA = README INSTALL COPYING
+sharedir = ${datadir}/${PACKAGE}
+dist_share_DATA = share/*.config 
+SUBDIRS = src bin
+all: amconfig.h
+	$(MAKE) $(AM_MAKEFLAGS) all-recursive
+
+.SUFFIXES:
+am--refresh: Makefile
+	@:
+$(srcdir)/Makefile.in:  $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \
+	      $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \
+		&& exit 0; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    echo ' $(SHELL) ./config.status'; \
+	    $(SHELL) ./config.status;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	$(SHELL) ./config.status --recheck
+
+$(top_srcdir)/configure:  $(am__configure_deps)
+	$(am__cd) $(srcdir) && $(AUTOCONF)
+$(ACLOCAL_M4):  $(am__aclocal_m4_deps)
+	$(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS)
+$(am__aclocal_m4_deps):
+
+amconfig.h: stamp-h1
+	@test -f $@ || rm -f stamp-h1
+	@test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1
+
+stamp-h1: $(srcdir)/amconfig.in $(top_builddir)/config.status
+	@rm -f stamp-h1
+	cd $(top_builddir) && $(SHELL) ./config.status amconfig.h
+$(srcdir)/amconfig.in:  $(am__configure_deps) 
+	($(am__cd) $(top_srcdir) && $(AUTOHEADER))
+	rm -f stamp-h1
+	touch $@
+
+distclean-hdr:
+	-rm -f amconfig.h stamp-h1
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+distclean-libtool:
+	-rm -f libtool config.lt
+install-dist_docDATA: $(dist_doc_DATA)
+	@$(NORMAL_INSTALL)
+	@list='$(dist_doc_DATA)'; test -n "$(docdir)" || list=; \
+	if test -n "$$list"; then \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(docdir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(docdir)" || exit 1; \
+	fi; \
+	for p in $$list; do \
+	  if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+	  echo "$$d$$p"; \
+	done | $(am__base_list) | \
+	while read files; do \
+	  echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(docdir)'"; \
+	  $(INSTALL_DATA) $$files "$(DESTDIR)$(docdir)" || exit $$?; \
+	done
+
+uninstall-dist_docDATA:
+	@$(NORMAL_UNINSTALL)
+	@list='$(dist_doc_DATA)'; test -n "$(docdir)" || list=; \
+	files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+	dir='$(DESTDIR)$(docdir)'; $(am__uninstall_files_from_dir)
+install-dist_shareDATA: $(dist_share_DATA)
+	@$(NORMAL_INSTALL)
+	@list='$(dist_share_DATA)'; test -n "$(sharedir)" || list=; \
+	if test -n "$$list"; then \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(sharedir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(sharedir)" || exit 1; \
+	fi; \
+	for p in $$list; do \
+	  if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+	  echo "$$d$$p"; \
+	done | $(am__base_list) | \
+	while read files; do \
+	  echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(sharedir)'"; \
+	  $(INSTALL_DATA) $$files "$(DESTDIR)$(sharedir)" || exit $$?; \
+	done
+
+uninstall-dist_shareDATA:
+	@$(NORMAL_UNINSTALL)
+	@list='$(dist_share_DATA)'; test -n "$(sharedir)" || list=; \
+	files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+	dir='$(DESTDIR)$(sharedir)'; $(am__uninstall_files_from_dir)
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+#     (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
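For case (2) above, overrides can simply be passed on the make command line; shown only as a sketch, with arbitrary flag values and an arbitrary staging path:

    # Sketch only: override variables for a single run.
    make CXXFLAGS="-O2 -g"
    make install DESTDIR=/tmp/applgrid-stage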
+$(am__recursive_targets):
+	@fail=; \
+	if $(am__make_keepgoing); then \
+	  failcom='fail=yes'; \
+	else \
+	  failcom='exit 1'; \
+	fi; \
+	dot_seen=no; \
+	target=`echo $@ | sed s/-recursive//`; \
+	case "$@" in \
+	  distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+	  *) list='$(SUBDIRS)' ;; \
+	esac; \
+	for subdir in $$list; do \
+	  echo "Making $$target in $$subdir"; \
+	  if test "$$subdir" = "."; then \
+	    dot_seen=yes; \
+	    local_target="$$target-am"; \
+	  else \
+	    local_target="$$target"; \
+	  fi; \
+	  ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+	  || eval $$failcom; \
+	done; \
+	if test "$$dot_seen" = "no"; then \
+	  $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+	fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+	  include_option=--etags-include; \
+	  empty_fix=.; \
+	else \
+	  include_option=--include; \
+	  empty_fix=; \
+	fi; \
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    test ! -f $$subdir/TAGS || \
+	      set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+	  fi; \
+	done; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscope: cscope.files
+	test ! -s cscope.files \
+	  || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS)
+clean-cscope:
+	-rm -f cscope.files
+cscope.files: clean-cscope cscopelist
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+	-rm -f cscope.out cscope.in.out cscope.po.out cscope.files
+distdir: $(BUILT_SOURCES)
+	$(MAKE) $(AM_MAKEFLAGS) distdir-am
+
+distdir-am: $(DISTFILES)
+	$(am__remove_distdir)
+	test -d "$(distdir)" || mkdir "$(distdir)"
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+	@list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    $(am__make_dryrun) \
+	      || test -d "$(distdir)/$$subdir" \
+	      || $(MKDIR_P) "$(distdir)/$$subdir" \
+	      || exit 1; \
+	    dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+	    $(am__relativize); \
+	    new_distdir=$$reldir; \
+	    dir1=$$subdir; dir2="$(top_distdir)"; \
+	    $(am__relativize); \
+	    new_top_distdir=$$reldir; \
+	    echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+	    echo "     am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+	    ($(am__cd) $$subdir && \
+	      $(MAKE) $(AM_MAKEFLAGS) \
+	        top_distdir="$$new_top_distdir" \
+	        distdir="$$new_distdir" \
+		am__remove_distdir=: \
+		am__skip_length_check=: \
+		am__skip_mode_fix=: \
+	        distdir) \
+	      || exit 1; \
+	  fi; \
+	done
+	$(MAKE) $(AM_MAKEFLAGS) \
+	  top_distdir="$(top_distdir)" distdir="$(distdir)" \
+	  dist-hook
+	-test -n "$(am__skip_mode_fix)" \
+	|| find "$(distdir)" -type d ! -perm -755 \
+		-exec chmod u+rwx,go+rx {} \; -o \
+	  ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \
+	  ! -type d ! -perm -400 -exec chmod a+r {} \; -o \
+	  ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \
+	|| chmod -R a+r "$(distdir)"
+dist-gzip: distdir
+	tardir=$(distdir) && $(am__tar) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).tar.gz
+	$(am__post_remove_distdir)
+
+dist-bzip2: distdir
+	tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2
+	$(am__post_remove_distdir)
+
+dist-lzip: distdir
+	tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz
+	$(am__post_remove_distdir)
+
+dist-xz: distdir
+	tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz
+	$(am__post_remove_distdir)
+
+dist-zstd: distdir
+	tardir=$(distdir) && $(am__tar) | zstd -c $${ZSTD_CLEVEL-$${ZSTD_OPT--19}} >$(distdir).tar.zst
+	$(am__post_remove_distdir)
+
+dist-tarZ: distdir
+	@echo WARNING: "Support for distribution archives compressed with" \
+		       "legacy program 'compress' is deprecated." >&2
+	@echo WARNING: "It will be removed altogether in Automake 2.0" >&2
+	tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
+	$(am__post_remove_distdir)
+
+dist-shar: distdir
+	@echo WARNING: "Support for shar distribution archives is" \
+	               "deprecated." >&2
+	@echo WARNING: "It will be removed altogether in Automake 2.0" >&2
+	shar $(distdir) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).shar.gz
+	$(am__post_remove_distdir)
+
+dist-zip: distdir
+	-rm -f $(distdir).zip
+	zip -rq $(distdir).zip $(distdir)
+	$(am__post_remove_distdir)
+
+dist dist-all:
+	$(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:'
+	$(am__post_remove_distdir)
+
+# This target untars the dist file and tries a VPATH configuration.  Then
+# it guarantees that the distribution is self-contained by making another
+# tarfile.
+distcheck: dist
+	case '$(DIST_ARCHIVES)' in \
+	*.tar.gz*) \
+	  eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).tar.gz | $(am__untar) ;;\
+	*.tar.bz2*) \
+	  bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\
+	*.tar.lz*) \
+	  lzip -dc $(distdir).tar.lz | $(am__untar) ;;\
+	*.tar.xz*) \
+	  xz -dc $(distdir).tar.xz | $(am__untar) ;;\
+	*.tar.Z*) \
+	  uncompress -c $(distdir).tar.Z | $(am__untar) ;;\
+	*.shar.gz*) \
+	  eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).shar.gz | unshar ;;\
+	*.zip*) \
+	  unzip $(distdir).zip ;;\
+	*.tar.zst*) \
+	  zstd -dc $(distdir).tar.zst | $(am__untar) ;;\
+	esac
+	chmod -R a-w $(distdir)
+	chmod u+w $(distdir)
+	mkdir $(distdir)/_build $(distdir)/_build/sub $(distdir)/_inst
+	chmod a-w $(distdir)
+	test -d $(distdir)/_build || exit 0; \
+	dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \
+	  && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \
+	  && am__cwd=`pwd` \
+	  && $(am__cd) $(distdir)/_build/sub \
+	  && ../../configure \
+	    $(AM_DISTCHECK_CONFIGURE_FLAGS) \
+	    $(DISTCHECK_CONFIGURE_FLAGS) \
+	    --srcdir=../.. --prefix="$$dc_install_base" \
+	  && $(MAKE) $(AM_MAKEFLAGS) \
+	  && $(MAKE) $(AM_MAKEFLAGS) $(AM_DISTCHECK_DVI_TARGET) \
+	  && $(MAKE) $(AM_MAKEFLAGS) check \
+	  && $(MAKE) $(AM_MAKEFLAGS) install \
+	  && $(MAKE) $(AM_MAKEFLAGS) installcheck \
+	  && $(MAKE) $(AM_MAKEFLAGS) uninstall \
+	  && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \
+	        distuninstallcheck \
+	  && chmod -R a-w "$$dc_install_base" \
+	  && ({ \
+	       (cd ../.. && umask 077 && mkdir "$$dc_destdir") \
+	       && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \
+	       && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \
+	       && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \
+	            distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \
+	      } || { rm -rf "$$dc_destdir"; exit 1; }) \
+	  && rm -rf "$$dc_destdir" \
+	  && $(MAKE) $(AM_MAKEFLAGS) dist \
+	  && rm -rf $(DIST_ARCHIVES) \
+	  && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \
+	  && cd "$$am__cwd" \
+	  || exit 1
+	$(am__post_remove_distdir)
+	@(echo "$(distdir) archives ready for distribution: "; \
+	  list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \
+	  sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x'
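A typical use of the dist and distcheck targets above, shown only as a sketch and run from an already configured build tree:

    # Sketch only.
    make dist        # builds $(distdir).tar.gz
    make distcheck   # unpacks it, does a VPATH build, check, install/uninstall,
                     # then rebuilds the tarball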
+distuninstallcheck:
+	@test -n '$(distuninstallcheck_dir)' || { \
+	  echo 'ERROR: trying to run $@ with an empty' \
+	       '$$(distuninstallcheck_dir)' >&2; \
+	  exit 1; \
+	}; \
+	$(am__cd) '$(distuninstallcheck_dir)' || { \
+	  echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \
+	  exit 1; \
+	}; \
+	test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \
+	   || { echo "ERROR: files left after uninstall:" ; \
+	        if test -n "$(DESTDIR)"; then \
+	          echo "  (check DESTDIR support)"; \
+	        fi ; \
+	        $(distuninstallcheck_listfiles) ; \
+	        exit 1; } >&2
+distcleancheck: distclean
+	@if test '$(srcdir)' = . ; then \
+	  echo "ERROR: distcleancheck can only run from a VPATH build" ; \
+	  exit 1 ; \
+	fi
+	@test `$(distcleancheck_listfiles) | wc -l` -eq 0 \
+	  || { echo "ERROR: files left in build directory after distclean:" ; \
+	       $(distcleancheck_listfiles) ; \
+	       exit 1; } >&2
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA) amconfig.h
+installdirs: installdirs-recursive
+installdirs-am:
+	for dir in "$(DESTDIR)$(docdir)" "$(DESTDIR)$(sharedir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-recursive
+
+clean-am: clean-generic clean-libtool clean-local mostlyclean-am
+
+distclean: distclean-recursive
+	-rm -f $(am__CONFIG_DISTCLEAN_FILES)
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-hdr \
+	distclean-libtool distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+@USE_ROOT_FALSE@install-data-am: install-dist_docDATA \
+@USE_ROOT_FALSE@	install-dist_shareDATA
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+	-rm -f $(am__CONFIG_DISTCLEAN_FILES)
+	-rm -rf $(top_srcdir)/autom4te.cache
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am: uninstall-dist_docDATA uninstall-dist_shareDATA
+
+.MAKE: $(am__recursive_targets) all install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \
+	am--refresh check check-am clean clean-cscope clean-generic \
+	clean-libtool clean-local cscope cscopelist-am ctags ctags-am \
+	dist dist-all dist-bzip2 dist-gzip dist-hook dist-lzip \
+	dist-shar dist-tarZ dist-xz dist-zip dist-zstd distcheck \
+	distclean distclean-generic distclean-hdr distclean-libtool \
+	distclean-tags distcleancheck distdir distuninstallcheck dvi \
+	dvi-am html html-am info info-am install install-am \
+	install-data install-data-am install-dist_docDATA \
+	install-dist_shareDATA install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs installdirs-am maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic \
+	mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \
+	uninstall-am uninstall-dist_docDATA uninstall-dist_shareDATA
+
+.PRECIOUS: Makefile
+
+
+@USE_ROOT_TRUE@install-data-am:
+@USE_ROOT_TRUE@	cp src/*.pcm ${libdir}
+
+clean-local:
+	rm -rf autom4te.cache
+
+dist-hook:
+	rm -rf `find $(distdir) -name ".svn"` `find $(distdir) -name \*Dict\*`
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
Index: applgrid/tags/applgrid-01-06-36/applgrid.doxy
===================================================================
--- applgrid/tags/applgrid-01-06-36/applgrid.doxy	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/applgrid.doxy	(revision 2010)
@@ -0,0 +1,2410 @@
+# Doxyfile 1.8.11
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING      = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+# PROJECT_NAME           = "applgrid"
+PROJECT_NAME           = "  applgrid"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER         = 1.5.20
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give the viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF          = "applgrid fast interpolation grid generation"
+
+# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
+# in the documentation. The maximum height of the logo should not exceed 55
+# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
+# the logo to the output directory.
+
+# PROJECT_LOGO           = APPLgrid-white-plain.jpg
+# PROJECT_LOGO           = applicon.jpg
+
+PROJECT_LOGO           = 
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       =
+
+# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise cause
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS         = NO
+
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES    = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE        = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF       =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB  = YES
+
+# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES        = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH        =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH    =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful if your file system doesn't
+# support long names, as on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF      = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF           = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that Rational Rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS           = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
+# page for each member. If set to NO, the documentation of a member will be part
+# of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES  = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE               = 4
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
+ALIASES                =
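+
+# Illustration only (commented out, not part of the original configuration):
+# the \sideeffect alias described in the comment above would be declared as
+#
+#   ALIASES = "sideeffect=@par Side Effects:\n"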
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST              =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C  = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA   = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN   = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL   = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
+# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
+# Fortran. In the latter case the parser tries to guess whether the code is fixed
+# or free formatted; this is the default for Fortran type files), VHDL. For
+# instance to make doxygen treat .inc files as Fortran files (default is PHP),
+# and .f files as C (default is Fortran), use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING      =
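+
+# Illustration only (commented out, not part of the original configuration):
+# the mapping mentioned in the comment above, treating .inc as Fortran and .f
+# as C, would read
+#
+#   EXTENSION_MAPPING = inc=Fortran f=C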
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibility issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT       = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT       = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT    = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT        = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT            = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT   = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# If one adds a struct or class to a group and this option is enabled, then also
+# any nested class or struct is added to the same group. By default this option
+# is disabled and one has to add nested compounds explicitly via \ingroup.
+# The default value is: NO.
+
+GROUP_NESTED_COMPOUNDS = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING            = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS  = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT   = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE      = 0
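+
+# Worked example of the formula above, for illustration only: a value of 3
+# would give a cache of 2^(16+3) = 2^19 = 524288 symbols, i.e. eight times
+# the default of 65536.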
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL            = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE        = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE        = NO
+
+# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC         = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO,
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES  = YES
+
+# This flag is only useful for Objective-C code. If set to YES, local methods,
+# which are defined in the implementation section but not in the interface, are
+# included in the documentation. If set to NO, only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS  = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES   = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO, these classes will be included in the various overviews. This option
+# has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES     = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO, these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO, these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS          = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES, upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES       = NO
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES, the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+
+HIDE_COMPOUND_REFERENCE= NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES     = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC  = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES   = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS       = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS        = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES       = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME     = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING  = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST       = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS       =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES        = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES             = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES        = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER    =
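+
+# Illustration only (commented out, untested assumption): since this project
+# is kept in Subversion, one candidate filter is svnversion, which prints a
+# revision identifier for the path that doxygen appends to the command:
+#
+#   FILE_VERSION_FILTER = "svnversion"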
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE            =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
+
+CITE_BIB_FILES         =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET                  = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS               = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED   = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR      = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO, doxygen will only warn about wrong or incomplete
+# parameter documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC       = NO
+
+# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
+# a warning is encountered.
+# The default value is: NO.
+
+WARN_AS_ERROR          = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE           =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
+# Note: If this tag is empty the current directory is searched.
+
+INPUT                  =
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING         = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# read by doxygen.
+#
+# If left blank the following patterns are tested: *.c, *.cc, *.cxx, *.cpp,
+# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
+# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
+# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f, *.for, *.tcl,
+# *.vhd, *.vhdl, *.ucf, *.qsf, *.as and *.js.
+
+FILE_PATTERNS          =
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE              = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE                =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS       =
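+
+# Illustration only (commented out): the pattern from the comment above,
+# which would exclude every test directory from the input, would be written
+# as
+#
+#   EXCLUDE_PATTERNS = */test/*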
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS        =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH           =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS       =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH             =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+INPUT_FILTER           =
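+
+# Illustration only (commented out, hypothetical macro names): any program
+# that writes the transformed file to standard output will do, for example a
+# sed substitution applied before doxygen parses each file:
+#
+#   INPUT_FILTER = "sed -e 's/OLD_MACRO/NEW_MACRO/g'"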
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+FILTER_PATTERNS        =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES    = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project hosted on, for instance, GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
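+
+# Illustration only (commented out, hypothetical file name): a top-level
+# README.md that is also listed in INPUT could be promoted to the main page
+# with
+#
+#   USE_MDFILE_AS_MAINPAGE = README.md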
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER         = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS    = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION    = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS        = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS              = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS       = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX     = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX          =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT            = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML, the header file must include any scripts and style sheets
+# that doxygen needs, which depend on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER            =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER            =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET        =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET  =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES       =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the style sheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE    = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT    = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8; the value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA  = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to YES can help to show when doxygen was last run and thus if the
+# documentation is up to date.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP         = NO
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS  = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries to 1 will produce a fully collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a fully expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET        = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME        = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID       = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME  = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index and a table of contents, and allow searching
+# for words in the documentation. The HTML Help Workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP      = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE               =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION           =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated
+# (YES) or that it should be included in the master .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI           = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING     =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND             = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP           = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE               =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE          = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER     = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME   =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS  =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS  =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION           =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files need
+# to be copied into the plugins directory of Eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying, Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP   = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID         = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX          = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW      = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH         = 250
+
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW    = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE       = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT    = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX            = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT         = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS     =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE       =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow; in that
+# case enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE           = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH    = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH        = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL       =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE        = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID     =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
+# each project to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS  =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX         = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE             = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. The package can be specified just
+# by its name or with the correct syntax as to be used with the LaTeX
+# \usepackage command. To get the times font for instance you can specify :
+# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
+# To use the option intlimits with the amsmath package you can specify:
+# EXTRA_PACKAGES=[intlimits]{amsmath}
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES         =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
+# string, for the replacement values of the other commands the user is referred
+# to HTML_HEADER.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER           =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER           =
+
+# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# LaTeX style sheets that are included after the standard style sheets created
+# by doxygen. Using this option one can overrule certain style aspects. Doxygen
+# will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_STYLESHEET =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES      =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS         = YES
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES, to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX           = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE        = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES     = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE      = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE        = plain
+
+# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_TIMESTAMP        = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements,
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE    =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE    =
+
+# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
+# with syntax highlighting in the RTF output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_SOURCE_CODE        = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION          = .3
+
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR             =
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT             = xml
+
+# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING     = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK       = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT         = docbook
+
+# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
+# program listings (including syntax highlighting and cross-referencing
+# information) to the DOCBOOK output. Note that enabling this will significantly
+# increase the size of the DOCBOOK output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_PROGRAMLISTING = NO
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
+# AutoGen Definitions (see http://autogen.sf.net) file that captures the
+# structure of the code including all documentation. Note that this feature is
+# still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO, the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
+# in the source code. If set to NO, only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION        = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF     = NO
+
+# If the SEARCH_INCLUDES tag is set to YES, the include files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH           =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS  =
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED             =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED      =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES               =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE       =
+
+# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
+# the class index. If set to NO, only the inherited external classes will be
+# listed.
+# The default value is: NO.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS        = YES
+
+# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES         = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
+
+PERL_PATH              = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS         = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH            =
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH               =
+
+# If set to YES the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS   = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO
+# The default value is: NO.
+
+HAVE_DOT               = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS        = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME           = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE           = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH           =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH    = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS           = YES
+
+# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK               = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS   = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS     = NO
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH          = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command. Disabling a call graph can be
+# accomplished by means of the command \hidecallgraph.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH             = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command. Disabling a caller graph can be
+# accomplished by means of the command \hidecallergraph.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH           = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a graphical
+# hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH        = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. For an explanation of the image formats see the section
+# output formats in the documentation of the dot tool (Graphviz (see:
+# http://www.graphviz.org/)).
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
+# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
+# png:gdiplus:gdiplus.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT       = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG        = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH               =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS           =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS           =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS           =
+
+# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
+# path where java can find the plantuml.jar file. If left blank, it is assumed
+# PlantUML is not used or called during a preprocessing step. Doxygen will
+# generate a warning when it encounters a \startuml command in this case and
+# will not generate output for the diagram.
+
+PLANTUML_JAR_PATH      =
+
+# When using plantuml, the specified paths are searched for files specified by
+# the !include statement in a plantuml block.
+
+PLANTUML_INCLUDE_PATH  =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES    = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lie
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH    = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT        = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS      = NO
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_CLEANUP            = YES
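
The configuration above is picked up by the source files added below, all of which carry Doxygen-style file headers (@file, @brief, @author, @date, @copyright). As a point of reference only, a minimal sketch of that header convention on a hypothetical file (the file name, author and dates are illustrative, not taken from the repository):

/**
 **     @file    example_pdf.h
 **
 **     @brief   sketch of the Doxygen file-header convention used in the
 **              sources below (hypothetical file, illustrative only)
 **
 **     @author  a.n. other
 **     @date    Mon Jan 1 00:00:00 GMT 2021
 **
 **     @copyright (C) 2021 a.n. other
 **/

/// @brief trivial documented entity for doxygen to pick up
inline int example() { return 0; }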
Index: applgrid/tags/applgrid-01-06-36/src/nlojetpp_pdf.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/nlojetpp_pdf.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/nlojetpp_pdf.cxx	(revision 2010)
@@ -0,0 +1,29 @@
+/**
+ **     @file    nlojetpp_pdf.cxx
+ **
+ **     @brief   pdf transform function for nlojet                   
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: nlojetpp_pdf.cxx, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+
+
+#include "appl_grid/appl_pdf.h" 
+
+#include "nlojetpp_pdf.h"
+
+
+extern "C" void fnlojetpp_pdf__(const double* fA, const double* fB, double* H) { 
+  static nlojetpp_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+
+
+
+
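
The wrapper above follows a simple pattern: a single static instance of the pdf-combination class, exposed through an extern "C" function with trailing underscores matching the Fortran symbol convention assumed by the build. A self-contained sketch of the same pattern with a stand-in class (toy_pdf and ftoy_pdf__ are illustrative names, not part of APPLgrid):

#include <cstdio>

// stand-in for an appl_pdf-style combination class (hypothetical)
struct toy_pdf {
  // fA, fB: 13 pdf values each (pdg -6..6, gluon at index 6); H: output combinations
  void evaluate(const double* fA, const double* fB, double* H) const {
    H[0] = fA[6]*fB[6];   // gluon-gluon only, purely for illustration
  }
};

// fortran-callable wrapper: C linkage, a single static instance, trailing
// underscores to match the fortran naming convention assumed here
extern "C" void ftoy_pdf__(const double* fA, const double* fB, double* H) {
  static toy_pdf pdf;
  pdf.evaluate(fA, fB, H);
}

int main() {
  double fA[13] = { 0 }, fB[13] = { 0 }, H[1] = { 0 };
  fA[6] = 0.5;  fB[6] = 0.4;          // toy gluon densities
  ftoy_pdf__(fA, fB, H);
  std::printf("H[0] = %g\n", H[0]);   // 0.2
  return 0;
}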
Index: applgrid/tags/applgrid-01-06-36/src/jetrad_pdf.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/jetrad_pdf.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/jetrad_pdf.h	(revision 2010)
@@ -0,0 +1,85 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    jetrad_pdf.h
+ **
+ **     @brief   pdf transform function for jetrad                   
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: jetrad_pdf.h, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+
+
+#ifndef __JETRAD_PDF_H
+#define __JETRAD_PDF_H
+
+
+#include "appl_grid/appl_pdf.h" 
+
+
+
+// jetrad pdf combination class
+class jetrad_pdf : public appl::appl_pdf {
+
+public:
+
+  jetrad_pdf() : appl_pdf("jetrad") { m_Nproc=7; } 
+
+  void evaluate(const double* _fA, const double* _fB, double* H) const;
+
+};  
+  
+
+
+inline void jetrad_pdf::evaluate(const double* _fA, const double* _fB, double* H) const {  
+
+  // remapping from pdg -6..6 convention to u..t ubar..tbar g internal
+  // jetrad convention
+  static int index_map[13] = { 2, 1, 3, 4, 5, 6, -2, -1, -3, -4, -5, -6, 0 }; 
+
+  double f1[13];
+  double f2[13];
+
+  const double* _f1tmp = _fA+6;
+  const double* _f2tmp = _fB+6;
+
+  for ( int i=0 ; i<13 ; i++ )  f1[i] = _f1tmp[index_map[i]];
+  for ( int i=0 ; i<13 ; i++ )  f2[i] = _f2tmp[index_map[i]];
+
+
+  // now calculate the combinations
+
+  double Q1=0, Q1bar=0, Q2=0, Q2bar=0, D=0, Dbar=0; 
+
+  for ( int i=0 ; i<6 ; i++ )  {  Q1    += f1[i];   Q2    += f2[i];  }
+  for ( int i=6 ; i<12 ; i++ ) {  Q1bar += f1[i];   Q2bar += f2[i];  }
+
+  for ( int i=0 ; i<12 ; i++ ) D += f1[i]*f2[i];
+
+  for ( int i=0 ; i<6 ; i++ ) { 
+    Dbar += f1[i]*f2[i+6];  
+    Dbar += f1[i+6]*f2[i]; 
+  }
+  
+  H[0] =  D;
+  H[1] =  Q1*Q2+Q1bar*Q2bar-D ;
+  H[2] =  Dbar ;
+  H[3] =  Q1*Q2bar+Q1bar*Q2-Dbar ;
+  H[4] =  (Q1+Q1bar)*f2[12] + f1[12]*(Q2+Q2bar) ;
+  H[5] =  f1[12]*f2[12];
+
+}
+  
+
+
+// fortran callable wrapper
+extern "C" void fjetrad_pdf__(const double* fA, const double* fB, double* H);
+
+
+#endif // __JETRAD_PDF_H
+
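
As a rough numerical illustration of the combinations above, here is a standalone sketch that repeats the same remapping and sums outside the appl_pdf class hierarchy (jetrad_combinations below is illustrative only, not part of APPLgrid):

#include <cstdio>

void jetrad_combinations(const double* fA, const double* fB, double* H) {
  // pdg -6..6 -> u..t, ubar..tbar, g (jetrad internal ordering)
  static const int index_map[13] = { 2, 1, 3, 4, 5, 6, -2, -1, -3, -4, -5, -6, 0 };
  double f1[13], f2[13];
  for ( int i=0 ; i<13 ; i++ ) { f1[i] = (fA+6)[index_map[i]]; f2[i] = (fB+6)[index_map[i]]; }

  double Q1=0, Q1bar=0, Q2=0, Q2bar=0, D=0, Dbar=0;
  for ( int i=0 ; i<6  ; i++ ) { Q1    += f1[i]; Q2    += f2[i]; }
  for ( int i=6 ; i<12 ; i++ ) { Q1bar += f1[i]; Q2bar += f2[i]; }
  for ( int i=0 ; i<12 ; i++ ) D += f1[i]*f2[i];
  for ( int i=0 ; i<6  ; i++ ) { Dbar += f1[i]*f2[i+6]; Dbar += f1[i+6]*f2[i]; }

  H[0] = D;
  H[1] = Q1*Q2 + Q1bar*Q2bar - D;
  H[2] = Dbar;
  H[3] = Q1*Q2bar + Q1bar*Q2 - Dbar;
  H[4] = (Q1+Q1bar)*f2[12] + f1[12]*(Q2+Q2bar);
  H[5] = f1[12]*f2[12];
}

int main() {
  // toy densities: only the u quark (pdg 2) and the gluon (pdg 0) are non-zero
  double fA[13] = { 0 }, fB[13] = { 0 }, H[6];
  fA[6+2] = 0.3;  fA[6+0] = 0.5;   // u, g in beam A
  fB[6+2] = 0.2;  fB[6+0] = 0.4;   // u, g in beam B
  jetrad_combinations( fA, fB, H );
  for ( int i=0 ; i<6 ; i++ ) std::printf("H[%d] = %g\n", i, H[i]);
  return 0;
}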
Index: applgrid/tags/applgrid-01-06-36/src/mcfmw_pdf.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/mcfmw_pdf.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/mcfmw_pdf.h	(revision 2010)
@@ -0,0 +1,169 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    mcfmw_pdf.h
+ **
+ **     @brief   pdf transform functions for W production                   
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: mcfmw_pdf.h, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+
+
+#ifndef __MCFMW_PDF_H
+#define __MCFMW_PDF_H
+
+
+#include "appl_grid/appl_pdf.h" 
+
+//
+// MCFM W production
+//
+
+
+//
+//  W+
+//
+class mcfmwp_pdf : public appl::appl_pdf { 
+
+public: 
+
+  mcfmwp_pdf(const std::string& s="mcfm-wp") : appl_pdf(s) { 
+    m_Nproc = 6; 
+    make_ckm( true ); 
+  } 
+
+  ~mcfmwp_pdf() { }
+
+  virtual void evaluate(const double* fA, const double* fB, double* H) const;
+
+};
+
+
+
+//
+//  W-
+//
+class mcfmwm_pdf : public appl::appl_pdf { 
+
+public: 
+
+  mcfmwm_pdf(const std::string& s="mcfm-wm") : appl_pdf(s) { 
+    m_Nproc = 6; 
+    make_ckm( false ); 
+  } 
+
+  ~mcfmwm_pdf() { } 
+
+  virtual void evaluate(const double* fA, const double* fB, double* H) const;
+
+};
+
+
+// actual function to evaluate the pdf combinations
+// for the W+
+
+inline  void mcfmwp_pdf::evaluate(const double* fA, const double* fB, double* H) const { 
+  
+  const int nQuark = 6;
+  const int iQuark = 5; 
+  
+  // offset so we can use fA[-6]
+  // fA += 6;
+  // fB += 6;
+  
+  double GA=fA[6];
+  double GB=fB[6];
+  double QA=0; double QB=0; double QbA=0; double QbB=0;
+  
+  for(int i = 1; i <= iQuark; i++) 
+    {
+      QA += fA[nQuark + i]*m_ckmsum[nQuark + i];
+      QB += fB[nQuark + i]*m_ckmsum[nQuark + i];
+    }
+  for(int i = -iQuark; i < 0; i++) 
+    {
+      QbA += fA[nQuark + i]*m_ckmsum[nQuark + i];
+      QbB += fB[nQuark + i]*m_ckmsum[nQuark + i];
+    }
+  
+  H[2]=QbA * GB;
+  H[3]= QA * GB;
+  H[4]= GA * QbB;
+  H[5]= GA * QB;
+
+  // have to zero H[0..1] first
+  H[0]=H[1]=0; 
+  
+  for (int i1 = 3; i1 <= 5; i1 += 2)
+    {
+      for(int i2 = 8; i2 <= 10; i2 += 2)
+	{
+	  H[0] += fA[i1]*fB[i2]*m_ckm2[i1][i2];
+	  H[1] += fA[i2]*fB[i1]*m_ckm2[i2][i1];
+	}
+    }  
+}
+
+
+
+
+//
+// actual function to evaluate the pdf combinations
+//   for the W- 
+//
+inline  void mcfmwm_pdf::evaluate(const double* fA, const double* fB, double* H) const { 
+  
+  const int nQuark = 6;
+  const int iQuark = 5; 
+  
+  // offset so we can use fA[-6]
+  // fA += 6;
+  // fB += 6;
+  
+  double GA=fA[6];
+  double GB=fB[6];
+  double QA=0; double QB=0; double QbA=0; double QbB=0;
+  
+  for(int i = 1; i <= iQuark; i++) 
+    {
+      QA += fA[nQuark + i]*m_ckmsum[nQuark + i];
+      QB += fB[nQuark + i]*m_ckmsum[nQuark + i];
+    }
+  for(int i = -iQuark; i < 0; i++) 
+    {
+      QbA += fA[nQuark + i]*m_ckmsum[nQuark + i];
+      QbB += fB[nQuark + i]*m_ckmsum[nQuark + i];
+    }
+  
+  H[2]=  QA * GB;
+  H[3]= QbA * GB;
+  H[4]= GA  * QB;
+  H[5]= GA  * QbB;
+
+  // have to zero H[0..1] first
+  H[0]=H[1]=0; 
+  
+  for (int i1 = 7; i1 <= 9; i1 += 2)
+    {
+      for(int i2 = 2; i2 <= 4; i2 += 2)
+	{
+	  H[0] += fA[i1]*fB[i2]*m_ckm2[i1][i2];
+	  H[1] += fA[i2]*fB[i1]*m_ckm2[i2][i1];
+	}
+    }  
+}
+  
+
+// fortran callable wrappers
+extern "C" void fmcfmwp_pdf__(const double* fA, const double* fB, double* H);
+extern "C" void fmcfmwm_pdf__(const double* fA, const double* fB, double* H);
+
+
+
+#endif //  __MCFMW_PDF_H
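
The W+ and W- combinations above weight the quark sums by m_ckmsum and the q-qbar terms by m_ckm2, both filled by appl_pdf::make_ckm in the base class. A minimal sketch of how the weighted sums QA and QbA are built, with a toy weight array of ones standing in for the real CKM sums (illustrative only, not the APPLgrid implementation):

#include <cstdio>

int main() {
  // pdfs in pdg order -6..6, stored so that fA[6] is the gluon
  double fA[13] = { 0 };
  fA[6+1] = 0.4;   // d    (pdg  1)
  fA[6+2] = 0.3;   // u    (pdg  2)
  fA[6-1] = 0.1;   // dbar (pdg -1)

  double ckmsum[13];
  for ( int i=0 ; i<13 ; i++ ) ckmsum[i] = 1.0;   // toy weights, all unity

  const int nQuark = 6, iQuark = 5;
  double QA = 0, QbA = 0;
  for ( int i=1 ; i<=iQuark ; i++ )  QA  += fA[nQuark+i]*ckmsum[nQuark+i];
  for ( int i=-iQuark ; i<0 ; i++ )  QbA += fA[nQuark+i]*ckmsum[nQuark+i];

  std::printf("QA = %g, QbA = %g\n", QA, QbA);   // 0.7 and 0.1 with these toy values
  return 0;
}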
Index: applgrid/tags/applgrid-01-06-36/src/TFileString.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/TFileString.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/TFileString.cxx	(revision 2010)
@@ -0,0 +1,33 @@
+/**
+ **     @file    TFileString.cxx
+ **
+ **     @brief   ROOT TObject-based class holding a named std::vector of std::string,
+ **              for writing string vectors to ROOT files
+ **
+ **     @author  mark sutton
+ **     @date    Sat Mar 15 19:50:15 GMT 2008 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: TFileString.cxx, v0.0   Sat Mar 15 19:50:15 GMT 2008 sutt $
+ **
+ **/
+
+
+#include "appl_grid/appl_root.h"
+
+#ifdef USEROOT
+
+#include "TFileString.h"
+
+ClassImp(TFileString)
+
+
+std::ostream& operator<<(std::ostream& s, const TFileString& fs) { 
+  s << fs.name() << ":";
+  for ( unsigned i=0 ; i<fs.size() ; i++ ) s << "\t" << fs[i];
+  return s;
+}
+
+
+#endif
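
The operator above simply prints the stored name followed by the tab-separated entries. A stand-alone sketch of that formatting with a plain struct (TFileString itself requires ROOT and its full interface is not shown here; named_strings is a hypothetical stand-in):

#include <iostream>
#include <string>
#include <vector>

// stand-in with the same printing behaviour as the TFileString operator above
struct named_strings {
  std::string              m_name;
  std::vector<std::string> m_values;
  const std::string& name() const { return m_name; }
  size_t size() const { return m_values.size(); }
  const std::string& operator[](size_t i) const { return m_values[i]; }
};

std::ostream& operator<<(std::ostream& s, const named_strings& fs) {
  s << fs.name() << ":";
  for ( unsigned i=0 ; i<fs.size() ; i++ ) s << "\t" << fs[i];
  return s;
}

int main() {
  named_strings fs{ "pdf_combinations", { "mcfm-wp", "mcfm-wm", "mcfm-z" } };
  std::cout << fs << std::endl;   // pdf_combinations:<tab>mcfm-wp<tab>mcfm-wm<tab>mcfm-z
  return 0;
}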
Index: applgrid/tags/applgrid-01-06-36/src/mcfmz_pdf.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/mcfmz_pdf.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/mcfmz_pdf.h	(revision 2010)
@@ -0,0 +1,131 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    mcfmz_pdf.h
+ **
+ **     @brief   pdf transform functions                   
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: mcfmz_pdf.h, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+
+#ifndef __MCFMZ_PDF_H
+#define __MCFMZ_PDF_H
+
+#include <cmath>
+
+#include "appl_grid/appl_pdf.h" 
+
+
+//
+// MCFM Z production
+//
+class mcfmz_pdf : public appl::appl_pdf { 
+
+public: 
+
+  mcfmz_pdf() : appl_pdf("mcfm-z") { m_Nproc = 12; } 
+
+  ~mcfmz_pdf() { } 
+
+  virtual void evaluate(const double* fA, const double* fB, double* H) const;
+
+};
+
+
+// actual function to evaluate the pdf combinations
+
+inline  void mcfmz_pdf::evaluate(const double* fA, const double* fB, double* H) const 
+{
+
+  //  const int nQuark = 6;
+  const int iQuark = 5; 
+  
+  // offset pdf pointers so we can use [-6..6] indexing rather than [nQuark+..] indexing
+
+  fA += 6;
+  fB += 6;
+
+  //  double GA=fA[6];
+  //  double GB=fB[6];
+  double GA=fA[0];
+  double GB=fB[0];
+  
+  double UpA=0; double UpB=0; double DnA=0; double DnB=0;
+  double UpbarA=0; double UpbarB=0; double DnbarA=0; double DnbarB=0;
+  
+    
+  for(int i = 1; i <= iQuark; i++) 
+    {
+      if ((i % 2) == 0) 
+	{
+	  //	  UpA += fA[nQuark + i];
+	  //	  UpB += fB[nQuark + i];
+	  UpA += fA[i];
+	  UpB += fB[i];
+	}
+      else
+	{
+	  //	  DnA += fA[nQuark + i];
+	  //	  DnB += fB[nQuark + i];
+	  DnA += fA[i];
+	  DnB += fB[i];
+	}
+    }
+  
+  for(int i = -iQuark; i < 0; i++) 
+    {
+      if (((int)(std::abs((double)i)) % 2) == 0) 
+	{
+	  //	  UpbarA += fA[nQuark + i];
+	  //	  UpbarB += fB[nQuark + i];
+	  UpbarA += fA[i];
+	  UpbarB += fB[i];
+	}
+      else
+	{
+	  //	  DnbarA += fA[nQuark + i];
+	  //	  DnbarB += fB[nQuark + i];
+	  DnbarA += fA[i];
+	  DnbarB += fB[i];
+	}
+    }
+
+  // zero H first
+  H[0] = H[1] = H[2] = H[3] = 0;
+  
+  static int  _choice[13] = { 2, 3, 2, 3, 2, 3, 0, 1, 0, 1, 0, 1, 0 } ;
+  static int*  choice     = _choice+6;
+
+  for(int i = -iQuark; i <= iQuark; i++) 
+    {
+      if (i == 0) continue;
+       
+      //    int Choice = ( i>0 ? i%2 : abs(i%2)+2 );
+      int Choice = choice[i];
+
+      H[Choice] += fA[i] * fB[-i];
+    }
+  
+  H[4] = GA * UpB;
+  H[5] = GA * UpbarB;
+  H[6] = GA * DnB;
+  H[7] = GA * DnbarB;
+  H[8] =    UpA *GB;
+  H[9] = UpbarA *GB;
+  H[10]=    DnA *GB;
+  H[11]= DnbarA *GB;
+  
+  return;     
+}
+
+
+
+
+#endif  // __MCFMZ_PDF_H
+
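
The choice[] lookup above routes each flavour pairing fA[i]*fB[-i] into one of the four q-qbar slots H[0..3] (it appears to separate up-type from down-type and quark from antiquark on beam A), with the gluon-quark terms filling H[4..11]. A self-contained sketch of that bookkeeping with toy pdf values (illustrative only, outside the appl_pdf class):

#include <cstdio>

int main() {
  // pdfs in pdg order -6..6, stored with an offset so fA[0] is the gluon
  double fa[13] = { 0 }, fb[13] = { 0 };
  double* fA = fa+6;  double* fB = fb+6;
  fA[2]  = 0.3;  fB[-2] = 0.1;   // u in A, ubar in B
  fA[-1] = 0.2;  fB[1]  = 0.4;   // dbar in A, d in B

  static int _choice[13] = { 2, 3, 2, 3, 2, 3, 0, 1, 0, 1, 0, 1, 0 };
  int* choice = _choice+6;

  double H[4] = { 0, 0, 0, 0 };
  for ( int i=-5 ; i<=5 ; i++ ) {
    if ( i==0 ) continue;
    H[ choice[i] ] += fA[i]*fB[-i];
  }
  // with these toy values H[0] gets u(A)*ubar(B) = 0.03 and H[3] gets dbar(A)*d(B) = 0.08
  std::printf("H = %g %g %g %g\n", H[0], H[1], H[2], H[3]);
  return 0;
}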
Index: applgrid/tags/applgrid-01-06-36/src/appl_file.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/appl_file.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/appl_file.cxx	(revision 2010)
@@ -0,0 +1,213 @@
+/**
+ **   @file    appl_file.cxx        
+ **                   
+ **   @author  sutt
+ **   @date    Wed 30 Dec 2020 15:25:42 GMT
+ **
+ **   $Id: appl_file.cxx, v0.0   Wed 30 Dec 2020 15:25:42 GMT sutt $
+ **
+ **   Copyright (C) 2020 sutt (sutt@cern.ch)    
+ **
+ **/
+
+
+#include "appl_grid/appl_file.h"
+
+#include <sys/stat.h>
+
+
+appl::file::file( const std::string& name, const std::string& option ) : 
+  mfilename(name), mfile(0), mopen(false), msize(0) { 
+
+    mopt = option;
+
+    /// make sure it is always binary mode
+    if ( mopt.find("b")==std::string::npos ) mopt += "b";
+
+    //    std::cout << "appl::file::opening file: " << filename() << "\toptions: " << mopt << std::endl; 
+
+    /// file sizes - actual filesize and uncompressed size
+    int filesize = 0;
+    int usize    = 0;
+    
+    if ( mopt.find("r")!=std::string::npos ) { 
+
+      /// first get file size ...
+
+      struct stat stat_buf;
+      int rc = stat(filename().c_str(), &stat_buf);
+      if ( rc == 0 ) filesize = stat_buf.st_size;
+      else { 
+	std::cerr << "appl::file: can not determine file size for file: " << filename() << std::endl;
+	return;
+      }
+
+      /// next check if it is in zipped format ...
+      
+      FILE* tmp_file = fopen( filename().c_str(), "rb" );
+      int zip_signature = 0;
+      fread( &zip_signature, sizeof(int), 1, tmp_file );
+      if ( (zip_signature&0xffffff)==0x88b1f ) {   
+	/// if it is, read the uncompressed file size from the last 4 bytes of the file ...
+	/// uncompressed size
+	int offset = filesize - 4;
+	fseek( tmp_file, offset, SEEK_SET );
+	fread( &usize, sizeof(int), 1, tmp_file );
+      }
+      else usize = filesize;
+      
+      //      std::cout << "\tfile size:         " << filesize << std::endl;
+      //      std::cout << "\tuncompressed size: " << usize << std::endl;
+      
+      fclose( tmp_file );
+      
+    }
+      
+    /// now open the file ...
+
+    mfile = gzopen( mfilename.c_str(), mopt.c_str() );
+
+    mopen = true;
+
+    /// write header
+    if ( mopt.find("w")!=std::string::npos ) {
+
+      double v = SB::WRITEGUARD;
+      gzwrite( mfile, (void*)&v,  sizeof(SB::TYPE) );
+      mindex.add( "header", sizeof(SB::TYPE) );
+
+      msize += sizeof(SB::TYPE);
+
+      /// the index is added at the end before the file is closed
+    }
+    if ( mopt.find("r")!=std::string::npos ) { 
+
+      /// read the header ...
+
+      double v = 0;
+      gzread( mfile, (void*)&v,   sizeof(SB::TYPE) );
+      
+      /// is this one of our files ? 
+      if ( v!=SB::WRITEGUARD ) { 
+	std::cerr << "appl::file: incorrect file format file: " << filename() << std::endl;
+	Close();
+	mopen = false;
+	return;
+      }
+
+      /// yes it is so ...
+
+      /// first get the (uncompressed) file size from the last 4 bytes 
+      /// of the compressed file, then use that to get the offsets to the 
+      /// index stored at the end of the file - *why* does this have to 
+      /// be soooo difficult 
+
+      //      std::cout << "opened file: " << filename() << "\tsize: " << filesize << std::endl;
+      
+      /// read the uncompressed file size from the compressed file
+      /// so that we can navigate through the compressed file, 
+      /// to get the index in case we want non-sequential access
+      
+      //      std::cout << "uncompressed size: " << usize << std::endl;
+      
+      /// get the total file size and offset of the index offset
+      
+      SB::TYPE vtrailer[3];
+      
+      gzseek( mfile, usize-sizeof(SB::TYPE)*3, SEEK_SET );
+      gzread( mfile, (void*)vtrailer, sizeof(double)*3 );
+
+      int index_size = (vtrailer[1]-vtrailer[0]-sizeof(SB::TYPE)*3); /// in bytes
+      
+      std::vector<SB::TYPE> vindex(index_size/sizeof(SB::TYPE)); 
+           
+      int indexptr = vtrailer[0];
+
+      ///      std::cout << "vtrailer[0]: " << vtrailer[0] << std::endl;
+      ///      std::cout << "vtrailer[1]: " << vtrailer[1] << std::endl;
+      ///      std::cout << "vtrailer[2]: " << vtrailer[2] << std::endl;
+      
+      //      gzrewind( mfile );
+      gzseek( mfile, indexptr, SEEK_SET ); 
+      gzread( mfile, (void*)&vindex[0], index_size ); 
+      
+      mindex.deserialise( vindex );
+      
+      //      std::cout << mindex << std::endl;
+      
+      /// rewind to open the file for proper reading ...
+      gzrewind( mfile );
+      /// read the header again, so that we are aligned with the 
+      /// first object in the file
+      gzread( mfile, (void*)&v,   sizeof(SB::TYPE) );
+      
+      //      std::cout << "write guard " << v << std::endl;
+      
+    }
+}
+ 
+
+appl::file::~file() { Close(); }
+
+
+void appl::file::Close() {
+    
+    if ( isopen() ) {
+
+      /// if writing the file, update the global size 
+
+      if ( mopt.find("w")!=std::string::npos ) { 
+	double content_trailer = SB::WRITEGUARD;
+	int bytes = gzwrite( mfile, (void*)&content_trailer,  sizeof(SB::TYPE) );
+	mindex.add( "trailer", bytes ); 
+	
+	msize += bytes;
+	
+	double index_offset = msize;
+
+	//	std::cout << "writing: index offset: " << index_offset << std::endl;  
+
+	/// this is a thorny one - should we add the index to the index ?
+	/// until the index has been written, it should not be in the index
+	/// but once it has been written, then it *should* be in the index
+	/// so we would need to add the index to itself, *before* writing 
+	/// to the file
+	/// basically, I think it is more sensible *not* to include the 
+	/// index itself in the index, but of course, that could 
+	/// be changed if necessary
+	/// what we *should* do, is to add some pointer to the index at 
+	/// the start of the file, so that we can navigate to it directly 
+	/// on opening the file     
+	Write( mindex );
+	
+	/// this doesn't work - need to work out why
+	//	gzrewind( mfile );
+	//	gzseek( mfile, sizeof(double), SEEK_SET );
+	//    	double tsize = msize;
+
+	SB::TYPE trailer[3] = { 0, 0, 0 };
+	trailer[0] = index_offset;
+	trailer[1] = msize+3*sizeof(SB::TYPE);
+	trailer[2] = SB::WRITEGUARD; 
+
+	bytes = gzwrite( mfile, (void*)&trailer, sizeof(SB::TYPE)*3 );
+	//	std::cout << "writing trailer: " << std::endl;
+	//	std::cout << "\tindex offset: " << trailer[0] << std::endl;  
+	//	std::cout << "\ttotal size:   " << trailer[1] << std::endl;  
+	//	std::cout << "\twrite guard:  " << trailer[2] << std::endl;  
+
+	mindex.add("file trailer", bytes );
+      }
+
+      gzclose(mfile);
+      mopen = false;
+
+      mindex.clear();
+      
+    }
+
+}
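+
+/// a minimal usage sketch, assuming only the interface used above (the exact
+/// Write() signature is not shown in this file, so the payload call is purely
+/// illustrative):
+///
+///   appl::file fout( "grid.appl", "w" );  // the constructor writes the header
+///   fout.Write( payload );                // some serialisable payload object
+///   fout.Close();                         // appends the index and the trailer
+///
+///   appl::file fin( "grid.appl", "r" );   // reads back header, index and trailer
+///   if ( fin.isopen() ) { /* ... read the stored objects ... */ }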
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/tsparse1d.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/tsparse1d.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/tsparse1d.h	(revision 2010)
@@ -0,0 +1,197 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    tsparse1d.h
+ **
 + **     @brief   very basic 1d sparse matrix class, needs to be improved, 
+ **              and extended to 3d                   
+ **
+ **     @author  mark sutton
+ **     @date    Fri Nov  9 00:17:31 CET 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id$
+ **
+ **/
+
+
+#ifndef __TSPARSE1D_H
+#define __TSPARSE1D_H
+
+#include <iostream>
+
+#include <iomanip>
+
+#include <sstream>
+
+#include <string>
+
+#include "tsparse_base.h"
+
+
+template<class T> 
+class tsparse1d : public tsparse_base {
+
+public: 
+
+  tsparse1d(int nx) : tsparse_base(nx), m_v(NULL) {
+    m_v = new T[m_Nx];
+    for ( int i=0 ; i<Nx() ; i++ ) m_v[i]=0;
+  }
+  
+  tsparse1d(int nx, int lx, int _size=1) 
+    : tsparse_base(nx, lx, _size), m_v(NULL) {
+    if ( _size ) m_v = new T[_size];
+    for ( int i=0 ; i<_size ; i++ ) m_v[i]=0;
+  }
+  
+  ~tsparse1d() { if ( m_v ) delete[] m_v; }
+ 
+ 
+  T* trim() { 
+    
+    int xmin = m_lx;  int xmax = m_ux;
+    
+    for ( ; xmin<xmax+1 && m_v[xmin-m_lx]==0 ; xmin++ ) { } 
+    for ( ; xmin<xmax   && m_v[xmax-m_lx]==0 ; xmax-- ) { } 
+
+    // don't need to change anything 
+    if ( xmin==m_lx && xmax==m_ux )  return m_v;    
+
+    // it is empty now so delete everything
+    if ( xmax<xmin ) {     
+      m_lx = xmin;
+      m_ux = xmax;      
+      delete[] m_v; 
+      return m_v = NULL; 
+    }
+
+    // create a new, smaller list
+    T* newv = new T[xmax-xmin+1];
+    T* newp = newv;
+    T* mvp  = m_v+xmin-m_lx;
+    for ( int j=xmin ; j<=xmax ; j++ ) (*newp++) = (*mvp++);
+
+    m_lx = xmin; 
+    m_ux = xmax;      
+    
+    delete[] m_v;
+    m_v = newv;     
+    
+    return m_v;
+  }
+  
+
+  void untrim() { 
+    grow(0);
+    grow(Nx()-1);
+  }
+
+  T& operator()(int i) {
+    //    range_check(i);
+    grow(i);
+    return m_v[i-m_lx];
+  }
+
+  T operator()(int i) const {
+    // range_check(i); 
+    if ( i<m_lx || i>m_ux ) return 0;
+    return m_v[i-m_lx];
+  }
+
+
+#if 1  
+  void print() const { 
+    std::cout << "   \tsize=" << size() << "\t";
+    for ( int i=0 ; i<Nx() ; i++ ) { 
+      if ( trimmed(i) ) std::cout << "o";
+      else              std::cout << " ";
+    }
+    std::cout << std::endl;
+  }
+#else
+  void print() const { 
+    std::cout << "   \tsize=" << size() << "\t";
+    for ( int i=0 ; i<Nx() ; i++ ) { 
+      if ( trimmed(i) ) {
+	std::cout << std::setprecision(2) << std::setw(4) << mant((*this)(i)) << "\t";
+      }
+      else              std::cout << "  -\t";
+    }
+    std::cout << std::endl;
+  }
+#endif
+
+  T* v() { return m_v; }
+
+
+  tsparse1d& operator*=(const double& d) { 
+    for ( int i=0 ; i<Nx() ; i++ ) {     
+      if ( tsparse_base::trimmed(i) ) { 
+	if ( m_v[i-m_lx] ) m_v[i-m_lx]*=d;
+      }
+    }
+    return *this;
+  }
+  
+  tsparse1d& operator+=(const tsparse1d& t) {
+    if ( Nx()!=t.Nx() ) throw out_of_range("bin mismatch");
+    for ( int i=0 ; i<Nx() ; i++ ) (*this)(i)+=t(i);
+    return *this;
+  }
+
+private:
+
+
+  void grow(int i) { 
+
+    // nothing needs to be done
+    if ( i>=m_lx && i<=m_ux ) return;
+
+    // is it empty?
+    if ( m_lx>m_ux ) { 
+      m_v = new T[1];
+      (*m_v) = 0;
+      m_lx = m_ux = i;
+      return;
+    }
+    
+    T* newv = new T[ ( i<m_lx ? m_ux-i+1 : i-m_lx+1 ) ];
+    T* newp = newv;
+    T* mvp  = m_v;
+
+    int lx = m_lx;
+    int ux = m_ux;
+    for ( ; m_lx>i ; m_lx-- )  (*newp++) = 0;
+    for ( ; lx<=ux ; lx++ )    (*newp++) = (*mvp++);
+    for ( ; m_ux<i ; m_ux++ )  (*newp++) = 0;
+    delete[] m_v;
+    m_v = newv;
+  }
+
+private:
+
+  T* m_v;
+
+};
+
+
+
+// stream IO template
+template<class T>std::ostream& operator<<(std::ostream& s, const tsparse1d<T>& sp) { 
+  for ( int i=0 ; i<sp.Nx() ; i++ ) s << "\t" << sp(i);
+  return s;
+}
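+
+/// a minimal usage sketch of the class above (illustrative only):
+///
+///   tsparse1d<double> v(10);   // 10 element sparse vector
+///   v(3) = 1.5;                // non-const access grows the stored range as needed
+///   v.trim();                  // shrink storage to the populated range
+///   const tsparse1d<double>& cv = v;
+///   double x = cv(7);          // const access outside the stored range returns 0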
+
+
+#endif  // __TSPARSE1D_H 
+
+
+
+
+
+
+
+
+
+

Property changes on: applgrid/tags/applgrid-01-06-36/src/tsparse1d.h
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+Id
\ No newline at end of property
Index: applgrid/tags/applgrid-01-06-36/src/vrapz_pdf.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/vrapz_pdf.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/vrapz_pdf.cxx	(revision 2010)
@@ -0,0 +1,42 @@
+/**
+ **     @file    vrapz_pdf.cxx
+ **
+ **     @brief   pdf transform functions for vrapz                   
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: nlojet_pdf.cxx, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+
+
+#include "appl_grid/appl_pdf.h" 
+
+#include "vrapz_pdf.h"
+
+
+extern "C" void fvrapzLO_pdf__(const double* fA, const double* fB, double* H) { 
+  static vrapzLO_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+
+
+extern "C" void fvrapzNLO_pdf__(const double* fA, const double* fB, double* H) { 
+  static vrapzNLO_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+
+
+extern "C" void fvrapzNNLO_pdf__(const double* fA, const double* fB, double* H) { 
+  static vrapzNNLO_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/fappl_grid.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/fappl_grid.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/fappl_grid.h	(revision 2010)
@@ -0,0 +1,159 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    fappl_grid.h
+ **
+ **     @author  mark sutton
+ **     @date    Fri 25 Oct 2013 00:52:32 CEST 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: fappl_grid.h, v0.0   Fri 25 Oct 2013 00:52:32 CEST sutt $
+ **
+ **/
+
+
+#pragma once
+
+
+/// externally defined alpha_s and pdf routines for fortran 
+/// callable convolution wrapper
+extern "C" double fnalphas_(const double& Q);
+extern "C" void   fnpdf_( const double& x, const double& Q, double* xf);
+
+
+
+/// create a grid
+extern "C" void bookgrid_( int& id, const int& Nobs, const double* binlims);
+
+/// delete a grid
+extern "C" void releasegrid_( const int& id );
+
+/// delete all grids
+extern "C" void releasegrids_();
+
+/// read a grid from a file
+extern "C" void readgrid_( int& id, const char* s, int _len);
+
+/// write to a file 
+extern "C" void writegrid_(const int& id, const char* s, int _len);
+
+/// add an entry 
+extern "C" void fillgrid_(const int& id, 
+			  const int& ix1, const int& ix2, const int& iQ, 
+			  const int& iobs, 
+			  const double* w,
+			  const int& iorder );  
+
+/// redefine the grid dimensions
+extern "C" void redefine_(const int& id, 
+			  const int& iobs, const int& iorder, 
+			  const int& NQ2,  const double& Q2min, const double& Q2max, 
+			  const int& Nx,   const double&  xmin, const double&  xmax); 
+
+
+/// get number of observable bins for a grid 
+extern "C" int getnbins_(const int& id);
+
+/// get the bin number based on the ordinate 
+extern "C" int getbinnumber_(const int& id, double& x);
+
+/// get low edge for a grid/bin number
+extern "C" double getbinlowedge_(const int& id, int& bin);
+
+/// get width for a grid/bin number
+extern "C" double getbinwidth_(const int& id, const int& bin);
+
+/// do the convolution!! hooray!!
+
+extern "C" void convolute_(const int& id, double* data);
+
+extern "C" void convolutewrap_(const int& id, double* data, 
+			       void (*pdf)(const double& , const double&, double* ),
+			       double (*alphas)(const double& ) );
+
+
+extern "C" void convoluteppbar_(const int& id, double* data);
+
+extern "C" void convoluteppbarwrap_(const int& id, double* data, 
+				    void (*pdf)(const double& , const double&, double* ),
+				    double (*alphas)(const double& ) );
+
+
+/// allow different loop order
+extern "C" void convoluteorder_(const int& id, const int& nloops, double* data);
+
+
+
+/// allow variation of loop order and scales
+extern "C" void fullconvolute_(const int& id, double* data, 
+			       const int& nloops,
+			       const double& rscale, const double& fscale  );
+
+extern "C" void fullconvolutewrap_(const int& id, double* data, 
+				   void (*pdf)(const double& , const double&, double* ),
+				   double (*alphas)(const double& ),
+				   const int& nloops,
+				   const double& rscale, const double& fscale  );
+
+
+
+/// allow different cms energies
+extern "C" void escaleconvolute_(const int& id, double* data, const double& Escale);
+
+extern "C" void escaleconvolutewrap_(const int& id, double* data, 
+				     void (*pdf)(const double& , const double&, double* ),
+				     double (*alphas)(const double& ), 
+				     const double& Escale );
+
+
+
+/// allow different cms energies, scale variation, and loop order
+extern "C" void escalefullconvolute_(const int& id, double* data, 
+				     const int& nloops,
+				     const double& rscale, const double& fscale,
+				     const double& Escale );
+
+extern "C" void escalefullconvolutewrap_(const int& id, double* data, 
+					 void (*pdf)(const double& , const double&, double* ),
+					 double (*alphas)(const double& ),
+					 const int& nloops,
+					 const double& rscale, const double& fscale,
+					 const double& Escale );
+
+
+/// set the ckm matrix - a flat vector of 9 doubles, Vud, Vus, Vub, Vcd ...
+extern "C" void setckm_( const int& id, const double* ckm );
+
+/// get the ckm matrix - a flat vector of 9 doubles, Vud, Vus, Vub, Vcd ...
+extern "C" void getckm_( const int& id, double* ckm );
+
+extern "C" void lockckm_( const int& i ); 
+
+/// print a grid
+extern "C" void printgrid_(const int& id);
+
+/// print all grids
+extern "C" void printgrids_();
+
+/// print the grid documentation
+extern "C" void printgriddoc_(const int& id);
+
+/// create grids from fastnlo
+extern "C" void readfastnlogrids_( int* ids, const char* s, int _len );
+
+
+/// grid std::map management
+
+extern "C" void ngrids_(int& n);
+
+extern "C" void gridids_(int* ids);
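+
+/// a minimal calling sketch from C++, assuming only the declarations above (the
+/// grid id is passed by reference to bookgrid_, and string arguments carry an
+/// explicit length, as is usual for fortran style interfaces):
+///
+///   double bins[4] = { 0., 25., 50., 100. };   // 3 observable bins
+///   int id = 0;
+///   bookgrid_( id, 3, bins );
+///   /* ... fill with fillgrid_ during the calculation ... */
+///   writegrid_( id, "grid.root", 9 );
+///   releasegrid_( id );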
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/appl_timer.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/appl_timer.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/appl_timer.cxx	(revision 2010)
@@ -0,0 +1,99 @@
+/**************************************************************************
+ **
+ **   File:         appl_timer.cxx        
+ **
+ **   Description:    
+ **                   
+ **                   
+ ** 
+ **   Author:       M.Sutton    
+ **
+ **   Created:      Fri Jun 29 18:59:11 BST 2001
+ **   Modified:     
+ **                   
+ **                   
+ **
+ **************************************************************************/ 
+
+#include <stdlib.h>
+#include <pthread.h>
+
+#include "appl_grid/appl_timer.h"
+
+
+pthread_mutex_t time_lock = PTHREAD_MUTEX_INITIALIZER;  
+
+
+typedef struct {
+  struct timeval start_time;
+  struct timeval stop_time;
+  struct timeval since_time;  
+  struct timeval diff_time;
+} __appl_timer;
+
+
+
+int appl_timersub(struct timeval* stop_time, struct timeval* start_time, struct timeval* diff_time)
+{  
+  diff_time->tv_sec  = stop_time->tv_sec  - start_time->tv_sec;
+  diff_time->tv_usec = stop_time->tv_usec - start_time->tv_usec;
+  return 0;
+}
+
+
+
+/**  get the time via a (probably needlessly) mutex protected 
+ **  call to gettimeofday
+ **/
+
+void __appl__gettime(struct timeval* t)
+{
+  pthread_mutex_lock(&time_lock);
+  gettimeofday (t, NULL);            
+  pthread_mutex_unlock(&time_lock);
+}
+
+
+
+/**  significantly (0.02ms) faster and simpler timer start/stop 
+ **  functions 
+ **/
+
+struct timeval appl_timer_start(void)
+{
+  struct timeval start_time;
+  __appl__gettime (&start_time);
+  return start_time;
+}
+
+
+// double appl_d(struct timeval time) { return (time.tv_sec*1000.0) + (time.tv_usec/1000.0); }
+
+double appl_timer_stop(struct timeval start_time)
+{
+  double time = 0;
+  struct timeval stop_time;
+  struct timeval diff_time;
+  //  double diff;
+
+  __appl__gettime (&stop_time);
+  pthread_mutex_lock(&time_lock);
+  appl_timersub( &stop_time, &start_time, &diff_time );
+  //  printf("diff: %d\n", _timersub( &stop_time, &start_time, &diff_time ) );
+  //  diff = appl_d(stop_time)-appl_d(start_time);
+  //  printf("timer:   %12.5f %12.5f  %12.5f   %12.5f\n", appl_d(stop_time), appl_d(start_time), appl_d(diff_time), diff ); 
+  pthread_mutex_unlock(&time_lock);  
+  time = (diff_time.tv_sec*1000.0) + (diff_time.tv_usec/1000.0);
+  return time;
+}
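+
+/**  a minimal usage sketch of the timer functions above:
+ **
+ **    struct timeval t0 = appl_timer_start();
+ **    ...                                     code to be timed
+ **    double dt = appl_timer_stop( t0 );      elapsed time in milliseconds
+ **/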
+
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/tsparse3d.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/tsparse3d.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/tsparse3d.h	(revision 2010)
@@ -0,0 +1,376 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    tsparse3d.h
+ **
+ **     @brief   very basic 3d sparse matrix class, needs to be improved
+ **              
+ **
+ **     @author  mark sutton
+ **     @date    Fri Nov  9 00:17:31 CET 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: tsparse3d.h, v   Fri Nov  9 00:17:31 CET 2007 sutt $
+ **
+ **/
+
+
+#ifndef __TSPARSE3D_H
+#define __TSPARSE3D_H
+
+#include <iostream>
+
+#include "tsparse2d.h"
+
+
+template<class T> 
+class tsparse3d : public tsparse_base {
+
+public:
+
+  tsparse3d(int nx, int ny, int nz) 
+    : tsparse_base(nx), m_Ny(ny), m_Nz(nz), m_v(NULL), m_trimmed(false) {
+    m_v = new tsparse2d<T>*[m_Nx];
+    for ( int i=0 ; i<m_Nx ; i++ ) m_v[i] = new tsparse2d<T>(m_Ny, m_Nz);
+    //   setup_fast();
+  }
+
+  // Fixme: need to rewrite this constructor properly, so that 
+  // a trimmed matrix is copied as trimmed and not copied in full 
+  // and then trimmed
+  tsparse3d(const tsparse3d& t) 
+    : tsparse_base(t.m_Nx), m_Ny(t.m_Ny), m_Nz(t.m_Nz), m_v(NULL), m_trimmed(t.m_trimmed) {
+    m_v = new tsparse2d<T>*[m_Nx];
+    for ( int i=0 ; i<m_Nx ; i++ ) m_v[i] = new tsparse2d<T>(m_Ny, m_Nz);
+    //   setup_fast();
+    // deep copy of all elements
+    for ( int i=0 ; i<m_Nx ; i++ ) {
+      for ( int j=0 ; j<m_Ny ; j++ ) {
+	for ( int k=0 ; k<m_Nz ; k++ ) (*this)(i,j,k) = t(i,j,k);
+      }
+    }    
+    if ( m_trimmed ) trim();
+  }
+  
+  
+  ~tsparse3d() { 
+    if ( m_v ) { 
+      for ( int i=m_ux-m_lx+1 ; i-- ;  )  if ( m_v[i] ) delete m_v[i];
+      delete[] m_v;
+    }
+    //    empty_fast();
+  }
+  
+
+  // accessors
+  int Ny() const { return m_Ny; } 
+  int Nz() const { return m_Nz; } 
+
+
+
+  void trim() { 
+    
+    m_trimmed = true;
+    
+    // trim each column down 
+    for ( int i=m_ux-m_lx+1 ; i-- ;  )  if ( m_v[i] ) m_v[i]->trim();
+
+    int xmin = m_lx;
+    int xmax = m_ux;
+    
+    // now find new limits
+    for ( ; xmin<xmax+1 && (*m_v[xmin-m_lx])==0 ; xmin++ )  delete m_v[xmin-m_lx];
+    for ( ; xmin<xmax   && (*m_v[xmax-m_lx])==0 ; xmax-- )  delete m_v[xmax-m_lx];
+
+    m_empty = false;
+    if ( xmax<xmin ) m_empty = true; 
+   
+    // don't need to change anything
+    if ( xmin==m_lx && xmax==m_ux ) return; 
+
+    // trimmed matrix is empty
+    if ( xmax<xmin ) { 
+      m_lx = xmin;   m_ux = xmax;
+      delete[] m_v;  m_v = NULL;
+      return; 
+    }
+
+    // copy to trimmed matrix (there must be a better way to do this)
+    tsparse2d<T>** m_vnew = new tsparse2d<T>*[xmax-xmin+1]; 
+    tsparse2d<T>** mnp = m_vnew;
+    tsparse2d<T>** mvp = m_v+xmin-m_lx;
+    for ( int j=xmin ; j<=xmax ; j++ )  (*mnp++) = (*mvp++);
+    delete[] m_v;
+
+    // update the sparse data and corresponding limits 
+    m_lx = xmin;
+    m_ux = xmax;
+    m_v  = m_vnew;
+
+    m_empty = false;
+    if ( empty() ) m_empty = true;
+  }
+
+
+  void untrim() { 
+    m_trimmed = false;
+    grow(0);
+    grow(Nx()-1);
+    tsparse2d<T>** mvp = m_v;
+    for ( int i=m_lx ; i<=m_ux ; i++ ) (*mvp++)->untrim();
+  }
+  
+  bool trimmed() const { return m_trimmed; }
+
+  bool trimmed(int i, int j, int k) const { 
+    if ( i<m_lx || i>m_ux ) return false;
+    else                    return m_v[i-m_lx]->trimmed(j,k);
+  } 
+
+
+  T operator()(int i, int j, int k) const {
+    //  range_check(i);
+    if ( i<m_lx || i>m_ux ) return 0;
+    return (*((const tsparse2d<T>** const)m_v)[i-m_lx])(j,k);
+  } 
+
+
+  const tsparse2d<T>* operator[](int i) const {
+    // range_check(i);
+    if ( i<m_lx || i>m_ux ) return NULL;
+    return m_v[i-m_lx];
+  } 
+  
+
+  T& operator()(int i, int j, int k) {
+    // range_check(i);
+    grow(i);
+    return (*m_v[i-m_lx])(j,k);
+  } 
+
+  int size() const { 
+    int N=0;
+    for ( int i=m_ux-m_lx+1 ; i-- ; )  N += m_v[i]->size();
+    return N; // +3*sizeof(int);
+  }
+
+#if 0
+  void setup_fast() { 
+    // set up fast lookup table into the (untrimmed) 3d array.
+    m_fastindex = new T*[Nx()*Ny()*Nz()];
+
+    for ( int i=0 ; i<Nx() ; i++ ) { 
+      for ( int j=0 ; j<Ny() ; j++ ) { 
+	for ( int k=0 ; k<Nz() ; k++ ) { 
+	  m_fastindex[(i*Ny()+j)*Nz()+k] = &(m_v[i]->v()[j])->v()[k];
+	}
+      }
+    }
+  }
+  
+  
+  void empty_fast() { 
+    if ( m_fastindex ) delete[] m_fastindex;
+    m_fastindex=NULL;
+  }
+  
+  T& fill_fast(int i, int j, int k) { 
+    return *m_fastindex[(i*Ny()+j)*Nz()+k];
+  }
+
+  T fill_fast(int i, int j, int k) const { 
+    return *m_fastindex[(i*Ny()+j)*Nz()+k];
+  }
+#endif
+
+
+  void print() const {
+    if ( m_ux-m_lx+1==0 ) std::cout << "-" << "\n";
+    else { 
+      for ( int i=0 ; i<Nx() ; i++ ) {     
+	if ( tsparse_base::trimmed(i) ) { 
+	  std::cout << "m_v[" << i << "]=" << m_v[i-m_lx] << "\n";
+	  if ( m_v[i-m_lx] ) m_v[i-m_lx]->print();
+	}
+	else {
+	  std::cout << "- \t"; 
+	}
+	std::cout << "\n";
+      }
+    }
+  }  
+
+  int ymin() const { 
+    int minx = m_Ny;
+    tsparse2d<T>** mvp = m_v;
+    for ( int i=m_lx ; i<=m_ux ; i++ ) { 
+      int _minx=(*mvp++)->xmin();
+      if ( _minx<minx ) minx=_minx;
+    }
+    return minx;
+  }
+
+  int ymax() const { 
+    int maxx = -1;
+    tsparse2d<T>** mvp = m_v;
+    for ( int i=m_lx ; i<=m_ux ; i++ ) { 
+      int _minx=(*mvp)->xmin();
+      int _maxx=(*mvp++)->xmax();
+      if ( _minx<=_maxx && _maxx>maxx ) maxx=_maxx;
+    }
+    if ( maxx==-1 ) maxx=m_Ny-1;
+    return maxx;
+  }
+
+
+  int zmin() const { 
+    int minx = m_Nz;
+    tsparse2d<T>** mvp = m_v;
+    for ( int i=m_lx ; i<=m_ux ; i++ ) { 
+      int _minx=(*mvp++)->ymin();
+      if ( _minx<minx ) minx=_minx;
+    }
+    return minx;
+  }
+
+  int zmax() const { 
+    int maxx = -1;
+    tsparse2d<T>** mvp = m_v;
+    for ( int i=m_lx ; i<=m_ux ; i++ ) { 
+      int _minx=(*mvp)->ymin();
+      int _maxx=(*mvp++)->ymax();
+      if ( _minx<=_maxx && _maxx>maxx ) maxx=_maxx;
+    }
+    if ( maxx==-1 ) maxx=m_Nz-1;
+    return maxx;
+  }
+
+
+  // algebraic operators
+
+  tsparse3d& operator=(const tsparse3d& t) { 
+    // clearout what is already in the matrix 
+    if ( m_v ) { 
+      for ( int i=m_ux-m_lx+1 ; i-- ; )  if ( m_v[i] ) delete m_v[i];
+      delete[] m_v;
+      m_v = NULL;
+    }
+
+    m_trimmed = t.m_trimmed;
+
+    // now copy everything else
+    m_Nx = t.m_Nx;
+    m_Ny = t.m_Ny;
+    m_Nz = t.m_Nz;    
+
+    m_lx = 0;
+    m_ux = m_Nx-1;
+    m_v = new tsparse2d<T>*[m_Nx];
+    for ( int i=0 ; i<m_Nx ; i++ ) m_v[i] = new tsparse2d<T>(m_Ny, m_Nz);
+    //   setup_fast();
+    // deep copy of all elements
+    for ( int i=0 ; i<m_Nx ; i++ ) {
+      for ( int j=0 ; j<m_Ny ; j++ ) {
+	for ( int k=0 ; k<m_Nz ; k++ ) (*this)(i,j,k) = t(i,j,k);
+      }
+    }    
+    if ( m_trimmed ) trim();
+    return *this;
+  }
+  
+  
+  tsparse3d& operator*=(const double& d) { 
+    for ( int i=0 ; i<Nx() ; i++ ) {     
+      if ( tsparse_base::trimmed(i) ) { 
+	if ( m_v[i-m_lx] ) (*m_v[i-m_lx])*=d;
+      }
+    }
+    return *this;
+  }
+  
+  
+  tsparse3d& operator+=(const tsparse3d& t) {
+    m_trimmed = false;
+    if ( Nx()!=t.Nx() || Ny()!=t.Ny() || Nz()!=t.Nz() ) throw out_of_range("bin mismatch");
+    for ( int i=0 ; i<Nx() ; i++ ) {
+      for ( int j=0 ; j<Ny() ; j++ ) {
+	for ( int k=0 ; k<Nz() ; k++ ) (*this)(i,j,k) += t(i,j,k);
+      }
+    }
+    return *this;
+  }
+  
+
+private:
+  
+  void grow(int i) { 
+     
+    // nothing needs to be done; 
+    if ( i>=m_lx && i<=m_ux ) return;
+
+    // is it empty? add a single row
+    if ( m_lx>m_ux ) { 
+      m_v    = new tsparse2d<T>*[1];
+      (*m_v) = new tsparse2d<T>(m_Ny, m_Nz, m_Ny, m_Nz, 0, 0);
+      m_lx = m_ux = i;
+      return;
+    }
+    
+    tsparse2d<T>** newv = new tsparse2d<T>*[ ( i<m_lx ? m_ux-i+1 : i-m_lx+1 ) ];
+    tsparse2d<T>** newp = newv;
+    tsparse2d<T>** mvp  = m_v;
+
+    int lx = m_lx;
+    int ux = m_ux;
+
+    // grow at front
+    for ( ; m_lx>i ; m_lx-- )  (*newp++) = new tsparse2d<T>(m_Ny, m_Nz, m_Ny, m_Nz, 0, 0);
+    // copy existing
+    for ( ; lx<=ux ; lx++ )    (*newp++) = (*mvp++);
+    // grow at back
+    for ( ; m_ux<i ; m_ux++ )  (*newp++) = new tsparse2d<T>(m_Ny, m_Nz, m_Ny, m_Nz, 0, 0);
+
+    delete[] m_v;
+    m_v = newv;
+  }
+
+
+protected:
+  
+  int m_Ny;
+  int m_Nz;
+  
+  tsparse2d<T>**  m_v;
+  
+  //  T** m_fastindex;
+  
+  bool m_trimmed;
+
+};
+
+
+// stream IO template
+template<class T> std::ostream& operator<<(std::ostream& s, const tsparse3d<T>& sp) { 
+  for ( int i=0 ; i<sp.Nx() ; i++ ) { 
+    for ( int j=0 ; j<sp.Ny() ; j++ ) { 
+      for ( int k=0 ; k<sp.Nz() ; k++ ) { 
+	s << sp(i,j,k) << "\t"; 
+      }
+      s << "\n";
+    }
+    s << "\n";
+  }
+  return s;
+}
+
+
+#endif  // __TSPARSE3D_H 
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/histogram.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/histogram.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/histogram.cxx	(revision 2010)
@@ -0,0 +1,323 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **   @file    histogram.cxx        
+ **                   
+ **   @author  sutt
+ **   @date    Mon 28 Dec 2020 19:33:12 GMT
+ **
+ **   $Id: histogram.h, v0.0   Mon 28 Dec 2020 19:33:12 GMT sutt $
+ **
+ **   Copyright (C) 2020 sutt (sutt@cern.ch)    
+ **
+ **/
+
+
+#include "appl_grid/histogram.h"
+
+#include "appl_grid/serialise_base.h"
+
+
+
+histogram::histogram( const std::string& name ) : mname(name) { } 
+
+
+histogram::histogram( const std::string& name, size_t nbins, const double* limits ) : mname(name) {
+  if ( nbins>0 ) create( nbins, limits );
+  else throw exception("histogram: not enough bins creating histogram: "+name);
+}
+
+
+histogram::histogram( const std::string& name, const std::vector<double>& limits ) : mname(name) {
+    if ( limits.size()>1 ) create( limits.size()-1, &limits[0] );
+    else throw exception("histogram: not enough bin limits creating histogram: "+name);
+}
+
+
+histogram::histogram( const std::string& name, size_t nbins, double lo, double hi ) : mname(name) {    
+    if ( nbins==0 ) throw exception("histogram: not enough bins creating histogram: "+name);
+    std::vector<double> limits(nbins+1,0);
+    for ( size_t i=nbins ; i-- ; ) limits[i] = (lo*(nbins-i) + hi*i)/nbins;
+    /// make sure the limits at least are exact 
+    limits[0]     = lo;
+    limits[nbins] = hi;
+    create( nbins, &limits[0] );
+} 
+
+
+/// constructor from stream
+
+histogram::histogram( const std::vector<SB::TYPE> stream ) { 
+    deserialise( stream );
+    mx.resize(my.size());
+    for ( size_t i=my.size() ; i-- ; ) mx[i] = 0.5*(mxlimits[i]+mxlimits[i+1]);
+}
+
+
+histogram::histogram( const histogram& h ) : 
+    mname(h.mname),
+    mxlimits(h.mxlimits),
+    mx(h.mx),
+    my(h.my),
+    mye(h.mye), 
+    myelo(h.myelo) 
+{ }
+
+
+/// merge bin i and bin i+1
+
+void histogram::merge_bins( size_t i, bool norm ) {
+ 
+    //    std::cout << "before:\n" << *this << std::endl;
+
+    size_t n = my.size();
+
+    if ( n<2 || i>=n-1 ) { 
+      std::cerr << "histogram::merge_bins() cannot merge" << std::endl;
+      return; 
+    }
+
+    int j=i+1;
+
+    double w0 = mxlimits[i+1]-mxlimits[i];
+    double w1 = mxlimits[j+1]-mxlimits[j];
+    double w  = mxlimits[j+1]-mxlimits[i];
+
+    if ( !norm ) w0 = w1 = w = 1;
+
+    mx[i]  = 0.5*(mxlimits[j+1] + mxlimits[i]);
+    my[i]  = (my[i]*w0 + my[j]*w1)/w;
+    mye[i] = std::sqrt( w0*w0*mye[i]*mye[i] + w1*w1*mye[j]*mye[j] )/w;
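+    /// ie the merged bin is the bin width weighted average of the two bins,
+    /// with the errors combined in quadrature using the same weights
+    /// (or an unweighted combination if norm is false)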
+
+    mxlimits.erase( mxlimits.begin()+j );
+    mx.erase( mx.begin()+j );
+    my.erase( my.begin()+j );
+    mye.erase( mye.begin()+j );
+
+    if ( myelo.size()>0 ) { 
+      myelo[i] = std::sqrt( w0*w0*myelo[i]*myelo[i] + w1*w1*myelo[j]*myelo[j] )/w;
+      myelo.erase( myelo.begin()+j );
+    }
+
+    //    std::cout << "after:\n" << *this << std::endl;
+
+}
+
+
+#if 0
+void histogram::merge_bin( int i ) {
+ 
+    std::cout << "before:\n" << *this << std::endl;
+
+    size_t n  = my.size();
+    size_t nn = my.size()-1;
+
+
+    double w0 = mxlimits[n-1]-mxlimits[n-2];
+    double w1 = mxlimits[n]-mxlimits[n-1];
+
+    double w = mxlimits[n]-mxlimits[n-2];
+
+    if ( n<2 ) return;
+
+    mxlimits[nn] = mxlimits[n];
+    mxlimits.resize(n);
+
+    mx[nn-1] = 0.5*(mxlimits[nn] - mxlimits[nn-1]);
+
+    mx.resize(nn);
+    
+    my[nn-1] = (my[nn-1]*w0 + my[n-1]*w1)/w;
+    my.resize(nn);
+
+    mye[nn-1] = std::sqrt( w0*w0*mye[nn-1]*mye[nn-1] + w1*w1*mye[n-1]*mye[n-1] )/w;
+    mye.resize(nn);
+
+    if ( myelo.size()>0 ) { 
+      myelo[nn-1] = std::sqrt( w0*w0*myelo[nn-1]*myelo[nn-1] + w1*w1*myelo[n-1]*myelo[n-1] )/w;
+      myelo.resize(nn);
+    }
+
+    std::cout << "after:\n" << *this << std::endl;
+
+}
+#endif
+
+
+histogram& histogram::operator=( histogram&& h ) {
+    std::swap( mname, h.mname );
+  
+    std::swap( mxlimits, h.mxlimits );
+    std::swap( mx, h.mx );
+
+    std::swap( my,    h.my );
+    std::swap( mye,   h.mye );
+    std::swap( myelo, h.myelo );
+    
+    return *this;
+}
+
+
+histogram& histogram::operator=( const histogram& h ) {
+    mname    = h.mname;
+    mxlimits = h.mxlimits;
+    mx       = h.mx;
+    my       = h.my;
+    mye      = h.mye;
+    myelo    = h.myelo;
+    return *this;
+}
+
+
+
+histogram& histogram::operator/=( const histogram& h) {
+    if ( ! operator==(h) )  throw exception( "histogram: bin mismatch for operator /= " );
+    for ( size_t i=size() ; i-- ; ) { 
+      my[i]  /= h.my[i];
+      mye[i]  = std::sqrt( my[i]*my[i]*h.mye[i]*h.mye[i]/(h.my[i]*h.my[i]*h.my[i]*h.my[i]) + 
+			   mye[i]*mye[i]/(h.my[i]*h.my[i]) );
+    }
+    if ( myelo.size()>0 ) {  
+      for ( size_t i=size() ; i-- ; ) { 
+	myelo[i]  = std::sqrt( my[i]*my[i]*h.myelo[i]*h.myelo[i]/(h.my[i]*h.my[i]*h.my[i]*h.my[i]) + 
+			       myelo[i]*myelo[i]/(h.my[i]*h.my[i]) );
+      }
+    }
+    return *this;
+}
+
+
+histogram& histogram::operator*=( const histogram& h) {
+    if ( ! operator==(h) )  throw exception( "histogram: bin mismatch for operator *= " );
+    for ( size_t i=size() ; i-- ; ) { 
+      my[i]  *= h.my[i];
+      mye[i]  = std::sqrt( my[i]*my[i]*h.mye[i]*h.mye[i] + h.my[i]*h.my[i]*mye[i]*mye[i] );
+    }
+    if ( myelo.size()>0 ) {  
+      for ( size_t i=size() ; i-- ; ) { 
+	myelo[i]  = std::sqrt( my[i]*my[i]*h.myelo[i]*h.myelo[i] + h.my[i]*h.my[i]*myelo[i]*myelo[i] );
+      }
+    }
+    return *this;
+}
+  
+
+histogram& histogram::operator+=( const histogram& h ) { 
+    if ( ! operator==(h) )  throw exception( "histogram: bin mismatch for operator += " );
+    for ( size_t i=size() ; i-- ; ) { 
+      my[i]  += h.my[i];
+      mye[i]  = std::sqrt( h.mye[i]*h.mye[i] + mye[i]*mye[i] );
+    }
+    if ( myelo.size()>0 ) {  
+      for ( size_t i=size() ; i-- ; ) { 
+	myelo[i]  = std::sqrt( h.myelo[i]*h.myelo[i] + myelo[i]*myelo[i] );
+      }
+    }
+    return *this;
+}
+
+
+
+void histogram::fill( double x, double w ) {
+    int ibin = index( x );
+    if ( ibin != -1 ) { 
+      my[ibin] += w;
+      mye[ibin] = std::sqrt( mye[ibin]*mye[ibin] + w );
+      if ( myelo.size()>0 ) myelo[ibin] = std::sqrt( myelo[ibin]*myelo[ibin] + w );
+    } 
+}
+  
+
+/// serialisation and deserialisation ...
+
+void histogram::serialise_internal( std::vector<SB::TYPE>& s ) const {    
+    SB::serialise( s, name() );
+    SB::serialise( s, mxlimits );
+    SB::serialise( s, my );
+    std::vector<double> ye(mye);
+    if ( myelo.size()>0 ) ye.insert( ye.end(), myelo.begin(), myelo.end() );
+    SB::serialise( s, ye );
+}
+
+
+void histogram::deserialise_internal( std::vector<SB::TYPE>::const_iterator& itr ) { 
+    SB::deserialise( itr, mname );
+    SB::deserialise( itr, mxlimits );
+    SB::deserialise( itr, my );
+    std::vector<double> ye;
+    SB::deserialise( itr, ye );
+    if ( ye.size()==my.size() ) mye = ye;
+    else { 
+      mye.clear();
+      mye.insert( mye.begin(), ye.begin(), ye.begin()+(ye.size()/2) ); 
+      myelo.insert( myelo.begin(), ye.begin()+(ye.size()/2), ye.end() ); 
+    }
+} 
+
+
+
+void histogram::create( size_t nbins, const double* limits ) { 
+    
+    mxlimits.resize(nbins+1);
+    
+    for ( size_t i=nbins+1 ; i-- ; ) mxlimits[i] = limits[i];
+    
+    mx.resize(nbins);
+    
+    for ( size_t i=nbins ; i-- ; ) mx[i] = 0.5*(limits[i]+limits[i+1]);
+    
+    my  = std::vector<double>(nbins,0);
+    mye = std::vector<double>(nbins,0);
+   
+}
+
+
+
+void histogram::set( const std::vector<double>& v, 
+		     const std::vector<double>& ve,
+		     const std::vector<double>& velo ) 
+{ 
+    if ( v.size()!=size() ) throw exception( "histogram: number of histogram and value bins don't match" ); 
+    my = v;
+    
+    if ( ve.size()!=0 ) { 
+      if ( ve.size()!=size() ) throw exception( "histogram: number of histogram and value bins don't match" ); 
+      mye = ve;      
+    }
+    else mye = std::vector<double>(v.size(),0);
+
+    if ( velo.size()!=0 ) { 
+      if ( velo.size()!=size() ) throw exception( "histogram: number of histogram and value bins don't match" ); 
+      myelo = velo;      
+    }
+    else myelo.clear();
+} 
+
+
+void histogram::set_errors( const std::vector<double>& ve,
+			    const std::vector<double>& velo ) 
+{ 
+    if ( ve.size()!=size() ) throw exception( "histogram: number of histogram and value bins don't match" ); 
+    mye = ve;      
+      
+    if ( velo.size()!=0 ) { 
+      if ( velo.size()!=size() ) throw exception( "histogram: number of histogram and value bins don't match" ); 
+      myelo = velo;      
+    }
+    else myelo.clear();
+
+}
+
+
+void histogram::clear() { 
+  for ( size_t i=my.size()  ; i-- ; ) my[i] = 0; 
+  for ( size_t i=mye.size() ; i-- ; ) mye[i] = 0;     
+  myelo.clear();
+}
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/dis_pdf.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/dis_pdf.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/dis_pdf.h	(revision 2010)
@@ -0,0 +1,59 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    dis_pdf.h
+ **
+ **     @brief   pdf transform functions for nlojet                   
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: dis_pdf.h, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+#ifndef __DIS_PDF_H
+#define __DIS_PDF_H
+
+#include "appl_grid/appl_pdf.h" 
+
+
+
+// nlojet pdf combination class 
+
+class dis_pdf : public appl::appl_pdf {
+
+public:
+
+  dis_pdf() : appl_pdf("dis") { m_Nproc=3; } 
+
+  void evaluate(const double* fA, const double* fB, double* H) const;
+
+};  
+
+
+inline void  dis_pdf::evaluate(const double* fA, const double* , double* H) const {  
+
+  fA += 6;  // offset internal pointers so can use fA[-6]..fA[6] for simplicity
+
+  double U=0, D=0, G=fA[0]; 
+
+  // don't include the gluon (fA[0], fB[0])
+
+  for ( int i=1 ; i<=5 ; i+=2 ) D += fA[i]+fA[-i] ;
+  for ( int i=2 ; i<=6 ; i+=2 ) U += fA[i]+fA[-i] ;
+
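+  /// the 1/9 and 4/9 factors in H[2] below are the squared electric charges
+  /// of the down-type and up-type quarks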
+  H[0] =  G ;  
+  H[1] =  D+U ;  
+  H[2] =  (D+4*U)/9 ; 
+}
+
+
+extern "C" void fdis_pdf__(const double* fA, const double* fB, double* H);
+
+
+#endif  // __DIS_PDF_H
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/mcfmzjet_pdf.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/mcfmzjet_pdf.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/mcfmzjet_pdf.h	(revision 2010)
@@ -0,0 +1,124 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    mcfmzjet_pdf.h
+ **
+ **     @brief   pdf transform functions                   
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: mcfmz_pdf.h, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+
+#ifndef __MCFMZJET_PDF_H
+#define __MCFMZJET_PDF_H
+
+#include <cmath>
+
+#include "appl_grid/appl_pdf.h" 
+
+
+//
+// MCFM Z-jet production
+//
+class mcfmzjet_pdf : public appl::appl_pdf { 
+
+public:
+	mcfmzjet_pdf() : appl_pdf("mcfm-zjet") { m_Nproc = 33; } 
+	
+	~mcfmzjet_pdf() { } 
+	
+	virtual void evaluate(const double* fA, const double* fB, double* H) const;
+	
+};
+
+
+// actual function to evaluate the pdf combinations 
+
+inline  void mcfmzjet_pdf::evaluate(const double* fA, const double* fB, double* H) const {
+	//  const int nQuark = 6;
+	const int iQuark = 5; 
+	
+	// offset pdf ptrs so we can use [-6..6] indexing rather than [nQuark+..] indexing
+	fA += 6;
+	fB += 6;
+	
+	double GA=fA[0];
+	double GB=fB[0];
+	
+	double UpA=0; double UpB=0; double DnA=0; double DnB=0;
+	double UpbarA=0; double UpbarB=0; double DnbarA=0; double DnbarB=0;
+	
+	for (int i = 1; i <= iQuark; i++) {
+		if ((i % 2) == 0) {
+			UpA += fA[i];
+			UpB += fB[i];
+		}else{
+			DnA += fA[i];
+			DnB += fB[i];
+		}
+	}
+	
+	for(int i = -iQuark; i < 0; i++) {
+		if (((int)(std::abs((double)i)) % 2) == 0) {
+			UpbarA += fA[i];
+			UpbarB += fB[i];
+		} else {
+			DnbarA += fA[i];
+			DnbarB += fB[i];
+		}
+	}
+	
+	// zero H first
+	for(int i = 0; i<m_Nproc;++i) H[i]=0;
+	
+	static int  _choice[13] = { 2, 3, 2, 3, 2, 3, 0, 1, 0, 1, 0, 1, 0 } ;
+	static int*  choice     = _choice+6;
+	
+	for(int i = -iQuark; i <= iQuark; i++) {
+		if (i == 0) continue;
+		//    int Choice = ( i>0 ? i%2 : abs(i%2)+2 );
+		int Choice = choice[i];
+		H[Choice] += fA[i] * fB[-i];
+	}
+	
+	H[4] = GA * UpB;
+	H[5] = GA * UpbarB;
+	H[6] = GA * DnB;
+	H[7] = GA * DnbarB;
+	H[8] =    UpA *GB;
+	H[9] = UpbarA *GB;
+	H[10]=    DnA *GB;
+	H[11]= DnbarA *GB;
+	
+	H[12] = GA*GB;
+	
+	
+	for(int i = -iQuark; i <= iQuark; i++) {
+		if(i==0) continue;
+		for(int j = -iQuark; j <= iQuark; j++) {
+			if(j==0 || i==j || i==-j) continue;
+			H[13+choice[i]+4*choice[j]] += fA[i] * fB[j];
+		}
+	}
+	
+	for(int i = -iQuark; i <= iQuark; i++) {
+		if (i == 0) continue;
+		//    int Choice = ( i>0 ? i%2 : abs(i%2)+2 );
+		int Choice = choice[i];
+		H[29+Choice] += fA[i] * fB[i];
+	}
+	
+	
+	return;
+}
+
+
+extern "C" void fmcfmzjet_pdf__(const double* fA, const double* fB, double* H);
+
+#endif  // __MCFMZJET_PDF_H
+
Index: applgrid/tags/applgrid-01-06-36/src/appl_igrid.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/appl_igrid.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/appl_igrid.h	(revision 2010)
@@ -0,0 +1,839 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    appl_igrid.h
+ **
+ **     @brief   grid class header - all the functions needed to create and 
+ **              fill the grid from an NLO calculation program, based on the 
+ **              class from D.Clements and T.Carli.
+ **
+ **              See contribution from T.Carli et al from the HERA-LHC 
+ **              workshop - working group 3 
+ **
+ **     @author  mark sutton
+ **     @date    Fri Nov  2 05:31:03 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: appl_igrid.h, v2.00  Fri Nov  2 05:31:03 GMT 2007 sutt $
+ **
+ **/
+
+// Fixme: this needs to be tidied up. eg there are too many different, 
+//        and too many versions of, accessors for x/y, Q2/tau etc - there 
+//        should be only one set, for x and Q2 *or* y and tau, but 
+//        not both. In fact maybe they should be for x and Q2, since y 
+//        and tau should perhaps be purely an internal grid issue of no 
+//        concern for the user.
+
+
+#ifndef __APPL_IGRID_H
+#define __APPL_IGRID_H
+
+#include <iostream>
+#include <sstream>
+#include <vector>
+#include <map>
+#include <string>
+#include <cmath>
+
+#include "appl_grid/appl_root.h"
+
+#include "appl_grid/Directory.h"
+#include "appl_grid/appl_pdf.h"
+#include "appl_grid/appl_file.h"
+
+#include "SparseMatrix3d.h"
+#include "threadManager.h"
+
+#include "Cache.h"
+
+
+namespace appl {
+
+class grid;
+
+
+
+class igrid  : public threadManager {
+
+private: 
+
+  virtual void run_thread();
+
+private:
+
+  // grid error exception
+  class exception { 
+  public:
+    exception(const std::string& s) { std::cerr << s << std::endl; }; 
+    exception(std::stringstream& s) { std::cerr << s.str() << std::endl; }; 
+  };
+
+  typedef double (igrid::*transform_t)(double) const;
+
+  // structure to store the x<->y transform function pairs 
+  struct transform_vec {
+    transform_vec() : mfx(0), mfy(0) { } 
+    transform_vec( transform_t __fx, transform_t __fy) : mfx(__fx), mfy(__fy) { } 
+    transform_t mfx;
+    transform_t mfy;
+  };
+  
+
+  struct conv_param {
+
+    /// input parameters to the thread
+
+    conv_param() :
+      dsigma(0),
+      dsigmaNLO(0),
+      dsigmaNNLO(0)
+    { }
+
+    int     lo_order;  
+    int     _nloop; 
+
+    double  rscale_factor;
+    double  fscale_factor;
+    double  Escale0;
+    double  Escale1;
+
+    double* sig;
+    double* H;
+    double* HA;
+    double* HB;
+    // split gen pdf at NNLO
+    double* HA2;
+    double* HB2;
+    double* HAB;
+ 
+    appl_pdf* genpdf;
+
+    NodeCache* pdf0;    
+    NodeCache* pdf1;
+
+    double (*alphas)(const double& );
+
+    /// output from thread
+
+    double dsigma;
+    double dsigmaNLO;
+    double dsigmaNNLO;
+
+  };
+
+
+
+public:
+
+  igrid();
+
+  igrid(int NQ2,   double Q2min=10000.0, double Q2max=25000000.0,  int Q2order=5,  
+	int Nx=50, double xmin=1e-5,     double xmax=0.9,          int xorder=5, 
+	std::string transform="f",       std::string qtransform="h0", 
+	int Nproc=6, bool disflag=false);
+
+  igrid(const igrid& g);
+
+#ifdef USEROOT
+  // read grid from stored file
+  igrid( TFile& f, const std::string& s );
+#endif
+
+  // read grid from stored file
+  igrid( appl::file& f, const std::string& s );
+
+  ~igrid();
+
+  // optimise the grid dimensions  
+  void optimise(int extrabins=1) { optimise(m_Ntau, m_Ny1, m_Ny2, extrabins ); }
+  void optimise(int NQ2, int Nx1, int Nx2, int extrabins=1);  
+ 
+  // formatted print
+  std::ostream&  print(std::ostream& s=std::cout) const {
+    header(s);
+    for ( int i=0 ; i<m_Nproc ; i++ ) { 
+      s << "sub process " << i << std::endl;
+      s << "Nx1: " << Ny1() << "\tNx2: " << Ny2() << "\tNs: " << Ntau() << std::endl;  
+      m_weight[i]->print();
+    }
+    return s;
+  }
+
+
+  // debug print
+  std::ostream&  debug(std::ostream& s=std::cout) const;
+  
+  // return the number of words used for storage
+  int size() const {
+    int _size = 0;
+    for ( int i=0 ; i<m_Nproc ; i++ ) _size += m_weight[i]->size();
+    return _size;
+  }
+
+  // trim unfilled elements
+  void trim() { for ( int i=0 ; i<m_Nproc ; i++ )  m_weight[i]->trim(); }
+
+  // inflate unfilled elements
+  void untrim() { for ( int i=0 ; i<m_Nproc ; i++ ) m_weight[i]->untrim(); }
+
+  // write to the current root directory
+  void write(const std::string& name);
+
+  void write( appl::file& _file, const std::string& name);
+  
+  // update grid with one set of event weights
+  void fill(const double x1, const double x2, const double Q2, const double* weight);
+
+  // update grid with one set of event weights
+  void fill_DIS(const double x1, const double Q2, const double* weight);
+
+  // fast filling with no interpolation for optimisation
+  void fill_phasespace(const double x1, const double x2, const double Q2, const double* weight);
+
+  // fast filling with no interpolation for optimisation
+  void fill_DIS_phasespace(const double x1, const double Q2, const double* weight);
+
+  void fill_index(const int ix1, const int ix2, const int iQ2, const double* weight);
+
+  // get the sparse structure for easier access  
+  const SparseMatrix3d* weightgrid(int ip) const { return m_weight[ip]; }
+  SparseMatrix3d**      weightgrid()              { return m_weight; } 
+
+
+  // this section stores the available x<->y transforms.
+  // the function pairs are stored in a std::map with a (const std::string) tag - the tag can 
+  // be saved to a root file to uniquely identify the transform pair.
+  // additional user defined transform pairs can *no longer* be added to the std::map since now
+  // only local class functions can be used
+
+  // transform 
+  double fy(double x) const { return (this->*mfy)(x); }
+  double fx(double y) const { return (this->*mfx)(y); }
+
+  std::string transform()  const { return m_transform; } 
+  std::string qtransform() const { return m_qtransform; } 
+
+
+  transform_t mfy;
+  transform_t mfx;
+
+  // initialise the transform map - no longer shared between class members
+  void init_fmap() { 
+    if ( m_fmap.size()==0 ) { 
+      /// x, y transforms
+      add_transform( "f",  &igrid::_fx,  &igrid::_fy  );
+      add_transform( "f0", &igrid::_fx0, &igrid::_fy0  );
+      add_transform( "f1", &igrid::_fx1, &igrid::_fy1  );
+      add_transform( "f2", &igrid::_fx2, &igrid::_fy2  );
+      add_transform( "f3", &igrid::_fx3, &igrid::_fy3  );
+      add_transform( "f4", &igrid::_fx4, &igrid::_fy4  );
+      /// Q2, tau transforms
+      add_transform( "h0", &igrid::_fQ20, &igrid::_ftau0  );
+      add_transform( "h1", &igrid::_fQ21, &igrid::_ftau1  );
+    }
+  }
+
+  void clear_fmap() { m_fmap.clear(); }
+
+  /// add a transform
+  void add_transform(const std::string transform, transform_t __fx, transform_t __fy ) { 
+    if ( m_fmap.find(transform)!=m_fmap.end() ) { 
+      throw exception("igrid::add_transform() transform "+transform+" already in the map");
+    }
+    m_fmap[transform] = transform_vec( __fx, __fy );
+  }
+
+  void set_transforms( const std::string& t, transform_t& _x, transform_t& _y ) { 
+    if ( m_fmap.find(t)==m_fmap.end() ) throw exception("igrid::igrid() transform " + t + " not found\n");
+    _x = m_fmap.find(t)->second.mfx;
+    _y = m_fmap.find(t)->second.mfy;
+  }
+
+  // define all these so that ymin=fy(xmin) rather than ymin=fy(xmax)
+  double _fy(double x) const { return std::log(1/x-1); } 
+  double _fx(double y) const { return 1/(1+std::exp(y)); } 
+
+  double _fy0(double x) const { return -std::log(x); } 
+  double _fx0(double y) const { return  std::exp(-y); } 
+
+  double _fy1(double x) const { return std::sqrt(-std::log(x)); } 
+  double _fx1(double y) const { return std::exp(-y*y); } 
+
+  double _fy2(double x) const { return -std::log(x)+m_transvar*(1-x); }
+  double _fx2(double y) const {
+    // invert the forward transform y = ln(1/x) + a(1-x) using Newton-Raphson:
+    // writing x = exp(-yp), solve   y - yp - a(1 - exp(-yp)) = 0
+    // with derivative   -1 - a exp(-yp)
+
+    if ( m_transvar==0 )  return std::exp(-y); 
+        
+    const double eps  = 1e-12;  // our accuracy goal
+    const int    imax = 100;    // for safety (avoid infinite loops)
+    
+    double yp = y;
+    double x, delta, deriv;
+    for ( int iter=imax ; iter-- ; ) {
+      x = std::exp(-yp);
+      delta = y - yp - m_transvar*(1-x);
+      if ( std::fabs(delta)<eps ) return x; // we have found good solution
+      deriv = -1 - m_transvar*x;
+      yp  -= delta/deriv;
+    }
+    // exceeded maximum iterations 
+    std::cerr << "_fx2() iteration limit reached y=" << y << std::endl; 
+    //    std::cout << "_fx2() iteration limit reached y=" << y << std::endl; 
+    return std::exp(-yp); 
+  }
+  
+  double _fy3(double x) const { return std::sqrt(-std::log10(x)); }
+  double _fx3(double y) const { return std::pow(10,-y*y); } 
+
+  // fastnlo dis transform
+  double _fy4(double x) const { return -std::log10(x); }
+  double _fx4(double y) const { return  std::pow(10,-y); } 
+
+
+  
+
+  // pdf weight function
+  // double _fun(double x) { return pow(x,0.65)*pow((1-0.99*x),-3.3); } 
+  // double _fun(double x) { double n=(1-0.99*x); return sqrt(x)/(n*n*n); } 
+  // double _fun(double x) { return 1; } 
+  
+  // this is significantly quicker than pow(x,1.5)*pow(1-0.99*x,-3) 
+  static double _fun(double x)      { double n=(1-0.99*x); return std::sqrt(x*x*x)/(n*n*n); } 
+  static double weightfun(double x) { return _fun(x); }
+  
+
+
+  transform_t mftau;
+  transform_t mfQ2;
+
+  double ftau(double Q2) const { return (this->*mftau)(Q2); }
+  double fQ2(double tau) const { return (this->*mfQ2)(tau); }
+
+
+  /// applgrid transform
+  double _ftau0(double Q2) const { return std::log(std::log(Q2/m_lambda)); }
+  double _fQ20(double tau) const { return m_lambda*std::exp(std::exp(tau)); }
+
+  /// fastnlo transforms
+  double _ftau1(double Q2) const { return std::log(std::log(std::sqrt(Q2)/m_lambda)); }
+  double _fQ21(double tau) const { 
+    double q = m_lambda*std::exp(std::exp(tau));
+    return q*q;
+  }
+
+
+
+   
+  // grid value accessors
+  double gety1(int iy)    const { return m_weight[0]->yaxis()[iy]; }   
+  double gety2(int iy)    const { return m_weight[0]->zaxis()[iy]; }   
+  //  double gety(int iy)     const { return gety1(iy); } 
+
+  double gettau(int itau) const { return m_weight[0]->xaxis()[itau]; } 
+
+  // number of subprocesses
+
+  int SubProcesses() const { return m_Nproc; } 
+
+  // kinematic variable accessors
+  // y (x) 
+  int    Ny1()      const { return m_weight[0]->yaxis().N(); }    
+  double y1min()    const { return m_weight[0]->yaxis().min(); }  
+  double y1max()    const { return m_weight[0]->yaxis().max(); }  
+  double deltay1()  const { return m_weight[0]->yaxis().delta(); }
+
+  int    Ny2()      const { return m_weight[0]->zaxis().N(); }    
+  double y2min()    const { return m_weight[0]->zaxis().min(); }  
+  double y2max()    const { return m_weight[0]->zaxis().max(); }  
+  double deltay2()  const { return m_weight[0]->zaxis().delta(); }
+
+  //  int    Ny()      const { return Ny1(); } 
+  //  double ymin()    const { return y1min(); }  
+  //  double ymax()    const { return y2min(); }  
+  //  double deltay()  const { return deltay1(); }  
+
+  //  int yorder(int i)   { return m_yorder=i; }  
+  int yorder()  const { return m_yorder; }  
+
+  // tau (Q2)
+  int    Ntau()     const { return m_weight[0]->xaxis().N(); } 
+  double taumin()   const { return m_weight[0]->xaxis().min(); } 
+  double taumax()   const { return m_weight[0]->xaxis().max(); } 
+  double deltatau() const { return m_weight[0]->xaxis().delta(); } 
+  
+  //  int tauorder(int i)  { return m_tauorder=i; }  
+  int tauorder() const { return m_tauorder; }  
+
+
+  // maybe these are redundant and should be removed
+  int    getNQ2()     const { return m_weight[0]->xaxis().N(); }
+  double getQ2min()   const { return fQ2(taumin()); }
+  double getQ2max()   const { return fQ2(taumax()); }
+  
+  int    getNx1()     const { return m_weight[0]->yaxis().N(); }
+  double getx1min()   const { return fx(y1max()); }
+  double getx1max()   const { return fx(y1min()); }
+
+  int    getNx2()     const { return m_weight[0]->zaxis().N(); }
+  double getx2min()   const { return fx(y2max()); }
+  double getx2max()   const { return fx(y2min()); }
+
+  /// limits on the *actual* filled nodes of the grids
+  int  itaufilledmin() const { return m_taufilledmin; }
+  int  itaufilledmax() const { return m_taufilledmax; }
+
+  int  iy1filledmin() const { return m_y1filledmin; }
+  int  iy1filledmax() const { return m_y1filledmax; }
+
+  int  iy2filledmin() const { return m_y2filledmin; }
+  int  iy2filledmax() const { return m_y2filledmax; }
+
+  /// transform the filled grid node limits back to x and Q2 limits
+  double x1filledmin() const { return getx1(iy1filledmax()); }
+  double x1filledmax() const { return getx1(iy1filledmin()); }
+
+  double x2filledmin() const { return getx2(iy2filledmax()); }
+  double x2filledmax() const { return getx2(iy2filledmin()); }
+
+  double Q2filledmin() const { return getQ2(itaufilledmax()); } 
+  double Q2filledmax() const { return getQ2(itaufilledmin()); } 
+	
+  /// transforms from grid node, to actual Q2 or x1, x2 values 
+  double getQ2( int itau ) const { return  fQ2(gettau(itau)); }
+
+  double getx1( int iy ) const { return fx(gety1(iy)); }
+
+  double getx2( int iy ) const { return fx(gety2(iy)); }
+
+  void nodesx1() const { for ( int i=0 ; i<getNx1() ; i++ ) std::cout << i << "\t" << getx1(i) << "\n"; std::cout << std::endl; }
+  void nodesx2() const { for ( int i=0 ; i<getNx2() ; i++ ) std::cout << i << "\t" << getx2(i) << "\n"; std::cout << std::endl; }
+  void nodesQ2() const { for ( int i=0 ; i<getNQ2() ; i++ ) std::cout << i << "\t" << getQ2(i) << "\n"; std::cout << std::endl; }
+
+  /// these set the static class variables used to initialise the 
+  /// local variables upon grid creation, since a value may 
+  /// be needed by the constructor  
+  static double transformvar()         { return transvar; }
+  static double transformvar(double v) { return transvar=v; }
+
+  static double lambdavar()            { return lambda; }
+  static double lambdavar(double v)    { return lambda=v; }
+
+  bool   symmetrise(bool t=true)   { return m_symmetrise=t; }
+  bool   isSymmetric() const       { return m_symmetrise; }
+
+  bool   isOptimised() const       { return m_optimised; }
+  bool   setOptimised(bool t=true) { return m_optimised=t; } 
+
+  bool   isDISgrid() const        { return m_DISgrid; }
+  bool   setDISgrid(bool t=true)  { return m_DISgrid=t; } 
+
+  bool   reweight(bool t=true)   { return m_reweight=t; }
+
+  bool   shrink(const std::vector<int>& keep);
+
+  bool   combine_proc(int i, int j);
+
+  bool   remove(int i);
+
+  // setup the pdf grid for calculating the pdf using 
+  // interpolation - needed if you actually want 
+  // the interpolated values for the pdf's or if you want 
+  // the grid to calculate the cross section for you
+  void setuppdf(double (*alphas)(const double&),
+		//		void (*pdf0)(const double& , const double&, double* ),
+		//		void (*pdf1)(const double& , const double&, double* )=0,
+		NodeCache* pdf0,
+		NodeCache* pdf1=0,
+		int nloop=0, 
+		double rscale_factor=1,
+		double fscale_factor=1,
+		double beam_scale0=1,
+		double beam_scale1=1 );
+
+  // get the interpolated pdf's
+  //  void pdfinterp(double x1, double Q2, double* f);
+
+
+  void convolute_internal(); 
+
+  double convolute(NodeCache* pdf0,
+		   NodeCache* pdf1,
+		   // void   (*pdf0)(const double& , const double&, double* ), 
+		   // void   (*pdf1)(const double& , const double&, double* ), 
+		   appl_pdf* genpdf, 
+		   double (*alphas)(const double& ), 
+		   int     lo_order=0,  
+		   int     nloop=0, 
+		   double  rscale_factor=1,
+		   double  fscale_factor=1,
+		   double Escale0=1,
+		   double Escale1=1 );
+  
+
+  void amc_convolute_internal();
+
+  
+  /// convolute method for amcatnlo grids
+  double amc_convolute(NodeCache* pdf0,
+		       NodeCache* pdf1,
+		       // void   (*pdf0)(const double& , const double&, double* ), 
+		       // void   (*pdf1)(const double& , const double&, double* ), 
+		       appl_pdf* genpdf, 
+ 		       double (*alphas)(const double& ), 
+ 		       int     lo_order=0,  
+ 		       int     nloop=0, 
+ 		       double  rscale_factor=1,
+ 		       double  fscale_factor=1,
+ 		       double Escale0=1,
+ 		       double Escale1=1 );
+  
+  
+
+  // some useful algebraic operators
+  igrid& operator=(const igrid& g); 
+  
+  igrid& operator*=(const double& d) { 
+    for ( int ip=0 ; ip<m_Nproc ; ip++ ) if ( m_weight[ip] ) (*m_weight[ip]) *= d; 
+    return *this;
+  } 
+
+  // should really check all the limits and *everything* is the same
+  igrid& operator+=(const igrid& g);
+
+
+  /// check that the grid axes match
+  bool compare_axes(const igrid& g) const { 
+    for ( int ip=0 ; ip<m_Nproc ; ip++ ) {
+      if ( m_weight[ip] && g.m_weight[ip] ) { 
+	if ( !m_weight[ip]->compare_axes( *g.m_weight[ip] ) ) return false;
+	// if ( (*m_weight[ip]) != (*g.m_weight[ip]) )  return false;
+      }
+      if ( m_weight[ip]    && g.m_weight[ip]==0 ) return false; 
+      if ( m_weight[ip]==0 && g.m_weight[ip]    ) return false; 
+    }
+    return true;
+  }
+
+
+  bool operator==(const igrid& g) const { 
+    for ( int ip=0 ; ip<m_Nproc ; ip++ ) {
+      if ( m_weight[ip] && g.m_weight[ip] ) { 
+	if ( (*m_weight[ip]) != (*g.m_weight[ip]) ) return false;
+      }
+      if ( m_weight[ip]    && g.m_weight[ip]==0 ) return false; 
+      if ( m_weight[ip]==0 && g.m_weight[ip]    ) return false; 
+    }
+    return true;
+  } 
+
+  bool operator!=(const igrid& g) const { return !((*this)==g); }
+
+
+  // output header
+  std::ostream& header(std::ostream& s) const;
+
+  double xsec()     const { return m_conv_param.dsigma; }
+  double xsecNLO()  const { return m_conv_param.dsigmaNLO; }
+  double xsecNNLO() const { return m_conv_param.dsigmaNNLO; }
+
+
+  /// is this grid actually empty
+
+  bool empty() const {
+    if ( m_weight==0 ) return true;
+    for ( int ip=0 ; ip<m_Nproc ; ip++ ) {
+      if ( m_weight[ip] && !m_weight[ip]->empty() ) return false; 
+    }
+    return true;
+  }
+
+  void setlimits();
+
+  //  double&       weights()       { return m_weights; }
+  //  const double& weights() const { return m_weights; }
+
+  void add( igrid* g );
+
+  void merge( igrid* g );
+
+  void remap( appl::igrid* g ) const;
+
+  /// zero the grid
+  void clear(); 
+ 
+private:
+
+  // internal common construct for the different types of constructor
+  void construct();
+
+  // cleanup
+  void deleteweights();
+  void deletepdftable();
+
+  // interpolation section - inline and static internals used to calculate the 
+  // interpolation when storing entries on the grid nodes 
+
+  // x (y) interpolation formula
+  int fk1(double x) const {
+    double y = fy(x);
+    // make sure we are in the range covered by our binning
+    if( ( y<y1min() || y>y1max() ) && range_count<range_count_max ) { 
+
+#if 0
+      if ( y<y1min() ) std::cerr <<"\tWarning: x1 out of range: x=" << x << "\t(y=" << y << ")\tBelow Delx=" << x-fx(y1min());
+      else             std::cerr <<"\tWarning: x1 out of range: x=" << x << "\t(y=" << y << ")\tAbove Delx=" << x-fx(y1min());
+
+      std::cerr << " ( " <<  fx(y1max())  << " - " <<  fx(y1min())  << " )"  
+		<< "\ty=" << y << "\tDely=" << y-y1min() << " ( " << y1min() << " - " <<  y1max()  << " )" << std::endl;
+#endif
+
+      std::cerr <<"\tWarning: x1 out of range: x=" << x 
+		<< "\t[ " <<  fx(y1max())  << " - " <<  fx(y1min())  << " ]";
+      
+      if ( y<y1min() ) std::cerr << "\tDelta x=" << x-fx(y1min()) << " above";
+      else             std::cerr << "\tDelta x=" << fx(y1max())-x << " below";
+      
+      std::cerr	 << "\ty=" << y << "\t[ " << y1min() << " - " <<  y1max()  << " ]" 
+		 << "\tDelta y=" << ( y<y1min() ? y1min()-y : y-y1max() ) << std::endl;
+  
+      range_count++;
+	
+      //     cerr << "\t" << m_weight[0]->yaxis() << "\n\t" 
+      //          << m_weight[0]->yaxis().transform(fx) << std::endl; 
+    }
+    int k = (int)((y-y1min())/deltay1() - (m_yorder>>1)); // fast integer divide by 2
+    if ( k<0 ) k=0;  
+    // shift interpolation end nodes to enforce range
+    if(k+m_yorder>=Ny1())  k=Ny1()-1-m_yorder;    
+    return k;
+  }
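+
+  /// an illustrative worked example of the index arithmetic above, using
+  /// purely hypothetical numbers: with y1min()=0, deltay1()=0.1 and
+  /// m_yorder=3, a value y=0.55 gives k = int( 0.55/0.1 - (3>>1) ) = int(4.5) = 4,
+  /// which is then clamped to [ 0, Ny1()-1-m_yorder ] so that all m_yorder+1
+  /// nodes k..k+m_yorder lie inside the grid; fk2() and fkappa() below use
+  /// the same arithmetic for the second x axis and the Q2 (tau) axis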
+
+  int fk2(double x) const {
+    double y = fy(x);
+    // make sure we are in the range covered by our binning
+    if( (y<y2min() || y>y2max()) && range_count<range_count_max ) { 
+      
+#if 0
+      if ( y<y2min() ) std::cerr <<"\tWarning: x2 out of range: x=" << x << "\t(y=" << y << ")\tBelow Delx=" << x-fx(y2min());
+      else             std::cerr <<"\tWarning: x2 out of range: x=" << x << "\t(y=" << y << ")\tAbove Delx=" << x-fx(y2min());
+      std::cerr << " ( " <<  fx(y2max())  << " - " <<  fx(y2min())  << " )"  
+		<< "\ty=" << y << "\tdely=" << y-y2min() << " ( " << y2min() << " - " <<  y2max()  << " )" << std::endl;
+      //     cerr << "\t" << m_weight[0]->yaxis() << "\n\t" << m_weight[0]->yaxis().transform(fx) << std::endl; 
+#endif
+      
+      std::cerr <<"\tWarning: x2 out of range: x=" << x 
+		<< "\t[ " <<  fx(y2max())  << " - " <<  fx(y2min())  << " ]";
+      
+      if ( y<y2min() ) std::cerr << "\tDelta x=" << x-fx(y2min()) << " above";
+      else             std::cerr << "\tDelta x=" << fx(y2max())-x << " below";
+      
+      std::cerr	 << "\ty=" << y << "\t[ " << y2min() << " - " <<  y2max()  << " ]" 
+		 << "\tDelta y=" << ( y<y2min() ? y2min()-y : y-y2max() ) << std::endl;
+     
+      range_count++;
+      
+    }
+    int k = (int)((y-y2min())/deltay2() - (m_yorder>>1)); // fast integer divide by 2
+    if ( k<0 ) k=0;  
+    // shift interpolation end nodes to enforce range
+    if(k+m_yorder>=Ny2())  k=Ny2()-1-m_yorder;    
+    return k;
+  }
+
+  
+  // Q2 (tau) interpolation formula 
+  int fkappa(double Q2) const {
+    double tau = ftau(Q2);
+    // make sure we are in the range covered by our binning
+    if( ( tau<taumin() || tau>taumax() ) && range_count<range_count_max ) { 
+      std::cerr << "\tWarning: Q2 out of range Q2=" << Q2 
+		<< "\t ( " << fQ2(taumin()) << " - " << fQ2(taumax()) << " )" << std::endl;
+      //      cerr << "\t" << m_weight[0]->xaxis() << "\n\t" << m_weight[0]->xaxis().transform(fQ2) << std::endl;
+
+      range_count++;
+    }
+    int kappa = (int)((tau-taumin())/deltatau() - (m_tauorder>>1)); // fast integer divide by 2
+    // shift interpolation end nodes to enforce range
+    if(kappa+m_tauorder>=Ntau()) kappa=Ntau()-1-m_tauorder;  
+    if(kappa<0) kappa=0;
+    return kappa;
+  }
+  
+  //  int _fk(double x)      const { return fk(x);  }
+  //  int _fkappa(double Q2) const { return fkappa(Q2); }
+
+  // fast -1^i function
+  static int pow1(int i) { return ( 1&i ? -1 : 1 ); } 
+
+  // fast factorial
+  static double fac(int i) {
+    int j;
+    static int ntop = 4;
+    static double f[34] = { 1, 1, 2, 6, 24 }; 
+    if ( i<0 )  {  std::cerr << "igrid::fac() negative input"  << std::endl; return 0;  }
+    if ( i>33 ) {  std::cerr << "igrid::fac() input too large" << std::endl; return 0;  }
+    while ( ntop<i ) {
+      j=ntop++;
+      f[ntop]=f[j]*ntop;
+    } 
+    return f[i];
+  }
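+
+  /// e.g. (illustrative): a first call to fac(6) extends the cached table
+  /// through f[5]=120 and f[6]=720 before returning 720; later calls with
+  /// i<=6 are then simple table lookups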
+
+  // although not the best way to calculate interpolation coefficients,
+  // it may be the best for our use, where the "y" values of the nodes 
+  // are not yet defined at the time of evaluation.
+  static double fI(int i, int n, double u) {
+    if ( n==0 && i==0 )     return 1.0;
+    if ( std::fabs(u-i)<1e-8 ) return 1.0;
+    //    if ( std::fabs(u-n)<u*1e-8 ) return 1.0;
+    double product = pow1(n-i) / ( fac(i)*fac(n-i)*(u-i) );
+    for( int z=0 ; z<=n ; z++ )  product *= (u-z);
+    return product;
+  }
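+
+  /// written out, fI(i,n,u) above is just the standard Lagrange basis
+  /// polynomial for the equally spaced integer nodes 0..n, evaluated at u:
+  ///
+  ///   fI(i,n,u) = (-1)^(n-i) / ( i! (n-i)! ) * prod_{z=0..n, z!=i} (u-z)
+  ///             = prod_{z=0..n, z!=i} (u-z)/(i-z)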
+  
+public:
+
+  void setparent( grid* parent ) { m_parent=parent; }
+
+  grid* parent() const { return m_parent; }
+
+  void include_photon( bool b ) { m_partons=( b ? 14 : 13 ); }
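+  /// (i.e. 13 parton flavour entries without, or 14 including, the photon)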
+ 
+private:
+
+  /// parent grid, so that this igrid can access the parent 
+  /// grid parameters
+  grid* m_parent;
+
+  // ranges of interest
+
+  // x <-> y parameters
+  int    m_Ny1; 
+  double m_y1min; 
+  double m_y1max;
+  double m_deltay1;
+
+  int    m_Ny2; 
+  double m_y2min; 
+  double m_y2max;
+  double m_deltay2;
+
+  int    m_yorder; 
+
+  // tau <-> Q2 parameters
+  int    m_Ntau; 
+  double m_taumin; 
+  double m_taumax;
+  double m_deltatau;
+  int    m_tauorder; 
+
+  int    m_Nproc;   // number of subprocesses
+
+  // grid state information
+
+  // useful transform information for storage in root file 
+  std::string                   m_transform;
+  std::string                   m_qtransform;
+
+  std::map<const std::string, transform_vec> m_fmap;
+
+  static double    transvar;   // transform function parameter
+  double         m_transvar; // local copy transform function parameter
+
+  double         m_lambda; // Q2 transform variable
+  static double    lambda; // Q2 transform variable
+
+  /// *don't* make m_reweight static!!! otherwise we can't mix 
+  ///  reweighted and non-reweighted grids!!! 
+  bool   m_reweight;    // reweight the pdf?
+  
+  bool   m_symmetrise;   // symmetrise the grid or not 
+  bool   m_optimised;    // optimised?
+
+  // the actual weight grids
+  SparseMatrix3d**   m_weight;
+
+  // pdf value table for convolution 
+  // (NB: doesn't need to be a class variable)
+
+  std::vector< std::vector< std::vector<double> > > m_fg1;
+  std::vector< std::vector< std::vector<double> > > m_fg2;
+
+  // values from the splitting functions
+  std::vector< std::vector< std::vector<double> > > m_fsplit1;
+  std::vector< std::vector< std::vector<double> > > m_fsplit2;
+
+  // values from the splitting functions at NNLO
+  std::vector< std::vector< std::vector<double> > > m_fsplit12;
+  std::vector< std::vector< std::vector<double> > > m_fsplit22;
+
+  // alpha_s table
+  std::vector<double>  m_alphas;
+
+
+  // flag to emulate a 2d (Q2, x) grid or use the 
+  // full 3d (Q2, x1, x2) grid
+  bool m_DISgrid;
+
+  /// actual number of weights for this igrid - needed so that
+  /// we can independently weight different bins within a grid  
+
+private:
+
+  /// communication with internal process
+  conv_param m_conv_param;
+
+  /// limits on *actual* *filled* occupancy for this grid 
+  /// NOT just the limits of the grid itself, but the actual
+  /// limits of the filled portion as *indices* into the 
+  /// grid rather than actual values
+ 
+  /// NB:  calculate these when reading in grid, not defined 
+  ///      while filling grid 
+  
+  int m_taufilledmin;
+  int m_taufilledmax;
+
+  int  m_y1filledmin;
+  int  m_y1filledmax;
+
+  int  m_y2filledmin;
+  int  m_y2filledmax;
+
+  int  m_partons; 
+
+public:
+
+  static void disable_threads(bool b=true) { threads_disabled = b; } 
+
+private: 
+
+  static bool threads_disabled;
+
+private: 
+
+  static int range_count;
+  static int range_count_max;
+  
+public:
+
+  static void set_range_count_limit() { 
+    char* crange_count = std::getenv( "APPL_RANGE_COUNT_LIMIT" );
+    if ( crange_count ) { 
+      range_count_max = std::atoi(crange_count);
+      std::cout << "appl::grid: setting out of range printout limit to " << range_count_max << std::endl;  
+    }
+  }
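+
+  /// a minimal usage sketch (assuming a bash-like shell): exporting
+  ///    APPL_RANGE_COUNT_LIMIT=5
+  /// before running limits the out-of-range warnings printed by
+  /// fk1(), fk2() and fkappa() to 5 messages in total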
+
+};
+
+};
+
+std::ostream& operator<<(std::ostream& s, const appl::igrid& mygrid);
+
+
+#endif // __APPL_IGRID_H 
Index: applgrid/tags/applgrid-01-06-36/src/appl_grid.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/appl_grid.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/appl_grid.cxx	(revision 2010)
@@ -0,0 +1,4704 @@
+/**
+ **     @file    appl_grid.cxx
+ **
+ **     @brief   grid class - all the functions needed to create and 
+ **              fill the grid from an NLO calculation program. 
+ **
+ **     @author  mark sutton
+ **     @date    2007/10/16 17:01:39 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: appl_grid.cxx, v1.00 2007/10/16 17:01:39 sutt $
+ **
+ **/
+
+#include <cstdlib>
+#include <stdio.h>
+#include <sys/stat.h>
+
+#include <vector>
+#include <map>
+#include <set>
+#include <iostream>
+#include <fstream>
+#include <iomanip>
+#include <cmath>
+
+#include "appl_grid/appl_pdf.h"
+#include "appl_grid/appl_timer.h"
+#include "appl_grid/Directory.h"
+#include "appl_grid/appl_grid.h"
+#include "appl_grid/appl_file.h"
+#include "appl_grid/vector_stream.h"
+#include "appl_grid/stream_vector.h"
+
+#include "appl_grid/generic_pdf.h"
+#include "appl_grid/lumi_pdf.h"
+
+#include "appl_igrid.h"
+#include "Cache.h"
+
+#ifdef USEROOT
+
+#include "TFileString.h"
+#include "TFileVector.h"
+
+#include "TFile.h"
+#include "TObjString.h"
+#include "TVectorT.h"
+
+#endif
+
+#include "amconfig.h"
+
+#include "Splitting.h"
+
+#include "appl_grid/appl_TH1D.h"
+
+/// in the case of non C++11-compliant compilation, define
+/// some C++11-type replacement functions
+
+#if __cplusplus <= 199711L
+// #warning Not using C++11 compilation - using substitute functions
+// Fixme: check whether this still works - many exception handling changes
+// are not compatible between versions; also need to check other C++11 extensions 
+
+namespace std {
+
+int stoi( const std::string& s ) { return atoi(s.c_str()); }
+
+std::string to_string( int i ) {
+    char a[16];
+    sprintf( a, "%d", i );
+    return a;
+}
+
+
+std::string to_string( double f ) {
+    char a[178];
+    sprintf( a, "%lf", f );
+    return a;
+}
+
+}
+
+#endif
+
+
+
+
+static bool appl_first = true;
+
+
+/// this is a compatibility flag for persistent versions 
+/// of the grid
+/// NB: ONLY change the major version if the persistent 
+///     class changes in a non-backwards compatible way.
+
+// const std::string appl::grid::m_version = "version-3.3";
+const std::string appl::grid::m_version = PACKAGE_VERSION;
+
+std::string appl::grid::appl_version() const { return PACKAGE_VERSION; }
+
+#include "hoppet_init.h"
+
+#ifdef HAVE_HOPPET
+
+#include "hoppet_v1.h"
+
+// include hoppet splitting function code
+
+static appl::hoppet_init* hoppet = 0;
+
+void Splitting(const double& x, const double& Q, double* xf, int nLoops) {
+  //  static const int nLoops    = 1;
+  static const int nFlavours = 5;
+  hoppetevalsplit_( x, Q, nLoops, nFlavours, xf); 
+  return;
+}
+
+#else
+
+void Splitting(const double& , const double& , double* , int ) {
+  throw appl::grid::exception( "hoppet library not included - cannot call splitting function"  ); 
+  return; // technically not needed - the exception above is always thrown
+}
+
+#endif
+
+
+#define appl_line() std::cout << __FILE__ << " " << __LINE__ << std::endl;
+
+
+
+/// helper function
+static bool contains(const std::string& s, const std::string& reg ) { 
+  return s.find(reg)!=std::string::npos;
+}
+
+/// make sure pdf std::map is initialised
+// bool pdf_ready = appl::appl_pdf::create_map(); 
+
+std::string appl::compiled() {  return __DATE__; }
+std::string appl::version()  { return VERSION; }
+
+
+/// current time
+std::string appl::date() { 
+  time_t _t;
+  time(&_t);
+  std::string a = ctime(&_t);
+  std::string b = "";
+  for ( unsigned i=0 ; i<a.size()-1 ; i++ ) b+=a[i];
+  return b;
+}
+
+
+/// simple test if a file exists
+bool appl::file_exists(const std::string& s) {
+  struct stat sb;
+  if ( stat( s.c_str(), &sb)==0 ) return true; // && S_ISREG(sb.st_mode ))
+  else return false;
+}
+
+
+
+
+appl::grid::grid(int NQ2, double Q2min, double Q2max, int Q2order, 
+		 int Nx,  double xmin,  double xmax,  int xorder,
+		 int Nobs,  double obsmin, double obsmax, 
+		 std::string genpdfname,
+		 int leading_order, int _nloops, 
+		 std::string transform,
+		 std::string qtransform ) :
+  m_leading_order(leading_order), m_order(_nloops+1), 
+  m_run(0), m_optimised(false), m_trimmed(false), m_normalised(false), m_symmetrise(false), 
+  m_transform(transform), m_qtransform(qtransform), 
+  m_genpdfname(genpdfname), m_cmsScale(0), m_dynamicScale(0),
+  m_applyCorrections(false),
+  m_documentation(""),
+  m_type(STANDARD),
+  m_read(false),
+  m_subproc(-1),
+  m_bin(-1),
+  m_genwithpdf(""),
+  m_photon(false),
+  m_isDIS(false),
+  m_use_duplicates(false)
+{
+  // Initialize histogram that saves the correspondence obsvalue<->obsbin
+  m_ref=new appl::TH1D("referenceInternal","Bin-Info for Observable", Nobs, obsmin, obsmax);
+  m_ref->SetDirectory(0);
+  m_ref->Sumw2(); /// grrr root is so rubbish - not scaling errors properly
+
+  m_ref_combined = m_ref;
+
+  /// check to see if we require a generic pdf from a text file, 
+  /// and if so, create the required generic pdf
+  //  if      ( m_genpdfname.find(".dat")!=std::string::npos ) addpdf(m_genpdfname);
+  //  else if ( m_genpdfname.find(".dat")!=std::string::npos ) addpdf(m_genpdfname);
+  if ( contains(m_genpdfname, ".dat") ||  contains(m_genpdfname, ".config") ) addpdf(m_genpdfname);
+  findgenpdf( m_genpdfname );
+
+  construct(Nobs, NQ2, Q2min, Q2max, Q2order, Nx, xmin, xmax, xorder, m_order, m_transform, m_qtransform ); 
+}
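+
+/// A minimal construction sketch - the binning values below are purely
+/// illustrative assumptions (and the default transform arguments declared
+/// in the header are assumed), not a recommendation:
+///
+///   appl::grid g( 30, 100.,  4e8,  5,     /// NQ2,  Q2min, Q2max, Q2order
+///                 50, 1e-5,  1.,   5,     /// Nx,   xmin,  xmax,  xorder
+///                 20, 0.,    100.,        /// Nobs, obsmin, obsmax
+///                 "basic.config",         /// genpdfname
+///                 0, 1 );                 /// leading_order, nloops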
+
+
+
+
+appl::grid::grid(int Nobs, const double* obsbins, 
+		 int NQ2,  double Q2min, double Q2max, int Q2order, 
+		 int Nx,   double xmin, double xmax,   int xorder, 
+		 std::string genpdfname, 
+		 int leading_order, int _nloops, 
+		 std::string transform,
+		 std::string qtransform,
+		 bool isdis ) :
+  m_leading_order(leading_order), m_order(_nloops+1), 
+  m_run(0), m_optimised(false), m_trimmed(false),  m_normalised(false), m_symmetrise(false),
+  m_transform(transform),  m_qtransform(qtransform), 
+  m_genpdfname(genpdfname), m_cmsScale(0), m_dynamicScale(0),
+  m_applyCorrections(false),
+  m_documentation(""),
+  m_type(STANDARD),
+  m_read(false),
+  m_subproc(-1),
+  m_bin(-1),
+  m_genwithpdf(""),
+  m_photon(false),
+  m_isDIS(isdis),
+  m_use_duplicates(false)
+{
+  
+  // Initialize histogram that saves the correspondence obsvalue<->obsbin
+  m_ref=new appl::TH1D("referenceInternal","Bin-Info for Observable", Nobs, obsbins);
+  m_ref->SetDirectory(0);
+  m_ref->Sumw2(); /// grrr root is so rubbish - not scaling errors properly
+
+  m_ref_combined = m_ref;
+
+  /// check to see if we require a generic pdf from a text file, 
+  /// and if so, create the required generic pdf
+  //  if ( m_genpdfname.find(".dat")!=std::string::npos ) addpdf(m_genpdfname);
+  if ( contains(m_genpdfname, ".dat") ||  contains(m_genpdfname, ".config") ) addpdf(m_genpdfname);
+
+  findgenpdf( m_genpdfname );
+
+  construct(Nobs, NQ2, Q2min, Q2max, Q2order, Nx, xmin, xmax, xorder, m_order, m_transform, m_qtransform, m_isDIS );
+}
+
+
+
+
+appl::grid::grid(const std::vector<double>& obs, 
+		 int NQ2, double Q2min, double Q2max, int Q2order,
+		 int Nx,  double xmin,  double xmax,  int xorder, 
+		 std::string genpdfname,
+		 int leading_order, int _nloops, 
+		 std::string transform,
+		 std::string qtransform ) :
+  m_leading_order(leading_order), m_order(_nloops+1), 
+  m_run(0), m_optimised(false), m_trimmed(false), m_normalised(false), m_symmetrise(false),  
+  m_transform(transform),  m_qtransform(qtransform), 
+  m_genpdfname(genpdfname), m_cmsScale(0), m_dynamicScale(0),
+  m_applyCorrections(false),
+  m_documentation(""),
+  m_type(STANDARD),
+  m_read(false),
+  m_subproc(-1),
+  m_bin(-1),
+  m_genwithpdf(""),
+  m_photon(false),
+  m_isDIS(false),
+  m_use_duplicates(false)
+{
+  
+  if ( obs.size()==0 ) { 
+    std::cerr << "grid::not enough bins in observable" << std::endl;
+    std::exit(0);
+  } 
+  
+  //  double* obsbins = new double[obs.size()];  
+  //  for ( unsigned i=0 ; i<obs.size() ; i++ ) obsbins[i] = obs[i];
+  //  int Nobs = obs.size()-1;
+
+  // Initialize histogram that saves the correspondence obsvalue<->obsbin
+  m_ref=new appl::TH1D("referenceInternal","Bin-Info for Observable", obs.size()-1, &obs[0] );
+  m_ref->SetDirectory(0);
+  m_ref->Sumw2(); /// grrr root is so rubbish - not scaling errors properly
+  //  delete[] obsbins;
+
+  m_ref_combined = m_ref;
+
+  /// check to see if we require a generic pdf from a text file, 
+  /// and if so, create the required generic pdf
+  //   if ( m_genpdfname.find(".dat")!=std::string::npos ) addpdf(m_genpdfname);
+  if ( contains(m_genpdfname, ".dat") ||  contains(m_genpdfname, ".config") ) addpdf(m_genpdfname);
+  findgenpdf( m_genpdfname );
+
+  construct( obs.size()-1, NQ2, Q2min, Q2max, Q2order, Nx, xmin, xmax, xorder, m_order, m_transform, m_qtransform ); 
+}
+
+
+
+appl::grid::grid(const std::vector<double>& obs, 
+		 std::string genpdfname,
+		 int leading_order, int _nloops, 
+		 std::string transform,
+		 std::string qtransform )  :
+  m_leading_order(leading_order), m_order(_nloops+1), 
+  m_run(0), m_optimised(false), m_trimmed(false), m_normalised(false), m_symmetrise(false),  
+  m_transform(transform),  m_qtransform(qtransform), 
+  m_genpdfname(genpdfname), m_cmsScale(0), m_dynamicScale(0),
+  m_applyCorrections(false),  
+  m_documentation(""),
+  m_type(STANDARD),
+  m_read(false),
+  m_subproc(-1),
+  m_bin(-1),
+  m_genwithpdf(""),
+  m_photon(false),
+  m_isDIS(false),
+  m_use_duplicates(false)
+{ 
+  if ( obs.size()==0 ) { 
+    std::cerr << "grid::not enough bins in observable" << std::endl;
+    std::exit(0);
+  } 
+  
+  //  double* obsbins = new double[obs.size()];  
+  //  for ( unsigned i=0 ; i<obs.size() ; i++ ) obsbins[i] = obs[i];
+  //  int Nobs = obs.size()-1;
+
+  // Initialize histogram that saves the correspondence obsvalue<->obsbin
+  m_ref=new appl::TH1D("referenceInternal","Bin-Info for Observable", obs.size()-1, &obs[0] );
+  m_ref->SetDirectory(0);
+  m_ref->Sumw2(); /// grrr root is so rubbish - not scaling errors properly
+  //  delete[] obsbins;
+
+  m_ref_combined = m_ref;
+
+  /// check to see if we require a generic pdf from a text file, 
+  /// and if so, create the required generic pdf
+  //  if ( m_genpdfname.find(".dat")!=std::string::npos ) addpdf(m_genpdfname);
+  if ( contains(m_genpdfname, ".dat") ||  contains(m_genpdfname, ".config") ) addpdf(m_genpdfname);
+  findgenpdf( m_genpdfname );
+
+  //  for ( int iorder=0 ; iorder<m_order ; iorder++ ) m_grids[iorder] = new igrid*[obs.size()-1];
+  for ( int iorder=0 ; iorder<m_order ; iorder++ ) m_grids[iorder].resize(obs.size());
+
+}
+
+
+
+#ifdef USEROOT
+appl::grid::grid( const std::string& filename, const std::string& dirname ) : 
+#else
+appl::grid::grid( const std::string& filename, const std::string& /* dirname */ ) : 
+#endif
+  m_leading_order(0),  m_order(0),
+  m_optimised(false),  m_trimmed(false), 
+  m_normalised(false),
+  m_symmetrise(false), m_transform(""),  m_qtransform(""), 
+  m_dynamicScale(0),
+  m_applyCorrections(false),
+  m_documentation(""),
+  m_type(STANDARD),
+  m_read(false),
+  m_subproc(-1),
+  m_bin(-1),
+  m_genwithpdf(""),
+  m_photon(false),
+  m_isDIS(false),
+  m_use_duplicates(false)
+{
+
+  if ( filename.find(".root")==(filename.size()-5) ) {
+#   ifdef USEROOT
+    construct_root( filename, dirname );
+#   else
+    std::cerr << "appl::grid() root not compiled in - cannot read root files" << std::endl;
+    std::exit(-1);
+#   endif
+  }
+  else construct_appl( filename );
+
+}
+
+
+#ifdef USEROOT
+void appl::grid::construct_root( const std::string& filename, const std::string& dirname ) { 
+
+  //  for ( int iorder=0 ; iorder<MAXGRIDS ; iorder++ ) m_refstore.m_ref_order[iorder] = 0;
+
+  if ( appl_first ) { 
+    appl_first = false;
+    std::cout << "appl::grid() version " << VERSION << " compiled " << __DATE__ << std::endl;
+    appl::igrid::set_range_count_limit();
+  }
+  
+  m_ref_combined = m_ref = 0;
+
+  struct timeval tstart = appl_timer_start();
+  
+  struct stat _fileinfo;
+  if ( stat(filename.c_str(),&_fileinfo) )   {    
+    throw exception( std::string("grid::grid() cannot find file ") + filename  ); 
+  }
+
+  std::cout << "appl::grid() reading grid from file " << filename;
+  
+  TFile* gridfilep = TFile::Open(filename.c_str());
+
+  if (gridfilep==0 ) {
+    std::cout << std::endl;
+    throw exception( std::string("grid::grid() cannot open root file: ") + filename );
+  }
+
+  if (gridfilep->IsZombie()) {
+    std::cout << std::endl;
+    delete gridfilep;
+    throw exception( std::string("grid::grid() cannot open root file: zombie ") + filename );
+  }
+
+  // TFile gridfile(filename.c_str());
+  
+  //  gDirectory->cd(dirname.c_str());
+
+  //  std::cout << "pwd=" << gDirectory->GetName() << std::endl;
+
+  //  gDirectory->cd(dirname.c_str());
+
+  //  std::cout << "pwd=" << gDirectory->GetName() << std::endl;
+
+  //  Directory d(dirname);
+  //  d.push();
+
+  TFileString* _tagsp = (TFileString*)gridfilep->Get((dirname+"/Tags").c_str());  
+
+  if ( _tagsp==0 ) { 
+    std::cout << std::endl;
+    throw exception( std::string("grid::grid() cannot get tags: ") + filename );
+  }
+
+  TFileString _tags = *_tagsp;
+
+  m_transform  = _tags[0];
+  m_genpdfname = _tags[1];
+  std::string _version = _tags[2];
+
+
+  /// ah ha !! this is messed up, in the older version, 
+  /// documentation only gets added if it was a non-empty string
+  /// and similarly with the pdf of the calculation, 
+  /// so this pdf might be at position 3 if there is no 
+  /// documentation, so then when reading back, the pdf
+  /// would go into the documentation and the version would 
+  /// be undefined. 
+  ///
+  /// In this version, the documentation is always 
+  /// added, even if it is an empty string  
+
+  //  std::cout << "tags:: transform: " << m_transform << "\tpdfname: " << m_genpdfname << "\tdoc: " << m_documentation << std::endl;  
+
+  if ( _tags.size()>3 ) m_documentation = _tags[3];
+
+  std::cout << "\t:  applgrid version " << _version;
+  if ( _version != m_version ) std::cout  << " (transformed to " << m_version << ")";
+
+  /// encoded pdfset used for the generation 
+  if ( _tags.size()>4 ) { 
+    std::string tmp = _tags[4];
+    if ( tmp.find(":")!=std::string::npos ) {  
+      m_genwithpdf  = tmp.substr(0,tmp.find(":"));
+      m_genwithipdf = std::stoi(tmp.substr(tmp.find(":")+1,std::string::npos));
+    }
+    else { 
+      m_genwithpdf  = tmp;
+      m_genwithipdf = 0;
+    }
+    std::cout << "\t:  grid generated with " << m_genwithpdf << " : set " << m_genwithipdf;
+  }
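+
+  /// e.g. a tag of the (hypothetical) form "CT10nlo:0" decodes to
+  /// m_genwithpdf = "CT10nlo" and m_genwithipdf = 0, while a bare name
+  /// with no ":" gives m_genwithipdf = 0 by default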
+
+  std::cout << std::endl;
+
+  //  std::cout << "trying qtransform" << std::endl;
+
+  TFileString* _qtags = (TFileString*)gridfilep->Get((dirname+"/QTags").c_str());  
+
+  if ( _qtags==0 ) m_qtransform = "h0"; 
+  else             m_qtransform = _qtags->at(0);   
+
+
+  /// clean up - delete 0 works fine, no need to check
+  delete _tagsp;
+  delete _qtags;
+
+  // check it has the correct version
+  // if ( _version > m_version ) { 
+  //      throw exception(cerr << "incorrect version " << _version << " expected " << m_version ); 
+  // }
+  //  m_version = _version;
+  
+
+  if ( getDocumentation()!="" ) std::cout << getDocumentation() << std::endl; 
+  
+  bool added = false;
+
+  if ( m_genpdfname=="basic" ) { 
+    added = true;
+    m_genpdfname = "basic.config"; 
+    if ( contains(m_genpdfname, ".dat") ||  contains(m_genpdfname, ".config") ) addpdf(m_genpdfname);
+    findgenpdf( m_genpdfname );
+  }
+
+  //  std::cout << "Tags=" << _tags << std::endl;
+
+  //  std::cout << "grid::grid() use transform " << m_transform << std::endl;
+
+  // read state information
+  // hmmm, have to use TVectorT<double> since TVector<int> 
+  // apparently has no constructor (???)
+  TVectorT<double>* setup=(TVectorT<double>*)gridfilep->Get((dirname+"/State").c_str());
+ 
+  m_run        = (*setup)(0);
+  m_optimised  = ( (*setup)(1)!=0 ? true : false );
+  m_symmetrise = ( (*setup)(2)!=0 ? true : false );  
+
+  m_leading_order = int((*setup)(3)+0.5);  
+  m_order         = int((*setup)(4)+0.5);  
+
+  if ( setup->GetNoElements()>5 ) m_cmsScale = (*setup)(5);
+  else                            m_cmsScale = 0;
+ 
+  if ( setup->GetNoElements()>6 ) m_normalised = ( (*setup)(6)!=0 ? true : false );
+  else                            m_normalised = true;
+
+  if ( setup->GetNoElements()>7 ) m_applyCorrections = ( (*setup)(7)!=0 ? true : false );
+  else                            m_applyCorrections = false;
+
+  int n_userdata = 0;
+  if ( setup->GetNoElements()>10 ) n_userdata = int((*setup)(10)+0.5);
+
+  if ( setup->GetNoElements()>11 ) m_use_duplicates = ( int((*setup)(11)+0.5) ? true : false );
+
+
+  //  std::vector<double> _ckmsum;
+  std::vector<std::vector<double> > _ckm2;
+  std::vector<std::vector<double> > _ckm;
+
+  /// check whether we need to read in the ckm matrices
+
+  if ( setup->GetNoElements()>8 && (*setup)(8)!=0 ) {
+
+    std::cout << "grid::grid() read ckm matrices" << std::endl;
+    
+    /// try 14 x 14 squared ckm matrix 
+
+    TVectorT<double>* ckm2flat=(TVectorT<double>*)gridfilep->Get((dirname+"/CKM2").c_str());
+
+    if ( ckm2flat ) { 
+      int Nrows = 13;
+      if ( ckm2flat->GetNrows()>169 ) Nrows = 14; 
+      if ( ckm2flat->GetNrows()>0 ) { 
+	_ckm2 = std::vector<std::vector<double> >(Nrows, std::vector<double>(Nrows) );
+      
+	for ( int ic=0 ; ic<Nrows ; ic++ ) { 
+	  for ( int id=0 ; id<Nrows ; id++ ) _ckm2[ic][id] = (*ckm2flat)(ic*Nrows+id); 
+	}
+
+	if ( Nrows==14 ) {
+	  /// photons ??
+	  for ( int ic=0 ; ic<14 ; ic++ ) _ckm2[13][ic] = 0; 
+	  for ( int ic=0 ; ic<14 ; ic++ ) _ckm2[ic][13] = 0; 
+	}
+
+      }  
+    }
+
+    /// now try usual 3x3 matrix
+
+    TVectorT<double>* ckmflat=(TVectorT<double>*)gridfilep->Get((dirname+"/CKM").c_str());
+
+    if ( ckmflat ) { 
+      if ( ckmflat->GetNrows()>0 ) { 
+	_ckm = std::vector<std::vector<double> >(3, std::vector<double>(3) );
+	
+	for ( int ic=0 ; ic<3 ; ic++ ) { 
+	  for ( int id=0 ; id<3 ; id++ ) _ckm[ic][id] = (*ckmflat)(ic*3+id); 
+	}
+      }  
+    }
+
+  }
+
+  if ( setup->GetNoElements()>9 ) m_type = (CALCULATION)int( (*setup)(9)+0.5 );
+  else                            m_type = STANDARD;
+
+  //  std::cout << "appl::grid() reading grid calculation type: " << _calculation(m_type) << std::endl;
+
+  //  std::cout << "appl::grid() normalised: " << getNormalised() << std::endl;
+
+  /// check to see if we require a generic pdf from a text file, 
+  /// and if so, create the required generic pdf (or lumi_pdf for amcatnlo)
+  //  if ( m_genpdfname.find(".dat")!=std::string::npos ) addpdf(m_genpdfname);
+
+  //  std::cout << "appl::grid() requested pdf combination " << m_genpdfname << std::endl;
+  
+
+  if ( !added && contains(m_genpdfname, ".config") ) { 
+    /// decode the pdf combination if appropriate
+
+    // find out if we have one combination per order or one overall
+    std::vector<std::string> namevec = parse( m_genpdfname, ":" );
+
+    std::string label = dirname+"/Combinations";
+
+    for ( unsigned i=0 ; i<namevec.size() ; i++ ) { 
+
+      /// again have to use TVectorT<double> because TVectorT<int> has no constructor!!!
+      /// I ask you!! what's the point of a template if it doesn't actually instantiate
+      /// it's pathetic!
+
+      TVectorT<double>* _combinations = (TVectorT<double>*)gridfilep->Get( label.c_str() );
+
+      label += "N"; /// add an N for each order, N-LO, NN-LO etc
+
+      if ( _combinations==0 ) throw exception( std::string( "grid::grid() cannot read pdf combination " ) + namevec[i] );
+
+      std::vector<int> combinations(_combinations->GetNoElements());
+
+      for ( unsigned ic=0 ; ic<combinations.size() ; ic++ )  combinations[ic] = int((*_combinations)(ic)); 
+    
+      //      addpdf(m_genpdfname, combinations);
+      addpdf( namevec[i], combinations );
+
+    }
+  }
+  else { 
+    /// or just create the generic from the file
+    if ( contains(m_genpdfname, ".dat") ) addpdf(m_genpdfname);
+  }
+  
+  /// retrieve the pdf routine 
+  findgenpdf( m_genpdfname );
+
+  m_photon = false;
+
+  /// check whether there are any photon contributions in this grid ...
+ 
+  for ( int iord=0 ; iord<m_order ; iord++ ) {
+    const lumi_pdf* lpdf = dynamic_cast<const lumi_pdf*>(m_genpdf[iord]);
+    if ( lpdf && ( lpdf->contains(7) || lpdf->contains(22) )  ) m_photon = true;
+  }
+
+  //  std::cout << "grid::grid() read " << m_genpdfname << " " << m_genpdf[0]->getckmsum().size() << std::endl; 
+  //  std::cout << "grid::grid() read " << *dynamic_cast<const lumi_pdf*>(m_genpdf[0]) << std::endl; 
+  //  std::cout << "grid::grid() read " << *dynamic_cast<const lumi_pdf*>(m_genpdf[1]) << std::endl; 
+  //  std::cout << "grid::grid() read " << *dynamic_cast<const lumi_pdf*>(m_genpdf[2]) << std::endl; 
+
+  // set the ckm matrices 
+  if      ( _ckm.size()>0 )  setckm( _ckm );
+  else if ( _ckm2.size()>0 ) setckm2( _ckm2 );
+
+  delete setup;
+
+  //  std::cout << "grid::grid() read setup" << std::endl;
+
+  // Read observable bins information
+  //  gridfile.GetObject("obs_bins", m_ref );
+
+  ::TH1D* m_ref_t = (::TH1D*)gridfilep->Get((dirname+"/reference_internal").c_str());
+  
+  if ( m_ref_t ) { 
+    m_ref = convert( m_ref_t );
+    m_ref_t->SetDirectory(0);
+    delete m_ref_t;
+
+    ::TH1D* m_ref_combined_t = (::TH1D*)gridfilep->Get((dirname+"/reference").c_str());
+    if ( m_ref_combined_t ) { 
+      m_ref_combined = convert( m_ref_combined_t );
+      m_ref_combined->SetDirectory(0);
+      if ( run() ) m_ref_combined->Scale(run());
+
+      m_ref_combined_t->SetDirectory(0);
+      delete m_ref_combined_t;
+      /// for ( int i=0 ; i<m_ref_combined->GetNbinsX ; i++ ) m_ref_combined->SetBinContent( i+1, m_ref_combined->GetBinContent( i+1, run(i) ) );
+    }
+  }
+  else { 
+    m_ref_t = (::TH1D*)gridfilep->Get((dirname+"/reference").c_str());
+    if ( m_ref_t ) { 
+      m_ref = convert( m_ref_t );
+      m_ref_combined = m_ref;
+
+      m_ref_t->SetDirectory(0);
+      delete m_ref_t;
+    }
+    else { 
+      throw exception( "cannot read reference histogram" );
+    }
+  }
+
+  m_ref->SetDirectory(0);
+  if ( run() ) m_ref->Scale(run());
+  /// for ( int i=0 ; i<m_ref->GetNbinsX ; i++ ) m_ref->SetBinContent( i+1, m_ref->GetBinContent( i+1, run(i) ) );
+
+  m_ref->SetName("referenceInternal");
+  if ( m_normalised && m_optimised ) m_read = true;
+
+  ///  std::cout << "grid::grid() read obs bins" << std::endl;
+  
+  /// order by order reference histograms
+  
+  for ( int ih=0 ; ih<=nloops() ; ih++ ) { 
+    std::string refname = dirname+"/reference_" + std::to_string(ih);
+    ::TH1D* h = (::TH1D*)gridfilep->Get(refname.c_str());
+    if ( h ) { 
+      appl::TH1D* hp = convert( h );
+      h->SetDirectory(0);
+      delete h;		       
+      m_refstore.m_ref_order[ih] = *hp; 
+      delete hp;
+      //      std::cout << "refstore: " << ih << "\t" << m_refstore.m_ref_order[ih] << "\t" << refname << std::endl;
+    }
+  }
+
+
+  /// also calculate the maximum values for Q2 and y=ln(1/x)
+  /// for hoppet initialisation
+
+  double _Q2max = 0;
+  double _xmin  = 2; // always bigger than 1
+
+  for( int iorder=0 ; iorder<m_order ; iorder++ ) {
+    //    std::cout << "grid::grid() iorder=" << iorder << std::endl;
+    //    m_grids[iorder] = new igrid*[Nobs_internal()];  
+    m_grids[iorder].resize(Nobs_internal());  
+    for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+      char name[128];  sprintf(name, (dirname+"/weight[alpha-%d][%03d]").c_str(), iorder, iobs);
+      //   std::cout << "grid::grid() reading " << name << "\tiobs=" << iobs << std::endl;
+
+      m_grids[iorder][iobs] = new igrid(*gridfilep, name);
+      m_grids[iorder][iobs]->setparent( this ); 
+
+      if (  !m_grids[iorder][iobs]->empty() ) { 
+
+	double x1  = m_grids[iorder][iobs]->x1filledmin();
+	double x2  = m_grids[iorder][iobs]->x2filledmin();	
+	double Q2  = m_grids[iorder][iobs]->Q2filledmax();
+	double Q2i = m_grids[iorder][iobs]->Q2filledmin();
+
+	if ( Q2i>Q2 ) Q2 = Q2i;
+
+	if ( m_grids[iorder][iobs]->isDISgrid() ) m_isDIS = true;
+
+	if (  Q2>_Q2max ) _Q2max = Q2;
+	if (  x1<_xmin )   _xmin = x1;
+	if (  x2<_xmin )   _xmin = x2;
+      }
+
+      //    _size += m_grids[iorder][iobs]->size();
+      //      std::cout << "grid::grid() done" << std::endl;
+    }
+  }
+
+  if ( _Q2max>0 )           m_Qmax = std::sqrt(_Q2max);
+  if ( _xmin>0 && _xmin<2 ) m_ymax = std::log(1/_xmin);
+
+  //  std::cout << "Qmax " << m_Qmax << std::endl; 
+  //  std::cout << "ymax " << m_ymax << std::endl;
+
+  //  d.pop();
+
+  /// bin-by-bin correction labels                                       
+  TFileString* correctionLabels = (TFileString*)gridfilep->Get((dirname+"/CorrectionLabels").c_str());  
+  if ( correctionLabels ) { 
+    for ( unsigned i=0 ; i<correctionLabels->size() ; i++ ) {
+      m_correctionLabels.push_back( (*correctionLabels)[i] ); // copy the correction label
+    }
+  }
+
+#if 0
+  /// bin-by-bin correction values
+  TFileVector* corrections = (TFileVector*)gridfilep->Get((dirname+"/Corrections").c_str());  
+  if ( corrections ) { 
+    for ( unsigned i=0 ; i<corrections->size() ; i++ ) {
+      m_corrections.push_back( (*corrections)[i] ); // copy the correction histograms
+      m_applyCorrection.push_back(false);
+    }
+  }
+#else
+
+  //  std::cout << "run: " << m_run << std::endl; 
+
+  TFileVector* corrections = (TFileVector*)gridfilep->Get((dirname+"/Corrections").c_str());  
+  if ( corrections ) { 
+    for ( unsigned i=0 ; i<corrections->size() ; i++ ) {
+      m_corrections.push_back( *getReference() );
+      m_corrections.back().name() = m_correctionLabels[i];
+      m_corrections.back().set( (*corrections)[i] );
+      m_applyCorrection.push_back(false);
+
+      std::cout << "correction: " <<  m_corrections.back().name() << std::endl; 
+      //      std::cout << m_corrections.back() << std::endl;
+
+    }
+  }
+
+#endif
+
+  /// now read in vector of bins to be combined if present
+  TVectorT<double>* _combine = (TVectorT<double>*)gridfilep->Get((dirname+"/CombineBins").c_str());
+
+  if ( _combine!=0 ) { 
+    //    std::cout << "read in " << _combine->GetNrows() << " entries in combine" << std::endl;
+    m_combine = std::vector<int>(_combine->GetNrows(),0);
+    for ( unsigned i=_combine->GetNrows() ; i-- ; ) m_combine[i] = int((*_combine)(i));
+
+    if ( m_combine.size() ) combineReference();
+  }
+
+  //  std::cout << "grid::grid() read from file Nobs = " << Nobs_internal() << std::endl;
+
+  //  std::cout << "read grid" << std::endl;
+
+  /// read in the user data
+  if ( n_userdata ) { 
+    TVectorT<double>* userdata=(TVectorT<double>*)gridfilep->Get((dirname+"/UserData").c_str());
+    m_userdata.clear();
+    for ( int i=0 ; i<n_userdata ; i++ ) m_userdata.push_back( (*userdata)(i) );
+  }
+
+  gridfilep->Close();
+  delete gridfilep;
+
+  double tstop = appl_timer_stop( tstart );
+
+  unsigned usize = size();
+
+  struct timeval tstart2 =  appl_timer_start();
+
+  trim();
+
+  double tstop2 = appl_timer_stop( tstart2 );
+
+  std::cout << "appl::grid() read grid, size ";
+  if ( usize>1024*10 ) std::cout << usize/1024./1024. << " MB";
+  else                 std::cout << usize/1024.       << " kB";
+
+  //  unsigned nusize = size();
+
+  //  std::cout << " \t-> ";
+
+  //  if ( nusize>1024*10 ) std::cout << nusize/1024./1024. << " MB";
+  //  else                  std::cout << nusize/1024.       << " kB";
+
+  if ( m_photon ) include_photon( true );
+
+ 
+  std::cout << "\tin " << tstop << " ms";
+
+  std::cout << "\ttrim in " << tstop2 << " ms" << std::endl;
+
+  //  std::cout << "appl::grid::weights " << run() << std::endl;
+
+}
+#endif
+
+
+void appl::grid::construct_appl(const std::string& filename )  { 
+
+  //  for ( int iorder=0 ; iorder<MAXGRIDS ; iorder++ ) m_refstore.m_ref_order[iorder] = 0;
+
+  if ( appl_first ) { 
+    appl_first = false;
+    std::cout << "appl::grid() version " << VERSION << " compiled " << __DATE__ << std::endl;
+    appl::igrid::set_range_count_limit();
+  }
+  
+  m_ref_combined = m_ref = 0;
+
+  struct timeval tstart = appl_timer_start();
+  
+  struct stat _fileinfo;
+  if ( stat(filename.c_str(),&_fileinfo) )   {    
+    throw exception( std::string("grid::grid() cannot find file ") + filename  ); 
+  }
+
+  std::cout << "appl::grid() reading grid from file " << filename;
+  
+
+  appl::file gridfile( filename, "r" );
+
+  if ( !gridfile.isopen() ) {
+    std::cout << std::endl;
+    throw exception( std::string("grid::grid() cannot open file: ") + filename );
+  }
+
+  
+  stream_vector<std::string> tags = gridfile.Read<stream_vector<std::string> >( "Tags" );
+
+  if ( tags.size()<3 ) { 
+    std::cout << std::endl;
+    throw exception( std::string("grid::grid() cannot get tags: ") + filename );
+  }
+
+  m_transform  = tags[0];
+  m_genpdfname = tags[1];
+  std::string _version = tags[2];
+
+
+  /// ah ha !! this is messed up, in the older version, 
+  /// documentation only gets added if it was a non-empty string
+  /// and similarly with the pdf of the calculation, 
+  /// so this pdf might be at position 3 if there is no 
+  /// documentation, so then when reading back, the pdf
+  /// would go into the documentation and the version would 
+  /// be undefined. 
+  ///
+  /// In this version, the documentation is always 
+  /// added, even if it is an empty string  
+
+  if ( tags.size()>3 ) m_documentation = tags[3];
+
+  std::cout << "\t:  applgrid version " << _version;
+  if ( _version != m_version ) std::cout  << " (transformed to " << m_version << ")";
+
+  /// encoded pdfset used for the generation 
+  if ( tags.size()>4 ) { 
+    std::string tmp = tags[4];
+    if ( tmp.find(":")!=std::string::npos ) {  
+      m_genwithpdf  = tmp.substr(0,tmp.find(":"));
+      m_genwithipdf = std::stoi(tmp.substr(tmp.find(":")+1,std::string::npos));
+    }
+    else { 
+      m_genwithpdf  = tmp;
+      m_genwithipdf = 0;
+    }
+    std::cout << "\t:  grid generated with " << m_genwithpdf << " : set " << m_genwithipdf;
+  }
+
+  std::cout << std::endl;
+
+  //  std::cout << "trying qtransform" << std::endl;
+
+  stream_vector<std::string> qtags = gridfile.Read<stream_vector<std::string> >( "QTags" ); 
+
+  /// fall back to the default "h0" Q2 transform if no QTags record was stored
+  if ( qtags.size()==0 ) m_qtransform = "h0"; 
+  else                   m_qtransform = qtags[0];   
+
+  if ( getDocumentation()!="" ) std::cout << getDocumentation() << std::endl; 
+  
+  bool added = false;
+
+  if ( m_genpdfname=="basic" ) { 
+    added = true;
+    m_genpdfname = "basic.config"; 
+    if ( contains(m_genpdfname, ".dat") ||  contains(m_genpdfname, ".config") ) addpdf(m_genpdfname);
+    findgenpdf( m_genpdfname );
+  }
+
+
+  stream_vector<double> setup = gridfile.Read<stream_vector<double> >( "State" );
+
+  m_run        = setup[0];
+  m_optimised  = ( setup[1]!=0 ? true : false );
+  m_symmetrise = ( setup[2]!=0 ? true : false );  
+
+  m_leading_order = int(setup[3]+0.5);  
+  m_order         = int(setup[4]+0.5);  
+
+  if ( setup.size()>5 ) m_cmsScale = setup[5];
+  else                  m_cmsScale = 0;
+ 
+  if ( setup.size()>6 ) m_normalised = ( setup[6]!=0 ? true : false );
+  else                  m_normalised = true;
+
+  if ( setup.size()>7 ) m_applyCorrections = ( setup[7]!=0 ? true : false );
+  else                  m_applyCorrections = false;
+
+  int n_userdata = 0;
+  if ( setup.size()>10 ) { 
+    n_userdata = int(setup[10]+0.5);
+    std::cout << "appl::grid userdata: " << n_userdata << std::endl;
+  }
+
+
+  if ( setup.size()>11 ) m_use_duplicates = ( int(setup[11]+0.5) ? true : false );
+
+  std::vector<std::vector<double> > _ckm2;
+  std::vector<std::vector<double> > _ckm;
+
+
+  /// check whether we need to read in the ckm matrices
+ 
+  if ( setup.size()>8 && setup[8]!=0 ) {
+
+    //    std::cout << "grid::grid() read ckm matrices" << std::endl;
+    
+    /// now try usual 3x3 matrix
+
+    if ( gridfile.index().find( "CKM" ).size > 0 ) { 
+      
+      stream_vector<double> ckmflat = gridfile.Read< stream_vector<double> >( "CKM" );
+
+      if ( ckmflat.size()>0 ) { 
+	_ckm = std::vector<std::vector<double> >(3, std::vector<double>(3) );
+	
+	for ( int ic=0 ; ic<3 ; ic++ ) { 
+	  for ( int id=0 ; id<3 ; id++ ) _ckm[ic][id] = ckmflat[ic*3+id]; 
+	}
+      }  
+    }
+    else { 
+
+      /// try 14 x 14 squared ckm matrix 
+
+      if ( gridfile.index().find( "CKM2" ).size > 0 ) { 
+	
+	stream_vector<double> ckm2flat = gridfile.Read< stream_vector<double> >( "CKM2" );
+	
+	if ( ckm2flat.size()>0 ) { 
+	  int Nrows = 13;
+	  if ( ckm2flat.size()>169 ) Nrows = 14; 
+	  if ( ckm2flat.size()>0 ) { 
+	    _ckm2 = std::vector<std::vector<double> >(Nrows, std::vector<double>(Nrows) );
+	    
+	    for ( int ic=0 ; ic<Nrows ; ic++ ) { 
+	      for ( int id=0 ; id<Nrows ; id++ ) _ckm2[ic][id] = ckm2flat[ic*Nrows+id]; 
+	    }
+	    
+	    if ( Nrows==14 ) { 
+	      /// photons ??
+	      for ( int ic=0 ; ic<14 ; ic++ ) _ckm2[13][ic] = 0; 
+	      for ( int ic=0 ; ic<14 ; ic++ ) _ckm2[ic][13] = 0; 
+	    } 
+	    
+	  }
+	}
+      }
+      
+    }
+
+  }
+
+  if ( setup.size()>9 ) m_type = (CALCULATION)int( setup[9]+0.5 );
+  else                  m_type = STANDARD;
+
+  //  std::cout << "appl::grid() reading grid calculation type: " << _calculation(m_type) << std::endl;
+  //  std::cout << "appl::grid() normalised: " << getNormalised() << std::endl;
+
+  /// check to see if we require a generic pdf from a text file, 
+  /// and if so, create the required generic pdf (or lumi_pdf for amcatnlo)
+  //  if ( m_genpdfname.find(".dat")!=std::string::npos ) addpdf(m_genpdfname);
+
+  //  std::cout << "appl::grid() requested pdf combination " << m_genpdfname << std::endl;
+  
+
+  if ( !added && contains(m_genpdfname, ".config") ) { 
+    /// decode the pdf combination if appropriate
+
+    // find out if we have one combination per order or one overall
+    std::vector<std::string> namevec = parse( m_genpdfname, ":" );
+
+    std::string label = "Combinations";
+
+    for ( unsigned i=0 ; i<namevec.size() ; i++ ) { 
+
+      /// the combinations are stored as doubles in the file, so read them
+      /// back as such and convert to int below
+
+      stream_vector<double> _combinations = gridfile.Read<stream_vector<double> >( label );
+
+      //   std::cout << "combinations: " << label << " " << _combinations << std::endl;
+
+      label += "N"; /// add an N for each order, N-LO, NN-LO etc
+
+      if ( _combinations.size()==0 ) throw exception( std::string( "grid::grid() cannot read pdf combination " ) + namevec[i] );
+
+      std::vector<int> combinations(_combinations.size());
+
+      for ( unsigned ic=0 ; ic<combinations.size() ; ic++ )  combinations[ic] = int(_combinations[ic]); 
+    
+      //      std::cout << "combinations: " << label << " " << combinations << std::endl;
+
+      //      addpdf(m_genpdfname, combinations);
+      addpdf( namevec[i], combinations );
+
+    }
+  }
+  else { 
+    /// or just create the generic from the file
+    if ( contains(m_genpdfname, ".dat") ) addpdf(m_genpdfname);
+  }
+  
+  /// retrieve the pdf routine 
+  findgenpdf( m_genpdfname );
+
+  m_photon = false;
+
+  /// check whether there are any photon contributions in this grid ...
+ 
+  for ( int iord=0 ; iord<m_order ; iord++ ) {
+    const lumi_pdf* lpdf = dynamic_cast<const lumi_pdf*>(m_genpdf[iord]);
+    if ( lpdf && ( lpdf->contains(7) || lpdf->contains(22) )  ) m_photon = true;
+  }
+
+  //  std::cout << "grid::grid() read " << m_genpdfname << " " << m_genpdf[0]->getckmsum().size() << std::endl; 
+  //  std::cout << "grid::grid() read " << *dynamic_cast<const lumi_pdf*>(m_genpdf[0]) << std::endl; 
+  //  std::cout << "grid::grid() read " << *dynamic_cast<const lumi_pdf*>(m_genpdf[1]) << std::endl; 
+  //  std::cout << "grid::grid() read " << *dynamic_cast<const lumi_pdf*>(m_genpdf[2]) << std::endl; 
+  
+  // set the ckm matrices 
+  if      ( _ckm.size()>0 )  setckm( _ckm );
+  else if ( _ckm2.size()>0 ) setckm2( _ckm2 );
+
+  if ( gridfile.index().find( "reference_internal" ).size > 0 ) { 
+    m_ref = new appl::TH1D( gridfile.Read<appl::TH1D>( "reference_internal" ) );
+    if ( gridfile.index().find( "reference" ).size > 0 ) { 
+      m_ref_combined = new appl::TH1D( gridfile.Read<appl::TH1D>( "reference" ) );
+      if ( run() ) m_ref_combined->Scale(run());
+    }
+    else { 
+      throw exception( "cannot read reference histogram" );
+    }
+  }
+  else { 
+    if ( gridfile.index().find( "reference" ).size > 0 ) { 
+      m_ref = new appl::TH1D( gridfile.Read<appl::TH1D>( "reference" ) );
+      m_ref_combined = m_ref;
+    }
+    else { 
+      throw exception( "cannot read reference histogram" );
+    }
+  }
+
+  if ( run() ) m_ref->Scale(run());
+
+  /// get order-by-order references if they are there ...
+
+  /// first make sure everything is zeroed - safety first !! 
+  //  for ( int iorder=0 ; iorder<MAXGRIDS ; iorder++ ) m_refstore.m_ref_order[iorder] = 0;
+
+  //  int iref = MAXGRIDS;
+  if ( m_order>MAXGRIDS ) std::cerr << "oh dear: order too high: " << m_order << " > " << MAXGRIDS << std::endl;
+  //  else iref = m_order;
+
+  //  std::cout << "iref: " << iref << "\tm_order: " << m_order << std::endl;
+  
+  /// use duplicated histogram now - the default reference is
+  /// for the accumulation of the weights, so is manipulated 
+  /// by the number of entries 
+  //  m_refstore.m_ref_order[iref] = *m_ref;
+  
+  for( int ih=0 ; ih<=nloops() ; ih++ ) {
+    std::string refname = "reference_"+std::to_string(ih);
+    if ( gridfile.index().find(refname).size > 0 ) { 
+      m_refstore.m_ref_order[ih] = gridfile.Read<appl::TH1D>( refname );
+      //      std::cout << "read reference : order " << ih << "\t" << m_refstore.m_ref_order[ih].name() << std::endl;
+    }
+  }
+    
+  m_ref->SetName("referenceInternal");
+  if ( m_normalised && m_optimised ) m_read = true;
+
+  /// also calculate the maximum values for Q2 and y=ln(1/x)
+  /// for hoppet initialisation
+
+  double _Q2max = 0;
+  double _xmin  = 2; // always bigger than 1
+
+  for( int iorder=0 ; iorder<m_order ; iorder++ ) {
+    // m_grids[iorder] = new igrid*[Nobs_internal()];  
+    m_grids[iorder].resize(Nobs_internal());  
+    for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+
+      char name[128];  sprintf(name, "weight[alpha-%d][%03d]", iorder, iobs);
+
+      m_grids[iorder][iobs] = new igrid( gridfile, name);
+      m_grids[iorder][iobs]->setparent( this ); 
+
+      if (  !m_grids[iorder][iobs]->empty() ) { 
+
+	double x1 = m_grids[iorder][iobs]->x1filledmin();
+	double x2 = m_grids[iorder][iobs]->x2filledmin();	
+	double Q2 = m_grids[iorder][iobs]->Q2filledmax();
+	double Q2i = m_grids[iorder][iobs]->Q2filledmin();
+	
+	if ( Q2i>Q2 ) Q2 = Q2i;
+
+	if ( m_grids[iorder][iobs]->isDISgrid() ) m_isDIS = true;
+
+	if (  Q2>_Q2max ) _Q2max = Q2;
+	if (  x1<_xmin )   _xmin = x1;
+	if (  x2<_xmin )   _xmin = x2;
+      }
+
+      //    _size += m_grids[iorder][iobs]->size();
+      //      std::cout << "grid::grid() done" << std::endl;
+    }
+  }
+
+  if ( _Q2max>0 )           m_Qmax = std::sqrt(_Q2max);
+  if ( _xmin>0 && _xmin<2 ) m_ymax = std::log(1/_xmin);
+  
+  //  std::cout << "Qmax " << m_Qmax << std::endl; 
+  //  std::cout << "ymax " << m_ymax << std::endl;
+
+  if ( gridfile.index().find("Corrections").size > 0 ) { 
+    
+    m_correctionLabels = gridfile.Read<stream_vector<std::string> >( "CorrectionLabels" ).payload();
+    
+    std::vector<std::vector<double> > corrections = gridfile.Read< stream_vector<std::vector<double> > >( "Corrections" ).payload();
+    
+    m_applyCorrection = std::vector<bool>( corrections.size(), false );
+    
+    m_corrections.clear();
+    for ( size_t i=0 ; i<corrections.size() ; i++ ) { 
+      m_corrections.push_back( *getReference() );
+      m_corrections.back().name() = m_correctionLabels[i];
+      m_corrections.back().set( corrections[i] );
+      std::cout << "correction: " <<  m_corrections.back().name() << std::endl; 
+      /// std::cout << m_corrections.back() << std::endl;
+    }
+
+  }
+
+
+  /// now read in the vector of bins to be combined if this has been set
+  if ( gridfile.index().find("CombineBins").size > 0 ) {
+    std::vector<double> combine = gridfile.Read<stream_vector<double> >( "CombineBins" ).payload(); 
+    m_combine.clear();
+    m_combine.resize(combine.size());
+    for ( unsigned i=m_combine.size() ; i-- ; ) m_combine[i] = combine[i]; 
+
+    if ( m_combine.size() ) combineReference();
+  }
+
+
+  m_userdata = gridfile.Read<stream_vector<double> >("UserData").payload();
+
+  gridfile.Close();
+
+  double tstop = appl_timer_stop( tstart );
+
+  unsigned usize = size();
+
+  struct timeval tstart2 =  appl_timer_start();
+
+  trim();
+
+  double tstop2 = appl_timer_stop( tstart2 );
+
+  std::cout << "appl::grid() read grid, size ";
+  if ( usize>1024*10 ) std::cout << usize/1024./1024. << " MB";
+  else                 std::cout << usize/1024.       << " kB";
+
+  //  std::cout << " -> ";
+
+  //  unsigned nusize = size();
+
+  //  if ( nusize>1024*10 ) std::cout << nusize/1024./1024. << " MB";
+  //  else                  std::cout << nusize/1024.       << " kB";
+
+  if ( m_photon ) include_photon( true );
+
+  std::cout << "\tin " << tstop << " ms";
+
+  std::cout << "\ttrim in " << tstop2 << " ms" << std::endl;
+
+  //  std::cout << "appl::grid::weights " << run() << std::endl;
+  
+}
+
+
+
+appl::grid::grid(const grid& g) : 
+  m_ref(new appl::TH1D(*g.m_ref)), 
+  m_leading_order(g.m_leading_order), m_order(g.m_order), 
+  m_run(g.m_run), m_optimised(g.m_optimised), m_trimmed(g.m_trimmed), 
+  m_normalised(g.m_normalised),
+  m_symmetrise(g.m_symmetrise),
+  m_transform(g.m_transform),
+  m_qtransform(g.m_qtransform),
+  m_genpdfname(g.m_genpdfname), 
+  m_cmsScale(g.m_cmsScale),
+  m_dynamicScale(g.m_dynamicScale),
+  m_applyCorrections(g.m_applyCorrections), 
+  m_documentation(g.m_documentation),
+  m_ckmsum(g.m_ckmsum), /// need a deep copy of the contents
+  m_ckm2(g.m_ckm2),     /// need a deep copy of the contents
+  m_ckm(g.m_ckm),       /// need a deep copy of the contents
+  m_type(g.m_type),
+  m_read(g.m_read),
+  m_bin(-1),
+  m_genwithpdf(""),
+  m_photon(g.m_photon),
+  m_isDIS(g.m_isDIS),
+  m_use_duplicates(g.m_use_duplicates)
+{
+  m_ref->SetDirectory(0);
+  m_ref->Sumw2();
+
+  m_ref_combined = m_ref;
+  if ( g.m_ref_combined != g.m_ref) { 
+    m_ref_combined = new appl::TH1D(*g.m_ref_combined); 
+    m_ref_combined->SetDirectory(0);
+  }
+
+  /// check to see if we require a generic pdf from a text file, 
+  /// and if so, create the required generic pdf
+  //  if ( m_genpdfname.find(".dat")!=std::string::npos ) addpdf(m_genpdfname);
+  if ( contains(m_genpdfname, ".dat") ||  contains(m_genpdfname, ".config") ) addpdf(m_genpdfname);
+  findgenpdf( m_genpdfname );
+
+  for ( int iorder=0 ; iorder<m_order ; iorder++ ) { 
+    //    m_grids[iorder] = new igrid*[Nobs_internal()];
+    m_grids[iorder].resize(Nobs_internal());
+    for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ )  { 
+      m_grids[iorder][iobs] = new igrid(*g.m_grids[iorder][iobs]);
+      m_grids[iorder][iobs]->setparent( this ); 
+    }
+  }
+} 
+
+
+// constructor common internals 
+void appl::grid::construct(int Nobs, 
+			   int NQ2,  double Q2min, double Q2max, int Q2order,
+			   int Nx,   double xmin,  double xmax,  int xorder, 
+			   int order, 
+			   std::string transform, 
+			   std::string qtransform, 
+			   bool isdis ) { 
+  
+  //  std::cout << "appl::grid::construct() m_order " << m_order << "\tNobs " << Nobs << std::endl; 
+
+  if ( m_order!=order ) std::cerr << "appl::grid::construct() order mismatch" << std::endl;
+
+  for ( int iorder=0 ; iorder<m_order ; iorder++ )  m_grids[iorder].clear();
+
+  for ( int iorder=0 ; iorder<m_order ; iorder++ ) { 
+    //    m_grids[iorder] = new igrid*[Nobs];
+    m_grids[iorder].resize(Nobs);
+    for ( int iobs=0 ; iobs<Nobs ; iobs++ ) {
+      m_grids[iorder][iobs] = new igrid(NQ2, Q2min, Q2max, Q2order, 
+					Nx,   xmin,  xmax,  xorder,  transform, qtransform, m_genpdf[iorder]->Nproc(), isdis);
+      m_grids[iorder][iobs]->setparent( this ); 
+    }
+  }
+  //  std::cout << "appl::grid::construct() return" << std::endl; 
+
+  appl::igrid::set_range_count_limit();
+
+}
+
+
+
+
+// number of subprocesses 
+int appl::grid::subProcesses(int i) const { 
+  if ( i<0 || i>=m_order ) { 
+    std::stringstream s;
+    s << "grid::subProcess(int i) " << i << " out of range [0-" << m_order-1 << "]";
+    throw exception( s.str() );
+  }
+  return m_grids[i][0]->SubProcesses();     
+}  
+
+
+/// access the transform functions for the appl::igrid so that the 
+/// igrid can be hidden 
+double appl::grid::transformvar()         { return igrid::transformvar(); }
+double appl::grid::transformvar(double v) { return igrid::transformvar(v); }
+
+// add a single grid
+void appl::grid::add_igrid(int bin, int order, igrid* g) { 
+
+  if ( !(order>=0 && order<m_order) ) { 
+    std::cerr << "grid::add_igrid() order out of range " << order << std::endl; 
+    return;
+  } 
+
+  if ( !(bin>=0 && bin<Nobs_internal() ) ) {
+    //    std::cerr << "grid::add_igrid() observable bin out of range " << bin << std::endl; 
+    return;
+  }
+
+  m_grids[order][bin] = g;
+  m_grids[order][bin]->setparent(this);
+
+  if ( g->transform()!=m_transform ) { 
+    std::cerr << "grid::add_igrid() transform " << m_transform 
+	      << " does not match that from added igrid, " << g->transform() << std::endl;
+    m_transform = g->transform();
+  }
+
+}
+
+
+
+
+appl::grid::~grid() {
+
+  for( int iorder=0 ; iorder<m_order ; iorder++ ) {  
+    if( m_grids[iorder].size() ) { 
+      for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) { 
+	delete m_grids[iorder][iobs];
+      }
+      //      delete[] m_grids[iorder];
+      //      m_grids[iorder] = 0;
+      m_grids[iorder].clear();
+    }
+  }
+
+  if (m_ref_combined) {
+    if ( m_ref_combined!=m_ref) delete m_ref_combined;
+  }
+
+  if (m_ref) delete m_ref;
+  m_ref=0;
+  m_ref_combined = 0;
+
+#ifdef HAVE_HOPPET
+  if ( hoppet ) delete hoppet; 
+  hoppet=0; 
+#endif
+
+}
+
+
+
+
+// algebraic operators
+
+appl::grid& appl::grid::operator=(const appl::grid& g) { 
+  // clear out the old...
+  if ( m_ref_combined!=m_ref ) delete m_ref_combined;
+  delete m_ref;
+  for ( int iorder=0 ; iorder<m_order ; iorder++ ) { 
+    for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ )  delete m_grids[iorder][iobs];
+    // delete m_grids[iorder];
+    m_grids[iorder].clear();
+  }
+  
+  // copy the new
+  m_ref = new appl::TH1D(*g.m_ref);
+  m_ref->SetDirectory(0);
+  m_ref->Sumw2();
+
+  m_ref_combined = m_ref;
+
+  // copy the state
+  m_leading_order = g.m_leading_order;
+  m_order         = g.m_order;
+
+  m_run       = g.m_run;
+  m_optimised = g.m_optimised;
+  m_trimmed   = g.m_trimmed;
+
+  for ( int iorder=0 ; iorder<m_order ; iorder++ ) { 
+    /// m_grids[iorder] = new igrid*[Nobs_internal()];
+    m_grids[iorder].resize(Nobs_internal());
+    for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ )  { 
+      m_grids[iorder][iobs] = new igrid(*g.m_grids[iorder][iobs]);
+      m_grids[iorder][iobs]->setparent( this ); 
+    }
+  }
+  return *this;
+} 
+  
+
+
+appl::grid& appl::grid::operator*=(const std::vector<std::vector<double> >& vv ) {
+
+  if ( vv.size()<unsigned(m_order) ) return *this; 
+
+  for( int iorder=0 ; iorder<m_order ; iorder++ ) {
+    /// don't do anything if the scale vector is not the correct size
+    if ( vv[iorder].size() != size_t(Nobs_internal()) ) return *this;
+  }
+
+  for( int iorder=0 ; iorder<m_order ; iorder++ ) {
+    for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) (*m_grids[iorder][iobs])*=vv[iorder][iobs]; 
+  }
+
+#if 0   
+  for ( int i=0 ; i<getReference()->GetNbinsX() ; i++ ) {
+    int ih=i+1;
+    getReference()->SetBinContent( ih,  getReference()->GetBinContent(ih)*v[i] );
+    getReference()->SetBinError(   ih,  getReference()->GetBinError(ih)*v[i] );
+  }
+
+  for ( int i=0 ; i<getReference_internal()->GetNbinsX() ; i++ ) {
+    int ih=i+1;
+    //   std::cout << "v[" << i << "] = " << v[i] << std::endl;
+    getReference_internal()->SetBinContent( ih,  getReference_internal()->GetBinContent(ih)*v[i] );
+    getReference_internal()->SetBinError(   ih,  getReference_internal()->GetBinError(ih)*v[i] );
+  }
+
+  combineReference(true);
+
+#endif
+
+  return *this;
+}
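+
+/// sketch of the intended use: vv[iorder][iobs] is a multiplicative factor 
+/// applied to the igrid for perturbative order iorder in observable bin iobs 
+/// (e.g. a per-order, per-bin k-factor); mis-sized vectors leave the grid 
+/// untouched
+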
+
+
+
+
+double integral( appl::TH1D* h ) { 
+  double d = 0;
+  for ( int i=0 ; i<h->GetNbinsX() ; i++ ) d += h->GetBinContent(i+1);
+  return d;
+}
+
+
+appl::grid& appl::grid::operator*=(const double& d) { 
+  for( int iorder=0 ; iorder<m_order ; iorder++ ) {
+    for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) (*m_grids[iorder][iobs]) *= d; 
+  }
+  getReference()->Scale( d );
+  if(getReference()!=getReference_internal()) getReference_internal()->Scale( d );
+  combineReference(true);
+
+  return *this;
+}
+
+
+void hprint( appl::TH1D* h ) { 
+  for ( int i=1 ; i<=h->GetNbinsX() ; i++ ) std::cout << h->GetBinContent(i) << " ";
+  std::cout << std::endl;
+}
+
+
+
+
+/// this is messed up - how can we apply "per bin" normalisations
+/// if we have specified more actual bins and are combining them 
+/// a-posteriori ? 
+/// Fixme: at some point *remove* the bin combining functionality
+///        it is far too much of a pain to maintain
+
+appl::grid& appl::grid::operator*=(const std::vector<double>& v) {
+
+  /// don't do anything if the scale vector is not the correct size
+  if ( v.size() != size_t(Nobs_internal()) ) return *this;
+
+  for( int iorder=0 ; iorder<m_order ; iorder++ ) {
+    for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) (*m_grids[iorder][iobs])*=v[iobs]; 
+  }
+
+#if 0   
+  for ( int i=0 ; i<getReference()->GetNbinsX() ; i++ ) {
+    int ih=i+1;
+    getReference()->SetBinContent( ih,  getReference()->GetBinContent(ih)*v[i] );
+    getReference()->SetBinError(   ih,  getReference()->GetBinError(ih)*v[i] );
+  }
+#endif
+
+  for ( int i=0 ; i<getReference_internal()->GetNbinsX() ; i++ ) {
+    int ih=i+1;
+    //   std::cout << "v[" << i << "] = " << v[i] << std::endl;
+    getReference_internal()->SetBinContent( ih,  getReference_internal()->GetBinContent(ih)*v[i] );
+    getReference_internal()->SetBinError(   ih,  getReference_internal()->GetBinError(ih)*v[i] );
+  }
+
+  combineReference(true);
+
+  return *this;
+}
+
+
+
+double appl::grid::fx(double x) const { 
+  if ( m_order>0 && Nobs_internal()>0 ) return m_grids[0][0]->fx(x);
+  else return 0;
+}
+
+double appl::grid::fy(double x) const { 
+  if ( m_order>0 && Nobs_internal()>0 ) return m_grids[0][0]->fy(x);
+  else return 0;
+}
+
+
+appl::grid& appl::grid::operator+=(const appl::grid& g) {
+  m_run      += g.m_run;
+  m_optimised = g.m_optimised;
+  m_trimmed   = g.m_trimmed;
+  if ( Nobs_internal()!=g.Nobs_internal() ) throw exception("grid::operator+ Nobs bin mismatch");
+  if ( m_order!=g.m_order )                 throw exception("grid::operator+ different order grids");
+  if ( m_leading_order!=g.m_leading_order ) throw exception("grid::operator+ different order processes in grids");
+  for( int iorder=0 ; iorder<m_order ; iorder++ ) {
+    for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) { 
+      (*m_grids[iorder][iobs]) += (*g.m_grids[iorder][iobs]); 
+    }
+  }
+
+  /// grrr use root TH1::Add() even though I don't like it. 
+  //  getReference()->Add( g.getReference() );
+  *(getReference_internal()) += *(g.getReference_internal());
+
+  combineReference(true);
+
+  /// individual order references - should these be averaged ???  
+  for ( int i=0 ; i<=nloops() ; i++ ) { 
+    if ( getReference(i)->name().find("reference_")==0 && getReference(i)->name()==g.getReference(i)->name() ) { 
+      *(getReference(i)) += *(g.getReference(i)); 
+    }
+  }
+
+  return *this;
+}
+
+
+
+
+/// check grids match
+bool appl::grid::operator==(const appl::grid& g) const {
+  if ( Nobs_internal()!=g.Nobs_internal() ) return false;
+  if ( m_order!=g.m_order ) return false;
+  if ( m_leading_order!=g.m_leading_order ) return false;
+  for( int iorder=0 ; iorder<m_order ; iorder++ ) {
+    //    for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) match &= ( (*m_grids[iorder][iobs]) == (*g.m_grids[iorder][iobs]) ); 
+    for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) { 
+      if ( ! m_grids[iorder][iobs]->compare_axes( *g.m_grids[iorder][iobs] ) ) return false; 
+    }
+  }
+  return true;
+}
+
+
+
+// fill the appropriate igrid with these weights
+void appl::grid::fill(const double x1, const double x2, const double Q2, 
+		      const double obs, 
+		      const double* weight, const int iorder)  {  
+  int iobs = m_ref->FindBin(obs)-1;
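+
+  /// NB: FindBin() follows the root-style convention where bin 1 is the first 
+  /// in-range bin, so subtracting 1 maps onto the 0-based igrid index; underflow 
+  /// (bin 0) becomes iobs=-1 and, like overflow, is caught by the range check below
+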
+
+  //  std::cout << "grid::fill() iobs " << iobs << "\tobs=" << obs << std::endl;
+
+  if ( iobs<0 || iobs>=Nobs_internal() ) {
+    //    cerr << "grid::fill() iobs out of range " << iobs << "\tobs=" << obs << std::endl;
+    //    cerr << "obs=" << obs << "\tobsmin=" << obsmin() << "\tobsmax=" << obsmax() << std::endl;
+    return;
+  }
+  
+  //  std::cout << "iobs=" << iobs << "\tobs=" << obs;
+  //  for ( int i=0 ; i<subProcesses(iorder) ; i++ ) std::cout << "\t" << weight[i];
+  //  std::cout << std::endl;
+
+  //  std::cout << "\tiobs=" << iobs << std::endl;
+  if ( m_symmetrise && x2<x1 )  m_grids[iorder][iobs]->fill(x2, x1, Q2, weight);
+  else                          m_grids[iorder][iobs]->fill(x1, x2, Q2, weight);
+}
+
+
+// fast fill pre-optimisation - don't perform the interpolation and so on
+void appl::grid::fill_phasespace(const double x1, const double x2, const double Q2, 
+				 const double obs, 
+				 const double* weight, const int iorder) {
+  int iobs = m_ref->FindBin(obs)-1;
+
+  //  std::cout << "grid::fill_phasespace() iobs " << iobs << "\tobs=" << obs << std::endl;
+
+  if ( iobs<0 || iobs>=Nobs_internal() ) {
+    // std::cerr << "grid::fill() iobs out of range " << iobs << "\tobs=" << obs << std::endl;
+    // std::cerr << "obs=" << obs << "\tobsmin=" << obsmin() << "\tobsmax=" << obsmax() << std::endl;
+    return;
+  }
+  if ( m_symmetrise && x2<x1 )  m_grids[iorder][iobs]->fill_phasespace(x2, x1, Q2, weight);
+  else                          m_grids[iorder][iobs]->fill_phasespace(x1, x2, Q2, weight);
+}
+
+
+
+
+// fast fill by grid index, pre-optimisation - don't perform the interpolation and so on
+void appl::grid::fill_index(const int ix1, const int ix2, const int iQ2, 
+			    const int iobs, 
+			    const double* weight, const int iorder) {
+
+  if ( iobs<0 || iobs>=Nobs_internal() ) {
+    //  cerr << "grid::fill() iobs out of range " << iobs << "\tobs=" << obs << std::endl;
+    //  cerr << "obs=" << obs << "\tobsmin=" << obsmin() << "\tobsmax=" << obsmax() << std::endl;
+    return;
+  }
+  if ( m_symmetrise && ix2<ix1 )  m_grids[iorder][iobs]->fill_index(ix2, ix1, iQ2, weight);
+  else                            m_grids[iorder][iobs]->fill_index(ix1, ix2, iQ2, weight);
+}
+
+
+void appl::grid::trim(int iorder) {
+  if ( iorder>=0 ) { 
+    if ( iorder<m_order ) for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) m_grids[iorder][iobs]->trim(); 
+  }
+  else { 
+    m_trimmed = true;
+    for( int iord=0 ; iord<m_order ; iord++ ) {
+      for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) m_grids[iord][iobs]->trim(); 
+    }
+  }
+}
+
+void appl::grid::untrim(int iorder) {
+  if ( iorder>=0 ) { 
+    if ( iorder<m_order ) for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) m_grids[iorder][iobs]->untrim(); 
+  }
+  else { 
+    m_trimmed = false;
+    for( int iord=0 ; iord<m_order ; iord++ ) {
+      for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) m_grids[iord][iobs]->untrim(); 
+    }
+  }
+}
+
+std::ostream& appl::grid::print(std::ostream& s) const {
+  for( int iorder=0 ; iorder<m_order ; iorder++ ) {
+    for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {     
+      s << iobs << "\t" 
+	<< std::setprecision(5) << std::setw(6) << getReference()->GetBinLowEdge(iobs+1) << "\t- " 
+	<< std::setprecision(5) << std::setw(6) << getReference()->GetBinLowEdge(iobs+2) << "\t"; 
+      m_grids[iorder][iobs]->print(s);       
+    }
+  }
+  return s;
+}
+
+
+std::ostream& appl::grid::printpdf(std::ostream& s) const {
+  for( int i=0 ; i<m_order ; i++ ) { 
+    if ( m_genpdf[i] ) s << "order: " << i << " " << *m_genpdf[i] << "\n";
+  }
+  return s;
+}
+
+
+
+std::ostream& appl::grid::debug(std::ostream& s) const {
+  for( int iorder=0 ; iorder<m_order ; iorder++ ) {
+    for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {     
+      s << iobs << "\t" 
+	<< std::setprecision(5) << std::setw(6) << getReference()->GetBinLowEdge(iobs+1) << "\t- " 
+	<< std::setprecision(5) << std::setw(6) << getReference()->GetBinLowEdge(iobs+2) << "\t"; 
+      m_grids[iorder][iobs]->debug(s);       
+    }
+  }
+  return s;
+}
+
+
+
+
+ /// get the required pdf combinations from those registered   
+void appl::grid::findgenpdf( std::string s ) { 
+    std::vector<std::string> names = parse( s, ":" );
+    if ( names.size()==unsigned(m_order) ) for ( int i=0 ; i<m_order ; i++ ) m_genpdf[i] = appl_pdf::getpdf( names[i] );
+    else  if ( names.size()==1 )           for ( int i=0 ; i<m_order ; i++ ) m_genpdf[i] = appl_pdf::getpdf( names[0] );
+    else  { 
+      std::stringstream s_;
+      s_ << "requested " << m_order << " pdf combination but given " << names.size();
+      throw exception( s_.str() );
+    }
+}
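+
+/// for example (file names purely illustrative): a genpdf name of
+///   "mypdf.config"           uses the same pdf combination for every order, while
+///   "lo.config:nlo.config"   assigns one combination per order, separated by ':'
+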
+
+
+void appl::grid::setGenpdf( const std::string& genpdfname ) {
+   
+   m_genpdfname = genpdfname;
+   addpdf( m_genpdfname );
+   findgenpdf( genpdfname );
+ 
+   std::cout << "appl::grid::setGenpdf()  new " 
+ 	    << "\t" << m_genpdf[0]->name() << ":" << m_genpdf[0]->size() 
+ 	    << "\t" << m_genpdf[1]->name() << ":" << m_genpdf[1]->size() 
+	    <<  std::endl;
+   std::cout << "appl::grid::setGenpdf()  "; 
+   for ( int i=0 ; i<MAXGRIDS ; i++ ) {
+     if ( m_genpdf[i] ) std::cout << "\t" << m_genpdf[i]->name() << ":" << m_genpdf[i]->size(); 
+   }
+   std::cout <<  std::endl;
+ 
+}
+
+
+void appl::grid::addpdf( const std::string& s, const std::vector<int>& combinations ) {
+
+  //  std::cout << "addpdf() in " << std::endl;
+
+    /// parse names, if they contain .dat, then create the new generic pdfs
+    /// they will be added to the pdf std::map automatically 
+    std::vector<std::string> names = parse( s, ":" );
+
+    unsigned imax = unsigned(m_order); 
+
+    /// check to see whether we have a different pdf for each order
+    if ( names.size()!=imax ) { 
+      if ( names.size()==1 ) imax = 1;
+      else { 
+	std::stringstream s_;
+	s_ << "requested " << m_order << " pdf combination but given " << names.size();
+	throw exception( s_.str() );
+      }
+    }
+
+    //    std::cout << "imax " << imax << std::endl; 
+
+    /// loop through all the required pdfs checking whether they exist already,
+    /// if not (from thrown exception) then create it, otherwise, don't need to 
+    /// do anything 
+    for ( unsigned i=0 ; i<imax ; i++ ) { 
+
+      if ( names[i].find(".dat")!=std::string::npos ) {
+
+	if ( appl_pdf::getpdf(names[i])==0 ) { 
+ 	  std::cout << "appl::grid::addpdf() creating new generic_pdf " << names[i] << std::endl;
+	  new generic_pdf(names[i]);
+	}
+
+	// 	try {
+	// 	  appl_pdf::getpdf(names[i]); // , false);
+	// 	}
+	// 	catch ( appl_pdf::exception e ) { 
+	// 	  std::cout << "creating new generic_pdf " << names[i] << std::endl;
+	// 	  new generic_pdf(names[i]);
+	// 	}
+
+      }
+      else if ( names[i].find(".config")!=std::string::npos ) { 
+
+	if ( appl_pdf::getpdf(names[i])==0 ) { 
+	  //	  std::cout << "appl::grid::addpdf() creating new lumi_pdf " << names[i] << std::endl;
+	  lumi_pdf* lp = new lumi_pdf(names[i], combinations);
+	  //  latex( *lp );
+	  if ( !m_use_duplicates ) lp->removeDuplicates();
+	  //	  latex( *lp, "duff" );
+	}
+
+	// 	try {
+	// 	  appl_pdf::getpdf(names[i]); // , false);
+	// 	}
+	// 	catch ( appl_pdf::exception e ) { 
+	// 	  std::cout << "creating new lumi_pdf " << names[i] << std::endl;
+	// 	  new lumi_pdf(names[i], combinations);
+	// 	  //	  std::cout << "created" << names[i] << std::endl;
+	
+	// 	}
+
+      }
+
+    }
+
+}
+
+
+
+void appl::grid::setckm2( const std::vector<std::vector<double> >& ckm2 ) { 
+  for ( int i=0 ; i<m_order ; i++ ) m_genpdf[i]->setckm2(ckm2);
+}
+
+
+void appl::grid::setckm( const std::vector<std::vector<double> >& ckm ) { 
+  for ( int i=0 ; i<m_order ; i++ ) m_genpdf[i]->setckm(ckm);
+}
+
+
+void appl::grid::setckm( const std::vector<double>& ckm ) {
+  std::vector<std::vector<double> > _ckm(3, std::vector<double>(3,0) );
+  for ( unsigned i=0 ; i<ckm.size() && i<9 ; i++ ) _ckm[i/3][i%3] = ckm[i]; 
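+  /// NB: the flattening is row-major - element i of the input vector maps to 
+  /// _ckm[i/3][i%3], so for instance i=4 lands in row 1, column 1 (V_cs)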
+  for ( int i=0 ; i<m_order ; i++ ) m_genpdf[i]->setckm(_ckm);
+}
+
+void appl::grid::setckm( const double* ckm ) { 
+  std::vector<std::vector<double> > _ckm(3, std::vector<double>(3,0) );
+  for ( unsigned i=0 ; i<9 ; i++ ) _ckm[i/3][i%3] = ckm[i]; 
+  for ( int i=0 ; i<m_order ; i++ ) m_genpdf[i]->setckm(_ckm);
+}
+
+
+const std::vector<std::vector<double> >& appl::grid::getckm()  const { return m_genpdf[0]->getckm(); }  
+
+const std::vector<std::vector<double> >& appl::grid::getckm2() const { return m_genpdf[0]->getckm2(); }  
+
+
+// void appl::grid::setuppdf(void (*pdf)(const double&, const double&, double* ) )  {  }
+// void grid::pdfinterp(double x, double Q2, double* f) {  }
+
+
+
+// set the reweight flag of the internal grids
+bool appl::grid::reweight(bool t) { 
+  for( int iorder=0 ; iorder<m_order ; iorder++ ) {
+    for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) m_grids[iorder][iobs]->reweight(t);       
+  }
+  return t;
+}
+
+
+bool exists( const std::string& filename ) { 
+  struct stat sb;
+  if ( stat( filename.c_str(), &sb)==0 ) return true; // && S_ISREG(sb.st_mode ))
+  else return false;
+}
+
+
+void appl::grid::Write(const std::string& filename, 
+		       const std::string& dirname, 
+		       const std::string& pdfname) {
+  std::cout << "Write(): filename:  " << filename << std::endl;
+  if ( filename.find(".root")==filename.size()-5 ) { 
+#   ifdef USEROOT
+    Write_root( filename, dirname, pdfname );
+#   else    
+    std::cerr << "appl::grid::Write() root file requested, but root not compiled in" << std::endl;
+    std::exit(-1);
+#   endif
+  }
+  else  Write_appl( filename, dirname, pdfname );
+}
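+
+/// e.g. a filename ending in ".root" (say "grid.root") is routed to the root 
+/// writer when root support is compiled in, while any other name (say 
+/// "grid.appl") uses the native appl writer below
+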
+  
+
+#ifdef USEROOT
+// dump to file
+void appl::grid::Write_root(const std::string& filename, 
+			    const std::string& dirname, 
+			    const std::string& pdfname) 
+{ 
+
+  std::cout << "appl::grid::Write() " << filename << "\tdirname " << dirname << "\tpdfname " << pdfname << std::endl; 
+
+  if ( exists( filename ) ) { 
+    std::string filename_save = filename + "-save";
+    if ( std::rename( filename.c_str(), filename_save.c_str() ) ) std::cerr << "could not rename grid file " << filename << std::endl;
+  } 
+
+  if ( pdfname!="" ) shrink( pdfname, m_genpdf[0]->getckmcharge() );
+
+  //  std::cout << "grid::Write() writing to file " << filename << std::endl;
+  TFile rootfile(filename.c_str(),"recreate");
+
+  //  std::cout << "pwd=" << gDirectory->GetName() << std::endl;
+
+  Directory d(dirname);
+  d.push();
+  
+  //  std::cout << "pwd=" << gDirectory->GetName() << std::endl;
+
+  // write the name of the transform pair and the
+  // generalised pdf
+  TFileString _tags("Tags");
+  _tags.add(m_transform);
+  _tags.add(m_genpdfname);
+  _tags.add(m_version);
+  //  if ( m_documentation!="" ) 
+  _tags.add( m_documentation );
+  if ( m_genwithpdf!="" )    _tags.add( m_genwithpdf + ":"+std::to_string(m_genwithipdf) );
+  _tags.Write();
+
+  TFileString _qtags("QTags");
+  _qtags.add(m_qtransform);
+  _qtags.Write();
+
+  //  std::cout << "genwith pdf " << m_genwithpdf << " : " << m_genwithipdf << std::endl; 
+
+  //  appl::TH1D* _transform = new appl::TH1D("Transform", m_transform.c_str(), 1, 0, 1);  
+  //  _transform->Write();
+
+  //  appl::TH1D* _genpdfname = new appl::TH1D("GenPDF", m_genpdfname.c_str(), 1, 0, 1);
+  //  _genpdfname->Write();
+
+
+  //  std::cout << "state std::vector=" << std::endl;
+
+  // state information
+  TVectorT<double>* setup=new TVectorT<double>(12); // add a few extra just in case 
+  (*setup)(0) = m_run;
+  (*setup)(1) = ( m_optimised  ? 1 : 0 );
+  (*setup)(2) = ( m_symmetrise ? 1 : 0 );
+  (*setup)(3) =   m_leading_order ;
+  (*setup)(4) =   m_order ;
+  (*setup)(5) =   m_cmsScale ;
+  (*setup)(6) = ( m_normalised ? 1 : 0 );
+  (*setup)(7) = ( m_applyCorrections ? 1 : 0 );
+
+  if ( m_genpdf[0]->getckmsum().size()==0 ) (*setup)(8) = 0;
+  else                                      (*setup)(8) = 1;
+
+  (*setup)(9) = (int)m_type;
+
+  (*setup)(10) = m_userdata.size();
+
+  /// by default we do not want to use duplicates, 
+  /// so we want them removed
+  if ( m_use_duplicates ) (*setup)(11) = 1;
+
+  setup->Write("State");
+
+  if ( (*setup)(8) == 1 ) { 
+    
+    const std::vector<std::vector<double> >& _ckm = m_genpdf[0]->getckm();
+
+    bool useckm = false;
+
+    if ( _ckm.size()>0 ) { 
+      /// no longer write out squared ckm matrix - just use the 3x3
+      TVectorT<double>* ckmflat = new TVectorT<double>(9);
+      
+      for ( int ic=0 ; ic<3 ; ic++ ) { 
+	for ( int id=0 ; id<3 ; id++ ) { (*ckmflat)(ic*3+id) = _ckm[ic][id]; if ( _ckm[ic][id]!=0 ) useckm=true; } 
+      }
+
+      if ( useckm )  ckmflat->Write("CKM");
+      else delete ckmflat;
+    }
+
+    if ( !useckm ) { 
+
+      const std::vector<std::vector<double> >& _ckm2 = m_genpdf[0]->getckm2();
+
+      if ( _ckm2.size()>0 ) { 
+	/// no longer write out squared ckm matrix - just use the 3x3 - unless it is an OLD, OLD grid
+	
+	int nrows = _ckm2.size();
+	
+	TVectorT<double>* ckm2flat = new TVectorT<double>(nrows*nrows);
+	
+	for ( int ic=0 ; ic<nrows ; ic++ ) { 
+	  for ( int id=0 ; id<nrows ; id++ ) (*ckm2flat)(ic*nrows+id) = _ckm2[ic][id];
+	}
+
+       	ckm2flat->Write("CKM2");
+      }
+    }      
+
+  }
+
+
+  /// encode the pdf combination if appropriate
+  
+  if ( contains( m_genpdfname, ".config" ) ) { 
+    
+    // find out if we have one combination per order or one overall
+    
+    std::vector<std::string> namevec = parse( m_genpdfname, ":" );
+    
+    /// now write out all the ones we need
+    
+    std::string label = "Combinations";
+    
+    for ( unsigned i=0 ; i<namevec.size() && i<unsigned(m_order) ; i++ ) {  
+
+      //      dynamic_cast<lumi_pdf*>(m_genpdf[i])->restoreDuplicates();
+
+      lumi_pdf* lpdf = dynamic_cast<lumi_pdf*>(m_genpdf[i]);
+
+      std::cout << "lumi_pdf:   " << lpdf->name() << "\t processes " << lpdf->Nproc() << "\n";
+
+      //      std::cout << "lumi pdf: " << *lpdf << std::endl;
+
+      lumi_pdf lpdf_tmp(*lpdf);
+
+      lpdf_tmp.restoreDuplicates();
+
+      //      std::cout << "lumi pdf: " << *lpdf     << std::endl;
+      //      std::cout << "lumi pdf: " <<  lpdf_tmp << std::endl;
+
+      //      std::vector<int>   combinations = dynamic_cast<lumi_pdf*>(m_genpdf[i])->serialise();
+      std::vector<int>   combinations = lpdf_tmp.serialise();
+      TVectorT<double>* _combinations = new TVectorT<double>(combinations.size());
+      for ( unsigned ic=0 ; ic<combinations.size() ; ic++ ) { 
+	if ( combinations[ic]<0 ) (*_combinations)(ic) = double(combinations[ic]-0.5);
+	else                      (*_combinations)(ic) = double(combinations[ic]+0.5);
+      }
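+      /// the +/-0.5 offset above protects the integer payload against root's
+      /// double -> int truncation when the vector is read back: e.g. 3 is stored
+      /// as 3.5 and -2 as -2.5, both of which truncate (towards zero) back to
+      /// the original integer even in the presence of small rounding errors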
+      
+      _combinations->Write( label.c_str() );
+      
+      //      std::cout << "writing " << m_genpdf[i]->name() << std::endl;
+      
+      label += "N";  /// add an N for each order, N-LO, NN-LO etc !!!! ARGH!!! Need to be more general !!!
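+      /// successive orders are therefore written as "Combinations",
+      /// "CombinationsN", "CombinationsNN", ... for LO, N-LO, NN-LO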
+    }
+  } 
+  
+  //  std::cout << "grids Nobs = " << Nobs_internal() << std::endl;
+  
+  untrim();
+  int untrim_size = size();
+  trim();
+  int trim_size = size();
+  std::cout <<"grid::Write() "
+	    << "size(untrimmed)=" << untrim_size/1024 << " kB"
+	    << "\tsize(trimmed)=" <<   trim_size/1024 << " kB"
+	    << " (" << 0.1*int(trim_size*1000./untrim_size) << "%)" << std::endl;
+  
+
+  // internal grids
+  for( int iorder=0 ; iorder<m_order ; iorder++ ) {
+    for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+      char name[128];  sprintf(name, "weight[alpha-%d][%03d]", iorder, iobs);
+      // std::cout << "writing grid " << name << std::endl;
+      //   _size += m_grids[iorder][iobs]->size();
+      m_grids[iorder][iobs]->write(name);
+      //   trim_size += m_grids[iorder][iobs]->size();
+    }
+  }
+ 
+  //  d.pop();
+
+  //  std::cout << "reference" << std::endl;
+
+  appl::TH1D* reference          = 0;
+  appl::TH1D* reference_internal = 0;
+
+  //  std::cout << "m_ref_combined: " << m_ref_combined << std::endl;
+  //  std::cout << "m_ref:          " << m_ref << std::endl;
+
+  if ( m_ref_combined != m_ref )  { 
+    reference = (appl::TH1D*)m_ref_combined->Clone("reference");
+    reference->SetDirectory(0);
+
+    reference_internal = (appl::TH1D*)m_ref->Clone("reference_internal");
+    reference_internal->SetDirectory(0);
+  }
+  else { 
+    reference = (appl::TH1D*)m_ref->Clone("reference");
+    reference->SetDirectory(0);
+  }
+
+  /// grrr, what is this? This whole normalisation issue is a real pain - 
+  /// we need to address this - but how to do it in a backwards compatible way?
+  /// will need to check the grid version and do things differently probably
+
+  //  std::cout << "normalised() " << getNormalised() << "\tread " << m_read << std::endl; 
+  
+  //  if ( !getNormalised() || m_read )  if ( run() ) reference->Scale(1/double(run()));
+  if ( run() ) { 
+    reference->Scale(1/double(run()));
+    if ( reference_internal ) reference_internal->Scale(1/double(run()));
+  }
+
+  // if ( run() ) reference->Scale(1/double(run()));
+
+
+#if 0
+
+  double hmin = reference->GetBinContent(1); 
+  double hmax = reference->GetBinContent(1); 
+  for ( int i=1 ; i<=reference->GetNbinsX() ; i++ ) { 
+    std::cout << "reference " << i << " " << reference->GetBinContent(i) << std::endl;
+    if ( hmin<reference->GetBinContent(i) ) hmin = reference->GetBinContent(i); 
+    if ( hmax>reference->GetBinContent(i) ) hmax = reference->GetBinContent(i); 
+  }
+
+  reference->SetMinimum(hmin);
+  reference->SetMaximum(hmax);
+
+#endif
+
+  reference->Write();
+
+  delete reference;
+
+  if ( reference_internal ) { 
+    reference_internal->Write();
+    delete reference_internal;
+  }
+
+  /// order by order references
+  
+  for ( int ih=0 ; ih<=nloops() ; ih++ ) { 
+    std::string refname = "reference_" + std::to_string(ih);
+    if ( m_refstore.m_ref_order[ih].name()==refname ) { 
+      appl::TH1D* h = (appl::TH1D*)m_refstore.m_ref_order[ih].Clone(refname);
+      h->SetDirectory(0);
+      h->Write();
+      delete h;
+    }
+  }
+
+
+  //  std::cout << "corrections" << std::endl;
+
+  /// correction histograms
+
+  if ( m_corrections.size()>0 ) {
+
+    /// Fixme: should add the labels to the actual corrections rather than save separately
+    /// write the actual corrections
+    TFileVector* corrections = new TFileVector("Corrections");
+    for ( unsigned i=0 ; i<m_corrections.size() ; i++ )  corrections->add( m_corrections[i].y() );    
+    corrections->Write("Corrections");
+
+    /// write the correction labels
+    TFileString correctionLabels("CorrectionLabels");
+    for ( unsigned i=0 ; i<m_correctionLabels.size() ; i++ )  correctionLabels.add( m_correctionLabels[i] );
+    correctionLabels.Write("CorrectionLabels");
+
+  }
+
+  /// now write out vector of bins to be combined if this has been set
+  if ( m_combine.size()>0 ) { 
+    TVectorT<double>* _combine = new TVectorT<double>(m_combine.size()); 
+    for ( unsigned i=m_combine.size() ; i-- ; ) (*_combine)(i) = m_combine[i]+0.5; /// NB: add 0.5 to prevent root double -> int rounding errors
+    _combine->Write( "CombineBins" );
+  }
+
+
+  /// now write out the user data
+
+  if ( m_userdata.size() ) { 
+    TVectorT<double>* userdata=new TVectorT<double>(m_userdata.size()); // add a few extra just in case 
+    for ( unsigned i=0 ; i<m_userdata.size() ; i++ ) (*userdata)(i) = m_userdata[i];
+    userdata->Write("UserData");
+  }
+
+  d.pop();
+  rootfile.Close();
+
+}
+#endif
+
+
+
+
+
+
+// dump to file
+void appl::grid::Write_appl(const std::string& filename, 
+			    const std::string& dirname, 
+			    const std::string& pdfname) 
+{ 
+
+  std::cout << "appl::grid::Write() " << filename << "\tdirname " << dirname << "\tpdfname " << pdfname << std::endl; 
+
+  if ( exists( filename ) ) { 
+    std::string filename_save = filename + "-save";
+    if ( std::rename( filename.c_str(), filename_save.c_str() ) ) std::cerr << "could not rename grid file " << filename << std::endl;
+  } 
+
+  if ( pdfname!="" ) shrink( pdfname, m_genpdf[0]->getckmcharge() );
+
+  //  std::cout << "grid::Write() writing to file " << filename << std::endl;
+
+  appl::file gridfile( filename.c_str(), "w" );
+
+  // write the name of the transform pair and the
+  // generalised pdf
+
+  stream_vector<std::string> tags( "Tags" );
+  
+  tags.add(m_transform);
+  tags.add(m_genpdfname);
+  tags.add(m_version);
+  tags.add(m_documentation);
+  if ( m_genwithpdf!="" ) tags.add( m_genwithpdf + ":" + std::to_string(m_genwithipdf) );
+
+  gridfile.Write( tags );
+
+
+  gridfile.Write( stream_vector<std::string>( "QTags", std::vector<std::string>( 1, m_qtransform ) ) );
+
+  //  std::cout << "genwith pdf " << m_genwithpdf << " : " << m_genwithipdf << std::endl; 
+
+  stream_vector<double> setup( "State" );
+
+  // state information
+  setup.add( m_run );
+  setup.add( m_optimised  ? 1 : 0 );
+  setup.add( m_symmetrise ? 1 : 0 );
+  setup.add( m_leading_order );
+  setup.add( m_order );
+  setup.add( m_cmsScale );
+  setup.add( m_normalised ? 1 : 0 );
+  setup.add( m_applyCorrections ? 1 : 0 );
+
+  if ( m_genpdf[0]->getckmsum().size()==0 ) setup.add(0);
+  else                                      setup.add(1);
+
+  setup.add((int)m_type);
+
+  setup.add(m_userdata.size());
+
+  /// by default we do not want to use duplicates, 
+  /// so we want them removed
+  if ( m_use_duplicates ) setup.add(1);
+
+  gridfile.Write( setup );
+
+  if ( setup[8] == 1 ) { 
+    
+    //    std::cout << "ckm: size: " << m_genpdf[0]->getckm().size() << std::endl;
+
+    const std::vector<std::vector<double> >& ckm = m_genpdf[0]->getckm();
+
+    /// no longer write out squared ckm matrix - just use the 3x3 - unless it is an OLD, OLD grid
+
+    std::vector<double> ckmflat(9);
+
+    bool useckm = false;
+
+    if ( ckm.size() ) {  
+      for ( int ic=0 ; ic<3 ; ic++ ) { 
+	for ( int id=0 ; id<3 ; id++ ) { ckmflat[ic*3+id] = ckm[ic][id]; if ( ckm[ic][id]!=0 ) useckm=true; }
+      }
+    }
+    
+    if ( useckm ) { 
+      gridfile.Write(  stream_vector<double>( "CKM", ckmflat ) );
+    }
+    else { 
+      
+      const std::vector<std::vector<double> >& _ckm2 = m_genpdf[0]->getckm2();
+   
+      if ( _ckm2.size()>0 ) { 
+	/// no longer write out squared ckm matrix - just use the 3x3 - unless it is an OLD, OLD grid
+
+	int nrows = _ckm2.size();
+	
+	std::vector<double> ckm2flat(nrows*nrows);
+	
+	for ( int ic=0 ; ic<nrows ; ic++ ) { 
+	  for ( int id=0 ; id<nrows ; id++ ) ckm2flat[ic*nrows+id] = _ckm2[ic][id];
+	}
+
+	gridfile.Write(  stream_vector<double>( "CKM2", ckm2flat ) );
+      }
+    }
+
+  }
+
+  /// encode the pdf combination if appropriate
+  
+  if ( contains( m_genpdfname, ".config" ) ) { 
+    
+    // find out if we have one combination per order or one overall
+    
+    std::vector<std::string> namevec = parse( m_genpdfname, ":" );
+    
+    /// now write out all the ones we need
+    
+    std::string label = "Combinations";
+    
+    for ( unsigned i=0 ; i<namevec.size() && i<unsigned(m_order) ; i++ ) {  
+
+      //      dynamic_cast<lumi_pdf*>(m_genpdf[i])->restoreDuplicates();
+
+      lumi_pdf* lpdf = dynamic_cast<lumi_pdf*>(m_genpdf[i]);
+
+      std::cout << "lumi_pdf:   " << lpdf->name() << "\t processes " << lpdf->Nproc() << "\n";
+
+      //      std::cout << "lumi pdf: " << *lpdf << std::endl;
+
+      lumi_pdf lpdf_tmp(*lpdf);
+
+      lpdf_tmp.restoreDuplicates();
+
+      //      std::cout << "lumi pdf: " << *lpdf     << std::endl;
+      //      std::cout << "lumi pdf: " <<  lpdf_tmp << std::endl;
+
+      //      std::vector<int>   combinations = dynamic_cast<lumi_pdf*>(m_genpdf[i])->serialise();
+      std::vector<int>     combinations = lpdf_tmp.serialise();
+      std::vector<double> _combinations(combinations.size());
+      for ( size_t ic=0 ; ic<_combinations.size() ; ic++ ) { 
+	if ( combinations[ic]<0 ) _combinations[ic] = double(combinations[ic]-0.5);
+	else                      _combinations[ic] = double(combinations[ic]+0.5);
+      }
+
+      gridfile.Write( stream_vector<double>( label, _combinations ) );
+
+      //      std::cout << "writing " << m_genpdf[i]->name() << std::endl;
+      
+      label += "N";  /// add an N for each order, N-LO, NN-LO etc !!!! ARGH!!! Need to be more general !!!
+    }
+  } 
+   
+  //  std::cout << "reference" << std::endl;
+
+  /// grrr only have to use pointers for the root backwards compatibility 
+  /// how I hate root
+  appl::TH1D* reference          = 0;
+  appl::TH1D* reference_internal = 0;
+
+  //  std::cout << "m_ref_combined: " << m_ref_combined << std::endl;
+  //  std::cout << "m_ref:          " << m_ref << std::endl;
+
+  if ( m_ref_combined != m_ref )  { 
+    reference = (appl::TH1D*)m_ref_combined->Clone("reference");
+    reference_internal = (appl::TH1D*)m_ref->Clone("reference_internal");
+  }
+  else { 
+    reference = (appl::TH1D*)m_ref->Clone("reference");
+  }
+
+  /// grrr, what is this? This whole normalisation issue is a real pain - 
+  /// we need to address this - but how to do it in a backwards compatible way?
+  /// will need to check the grid version and do things differently probably
+
+  //  std::cout << "normalised() " << getNormalised() << "\tread " << m_read << std::endl; 
+  
+  //  if ( !getNormalised() || m_read )  if ( run() ) reference->Scale(1/double(run()));
+  if ( run() ) { 
+    reference->Scale(1/double(run()));
+    if ( reference_internal ) reference_internal->Scale(1/double(run()));
+  }
+
+  // if ( run() ) reference->Scale(1/double(run()));
+
+  gridfile.Write( *reference );
+  delete reference;
+
+  if ( reference_internal ) { 
+    gridfile.Write( *reference_internal );
+    delete reference_internal;
+  }
+
+  /// write reference histograms if stored by order
+  
+  for( int ih=0 ; ih<=nloops() ; ih++ ) {
+    std::string refname = "reference_"+std::to_string(ih);
+    if ( m_refstore.m_ref_order[ih].name() == refname ) gridfile.Write( m_refstore.m_ref_order[ih] );
+  }
+
+  
+  //  std::cout << "grids Nobs = " << Nobs_internal() << std::endl;
+  
+  untrim();
+  int untrim_size = size();
+  trim();
+  int trim_size = size();
+
+  std::cout <<"grid::Write() "
+	    << "size(untrimmed)=" << untrim_size/1024 << " kB"
+	    << "\tsize(trimmed)=" <<   trim_size/1024 << " kB"
+	    << " (" << 0.1*int(trim_size*1000./untrim_size) << "%)" << std::endl;
+  
+
+  // internal grids
+  for( int iorder=0 ; iorder<m_order ; iorder++ ) {
+    for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+      char name[128];  sprintf(name, "weight[alpha-%d][%03d]", iorder, iobs);
+      // std::cout << "writing grid " << name << std::endl;
+      m_grids[iorder][iobs]->write( gridfile, name);
+    }
+  }
+
+
+
+  //  std::cout << "corrections" << std::endl;
+
+  /// correction histograms
+
+  if ( m_corrections.size()>0 ) {
+
+    std::vector<std::string> correctionLabels(m_corrections.size());
+    for ( size_t i=0 ; i<m_corrections.size() ; i++ ) correctionLabels[i] = m_correctionLabels[i];
+    for ( size_t i=0 ; i<m_corrections.size() ; i++ ) { 
+      std::cout << "labels : " << m_correctionLabels[i] << std::endl;
+    }
+    gridfile.Write( "CorrectionLabels", correctionLabels );
+
+    std::vector<std::vector<double> > corrections(m_corrections.size());
+    for ( size_t i=0 ; i<m_corrections.size() ; i++ ) corrections[i] = m_corrections[i].y();
+    gridfile.Write( "Corrections", corrections );
+ 
+  }
+   
+
+  /// now write out vector of bins to be combined if this has been set
+
+  if ( m_combine.size()>0 ) { 
+
+    std::vector<double> combine(m_combine.size()); 
+    for ( unsigned i=m_combine.size() ; i-- ; ) combine[i] = m_combine[i]; /// NB: no 0.5 offset needed here - no root double -> int round trip
+    gridfile.Write( "CombineBins", combine );
+
+  }
+
+
+  /// now write out the user data
+
+  gridfile.Write( "UserData", m_userdata );
+
+
+  //  d.pop();
+  gridfile.Close();
+
+}
+
+
+
+void appl::grid::disable_threads(bool b) { appl::igrid::disable_threads(b); }
+
+
+bool duff() { 
+  appl::grid::disable_threads(true);
+  return true;
+}
+
+
+// takes pdf as the pdf lib wrapper for the pdf set for the convolution.
+// type specifies which sort of partons should be included:
+
+std::vector<double> appl::grid::vconvolute(void (*pdf)(const double& , const double&, double* ), 
+					   double (*alphas)(const double& ), 
+					   int     _nloops, 
+					   double  rscale_factor,
+					   double  fscale_factor,
+					   double Escale )
+{ 
+  return vconvolute( pdf, 0, alphas, _nloops, rscale_factor, fscale_factor, Escale, Escale );
+}
+
+
+
+std::vector<double> appl::grid::vconvolute(void (*pdf1)(const double& , const double&, double* ), 
+					   void (*pdf2)(const double& , const double&, double* ), 
+					   double (*alphas)(const double& ), 
+					   int     _nloops, 
+					   double  rscale_factor,
+					   double  fscale_factor,
+					   double  Escale1,
+					   double  Escale2 )
+{ 
+ 
+  NodeCache cache1( pdf1 );
+  NodeCache cache2;
+
+ 
+  cache1.reset();
+
+
+  NodeCache* _pdf1 = &cache1;
+  NodeCache* _pdf2 = 0;
+  
+  if ( !isDIS() && pdf2!=0 && pdf1!=pdf2 ) { 
+    cache2 = NodeCache( pdf2 );
+    cache2.reset();  
+    _pdf2    = &cache2;
+  }
+
+
+  //  struct timeval _ctimer = appl_timer_start();
+  
+  double EscaleSQ = 1;
+ 
+  if ( Escale1!=1 || Escale2!=1 ) EscaleSQ = Escale1*Escale2;
+  
+  std::vector<double> hvec(Nobs_internal(), 0);
+  //  hvec.clear(); /// explicit clear just in case
+
+  double invNruns = 1;
+  if ( (!m_normalised) && run() ) invNruns /= double(run());
+
+  //  appl::TH1D* h = new appl::TH1D(*m_ref);
+  //  h->SetName("xsec");
+
+  //  std::cout << "hvec: " << hvec.size() << " : " << hvec << std::endl;
+
+
+#ifdef HAVE_HOPPET
+  // check if we need to use the splitting function, and if so see if we 
+  // need to initialise it again, and do so if required
+
+
+  if ( fscale_factor!=1 || m_dynamicScale ) {
+
+    /// should initialise *especially* if requesting multiple PDFs  
+    //    if ( pdf2==0 || pdf1==pdf2 ) { 
+   
+      /// fixed and shared between 
+      /// all instances of the grid
+
+      static double Qmax = 26000;  /// should be generally ok
+      static double ymax = 12; 
+
+      //      char* Qenv =  getenv("QMAX"); 
+      
+      //      if ( Qenv != 0 ) Qmax = std::atof( Qenv );
+
+      //      double _xmin = xmin();
+      //      double _ymax = std::log(1/_xmin);
+
+      ///     grr - always use 12, even though a better max was computed ??
+      //      double _ymax = ymax;
+
+      double _ymax = m_ymax;
+
+      bool restart_hoppet = false;
+
+      /// Qmax limit needs to be recalculated
+      /// used to use (m_cmsScale>Qmax) but some grid 
+      /// generators use ludicrously high scales 
+      if ( fscale_factor*m_Qmax>Qmax ) { 
+	if ( hoppet ) restart_hoppet = true;
+	Qmax = m_Qmax*fscale_factor;
+      }
+
+      /// ymax limit needs to be recalculated
+      if ( _ymax>ymax ) { 
+	if ( hoppet ) restart_hoppet = true;
+
+	double dymax = (_ymax - ymax);
+
+	/// NB: this 0.1 is just dy from hoppet init - would be better to 
+	//      calculate the values and set this in hoppet init
+	double iy = dymax/0.1; 
+
+	//	std::cout << "appl::grid changing hoppet yrange " << ymax << "\t" << iy << "\t" << (0.1+int(iy)*0.1) << std::endl;
+
+	/// make sure that we are on a dy integer multiple
+	if ( int(iy)!=iy )  ymax += 0.1+int(iy)*0.1;
+	else                ymax +=     int(iy)*0.1;
+      }
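+
+      /// e.g. with ymax=12 and a grid needing _ymax=12.34: dymax=0.34, iy=3.4,
+      /// so ymax is increased by 0.1+3*0.1=0.4 to 12.4, the next multiple of
+      /// the hoppet dy=0.1 step above the requested range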
+
+      if ( hoppet && restart_hoppet ) { 
+	std::cout << "appl::grid deleting old hoppet" << std::endl;
+	delete hoppet;
+	hoppet = 0;
+      }  
+
+      if ( hoppet == 0 ) {  
+	std::cout << "appl::grid initialising hoppet" << std::endl;
+	//	double Qmax = 15000;
+	//	double ymax
+	hoppet = new appl::hoppet_init( Qmax, ymax );	
+      } 
+
+      bool newpdf = hoppet->compareCache( pdf1 );
+      
+      if ( newpdf ) hoppet->fillCache( pdf1 );
+
+      //    }
+
+  }
+#endif                                   
+
+
+  std::string label;
+
+  if ( _nloops>=m_order ) { 
+    std::cerr << "too many loops for grid: " << _nloops << " > " << nloops() << " " << std::endl;   
+    return hvec;
+  } 
+  
+  /// NB: in the following code, the return value is stored in a class variable that 
+  ///     must be accessed when each convolution has finished since the convolution 
+  ///     itself might not be happening in this thread
+
+
+  if ( m_type==STANDARD ) { 
+
+    static bool first = true;
+
+    if ( first && m_dynamicScale ) std::cout << "** grid::vconvolute() emulating dynamic scale **" << std::endl;
+
+    //    std::cout << "standard convolution" << std::endl;
+
+    if      ( _nloops==0  ) label = "lo        ";
+    else if ( _nloops==1  ) label = "nlo       ";
+    else if ( _nloops==2  ) label = "nnlo      ";
+    else if ( _nloops==-1 ) label = "nlo only  ";
+    else if ( _nloops==-2 ) label = "nnlo only ";
+    
+
+    //    std::exit(0);
+    
+    for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {  
+      // for ( int iobs=0 ; iobs<1 ; iobs++ ) {  
+
+      /// here we see whether we need to emulate a dynamic scale by simply 
+      /// changing the renormalisation and factorisation scale terms to give 
+      /// what a dynamic scale would be in this bin
+ 
+      //      if ( m_bin!=-1 && iobs!=m_bin ) continue;
+
+
+      ////  std::cout << "bin: " << iobs << std::endl;
+
+      double dynamic_factor = 1;
+
+
+      if ( m_dynamicScale ) {
+	double var = m_ref->GetBinCenter(iobs+1);
+	dynamic_factor = var/m_dynamicScale;
+	//	if ( first ) std::cout << "grid::vconvolute() bin " << iobs << "\tscale " << var << "\tdynamicScale " << m_dynamicScale << "\t scale factor " << dynamic_factor << std::endl;
+      } 
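+
+      /// e.g. with m_dynamicScale=100 GeV and a bin centred at 250 GeV, the
+      /// effective scale factors passed to the convolutions below become
+      /// 2.5*rscale_factor and 2.5*fscale_factor for this bin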
+
+      /// now do the convolution proper
+
+      //      std::cout << iobs << "\tnloops " << _nloops << std::endl;
+
+
+      if ( _nloops==0 ) {
+	/// leading order cross section
+	if ( subproc()==-1 ) {  
+	  m_grids[0][iobs]->convolute( _pdf1, _pdf2, m_genpdf[0], alphas, m_leading_order, 0, dynamic_factor*rscale_factor, dynamic_factor*fscale_factor, Escale1, Escale2 );
+	}
+	else {
+
+ 
+	  /// fixme: for the subprocesses, this is technically incorrect - the "LO" contribution 
+	  ///        includes the scale dependent "NLO" terms that are proportional to the LO 
+	  ///        coefficient functions - it could use the strict "LO" part without the scale 
+	  ///        dependent parts using order "0" rather than order "1" but see the comment 
+	  ///        for the "NLO only" contribution. The reason for this is that the subprocesses
+	  ///        can be different for LO and NLO, so this NLO part with "LO coefficients", 
+	  ///        can have different subprocesses from the actual NLO part, so the correct 
+	  ///        LO/NLO separation is only guaranteed for the full convolution, and not by 
+	  ///        subprocess
+	  m_grids[0][iobs]->convolute( _pdf1, _pdf2, m_genpdf[0], alphas, m_leading_order, 1, dynamic_factor*rscale_factor, dynamic_factor*fscale_factor, Escale1, Escale2 );
+	}
+
+      }
+      else if ( _nloops==1 ) {
+
+
+	// next to leading order cross section
+	// std::cout << "convolute() nloop=1" << std::endl;
+	// leading order contribution and scale dependent born dependent terms
+	m_grids[0][iobs]->convolute( _pdf1, _pdf2, m_genpdf[0], alphas, m_leading_order, 1, dynamic_factor*rscale_factor, dynamic_factor*fscale_factor, Escale1, Escale2 );
+	// std::cout << "dsigma_lo=" << dsigma_lo << std::endl;
+	// next to leading order contribution
+	//      double dsigma_nlo = m_grids[1][iobs]->convolute(pdf, m_genpdf, alphas, m_leading_order+1, 0);
+	// GPS: the NLO piece must use the same rscale_factor and fscale_factor as
+	//      the LO piece -- that's the convention that defines how NLO calculations
+	//      are done.
+	m_grids[1][iobs]->convolute( _pdf1, _pdf2, m_genpdf[1], alphas, m_leading_order+1, 0, dynamic_factor*rscale_factor, dynamic_factor*fscale_factor, Escale1, Escale2 );
+	// std::cout << "dsigma_nlo=" << dsigma_nlo << std::endl;
+
+
+      }
+      else if ( _nloops==-1 ) {
+	label = "nlo only";
+
+	// nlo contribution only 
+	if ( subproc()==-1 ) { 
+	  m_grids[0][iobs]->convolute( _pdf1, _pdf2, m_genpdf[0], alphas, m_leading_order,   -1, dynamic_factor*rscale_factor, dynamic_factor*fscale_factor, Escale1, Escale2 );
+	  m_grids[1][iobs]->convolute( _pdf1, _pdf2, m_genpdf[1], alphas, m_leading_order+1,  0, dynamic_factor*rscale_factor, dynamic_factor*fscale_factor, Escale1, Escale2 );
+	}
+	else { 
+	  /// fixme: this is technically incorrect - the "LO" component contains the 
+	  ///        scale dependent NLO contribution dependent on the LO coefficient
+	  ///        functions. This part is difficult to include for individual 
+	  ///        subprocesses, since different subprocesses can be present 
+	  ///        at LO and NLO, so specifying subprocess X at NLO does not 
+	  ///        necessarily correspond to subprocess X at LO - so these terms 
+	  ///        are only strictly LO and NLO when *not* specifying a 
+	  ///        subprocess
+	  m_grids[1][iobs]->convolute( _pdf1, _pdf2, m_genpdf[1], alphas, m_leading_order+1,  0, dynamic_factor*rscale_factor, dynamic_factor*fscale_factor, Escale1, Escale2 );
+	}
+
+      } 
+      else if ( _nloops==2 ) {
+	label = "nnlo    ";
+	// next to next to leading order contribution 
+	// std::cout << " order: " << 0 << std::endl;
+	m_grids[0][iobs]->convolute( _pdf1, _pdf2, m_genpdf[0], alphas, m_leading_order, 2,   dynamic_factor*rscale_factor, dynamic_factor*fscale_factor, Escale1, Escale2  );
+	// next to leading order contribution      
+	// std::cout << " order: " << 1 << std::endl;
+	m_grids[1][iobs]->convolute( _pdf1, _pdf2, m_genpdf[1], alphas, m_leading_order+1, 1, dynamic_factor*rscale_factor, dynamic_factor*fscale_factor, Escale1, Escale2  );
+	// next to next to leading order contribution
+	// std::cout << " order: " << 2 << std::endl;
+	m_grids[2][iobs]->convolute( _pdf1, _pdf2, m_genpdf[2], alphas, m_leading_order+2, 0, dynamic_factor*rscale_factor, dynamic_factor*fscale_factor, Escale1, Escale2  );
+      }
+      else if ( _nloops==-2 ) {
+	// next to next to leading order contribution only
+	m_grids[2][iobs]->convolute( _pdf1, _pdf2, m_genpdf[2], alphas, m_leading_order+2, 0, dynamic_factor*rscale_factor, dynamic_factor*fscale_factor, Escale1, Escale2  );
+      }
+      else { 
+	std::stringstream s_;
+	s_ << "invalid value for nloops " << _nloops; 
+	throw grid::exception( s_.str() );
+      }
+
+      //      double deltaobs = m_ref->GetBinLowEdge(iobs+2)-m_ref->GetBinLowEdge(iobs+1);      
+      //      hvec.push_back( invNruns*Escale2*dsigma/deltaobs );
+
+    }
+
+
+    /// wait on convolution results and combine the values from the different igrids
+   
+    //    std::cout << "vconvolute:: nloops " << _nloops << std::endl; 
+
+    if ( _nloops==0 ) { 
+      /// LO only 
+      for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+	double dsigma  = 0;
+	if ( m_grids[0][iobs]->ready() ) dsigma  = m_grids[0][iobs]->xsec(); 
+	double deltaobs = m_ref->GetBinLowEdge(iobs+2)-m_ref->GetBinLowEdge(iobs+1);      
+	hvec[iobs] = invNruns*EscaleSQ*dsigma/deltaobs;
+	//      printf("iobs %d   xsec raw: %lf   %lf \n",  iobs, dsigma, invNruns*EscaleSQ*dsigma/deltaobs );     
+      }    
+    }
+    else if ( _nloops==1 || ( _nloops==-1 && subproc()==-1 ) ) { 
+      /// full NLO, or the overall NLO-only part
+      for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+	double dsigLO  = 0;
+	double dsigNLO = 0;
+	if ( m_grids[0][iobs]->ready() ) { 
+	  dsigLO  = m_grids[0][iobs]->xsec(); 
+	  dsigNLO = m_grids[0][iobs]->xsecNLO(); 
+	} 
+	if ( m_grids[1][iobs]->ready() ) dsigNLO += m_grids[1][iobs]->xsec(); 
+	double dsigma = dsigLO + dsigNLO;
+	double deltaobs = m_ref->GetBinLowEdge(iobs+2)-m_ref->GetBinLowEdge(iobs+1);      
+	hvec[iobs] = invNruns*EscaleSQ*dsigma/deltaobs;
+	//      printf("iobs %d   xsec raw: %lf   %lf \n",  iobs, dsigma, invNruns*EscaleSQ*dsigma/deltaobs );     
+      }    
+    }
+    else if ( _nloops==-1 && subproc()!=-1 ) { 
+      /// NLO only, subprocess only
+      for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+	double dsigma = 0;
+	if ( m_grids[1][iobs]->ready() ) dsigma = m_grids[1][iobs]->xsec(); 
+	double deltaobs = m_ref->GetBinLowEdge(iobs+2)-m_ref->GetBinLowEdge(iobs+1);      
+	hvec[iobs] = invNruns*EscaleSQ*dsigma/deltaobs;
+      }
+    }
+    else if ( _nloops==2 ) { 
+      /// full NNLO
+      for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+	// for ( int iobs=0 ; iobs<1 ; iobs++ ) {
+
+	double dsigLO  = 0;
+	if ( m_grids[0][iobs]->ready() ) dsigLO    = m_grids[0][iobs]->xsec(); 
+
+	double dsigNLO = 0;
+	if ( m_grids[0][iobs]->ready() ) dsigNLO   = m_grids[0][iobs]->xsecNLO(); 
+	if ( m_grids[1][iobs]->ready() ) dsigNLO  += m_grids[1][iobs]->xsec(); 
+
+	double dsigNNLO = 0;
+	if ( m_grids[0][iobs]->ready() ) dsigNNLO  = m_grids[0][iobs]->xsecNNLO(); 
+	if ( m_grids[1][iobs]->ready() ) dsigNNLO += m_grids[1][iobs]->xsecNLO(); 
+	if ( m_grids[2][iobs]->ready() ) dsigNNLO += m_grids[2][iobs]->xsec(); 
+
+#if 0
+	std::cout << "iobs: " << iobs << "  dsig: " << dsigLO << " " << dsigNLO << " " << dsigNNLO << std::endl;
+	std::cout << "          dsig: " << m_grids[0][iobs]->xsec() << " " << m_grids[1][iobs]->xsec() << " " << m_grids[2][iobs]->xsec() << std::endl;  
+	std::cout << "          dsig: " << m_grids[0][iobs]->xsec() << " " << m_grids[0][iobs]->xsecNLO() << " " << m_grids[0][iobs]->xsecNNLO() << " (LO)" << std::endl;  
+#endif
+	double dsigma = dsigLO + dsigNLO  + dsigNNLO;
+	// if ( mnobinwidth ) deltaobs = 1;
+	double deltaobs = m_ref->GetBinLowEdge(iobs+2)-m_ref->GetBinLowEdge(iobs+1);      
+	hvec[iobs] = invNruns*EscaleSQ*dsigma/deltaobs;
+	// hvec[iobs] = invNruns*EscaleSQ*dsigma; /// don't divide by the bin width to compare with stupid fastnlo
+
+	//      printf("iobs %d   xsec raw: %lf   %lf \n",  iobs, dsigma, invNruns*EscaleSQ*dsigma/deltaobs );     
+      }    
+
+      ///      std::cout << "hvec: " << hvec.size() << " : " << hvec << "  (2)" << std::endl;
+
+    }
+    else if ( _nloops==-2 ) {
+      /// NNLO only
+      for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+	double dsigma = 0;
+	if ( m_grids[2][iobs]->ready() ) dsigma = m_grids[2][iobs]->xsec(); 
+	double deltaobs = m_ref->GetBinLowEdge(iobs+2)-m_ref->GetBinLowEdge(iobs+1);      
+	//  hvec.push_back( invNruns*EscaleSQ*dsigma/deltaobs );
+	hvec[iobs] = invNruns*EscaleSQ*dsigma/deltaobs;
+      }
+    }
+
+    first = false;
+    
+
+  }
+  else if ( m_type==AMCATNLO ) {  
+
+    //    std::cout << "amc@NLO convolution " << _nloops << std::endl;
+
+    if      ( _nloops==0  )  label = "lo      ";
+    else if ( _nloops==1  )  label = "nlo     ";
+    else if ( _nloops==-1 )  label = "nlo only";
+    else if ( _nloops==-2 )  label = "nlo_w0  ";
+    else if ( _nloops==-3 )  label = "nlo_wR  ";
+    else if ( _nloops==-4 )  label = "nlo_wF  ";
+    
+    for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {  
+
+      if ( _nloops==0 ) {
+	/// this is the amcatnlo LO calculation (without FKS shower)
+	/// work out how to call from the igrid - maybe just implement additional 
+	m_grids[3][iobs]->amc_convolute( _pdf1, _pdf2, m_genpdf[3], alphas, m_leading_order,   0, rscale_factor, fscale_factor,  Escale1, Escale2 );
+      }
+      else if ( _nloops==1 || _nloops==-1 ) {
+	  /// this is the amcatnlo NLO calculation (without FKS shower)
+	  /// Next-to-leading order contribution
+
+	  // Scale independent contribution
+	m_grids[0][iobs]->amc_convolute( _pdf1, _pdf2, m_genpdf[0], alphas, m_leading_order+1, 0,  rscale_factor, fscale_factor,  Escale1, Escale2 );
+
+	  // Renormalization scale dependent contribution
+	  if ( rscale_factor!=1 ) { 
+	    m_grids[1][iobs]->amc_convolute( _pdf1, _pdf2, m_genpdf[1], alphas, m_leading_order+1, 0,  rscale_factor, fscale_factor,  Escale1, Escale2 );
+	  }
+
+	  // Factorization scale dependent contribution
+	  if ( fscale_factor!=1 ) { 
+	    m_grids[2][iobs]->amc_convolute( _pdf1, _pdf2, m_genpdf[2], alphas, m_leading_order+1, 0,  rscale_factor, fscale_factor,  Escale1, Escale2 );
+	  }
+      
+	  /// Add the LO contribution if we want full NLO 
+	  /// rather than specific NLO contribution only  
+	  if ( _nloops==1 ) { 
+	    /// this is the amcatnlo NLO calculation (without FKS shower)
+	    /// work out how to call from the igrid - maybe just implement additional 
+	    /// convolution routines and call them here
+	    m_grids[3][iobs]->amc_convolute( _pdf1, _pdf2, m_genpdf[3], alphas, m_leading_order,   0,  rscale_factor, fscale_factor,  Escale1, Escale2 );	
+	  }
+      }
+      else if ( _nloops==-2 ) { 
+        /// Only the convolution from the W0 grid
+	m_grids[0][iobs]->amc_convolute( _pdf1, _pdf2, m_genpdf[0], alphas, m_leading_order+1, 0,  rscale_factor, fscale_factor,  Escale1, Escale2 );
+      }
+      else if ( _nloops==-3 ) {
+	/// Only the convolution from the WR grid
+	m_grids[1][iobs]->amc_convolute( _pdf1, _pdf2, m_genpdf[1], alphas, m_leading_order+1, 0, rscale_factor, fscale_factor,  Escale1, Escale2 );
+      }
+      else if ( _nloops==-4 ) { 
+        /// Only the convolution from the WF grid
+	m_grids[2][iobs]->amc_convolute( _pdf1, _pdf2, m_genpdf[2], alphas, m_leading_order+1, 0,  rscale_factor, fscale_factor,  Escale1, Escale2 );
+      }
+      else { 
+	std::stringstream s_;
+	s_ << "invalid value for nloops " << _nloops; 
+	throw grid::exception( s_.str() );
+      }
+
+      //  double deltaobs = m_ref->GetBinLowEdge(iobs+2)-m_ref->GetBinLowEdge(iobs+1);      
+      //  hvec.push_back( invNruns*Escale2*dsigma/deltaobs );
+    }
+
+    /// wait on convolutions
+
+    if ( _nloops==0 ) { 
+      /// LO only 
+      for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+	double dsigma  = 0;
+	if ( m_grids[3][iobs]->ready() ) dsigma  = m_grids[3][iobs]->xsec(); 
+	double deltaobs = m_ref->GetBinLowEdge(iobs+2)-m_ref->GetBinLowEdge(iobs+1);      
+	// hvec.push_back( invNruns*EscaleSQ*dsigma/deltaobs );
+	hvec[iobs] = invNruns*EscaleSQ*dsigma/deltaobs;
+	//      printf("iobs %d   xsec raw: %lf   %lf \n",  iobs, dsigma, invNruns*EscaleSQ*dsigma/deltaobs );     
+      }    
+    }
+    else if ( _nloops==1 || _nloops==-1 ) { 
+      /// full NLO, or the overall NLO-only part
+	
+      double logF2 = 0;
+      double logR2 = 0;
+   
+      if ( rscale_factor!=1 ) logR2 = std::log(rscale_factor*rscale_factor);
+      if ( fscale_factor!=1 ) logF2 = std::log(fscale_factor*fscale_factor);
+     
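+      /// the loop below reconstructs the scale-varied amc@NLO cross section as
+      ///
+      ///   dsigma = W0 + B + WR*log(xiR^2) + WF*log(xiF^2)
+      ///
+      /// where W0, WR, WF and the Born term B come from grids 0, 1, 2 and 3,
+      /// and xiR, xiF are the renormalisation and factorisation scale factors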
+      for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+
+	double dsig0 = 0;
+	double dsigF = 0;
+	double dsigR = 0;
+	double dsigB = 0;
+
+	if ( m_grids[0][iobs]->ready() ) dsig0  = m_grids[0][iobs]->xsec(); 
+	double dsigma = dsig0;
+
+	if ( _nloops==1 ) { 
+	  if ( m_grids[3][iobs]->ready() ) dsigB  = m_grids[3][iobs]->xsec(); 
+	  dsigma += dsigB;
+	}
+	
+	if ( rscale_factor!=1 ) { 
+	  if ( m_grids[1][iobs]->ready() ) dsigR  = m_grids[1][iobs]->xsec(); 
+	  dsigma += dsigR*logR2;
+	}
+
+	if ( fscale_factor!=1 ) { 
+	  if ( m_grids[2][iobs]->ready() ) dsigF  = m_grids[2][iobs]->xsec(); 
+	  dsigma += dsigF*logF2;
+	}
+
+	double deltaobs = m_ref->GetBinLowEdge(iobs+2)-m_ref->GetBinLowEdge(iobs+1);      
+	//	hvec.push_back( invNruns*EscaleSQ*dsigma/deltaobs );
+	hvec[iobs] = invNruns*EscaleSQ*dsigma/deltaobs;
+	//      printf("iobs %d   xsec raw: %lf   %lf \n",  iobs, dsigma, invNruns*EscaleSQ*dsigma/deltaobs );     
+      }         
+    }
+    else if ( _nloops==-2) { 
+      /// W0 grid contribution only
+      for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+	double dsigma = 0;
+	if ( m_grids[0][iobs]->ready() ) dsigma = m_grids[0][iobs]->xsec(); 
+	double deltaobs = m_ref->GetBinLowEdge(iobs+2)-m_ref->GetBinLowEdge(iobs+1);      
+	//	hvec.push_back( invNruns*EscaleSQ*dsigma/deltaobs );
+	hvec[iobs] = invNruns*EscaleSQ*dsigma/deltaobs; 
+      }
+    }
+    else if ( _nloops==-3) { 
+      /// WR grid contribution only
+
+      double logR2 = 0;
+      if ( rscale_factor!=1 ) logR2 = std::log(rscale_factor*rscale_factor);
+
+      for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+	double dsigma = 0;
+	if ( m_grids[1][iobs]->ready() ) dsigma = m_grids[1][iobs]->xsec(); 
+	dsigma  *= logR2;
+	double deltaobs = m_ref->GetBinLowEdge(iobs+2)-m_ref->GetBinLowEdge(iobs+1);      
+	// hvec.push_back( invNruns*EscaleSQ*dsigma/deltaobs );
+	hvec[iobs] = invNruns*EscaleSQ*dsigma/deltaobs;
+      }
+    }
+    else if ( _nloops==-4) { 
+      /// WF grid contribution only
+
+      double logF2 = 0;
+      if ( fscale_factor!=1 ) logF2 = std::log(fscale_factor*fscale_factor);
+
+      for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+	double dsigma = 0;
+	if ( m_grids[2][iobs]->ready() ) dsigma = m_grids[2][iobs]->xsec(); 
+	dsigma  *= logF2;
+	double deltaobs = m_ref->GetBinLowEdge(iobs+2)-m_ref->GetBinLowEdge(iobs+1);      
+	// hvec.push_back( invNruns*EscaleSQ*dsigma/deltaobs );
+	hvec[iobs] = invNruns*EscaleSQ*dsigma/deltaobs;
+      }
+    }
+
+  }   /// end amc@nlo
+  else if ( m_type == SHERPA ) { 
+    
+    //    std::cout << "sherpa convolution" << std::endl;
+
+    if      ( _nloops==0  ) label = "lo      ";
+    else if ( _nloops==1  ) label = "nlo     ";
+    else if ( _nloops==-1 ) label = "nlo only";
+
+    for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {  
+    
+      if ( _nloops==0 ) {
+	label = "lo      ";
+	// leading order cross section
+	m_grids[0][iobs]->convolute( _pdf1, _pdf2, m_genpdf[0], alphas, m_leading_order, 0, 1, 1, Escale1, Escale2);
+      }
+      else if ( _nloops==1 ) { 
+	label = "nlo     ";
+	// next to leading order cross section
+	// leading order contribution and scale dependent born dependent terms
+	// will eventually add the other nlo terms ...
+	m_grids[0][iobs]->convolute( _pdf1, _pdf2, m_genpdf[0], alphas, m_leading_order,   0, rscale_factor, fscale_factor, Escale1, Escale2 );
+	m_grids[1][iobs]->convolute( _pdf1, _pdf2, m_genpdf[1], alphas, m_leading_order+1, 0, rscale_factor, fscale_factor, Escale1, Escale2 );
+      }
+      else if ( _nloops==-1 ) { 
+	label = "nlo only";
+	// next to leading order contribution only
+	// (the leading order term is not included)
+	m_grids[1][iobs]->convolute( _pdf1, _pdf2, m_genpdf[1], alphas, m_leading_order+1, 0, rscale_factor, fscale_factor, Escale1, Escale2 );
+      }
+
+    }
+
+    /// wait on convolutions
+
+    if    ( _nloops==0 ) { 
+      for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+	double dsigma = 0;
+	if ( m_grids[0][iobs]->ready() ) dsigma = m_grids[0][iobs]->xsec(); 
+	double deltaobs = m_ref->GetBinLowEdge(iobs+2)-m_ref->GetBinLowEdge(iobs+1);      
+	// hvec.push_back( invNruns*EscaleSQ*dsigma/deltaobs );
+	hvec[iobs] = invNruns*EscaleSQ*dsigma/deltaobs; 
+     }
+    }
+    else if ( _nloops==1 ) { 
+      for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+	double dsigLO = 0;
+	double dsigNLO = 0;
+	if ( m_grids[0][iobs]->ready() ) dsigLO  = m_grids[0][iobs]->xsec(); 
+	if ( m_grids[1][iobs]->ready() ) dsigNLO = m_grids[1][iobs]->xsec(); 
+	double dsigma  = dsigLO + dsigNLO;
+	double deltaobs = m_ref->GetBinLowEdge(iobs+2)-m_ref->GetBinLowEdge(iobs+1);      
+	//	hvec.push_back( invNruns*EscaleSQ*dsigma/deltaobs );
+	hvec[iobs] = invNruns*EscaleSQ*dsigma/deltaobs;
+      }
+    }
+    else if ( _nloops==-1 ) { 
+      for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) {
+	double dsigma = 0;
+	if ( m_grids[1][iobs]->ready() ) dsigma = m_grids[1][iobs]->xsec(); 
+	double deltaobs = m_ref->GetBinLowEdge(iobs+2)-m_ref->GetBinLowEdge(iobs+1);      
+	//  hvec.push_back( invNruns*EscaleSQ*dsigma/deltaobs );
+	hvec[iobs] = invNruns*EscaleSQ*dsigma/deltaobs;
+      }
+    }
+
+
+  }  /// end SHERPA
+
+
+  /// now combine bins if required ...
+
+  std::vector<bool> applied(m_corrections.size(),false);
+
+  /// apply corrections on the *uncombined* bins
+  if ( getApplyCorrections() ) applyCorrections(hvec, applied);
+  else { 
+    for ( unsigned i=m_corrections.size() ; i-- ; ) if ( getApplyCorrection(i) ) applied[i] = applyCorrection(i,hvec);
+  }
+
+
+  /// combine bins if required
+  if ( m_combine.size()!=0 ) { 
+    combineBins( hvec );
+
+    /// apply additional corrections on the *combined* bins
+    /// NB: Only apply those that have not already been applied
+    if ( getApplyCorrections() ) applyCorrections(hvec, applied);
+    else { 
+      for ( unsigned i=m_corrections.size() ; i-- ; ) if ( getApplyCorrection(i) && !applied[i] ) applied[i] = applyCorrection(i,hvec);
+    }
+  }
+
+
+  /// check all corrections have been applied correctly
+  if ( getApplyCorrections() ) { 
+    unsigned appliedcorrections = 0;
+    for ( unsigned i=applied.size() ; i-- ; ) if ( applied[i] ) appliedcorrections++;
+    if ( appliedcorrections!=applied.size() ) throw grid::exception( "correction vector size does not match data "  ); 
+  }
+  else { 
+    unsigned Ncorrections = 0;
+    unsigned appliedcorrections = 0;
+    for ( unsigned i=m_corrections.size() ; i-- ; ) { 
+      if ( getApplyCorrection(i) ) { 
+	Ncorrections++;
+	if ( applied[i] ) appliedcorrections++;
+      }
+    }
+    if ( appliedcorrections!=Ncorrections ) throw grid::exception( "correction vector size does not match data "  ); 
+  }
+
+  //  double _ctime = appl_timer_stop(_ctimer);
+  //  std::cout << "grid::convolute() " << label << " convolution time=" << _ctime << " ms" << std::endl;
+
+  
+  cache1.stats();
+  if ( cache2.ncalls() ) cache2.stats();
+  
+  //  std::cout << "hvec: " << hvec.size() << " : " << hvec << "  (3)" << std::endl;
+
+
+  return hvec;
+}
+
+
+
+
+appl::TH1D* appl::grid::aconvolute(void (*pdf)(const double& , const double&, double* ), 
+			    double (*alphas)(const double& ), 
+			    int     _nloops, 
+			    double  rscale_factor,
+			    double  fscale_factor,
+			    double Escale )
+{
+  return aconvolute( pdf, 0, alphas, _nloops, rscale_factor, fscale_factor, Escale, Escale );
+}
+
+
+
+
+/// a dirty hack to tell the sub grid it should only 
+/// use a single subprocess
+
+std::vector<double> appl::grid::vconvolute_subproc(int subproc,
+						   void (*pdf)(const double& , const double&, double* ), 
+						   double (*alphas)(const double& ), 
+						   int     _nloops, 
+						   double  rscale_factor, double Escale ) 
+{ 
+  /// set the subprocess index - this is tested by the 
+  /// igrid convolution
+  m_subproc = subproc;
+  std::vector<double> xsec = vconvolute( pdf, 0, alphas, _nloops, rscale_factor, rscale_factor, Escale, Escale ); 
+  m_subproc = -1;
+
+  return xsec;
+
+}
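+
+/// illustrative use (a sketch only: the pdf and alpha_s wrappers and the
+/// explicit trailing arguments are assumptions, not part of this interface):
+///
+///   for ( int ip=0 ; ip<g.subProcesses(0) ; ip++ ) {
+///     std::vector<double> xs = g.vconvolute_subproc( ip, evolvepdf, alphaspdf, 1, 1.0, 1.0 );
+///     /// ... inspect the contribution of subprocess ip ...
+///   }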
+
+
+
+double appl::grid::vconvolute_bin(int bin,
+				  void (*pdf)(const double& , const double&, double* ), 
+				  double (*alphas)(const double&) ) {
+  /// set the bin index - this is tested by the 
+  /// igrid convolution
+  m_bin = bin;
+  std::vector<double> xsec = vconvolute( pdf, alphas );
+  m_bin = -1;
+  return xsec[bin];
+}
+
+
+
+
+
+appl::TH1D* appl::grid::aconvolute(void (*pdf1)(const double& , const double&, double* ), 
+			    void (*pdf2)(const double& , const double&, double* ), 
+			    double (*alphas)(const double& ), 
+			    int     _nloops, 
+			    double  rscale_factor,
+			    double  fscale_factor,
+			    double Escale1,
+			    double Escale2 ) 
+{
+  
+  if ( isDIS() ) pdf2 = 0;
+  
+  //  int nbins = m_ref->GetNbinsX();
+
+  appl::TH1D* h = 0;
+  if ( m_combine.size() ) { 
+    
+    //    nbins = m_combine.size();
+
+    std::vector<double> limits(m_combine.size()+1);
+    
+    int i=0;
+    limits[0] = m_ref->GetBinLowEdge(i+1);
+    for ( unsigned ib=0 ; ib<m_combine.size() ; ib++) { 
+      i += m_combine[ib]; 
+      limits[ib+1] = m_ref->GetBinLowEdge(i+1);
+    }
+
+    h = new appl::TH1D("xsec", "xsec", m_combine.size(), &limits[0] );
+    h->SetDirectory(0);
+  }
+  else { 
+    h = new appl::TH1D(*m_ref);
+    h->SetName("xsec");
+    h->SetDirectory(0);
+  }
+
+  std::vector<double> dvec = vconvolute( pdf1, pdf2, alphas, _nloops, rscale_factor, fscale_factor, Escale1, Escale2 );
+ 
+  for ( unsigned i=0 ; i<dvec.size() ; i++ ) { 
+      h->SetBinContent( i+1, dvec[i] );
+      h->SetBinError( i+1, 0 );
+
+      //      std::cout << "bin " << i << " " << h->GetBinContent( i+1 );
+      
+  }
+  
+  return h;
+  
+}
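+
+/// illustrative use (a sketch only, assuming LHAPDF-style wrapper functions
+/// for the two beam pdfs and for alpha_s):
+///
+///   appl::TH1D* hxs = g.aconvolute( evolvepdf1, evolvepdf2, alphaspdf,
+///                                   1,          /// nloops
+///                                   1.0, 1.0,   /// mu_R, mu_F scale factors
+///                                   1.0, 1.0 ); /// beam energy scale factors
+///
+/// the returned histogram is detached from any directory, so the caller owns it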
+
+
+
+
+
+appl::TH1D* appl::grid::aconvolute_subproc(int subproc,
+				    void (*pdf)(const double& , const double&, double* ), 
+				    double (*alphas)(const double& ), 
+				    int     _nloops, 
+				    double  rscale_factor, double Escale ) {
+  
+    appl::TH1D* h = new appl::TH1D(*m_ref);
+    h->SetName("xsec");
+    
+    std::vector<double> dvec = vconvolute_subproc( subproc, pdf, alphas, _nloops, rscale_factor, Escale );
+    
+    for ( unsigned i=0 ; i<dvec.size() ; i++ ) { 
+      h->SetBinContent( i+1, dvec[i] );
+      h->SetBinError( i+1, 0 );
+    }
+  
+    return h;
+  
+}
+
+
+
+
+
+void appl::grid::optimise(bool force, int extrabins ) {
+  if ( !force && m_optimised ) return;
+  m_optimised = true;
+  m_read = false;
+  std::cout << "grid::optimise() " << std::endl;
+  for ( int iorder=0 ; iorder<m_order ; iorder++ ) { 
+    for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ )  { 
+      m_grids[iorder][iobs]->optimise(extrabins);
+    }
+  }
+  m_ref->Reset();
+}
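+
+/// note: optimise() is a no-op once the grid has already been optimised unless
+/// force is set, and it resets the reference histogram, which therefore needs
+/// to be refilled afterwards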
+
+
+
+void appl::grid::optimise(int NQ2, int Nx) {  optimise(NQ2, Nx, Nx);  }
+
+
+
+void appl::grid::optimise(int NQ2, int Nx1, int Nx2) {
+  m_optimised = true;
+  m_read = false;
+  for ( int iorder=0 ; iorder<m_order ; iorder++ ) { 
+    for ( int iobs=0 ; iobs<Nobs_internal() ; iobs++ )  { 
+      std::cout << "grid::optimise() bin " << iobs << "\t";
+      m_grids[iorder][iobs]->optimise(NQ2, Nx1, Nx2);
+    }
+  }
+  m_ref->Reset();
+}
+
+
+
+
+// redefine the limits by hand
+void appl::grid::redefine(int iobs, int iorder,
+			  int NQ2, double Q2min, double Q2max, 
+			  int Nx,  double  xmin, double  xmax ) 
+{ 
+  
+  if  ( iorder>=m_order ) { 
+    std::cerr << "grid does not extend to this order" << std::endl;
+    return;
+  }
+  
+  if ( iobs<0 || iobs>=Nobs_internal() ) { 
+    std::cerr << "observable bin out of range" << std::endl;
+    return;
+  }
+  
+  if ( iorder==0 ) { 
+    std::cout << "grid::redefine() iobs=" << iobs 
+	      << "NQ2="  << NQ2 << "\tQmin=" << std::sqrt(Q2min) << "\tQmax=" << std::sqrt(Q2max) 
+	      << "\tNx=" << Nx  << "\txmin=" <<            xmin  << "\txmax=" <<            xmax << std::endl; 
+  }
+  
+  igrid* oldgrid = m_grids[iorder][iobs];
+  
+  //  m_grids[iorder][iobs]->redefine(NQ2, Q2min, Q2max, Nx, xmin, xmax);
+
+  m_grids[iorder][iobs] = new igrid(NQ2, Q2min, Q2max, oldgrid->tauorder(),
+				    Nx,  xmin,  xmax,  oldgrid->yorder(), 
+				    oldgrid->transform(), 
+				    oldgrid->qtransform(), 
+				    m_genpdf[iorder]->Nproc());
+
+  m_grids[iorder][iobs]->setparent( this ); 
+
+  delete oldgrid;
+}
+  
+
+
+void appl::grid::setBinRange(int ilower, int iupper, double xScaleFactor) { 
+  if ( ilower>=0 && iupper <Nobs_internal() ) {  
+    double lower = getReference()->GetBinLowEdge(ilower+1);
+    double upper = getReference()->GetBinLowEdge(iupper+2); 
+    setRange( lower, upper, xScaleFactor );
+  }
+}
+
+
+
+/// Fixme: need to fix this so that if the m_combine vector has values 
+///        then the range of this vector is also modified
+///        and then the combined reference recalculated:
+///        at the moment, if the range is changed, the combine vector
+///        needs to be recalculated and reset by hand
+
+void appl::grid::setRange(double lower, double upper, double xScaleFactor) { 
+  
+  std::cout << "grid::SetRange() " << lower << " " << upper << std::endl; 
+
+  std::vector<bool>   used;
+  std::vector<double> limits;
+  std::vector<double> contents;
+  std::vector<double> errors;
+
+  m_combine = std::vector<int>(0);
+
+  /// get the occupied bins
+  //  int Nbins = 0;
+  double last = 0;
+  for ( int i=1 ; i<=m_ref->GetNbinsX() ; i++ ) { 
+    double bin = m_ref->GetBinCenter(i);
+    if ( bin>lower && bin<upper ) { 
+      limits.push_back( m_ref->GetBinLowEdge(i) );
+      contents.push_back( m_ref->GetBinContent(i) );
+      errors.push_back( m_ref->GetBinError(i) );
+      //  std::cout << "keep bin " << i << " : " << m_ref->GetBinLowEdge(i) << " - " << m_ref->GetBinLowEdge(i+1) << std::endl;
+      last = m_ref->GetBinLowEdge(i+1);
+      used.push_back(true);
+    }
+    else { 
+      // std::cout << "skip bin " << i << " : " << m_ref->GetBinLowEdge(i) << " - " << m_ref->GetBinLowEdge(i+1) << std::endl;
+      used.push_back(false);
+    }
+  }
+
+  int firstbin = 0;
+  int lastbin  = 0;
+
+  for ( unsigned i=0 ; i<used.size() ; i++ ) { 
+    if ( used[i] ) { 
+      firstbin = i;
+      break;
+    }
+  }
+  
+  for ( unsigned i=used.size() ; i-- ; ) { 
+    if ( used[i] ) { 
+      lastbin = i;
+      break;
+    }
+  }
+
+  std::cout << "grid::SetRange() bins chosen " << firstbin << " - " << lastbin << std::endl;
+
+  std::cout << "before setrange: " << m_ref->GetNbinsX() << std::endl;
+
+
+  /// copy the range of the reference histogram
+  if ( limits.size()>0 ) limits.push_back( last );
+  else { 
+    throw grid::exception( "new range does not include any bins"  ); 
+  }
+
+  if ( xScaleFactor!=1 ) { 
+    for ( unsigned i=0 ; i<limits.size(); i++ ) limits[i] *= xScaleFactor;
+  }
+
+  /// delete combined reference if it exists
+
+  if ( m_ref_combined != m_ref ) delete m_ref_combined;
+  
+
+  /// save bins somewhere so can overwrite them 
+  appl::TH1D* h = m_ref;
+  
+  m_ref = new appl::TH1D( h->GetTitle(), h->GetName(), limits.size()-1, &(limits[0]) );
+  
+  for ( int i=0 ; i<m_ref->GetNbinsX() ; i++ ) { 
+    m_ref->SetBinContent( i+1, contents[i] );
+    m_ref->SetBinError( i+1, errors[i] );
+  }
+
+  std::cout << "after combine:  " << m_ref->GetNbinsX() << std::endl;
+
+  /// copy the igrids for the observable bins in the range 
+
+  std::vector<igrid*> grids[appl::MAXGRIDS];
+
+  /// save old grids - this is not a deep copy
+  for ( int iorder=0 ; iorder<m_order ; iorder++ ) grids[iorder] = m_grids[iorder];
+  
+  int _Nobs = m_ref->size();
+
+  for ( int iorder=0 ; iorder<m_order ; iorder++ ) { 
+    m_grids[iorder] = std::vector<igrid*>(_Nobs);
+    int iobs = 0;
+    for ( int ig=0 ; ig<h->GetNbinsX() ; ig++ ) {
+      if ( used[ig] ) m_grids[iorder][iobs++] = grids[iorder][ig];
+      else            delete grids[iorder][ig];                           
+    }
+  }
+  
+  m_ref_combined = m_ref; 
+
+  delete h;
+
+}
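+
+/// note: setRange() keeps only the reference bins whose centres lie strictly
+/// inside (lower,upper), rebuilds the reference from the surviving bin limits
+/// (optionally scaled by xScaleFactor), deletes the igrids outside the range
+/// and discards any previous bin combination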
+
+
+
+
+/// Fixme: need to fix this so that if the m_combine vector has values 
+///        then the range of this vector is also modified
+///        and then the combined reference recalculated:
+///        at the moment, if the range is changed, the combine vector
+///        needs to be recalculated and reset by hand
+
+
+void appl::grid::merge( int i, int j ) {
+  
+  for ( int iorder=0 ; iorder<m_order ; iorder++ ) { 
+    std::cout << "order: " << iorder << std::endl;
+    
+    igrid* g0 = m_grids[iorder][i];
+
+    for ( int k=i+1; k<=j ; k++ ) {
+      std::cout << "add grid: " << k << std::endl; 
+      //    if ( m_ref->size()!=m_grids[iorder].size() ) matched = false;
+      igrid* g1 = m_grids[iorder][i+1];    
+      std::cout << "call igrid::merge()" << std::endl;
+      g0->merge( g1 );
+      m_grids[iorder].erase( m_grids[iorder].begin()+i+1 );
+    }
+  }
+  
+  std::cout << "merging reference..." << std::endl;
+  
+  for ( int k=i+1; k<=j ; k++ ) m_ref->merge_bins( i );
+
+}
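+
+/// note: merge(i,j) folds observable bins i+1..j into bin i for every order
+/// and then merges the corresponding bins of the reference histogram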
+
+
+
+
+
+
+
+appl::igrid* newgrid( appl::igrid* g0, appl::igrid* g1 ) {
+  
+
+  g0->nodesQ2();
+  g1->nodesQ2();
+
+  std::cout << "newgrid() in" << std::endl;
+
+  int NQ2 = g0->getNQ2();
+ 
+  //  double q2min = std::min( g0->getQ2min(), g1->getQ2min() ); 
+  //  double q2max = std::max( g0->getQ2max(), g1->getQ2max() ); 
+
+  //  double _taumin = std::min( g0->taumin(), g1->taumin() ); 
+  //  double _taumax = std::max( g0->taumax(), g1->taumax() ); 
+  
+  
+  //   double dtau0 = g0->taumax() - g0->taumin(); 
+  //   double dtau1 = g1->taumax() - g1->taumin(); 
+  
+  //   if ( g0->taumin() < g1->taumin() ) { 
+  //   }
+  
+  int nt = (g1->taumax()-g0->taumin())/g0->deltatau() + 2; 
+  // double nt = g1->getNQ2();
+  
+
+  std::cout << "newgrid: " << g0->deltatau() << "  "
+	    << (g0->taumax()-g0->taumin())/g0->deltatau() << "  " << g0->getNQ2() << "  "     
+	    << (g0->taumax()-g0->taumin())/(g0->getNQ2()-1)   << std::endl;
+  
+  double _taumin = g0->taumin();
+  double _taumax = g0->taumin() + (nt-1)*g0->deltatau();  
+  // double _taumax = g0->taumin() + g0->getNQ2()*g0->deltatau();  
+  
+  double q2min = g1->fQ2( _taumin );
+  double q2max = g0->fQ2( _taumax );
+  
+  //  NQ2 = (_taumax - _taumin)/(g0->deltatau()-1);  
+  NQ2 = nt;
+
+  //  if ( delta1<delta0 ) delta = delta1;
+  
+  //  NQ2 = int( (std::log(q2max)-std::log(q2min))/delta + 0.99 );
+  
+  std::cout << "new Q2: " << NQ2 << "\t" << q2min << " " << q2max << std::endl;
+  
+  
+
+  int Nx1 = g0->getNx1();
+  
+  double x1min = g0->getx1min();
+  double x1max = g0->getx1max();
+  
+  {
+    double delta0 = (std::log(g0->getx1max()) - std::log(g0->getx1min()))/(g0->getNx1()-1); 
+    double delta1 = (std::log(g1->getx1max()) - std::log(g1->getx1min()))/(g1->getNx1()-1); 
+    
+    double delta = delta0;
+  
+    if ( delta1<delta0 ) delta = delta1;
+    
+    Nx1 = int( (std::log(x1max)-std::log(x1min))/delta + 1.5 );
+
+    std::cout << "new x1: " << Nx1 << std::endl;
+
+  }
+
+
+  int Nx2 = 0;
+
+  double x2min = std::min( g0->getx2min(), g1->getx2min() ); 
+  double x2max = std::max( g0->getx2max(), g1->getx2max() ); 
+
+  {
+    double delta0 = (std::log(g0->getx2max()) - std::log(g0->getx2min()))/(g0->getNx2()-1); 
+    double delta1 = (std::log(g1->getx2max()) - std::log(g1->getx2min()))/(g1->getNx2()-1); 
+    
+    double delta = delta0;
+    
+    if ( delta1<delta0 ) delta = delta1;
+    
+    Nx2 = int( (std::log(x2max)-std::log(x2min))/delta + 1.5 );
+
+    std::cout << "new x2: " << Nx2 << std::endl;
+
+  }
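+
+  /// the counts above keep the finer of the two original log spacings:
+  /// with (illustrative numbers) delta0 = 0.05 and delta1 = 0.04 we get
+  /// delta = 0.04, so a range of log(xmax/xmin) = 1.0 gives
+  /// Nx = int( 1.0/0.04 + 1.5 ) = 26 nodes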
+
+
+  int Nx = Nx1;
+  double xmin = x1min;
+  double xmax = x1max;
+
+
+  if ( Nx2>Nx1 ) Nx = Nx2;
+  if ( x1min>x2min ) xmin = x2min;
+  if ( x1max<x2max ) xmax = x2max;
+
+  std::cout << "Q2 " << NQ2 << " " << q2min << " " <<  q2max << " " <<  g0->tauorder() << " " 
+	    << Nx << " " << xmin << " " <<  xmax << "  "
+	    << g0->yorder() << " " 
+	    << g0->transform() << " "  
+	    << g0->qtransform() << " "  
+	    << g0->SubProcesses() << " " << g0->isDISgrid() << std::endl;
+
+#if 0
+  appl::igrid* ig = new appl::igrid( NQ2, q2min, q2max, g0->tauorder(), 
+				     Nx, xmin, xmax, g0->yorder(), 
+				     g0->transform(), g0->qtransform(),
+				     g0->SubProcesses(), g0->isDISgrid() );
+#endif 
+
+  appl::igrid* ig = new appl::igrid( NQ2, q2min, q2max, 5, 
+				     Nx, xmin, xmax, 5, 
+				     g0->transform(), g0->qtransform(),
+				     g0->SubProcesses(), g0->isDISgrid() );
+
+  ig->nodesQ2();
+
+
+  std::cout << "new grid: " << ig << std::endl;
+
+  std::cout << "newgrid() out" << std::endl;
+
+
+  return ig;
+
+} 
+
+
+
+
+#if 0
+
+
+
+appl::igrid* newgrid( appl::igrid* g0, appl::igrid* g1 ) {
+  
+  std::cout << "newgrid() in" << std::endl;
+
+  int NQ2 = 0;
+ 
+  double q2min = std::min( g0->getQ2min(), g1->getQ2min() ); 
+  double q2max = std::max( g0->getQ2max(), g1->getQ2max() ); 
+
+  //  double taumin = std::min( g0->gettaumin(), g1->gettaumin() ); 
+  //  double taumax = std::max( g0->gettaumax(), g1->gettaumax() ); 
+  
+  {  
+    double delta0 = (std::log(g0->getQ2max()) - std::log(g0->getQ2min()))/g0->getNQ2(); 
+    double delta1 = (std::log(g1->getQ2max()) - std::log(g1->getQ2min()))/g1->getNQ2(); 
+    
+    double delta = delta0;
+    
+    if ( delta1<delta0 ) delta = delta1;
+    
+    NQ2 = int( (std::log(q2max)-std::log(q2min))/delta + 0.99 );
+
+    std::cout << "new Q2: " << NQ2 << std::endl;
+
+  }
+  
+
+  int Nx1 = 0;
+  
+  double x1min = std::min( g0->getx1min(), g1->getx1min() ); 
+  double x1max = std::max( g0->getx1max(), g1->getx1max() ); 
+  
+  {
+    double delta0 = (std::log(g0->getx1max()) - std::log(g0->getx1min()))/g0->getNx1(); 
+    double delta1 = (std::log(g1->getx1max()) - std::log(g1->getx1min()))/g1->getNx1(); 
+    
+    double delta = delta0;
+  
+    if ( delta1<delta0 ) delta = delta1;
+    
+    Nx1 = int( (std::log(x1max)-std::log(x1min))/delta + 0.99 );
+
+    std::cout << "new x1: " << Nx1 << std::endl;
+
+  }
+
+
+  int Nx2 = 0;
+
+  double x2min = std::min( g0->getx2min(), g1->getx2min() ); 
+  double x2max = std::max( g0->getx2max(), g1->getx2max() ); 
+
+  {
+    double delta0 = (std::log(g0->getx2max()) - std::log(g0->getx2min()))/g0->getNx2(); 
+    double delta1 = (std::log(g1->getx2max()) - std::log(g1->getx2min()))/g1->getNx2(); 
+    
+    double delta = delta0;
+    
+    if ( delta1<delta0 ) delta = delta1;
+    
+    Nx2 = int( (std::log(x2max)-std::log(x2min))/delta + 0.99 );
+
+    std::cout << "new x2: " << Nx2 << std::endl;
+
+  }
+
+
+  int Nx = Nx1;
+  double xmin = x1min;
+  double xmax = x1max;
+
+
+  if ( Nx2>Nx1 ) Nx = Nx1;
+  if ( x1min>x2min ) xmin = x2min;
+  if ( x1max<x2max ) xmax = x2max;
+
+  std::cout << "Q2 " << NQ2 << " " << q2min << " " <<  q2max << " " <<  g0->tauorder() << " " 
+	    << Nx << " " << xmin << " " <<  xmax << "  "
+	    << g0->yorder() << " " 
+	    << g0->transform() << " "  
+	    << g0->qtransform() << " "  
+	    << g0->SubProcesses() << " " << g0->isDISgrid() << std::endl;
+
+#if 0
+  appl::igrid* ig = new appl::igrid( NQ2, q2min, q2max, g0->tauorder(), 
+				     Nx, xmin, xmax, g0->yorder(), 
+				     g0->transform(), g0->qtransform(),
+				     g0->SubProcesses(), g0->isDISgrid() );
+#endif 
+
+  appl::igrid* ig = new appl::igrid( 2*NQ2, q2min, q2max, 5, 
+				     2*Nx, xmin, xmax, 5, 
+				     g0->transform(), g0->qtransform(),
+				     g0->SubProcesses(), g0->isDISgrid() );
+
+  std::cout << "new grid: " << ig << std::endl;
+
+  std::cout << "newgrid() out" << std::endl;
+
+
+  return ig;
+
+} 
+
+
+
+appl::igrid* limits( appl::igrid* g0, appl::igrid* g1 ) { 
+
+
+  double q2min = std::min( g0->getQ2min(), g1->getQ2min() ); 
+  double q2max = std::max( g0->getQ2max(), g1->getQ2max() ); 
+    
+  double x1min = std::min( g0->getx1min(), g1->getx1min() ); 
+  double x1max = std::max( g0->getx1max(), g1->getx1max() ); 
+  
+  double x2min = std::min( g0->getx2min(), g1->getx2min() ); 
+  double x2max = std::max( g0->getx2max(), g1->getx2max() ); 
+  
+
+  double nQ2 = newN( q2min, q2max,  = (std::log10(hi)-std::log10(lo))/N;
+  
+  std::cout << "delta: " << delta << std::endl;
+    
+
+
+
+    std::cout << "\t" << q2min << " " << q2max << "\t::\t" 
+	      << x1min << " " << x1max << " :: "  
+	      << x2min << " " << x2max << std::endl;  
+
+    std::cout << "delta: " << (std::log10(q2max)-std::log10(q2min))/g0->getNQ2() << std::endl;
+    std::cout << "delta: " << (std::log10(q2max)-std::log10(q2min))/delta << std::endl;
+
+    double ndelta = (std::log10(q2max)-std::log10(q2min))/delta;
+    
+    int ndel = int(ndelta+0.99);
+      
+    if ( ndel>2*g0->getNQ2() ) ndel = int( (ndel+1)*0.5 );
+
+
+}
+
+#endif
+
+
+
+void appl::grid::merge_bins( size_t i ) { 
+  
+  std::cout << "grid::merge_bins()`" << std::endl; 
+
+  int n = m_ref->size();
+
+  if ( n<2 ) return;
+
+  m_combine.clear();
+
+  std::cout << "ref::size() " << m_ref->size() << " " << m_ref_combined->size() << std::endl;
+
+  //  bool matched = true;
+
+  for ( int iorder=0 ; iorder<m_order ; iorder++ ) { 
+
+    //    if ( m_ref->size()!=m_grids[iorder].size() ) matched = false;
+
+    std::cout << "limits\n";
+
+    igrid* g0 = m_grids[iorder][i];
+    igrid* g1 = m_grids[iorder][i+1];    
+
+    std::cout << "g0: \t";
+    std::cout << g0->SubProcesses() << "\t::\t";  
+    std::cout << g0->getNQ2() << "\t";  
+    std::cout << g0->getQ2min() << "\t";  
+    std::cout << g0->getQ2max() << "\t::\t";  
+
+    std::cout << g0->getNx1() << "\t";  
+    std::cout << g0->getx1min() << "\t";  
+    std::cout << g0->getx1max() << "\t";  
+
+    std::cout << g0->getNx2() << "\t";  
+    std::cout << g0->getx2min() << "\t";  
+    std::cout << g0->getx2max() << "\n";  
+
+    std::cout << "g1: \t";
+    std::cout << g1->SubProcesses() << "\t::\t";  
+    std::cout << g1->getNQ2() << "\t";  
+    std::cout << g1->getQ2min() << "\t";  
+    std::cout << g1->getQ2max() << "\t::\t";  
+
+    std::cout << g1->getNx1() << "\t";  
+    std::cout << g1->getx1min() << "\t";  
+    std::cout << g1->getx1max() << "\t";  
+
+    std::cout << g1->getNx2() << "\t";  
+    std::cout << g1->getx2min() << "\t";  
+    std::cout << g1->getx2max() << "\n";  
+
+    double delta = (std::log10(g0->getQ2max())-std::log10(g0->getQ2min()))/g0->getNQ2();
+    
+    std::cout << "delta: " << delta << std::endl;
+    
+    double q2min = std::min( g0->getQ2min(), g1->getQ2min() ); 
+    double q2max = std::max( g0->getQ2max(), g1->getQ2max() ); 
+
+    double x1min = std::min( g0->getx1min(), g1->getx1min() ); 
+    double x1max = std::max( g0->getx1max(), g1->getx1max() ); 
+
+    double x2min = std::min( g0->getx2min(), g1->getx2min() ); 
+    double x2max = std::max( g0->getx2max(), g1->getx2max() ); 
+
+    std::cout << "ranges:\t" << q2min << " " << q2max << "\t::\t" 
+	      << x1min << " " << x1max << " :: "  
+	      << x2min << " " << x2max << std::endl;  
+
+    std::cout << "g0: " << *g0 << std::endl;
+    std::cout << "g1: " << *g1 << std::endl;
+
+#if 0
+    g0->nodesQ2();
+    g1->nodesQ2();
+
+    g0->nodesx1();
+    g1->nodesx1();
+
+    g0->nodesx2();
+    g1->nodesx2();
+#endif
+
+    appl::igrid* ng  = newgrid( g0, g1 ); 
+    appl::igrid* ng1 = newgrid( g0, g1 ); 
+    
+    std::cout << "new grid: \nng: " << *ng << std::endl;  
+
+    g0->remap( ng );
+    g1->remap( ng1 );
+
+    m_grids[iorder][i]   = ng;
+    m_grids[iorder][i+1] = ng1;
+    
+    // g0 and g1 have now been remapped onto the new grids, so delete the originals
+
+    delete g0; 
+    delete g1; 
+
+    // m_grids[iorder].erase( m_grids[iorder].begin()+i+1 );
+  }
+
+#if 0
+
+    return;
+
+    //    g0->add( g1 );
+    //    delete g1;
+    /// Fixme: should really get the superset of the x and Q2 ranges, 
+    ///        and then remap both grids on to that
+    g1->add( g0 );
+    delete g0;
+    m_grids[iorder][i] = g1;
+    m_grids[iorder].erase( m_grids[iorder].begin()+i+1 );
+  }
+#endif
+
+  //  std::cout << "merge_bins:: don't match - need to handle combinations" << std::endl;
+
+  //  m_ref->merge_bins( i );
+
+}
+
+
+
+
+
+/// methods to handle the documentation
+void appl::grid::setDocumentation(const std::string& s) { m_documentation = s; }
+void appl::grid::addDocumentation(const std::string& s) {   
+  if ( m_documentation.size() ) m_documentation += s;
+  else                          setDocumentation(s);    
+}
+
+
+
+void appl::grid::include_photon( bool b ) {
+  m_photon = b;
+  for ( int iorder=0 ; iorder<m_order ; iorder++ ) { 
+    for ( int iobs=0 ; iobs<Nobs() ; iobs++ ) {
+      m_grids[iorder][iobs]->include_photon( b ); 
+    }
+  }
+}
+
+
+/// methods to handle bin-by-bin corrections
+
+/// add a correction as a std::vector
+void appl::grid::addCorrection( std::vector<double>& v, const std::string& label, bool ) {
+  //  std::cout << "addCorrections(vector) " << v.size() << " " << m_ref->GetNbinsX() << std::endl;
+  if ( v.size()==unsigned(m_ref->GetNbinsX()) || v.size()==unsigned(m_ref_combined->GetNbinsX()) ) {
+    m_corrections.push_back( *getReference());
+    m_corrections.back().name() = label;
+    m_corrections.back().set( v );
+    m_correctionLabels.push_back(label);
+    m_applyCorrection.push_back(false);
+    ///  std::cout << "appl::grid::addCorrection(vector) now " << m_corrections.size() << " corrections" << std::endl;
+  }
+}
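+
+/// illustrative use (a sketch; setApplyCorrection() is assumed to be the
+/// corresponding setter declared in the header):
+///
+///   std::vector<double> kfac( g.Nobs(), 1.02 );   /// flat 2% correction
+///   g.addCorrection( kfac, "kfactor" );
+///   g.setApplyCorrection( 0 );                    /// enable before convoluting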
+
+
+/// add a correction by histogram
+void appl::grid::addCorrection(appl::TH1D* h, const std::string& label, double scale, bool ) {
+  
+  appl::TH1D* hobs = 0;
+  
+  if      ( h->GetNbinsX()==m_ref->GetNbinsX() )          hobs = m_ref;
+  else if ( h->GetNbinsX()==m_ref_combined->GetNbinsX() ) hobs = m_ref_combined;
+
+
+  if ( hobs ) { 
+    for ( int i=1 ; i<=h->GetNbinsX()+1 ; i++ ) { 
+      if ( std::fabs(h->GetBinLowEdge(i+1)*scale-hobs->GetBinLowEdge(i+1))>1e-10 ) { 
+	std::cerr << "bins " << h->GetBinLowEdge(i+1) << " " << hobs->GetBinLowEdge(i+1) << std::endl; 
+	std::cerr << "grid::addCorrection(appl::TH1D* h): bin mismatch, not adding correction" << std::endl;
+	return;
+      }
+    }
+
+    std::vector<double> v(h->GetNbinsX());
+    for ( int i=0 ; i<h->GetNbinsX() ; i++ ) v[i] = h->GetBinContent(i+1);
+    if ( label=="" ) addCorrection(v, h->GetName());
+    else             addCorrection(v, label);
+  }
+  else { 
+    std::cerr << "grid::addCorrection(appl::TH1D* h): bin mismatch, not adding correction" << std::endl;
+  }
+  
+}
+
+
+
+// find the number of words used for storage
+int appl::grid::size() const { 
+    int _size = 0;
+    for( int iorder=0 ; iorder<m_order ; iorder++ ) {
+      for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) _size += m_grids[iorder][iobs]->size();
+    }
+    return _size;
+}
+
+
+/// apply corrections to a std::vector
+void appl::grid::applyCorrections(std::vector<double>& v, std::vector<bool>& applied) {
+ 
+  if ( applied.size()!=m_corrections.size() ) throw grid::exception( "wrong number of corrections expected" ); 
+ 
+  for ( unsigned i=m_corrections.size() ; i-- ; ) { 
+ 
+    const std::vector<double>& corry = m_corrections[i].y();
+    
+    if ( applied[i] || v.size()!=corry.size() ) continue; /// correction applied already or wrong size     
+
+    for ( unsigned j=v.size() ; j-- ; ) v[j] *= corry[j];
+    applied[i] = true;
+  }
+  //  std::cout << "grid::applyCorrections(vector) done" << std::endl;
+}
+
+
+
+
+/// apply correction to a std::vector
+bool appl::grid::applyCorrection(unsigned i, std::vector<double>& v) {
+
+  if ( i>=m_corrections.size() ) throw grid::exception( "correction index out of range"  ); 
+ 
+  const std::vector<double>& correction = m_corrections[i].y();
+
+  if ( v.size()!=correction.size() ) return false; /// wrong size
+
+  for ( unsigned k=v.size() ; k-- ; ) v[k] *= correction[k];
+  return true;
+}
+
+
+
+
+
+/// do a deep comparison of all the different sub processes - if any 
+/// are the same, then remove them
+void appl::grid::shrink(const std::string& name, int ckmcharge) { 
+
+  std::cout << "appl::grid::shrink()" << std::endl;
+
+  std::string label[3] = { "LO", "NLO", "NNLO" };
+
+  std::string genpdfname="";
+
+  std::vector<std::vector<int> > keep(m_order);
+
+  bool found = false;
+
+
+  /// loop over orders 
+  for( int iorder=0 ; iorder<m_order ; iorder++ ) {
+
+    std::cout << "appl::grid::shrink() order " << iorder << std::endl;
+    
+    std::vector< std::vector<int> > pdf_combinations;
+    pdf_combinations.reserve( Nobs_internal() );
+    
+    //    std::cout << "appl::grid::shrink() observable bins " << Nobs_internal() << std::endl;
+
+    /// ... for each observable bin ...
+ 
+    for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) { 
+      
+      //      std::cout << "shrink() order: " << iorder << "\t obs: " << iobs;
+      
+      igrid* ig = m_grids[iorder][iobs];
+    
+      std::map< int, std::vector<int> > same;
+
+      std::set<int> duplicates;
+
+      std::set<int> empty;
+
+      for ( int i=0 ; i<ig->SubProcesses() ; i++ ) { 
+
+	if ( duplicates.find(i)!=duplicates.end() ) continue;
+
+	int isize = ig->weightgrid(i)->size();
+
+	if ( isize==0 ) { 
+	  empty.insert(i); 
+	  continue;
+	}
+
+	//	std::cout << i << " : ";
+
+	keep[iorder].push_back( i );
+
+	std::vector<int> vec; 
+	
+	found = true;
+
+	for ( int j=i+1 ; j<ig->SubProcesses() ; j++ ) {
+
+	  if ( duplicates.find(j)!=duplicates.end() ) continue;
+	  if ( empty.find(j)!=empty.end() ) continue;
+
+	  int jsize = ig->weightgrid(j)->size();
+	  
+	  if ( jsize==0 ) { 
+	    empty.insert(j); 
+	    continue;
+	  }
+	  
+	  // std::cout << "\t" << j << ":" << jsize;
+	   
+	  if ( (*ig->weightgrid(i)) == (*ig->weightgrid(j)) ) { 
+	    //  std::cout << "!"; 
+	    vec.push_back(j);
+	    duplicates.insert(j);
+	  }
+
+	}
+	//	std::cout << std::endl;
+
+	same.insert( std::map< int, std::vector<int> >::value_type( i, vec ) );
+
+      }
+
+
+      if ( same.empty() ) continue;
+
+      //      for ( int ik=0 ; ik<m_order ; ik++ ) std::cout << m_genpdf[iorder]->name() << std::endl;
+
+      lumi_pdf*  _pdf = 0;
+      if ( m_genpdf[iorder]->name().find(".config")!=std::string::npos ){ 
+	_pdf = dynamic_cast<lumi_pdf*>(m_genpdf[iorder]);
+	if ( _pdf ) std::cout << "read lumi pdf: " << *_pdf << std::endl; 
+      }
+      else { 
+	std::cerr << __FUNCTION__ << "\tpdf not found:" << m_genpdf[iorder]->name() << std::endl;
+	return;
+      }
+
+      std::vector<combination> combinations;
+
+      int i=0;
+
+      std::map< int, std::vector<int> >::iterator itr  = same.begin();
+      std::map< int, std::vector<int> >::iterator iend = same.end();
+
+      while ( itr!=iend ) { 
+
+	std::vector<int>& v = itr->second;
+	//	std::cout << "\t" << i++ << "\tproc: " << itr->first << ":" << sizeitr->second << "\t" << itr->second << " ( " << (*_pdf)[itr->first] << " )" << std::endl;
+	//	std::cout << "\t" << i << "\tproc: " << itr->first << ":" << sizeitr->second << "\t" << (*_pdf)[itr->first] << std::endl;
+	//	for ( unsigned iv=0 ; iv<v.size() ; iv++ ) std::cout << "\t\t\t" << v[iv] << "\t" << (*_pdf)[v[iv]] << std::endl;
+
+	std::vector<int> c(2);
+	c[0] = i;
+	c[1] = 0;
+
+	const combination& comb = (*_pdf)[itr->first]; 
+
+	for ( unsigned ic=0 ; ic<comb.size() ; ic++ ) { 
+	  c[1]++;
+	  c.push_back( comb[ic].first );
+	  c.push_back( comb[ic].second );
+	}
+
+	for ( unsigned iv=0 ; iv<v.size() ; iv++ ) { 
+
+	  const combination& comb = (*_pdf)[v[iv]]; 
+	  
+	  for ( unsigned ic=0 ; ic<comb.size() ; ic++ ) { 
+	    c[1]++;
+	    c.push_back( comb[ic].first );
+	    c.push_back( comb[ic].second );
+	  }
+	}
+
+	combinations.push_back( combination( c ) );
+
+	i++;
+	itr++;
+      }
+
+      //      std::set< int>::iterator eitr  = empty.begin();
+      //      std::set< int>::iterator eend  = empty.end();
+
+      //      std::cout << "\tempty: " << empty.size() << " sub-processes";
+      //      while ( eitr!=eend ) std::cout << " " << (*eitr++);
+      //      std::cout << std::endl; 
+     
+      lumi_pdf newpdf( "newpdf.config", combinations, 0 );
+
+      pdf_combinations.push_back( newpdf.serialise() );
+      
+      //      std::cout << newpdf << std::endl;
+
+      if ( found ) break;
+
+    }
+
+    bool common = true;
+    for ( unsigned ipdf=1 ; ipdf<pdf_combinations.size() ; ipdf++ ) { 
+    
+      if ( pdf_combinations[ipdf].size()>0 ) {
+	if ( pdf_combinations[ipdf]!=pdf_combinations[ipdf-1] ) { 
+	  std::cout << "pdfs " << ipdf << " and " << ipdf-1 << " don't match" << std::endl; 
+	  // << lumi_pdf("duff.config", pdf_combinations[ipdf]) 
+	  common = false;
+	}  
+      } 
+    }      
+    
+    /// create the actual lumi_pdfs from these combinations
+
+    if ( common && pdf_combinations.size()>0 ) { 
+      pdf_combinations[0].push_back(ckmcharge);
+
+      lumi_pdf lpdf( name+label[iorder]+".config",   pdf_combinations[0]);
+
+      std::cout << lpdf.name() << std::endl;
+
+      lpdf.write( lpdf.name() );
+
+      if ( genpdfname.size() ) genpdfname += ":";
+      genpdfname += name+label[iorder]+".config";
+    }  
+  }
+
+  std::cout << "appl::grid::shrink() genpdfname " << genpdfname << std::endl;
+
+  //  if ( addpdf(m_genpdfname) )  findgenpdf( m_genpdfname );
+
+  /// hooray!! here we have the optimised pdf combinations written out, so now we 
+  /// need to delete the duplicated (or empty) grids ...
+
+  /// loop over the igrids, telling each grid which processes to keep
+  
+  for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) { 
+    for( int iorder=0 ; iorder<m_order ; iorder++ ) {
+      //      std::cout << "appl::grid::shrink()  obs " << iobs << "\torder " << iorder << std::endl;       
+      m_grids[iorder][iobs]->shrink( keep[iorder] );
+    }
+  }
+
+  /// ... and need to replace the genpdf with these new ones
+  
+  m_genpdfname = genpdfname;
+  addpdf( m_genpdfname );
+  findgenpdf( genpdfname );
+
+  std::cout << "appl::grid::shrink()  new " 
+	    << "\t" << m_genpdf[0]->name() << ":" << m_genpdf[0]->size() 
+	    << "\t" << m_genpdf[1]->name() << ":" << m_genpdf[1]->size() 
+	    <<  std::endl;
+
+}
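+
+/// typical use (a sketch): g.shrink( "mygrid-", ckmcharge ) writes the reduced
+/// pdf combination files mygrid-LO.config, mygrid-NLO.config, ..., drops the
+/// duplicated or empty subprocess grids in place and switches the grid over to
+/// the new genpdf combination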
+
+
+
+
+
+
+
+/// do a deep comparison of all the different sub processes - if any 
+/// are the same, then remove them
+void appl::grid::fixbroken(const std::string& name ) { 
+
+  std::string label[3] = { "LO", "NLO", "NNLO" };
+
+  std::string genpdfname="";
+
+  std::cout << "appl::grid::fixbroken(): " << name << std::endl; 
+
+  std::vector<std::vector<int> > keep(m_order);
+
+
+  const lumi_pdf* _pdf[3];
+
+  for ( int iorder=0 ; iorder<m_order ; iorder++ ) { 
+   
+    const lumi_pdf*& pdf = _pdf[iorder];
+
+    pdf = dynamic_cast<const lumi_pdf*>( genpdf(iorder) );
+
+    //    std::cout << "\tdetermined duplicate sets :  order: " << iorder << "\tnum lumi: " << pdf->size() << std::endl;
+        
+    std::vector<combination> c(pdf->size());
+    std::vector<double>      sums(pdf->size());
+
+    for ( unsigned ic=0 ; ic<pdf->size() ; ic++ ) c[ic] = pdf->at(ic);  
+
+    std::vector<unsigned> togo;
+
+    for ( unsigned ic=0 ; ic<pdf->size() ; ic++ ) {
+      c[ic] = pdf->at(ic);  
+
+      combination& cc = c[ic];
+
+      std::vector<unsigned> to(cc.size(),99999);
+
+      if ( cc.size()>1 ) { 
+	std::cout << "combination: " << cc;
+	
+	unsigned copy_count = 0;
+ 
+	for ( unsigned i=0 ; i<cc.size() ; i++ ) {
+	  for ( unsigned ic0=ic+1 ; ic0<pdf->size() ; ic0++ ) {
+	    if ( c[ic0].size()<2 && c[ic0].contains( cc[i] ) ) { 
+	      std::cout << "\n\t adding " << cc[i] << " from " << ic << " to combination " << ic0 << " " << c[ic0];
+	      copy_count++;
+	      to[i] = ic0;
+	      break;
+	    }
+	  }
+	}
+	
+	bool keep_combination = true;
+	if ( copy_count == cc.size() ) keep_combination = false;
+
+	if ( !keep_combination ) std::cout << "\twill delete this combination";
+	std::cout << std::endl;
+
+	if ( !keep_combination ) { 
+	  togo.push_back(ic);
+	  for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) { 	    
+	    for ( unsigned ti=0 ; ti<to.size() ; ti++ ) { 
+	      if ( to[ti]!=99999 ) m_grids[iorder][iobs]->combine_proc( to[ti], ic ); 
+	    }
+	  }
+	}
+
+      } // if ( cc.size()>1 )
+
+    } // loop over parton luminosities
+
+
+    /// work out which parton luminosities are actually duplicates
+
+    std::vector< std::vector<int> > realc( pdf->size(), std::vector<int>(0) );
+
+    std::vector<bool> duplicate( pdf->size(), false );
+
+    for ( unsigned i=0 ; i<pdf->size() ; i++ ) { 
+
+      bool ok=true;
+      for ( unsigned ik=togo.size() ; ik-- ; ) if ( i==togo[ik] ) ok=false;
+      if ( !ok ) continue;
+
+      if ( duplicate[i] ) continue;
+      
+      for ( unsigned j=i+1 ; j<pdf->size() ; j++ ) {
+	if ( i==j ) continue;
+	
+        if ( c[i] == c[j] ) { 
+	  std::cout << "combination " << i << " " << j << " are identical" 
+		    << "\t\tsums: " << sums[i] << "  " << sums[j] << "\t( " << (sums[i]-sums[j]) << " )" << std::endl; 
+	  ok = false;
+	  duplicate[j] = true;
+	  realc[i].push_back(j);
+	}
+      }
+    }
+
+
+    std::vector<combination> actual_combinations;
+    std::vector<int> keep;
+
+    for ( unsigned i=0 ; i<pdf->size() ; i++ ) { 
+
+      bool ok=true;
+      for ( unsigned ik=togo.size() ; ik-- ; ) if ( i==togo[ik] ) ok=false;
+      if ( !ok ) continue;
+
+      if ( !duplicate[i] ) { 
+	actual_combinations.push_back(c[i]);
+	keep.push_back(i);
+      }
+    }  
+
+
+    /// label for the new lumi_pdf
+
+    std::string newconfig = label[iorder]+"-"+pdf->name();
+
+    lumi_pdf lpdf( newconfig, actual_combinations, pdf->getckmcharge() ); 
+
+    lpdf.write( lpdf.name() );
+
+    if ( genpdfname.size() ) genpdfname += ":";
+    genpdfname += newconfig;
+
+    /// actually combine the grids   
+
+    for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) { 
+   
+      for ( unsigned ti=0 ; ti<keep.size() ; ti++ ) { 
+
+	int i = keep[ti];
+
+	for ( unsigned tj=0 ; tj<realc[i].size() ; tj++ ) { 
+	  
+	  int j = realc[i][tj];
+
+	  //	  std::cout << "\t\t combine " << i << " " << j << "\t\t" 
+	  //	    <<  (*m_grids[iorder][iobs]->weightgrid(i))(5,5,5) << " + " 
+	  //	    <<  (*m_grids[iorder][iobs]->weightgrid(j))(5,5,5) << std::endl;
+
+	  m_grids[iorder][iobs]->combine_proc( i, j ); 
+
+	  //	  std::cout << "\t\t combine " << i << " " << j << "\t\t" <<  (*m_grids[iorder][iobs]->weightgrid(i))(5,5,5) << std::endl;
+
+	}
+      }
+     
+
+      m_grids[iorder][iobs]->shrink( keep ); 
+
+    }
+  }
+    
+ 
+  m_genpdfname = genpdfname;
+  addpdf( m_genpdfname );
+  findgenpdf( m_genpdfname );
+
+  std::cout << "appl::grid::fixbroken()  new " 
+	    << "\t" << m_genpdf[0]->name() << ":" << m_genpdf[0]->size(); 
+  if (m_order>1) std::cout << "\t" << m_genpdf[1]->name() << ":" << m_genpdf[1]->size();
+  if (m_order>2) std::cout << "\t" << m_genpdf[2]->name() << ":" << m_genpdf[2]->size();
+  std::cout <<  std::endl;
+
+}
+
+
+
+
+/// add the weights from a src subprocess to some number of other 
+/// destination processes and delete the original
+
+void appl::grid::move( int iorder, int isrc, std::vector<int>& idest ) { 
+
+  lumi_pdf* pdf = dynamic_cast<lumi_pdf*>( genpdf(iorder) );
+  
+  if ( size_t(isrc)>=pdf->size() ) return; 
+
+  int ic = isrc;
+
+  std::cout << "appl::grid::move(): " << std::endl;
+
+  bool ok = false;
+  for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) { 	    
+    for ( size_t it=0 ; it<idest.size() ; it++ ) { 
+      m_grids[iorder][iobs]->combine_proc( idest[it], ic ); 
+      ok = true;
+    }
+    if ( ok ) m_grids[iorder][iobs]->remove( ic );
+  }
+
+  pdf->remove( ic );
+
+  std::cout << "appl::grid::move() "; 
+  for ( int i=0 ; i<m_order ; i++ ) std::cout  << "\t" << m_genpdf[i]->name() << ":" << m_genpdf[i]->size(); 
+  std::cout << std::endl;
+
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+void appl::grid::combineReference(bool force) { 
+  //  std::cout << "grid::combineReference() " << m_ref->GetNbinsX();
+
+  if ( m_combine.empty() ) return;
+
+  if ( force ) { 
+    if ( m_ref_combined ) { 
+      if ( m_ref_combined!=m_ref ) delete m_ref_combined;
+      m_ref_combined = 0;
+    }
+  }
+
+  //  std::cout << "\tcombine " << m_ref_combined << " " << m_ref << std::endl;; 
+
+  if ( m_ref_combined && m_ref_combined!=m_ref) return; 
+
+  std::vector<double> hvec(  m_ref->GetNbinsX(), 0 );
+  std::vector<double> hvece( m_ref->GetNbinsX(), 0 );
+
+
+  for ( int i=m_ref->GetNbinsX() ; i-- ; )  { 
+    hvec[i]  = m_ref->GetBinContent( i+1 );
+    hvece[i] = m_ref->GetBinError( i+1 );
+  }
+
+  combineBins( hvec );
+  combineBins( hvece, 2 );
+
+  std::vector<double> limits(m_combine.size()+1);
+  
+  int i=0;
+  limits[0] = m_ref->GetBinLowEdge(i+1);
+  for ( unsigned ib=0 ; ib<m_combine.size() ; ib++) { 
+    i += m_combine[ib]; 
+    limits[ib+1] = m_ref->GetBinLowEdge(i+1);
+  }
+  
+  /// need to make this a class variable set to 0 so we don't 
+  /// recalculate this every time, only if m_combine changes 
+  appl::TH1D* h = new appl::TH1D("reference", "xsec", m_combine.size(), &limits[0] );
+  h->SetDirectory(0);
+
+  for ( unsigned i=0 ; i<hvec.size() ; i++ ) { 
+    h->SetBinContent( i+1, hvec[i] );
+    h->SetBinError( i+1, hvece[i] );
+  }
+
+  m_ref_combined = h;
+
+  //  std::cout << " -> " << m_ref->GetNbinsX() << std::endl;
+  
+}
+
+
+
+void appl::grid::combineBins(std::vector<double>& hvec, int power ) const { 
+  
+  /// now combine bins if required ...
+  
+  if ( m_combine.size()!=0 ) { 
+    /// need to go through, scaling by bin width, adding and then dividing by bin width again
+    /// in the appl::TH1D* version, will need to recalculate the bin limits to create the new histogram
+    
+    std::vector<double> _hvec(m_combine.size(),0);
+    
+    unsigned nbins = 0;
+
+    unsigned i=0;
+
+    for ( unsigned ic=0 ; ic<m_combine.size() ; ic++ ) { 
+
+      nbins += m_combine[ic];
+
+      if ( nbins>hvec.size() ) throw grid::exception( "too many bins specified for rebinning"  ); 
+
+      double sigma = 0;
+      double width = 0;
+
+      for ( int ib=0 ; ib<m_combine[ic] && i<hvec.size() ; ib++, i++ ) { 
+	double deltaobs = m_ref->GetBinLowEdge(i+2)-m_ref->GetBinLowEdge(i+1);
+	if ( power==1 ) sigma +=  hvec[i]*deltaobs;
+	if ( power==2 ) sigma += (hvec[i]*deltaobs*hvec[i]*deltaobs);
+	width += deltaobs;
+      }
+
+      if ( power==1 ) _hvec[ic] = sigma/width;
+      if ( power==2 ) _hvec[ic] = std::sqrt(sigma)/width;
+    }
+
+    hvec = _hvec;
+  }
+}
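+
+/// worked example (illustrative numbers): combining two bins of width 1.0 and
+/// 3.0 with differential values 4.0 and 2.0 gives
+///   sigma = 4.0*1.0 + 2.0*3.0 = 10.0,  width = 4.0,  combined value = 2.5
+/// for power==2 the width-weighted contributions are added in quadrature instead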
+
+
+std::ostream& operator<<(std::ostream& s, const appl::grid& g) {
+  
+  s << "==================================================" << std::endl;
+  
+  //  s << "appl::grid version " << g.version() << "\t(" << g.subProcesses(0) << " initial states, " << g.Nobs_internal() << " observable bins)" << std::endl;
+
+  std::string basis[5] = {  "-LO, ",  "-NLO, ",  "-NNLO, ", "-Xtra0", "-Xtra1" };  
+  std::string order[appl::MAXGRIDS];
+  for ( int i=0 ; i<appl::MAXGRIDS ; i++ ) { 
+    if ( i<5) order[i] = basis[i];
+    else      order[i] = "-Unknown";
+  }
+
+  s << "appl::grid version " << g.version() << "\t( "; 
+
+  for ( int i=0 ; i<g.nloops()+1 ; i++ ) s << g.subProcesses(i) << order[i];
+  
+  s << "initial states, " << g.Nobs_internal() << " observable bins )" << std::endl;
+  
+  if ( g.isOptimised() ) s << "Optimised grid" << std::endl;
+  
+  if ( g.isSymmetric() ) s << "Symmetrised in x1, x2" << std::endl;
+  else                   s << "Unsymmetrised in x1, x2" << std::endl;
+  
+  if ( g.getNormalised() ) s << "Normalised " << std::endl;
+  
+  s << "leading order of processes  "  << g.leadingOrder() << std::endl;
+  s << "number of loops for grid    "  << g.nloops() << std::endl;   
+  s << "x->y coordinate transform:  "  << g.getTransform() << std::endl;
+
+  s << "genpdf in use: " << g.getGenpdf() << std::endl;
+  s << "--------------------------------------------------" << std::endl;
+  s << "Observable binning: [ " << g.Nobs_internal() 
+    << " bins : " << g.obsmin() << ",  " << g.obsmax() << " ]" << std::endl;
+
+  //  for( int iorder=0 ; iorder<g.nloops()+1 ; iorder++ ) {
+  for( int iorder=0 ; iorder<g.ninternalgrids() ; iorder++ ) {
+    s << "order: " << iorder << "\n";
+    for( int iobs=0 ; iobs<g.Nobs_internal() ; iobs++ ) {
+      s << "  " 
+	<< iobs << "\t" 
+	<< std::setprecision(5) << std::setw(5) << g.getReference()->GetBinLowEdge(iobs+1) << "\t- " 
+	<< std::setprecision(5) << std::setw(5) << g.getReference()->GetBinLowEdge(iobs+2) << "\t"; 
+      s << "   " << *(g.weightgrid(iorder,iobs)) << std::endl;
+    }
+  }
+
+  s << std::endl;
+  
+  return s;
+}
+
+
+void appl::grid::replaceBin( int iobs, grid& g ) { 
+
+  std::cout << "replace bin " << iobs << std::endl;
+ 
+  for ( int iorder=0 ; iorder<m_order ; iorder++ ) { 
+    add_igrid( iobs, iorder, g.m_grids[iorder][iobs] );
+  }
+
+#if 0
+  int iorder = 2;
+  //  for ( int iorder=0 ; iorder<m_order ; iorder++ ) 
+  { 
+    std::cout << "order: " << iorder << std::endl;
+    
+    std::cout <<  m_grids[iorder][iobs] << " " << g.m_grids[iorder][iobs] << std::endl;
+
+    igrid* ig = m_grids[iorder][iobs];
+
+    igrid* hig = new igrid( *ig );
+
+    //    m_grids[iorder][iobs] = 
+    //    m_grids[iorder][iobs]->setparent( this ); 
+  }
+#endif
+
+  std::cout << "fixing reference: " << iobs << std::endl;
+  getReference_internal()->SetBinContent(iobs+1, g.getReference_internal()->GetBinContent(iobs+1) );
+  getReference_internal()->SetBinError(iobs+1, g.getReference_internal()->GetBinError(iobs+1) );
+
+  /// grrr, is this correct - why, why, why, why why oh why can't we 
+  /// just use the correct binning in the first place !!!
+  combineReference(true); 
+}
+
+
+std::vector<correction> appl::grid::corrections() const { 
+  std::vector<correction> corr;
+  for ( size_t i=0 ; i<m_corrections.size() ; i++ ) {
+    const appl::TH1D& h = m_corrections[i];
+    corr.push_back( correction( h.y(), h.name() ) );
+  } 
+  return corr;
+}
+
+
+const appl::TH1D* appl::grid::getReference( int i ) const {
+  if ( i>nloops() ) throw exception( std::string("getReference() index out of range: ") + std::to_string(i)  );
+  return &m_refstore.m_ref_order[i]; 
+}
+
+appl::TH1D* appl::grid::getReference( int i ) { 
+  if ( i>nloops() ) throw exception( std::string("getReference() index out of range: ") + std::to_string(i)   );
+  return &m_refstore.m_ref_order[i];  
+}
+
+
+void appl::grid::setReference( const appl::TH1D* h, int i ) { 
+  if ( i>nloops() ) throw exception( std::string("getReference() index out of range: ") + std::to_string(i)   );
+  m_refstore.m_ref_order[i] = *h;
+}
+
+
+
+void appl::grid::clear() { 
+  for( int i=0 ; i<m_order ; i++ ) {
+    for( int iobs=0 ; iobs<Nobs_internal() ; iobs++ ) m_grids[i][iobs]->clear(); 
+  }
+  m_ref->clear();
+  m_ref_combined->clear();
+}
+
+
+static bool  no_threads = duff();
Index: applgrid/tags/applgrid-01-06-36/src/Splitting.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/Splitting.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/Splitting.h	(revision 2010)
@@ -0,0 +1,31 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    Splitting.h
+ **
+ **     @author  mark sutton
+ **     @date    Tue 24 Mar 2015 14:21:48 CET 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: Splitting.h, v0.0   Tue 24 Mar 2015 14:21:48 CET sutt $
+ **
+ **/
+
+
+#ifndef  SPLITTING_H
+#define  SPLITTING_H
+
+void Splitting(const double& x, const double& Q, double* f, int nLoops=1);
+
+
+#endif  // SPLITTING_H 
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/mcfmwjet_pdf.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/mcfmwjet_pdf.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/mcfmwjet_pdf.cxx	(revision 2010)
@@ -0,0 +1,38 @@
+/**
+ **     @file    mcfmwjet_pdf.cxx
+ **
+ **     @brief   pdf transform functions                   
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: mcfmw_pdf.cxx, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+
+
+#include "appl_grid/appl_pdf.h" 
+
+#include "mcfmwjet_pdf.h"
+
+// fortran callable wrapper
+
+extern "C" void fmcfmwpjet_pdf__(const double* fA, const double* fB, double* H) { 
+  static mcfmwpjet_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+
+
+// fortran callable wrapper
+
+extern "C" void fmcfmwmjet_pdf__(const double* fA, const double* fB, double* H) { 
+  static mcfmwmjet_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/mcfmwc_pdf.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/mcfmwc_pdf.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/mcfmwc_pdf.cxx	(revision 2010)
@@ -0,0 +1,22 @@
+
+#include "appl_grid/appl_pdf.h" 
+
+#include "mcfmwc_pdf.h"
+
+
+// fortran callable wrapper
+
+extern "C" void fmcfmwpc_pdf__(const double* fA, const double* fB, double* H) { 
+  static mcfmwpc_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+
+
+extern "C" void fmcfmwmc_pdf__(const double* fA, const double* fB, double* H) { 
+  static mcfmwmc_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/integral.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/integral.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/integral.cxx	(revision 2010)
@@ -0,0 +1,48 @@
+/**
+ **     @file    integral.cxx
+ **
+ **     @author  mark sutton
+ **     @date    Mon 24 Mar 2014 13:16:48 CET 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: integral.cxx, v0.0   Mon 24 Mar 2014 13:16:48 CET sutt $
+ **
+ **/
+
+
+#include <iostream>
+
+#include "appl_grid/integral.h"
+
+
+namespace appl { 
+
+
+double integral( const std::vector<double>& d, const grid& g ) {
+
+  if ( d.size()!=unsigned(g.Nobs()) ) { 
+    std::cerr << "integral: number of bins doesn't match" << std::endl;
+    return 0;
+  }
+
+  double sum = 0;
+  for ( unsigned i=d.size() ; i-- ; ) {
+    double delta = g.deltaobs(i);
+    sum += d[i]*delta;
+  }
+  
+  return sum;
+} 
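+
+/// i.e. for differential bin values d[i] this returns sum_i d[i]*deltaobs(i);
+/// a sketch of recovering the total cross section (the pdf and alpha_s
+/// wrappers are assumed, not part of this interface):
+///
+///   double total = appl::integral( g.vconvolute( evolvepdf, alphaspdf ), g );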
+
+double integral( const appl::TH1D* h ) {  
+  double in = 0;
+  for ( size_t i=0 ; i<h->size() ; i++ ) { 
+    double width = h->hi(i)-h->lo(i);
+    in += h->y(i)*width;
+  }
+  return in;
+  //  return h->Integral( 1, h->GetNbinsX(), "width" ); 
+}
+
+}
Index: applgrid/tags/applgrid-01-06-36/src/generic_pdf.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/generic_pdf.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/generic_pdf.cxx	(revision 2010)
@@ -0,0 +1,443 @@
+/**
+ **     @file    generic_pdf.cxx
+ **
+ **     @brief   a generic pdf type - to read in the combinations
+ **              for the subprocesses from a file, with the file 
+ **              format determined by Tancredi (details will be 
+ **              filled in as the implementation becomes more complete)
+ **
+ **     @author  mark sutton
+ **     @date    Mon 28 Jan 2013 15:40:45 GMT 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: generic_pdf.cxx, v0.0   Mon 28 Jan 2013 15:40:45 GMT sutt $
+ **
+ **/
+
+
+// #include <stdlib.h>
+
+
+#include <iostream>
+#include <fstream>
+
+
+
+#include "appl_grid/generic_pdf.h"
+
+
+/// don't want a fortran implementation
+//  extern "C" void fgeneric_pdf__(const double* fA, const double* fB, double* H) { 
+//    static generic_pdf pdf;
+//    pdf.evaluate(fA, fB, H);
+//  }
+
+
+// generic_pdf::generic_pdf(const std::string& s, const std::vector<int> combinations)  
+generic_pdf::generic_pdf(const std::string& s)
+  : appl_pdf(s), 
+    m_initialised(false), 
+    m_ckmflag(false)
+ {
+
+   /// need to check has the appropriate form, 
+   if ( s!="" ) { 
+     if ( s.find(".dat")==std::string::npos ) throw exception( std::string("generic_pdf() file ") + s + " does not have .dat extension - will not be able to save this grid to file" );
+     else initialise( s );
+   }
+
+   //debug=false;
+   std::cout << " initialize generic pdf " << s << " debug= " << m_debug << std::endl;
+} 
+
+
+
+void generic_pdf::initialise(const std::string& filename) { 
+
+
+  if ( m_initialised ) { 
+    std::cerr << "generic_pdf::initialise() already initialised" << std::endl;
+    return;
+  }
+
+
+  /// first rename me with an appropriate file name
+  /// we chose the structure "filename.dat"
+  /// so the grid can tell it is generic, and can also 
+  /// store the filename 
+  /// NB: at some point we will encode the file contents in 
+  ///     the grid itself  
+  rename(filename);
+
+  /// add tancredi's code here ...
+
+  std::cout << "generic_pdf::initialise() " << name() << std::endl; 
+
+  m_initialised = true;
+
+
+  /// tancredi's code 
+
+  m_debug=false;
+  
+  currentprocess=-1;
+  currentsubprocess=-1;
+  m_nQuark=6; // offset to go from 0,..,14 to  -6,..,0.,..,6 (7 including photon) 
+
+#if 0
+  std::vector<std::string> names;
+  names.push_back("topbar");
+  names.push_back("beautybar");
+  names.push_back("charmbar");
+  names.push_back("strangebar");
+  names.push_back("upbar");
+  names.push_back("downbar");
+  names.push_back("gluon");
+  names.push_back("down");
+  names.push_back("up");
+  names.push_back("strange");
+  names.push_back("charm");
+  names.push_back("beauty");
+  names.push_back("top");
+ 
+  std::string names[14] = { "topbar", "beautybar", "charmbar", "strangebar", "upbar", "downbar", 
+			    "gluon", 
+			    "down", "up", "strange" "charm", "beauty", "top", "photon" };
+#endif
+
+  std::string _names[14] = { "tbar", "bbar", "cbar", "sbar", "ubar", "dbar", 
+		             "g", "d", "u", "s", "c", "b", "t", "pht" };
+
+ 
+  std::string* names = (&_names[0])+6; /// so we can use -6..6 indexing
+
+  for (int i=-m_nQuark; i<=m_nQuark+1; i++) {
+    flavname[i] = names[i];
+    iflavour[names[i]]=i;    
+    if (m_debug)
+      std::cout<<"generic_pdf "<<" iflavour[" <<names[i] << "]= " << iflavour[names[i]] << std::endl;
+  }
+  
+  //pdfsumtypes1.clear();
+  //pdfsumtypes2.clear();
+  
+  Flav1.clear();
+  Flav2.clear();
+  
+  ReadSubprocessSteering(filename);
+  
+  PrintSubprocess();
+  
+  flavourtype.clear();
+  int ifltype = -99;  
+  for(int ifl = -m_nQuark; ifl <= m_nQuark+1; ifl++) {
+    if ((ifl== 2)||(ifl== 4)||(ifl== 6))  ifltype =  2; 
+    if ((ifl==-2)||(ifl==-4)||(ifl==-6))  ifltype = -2; 
+    if ((ifl== 1)||(ifl== 3)||(ifl== 5))  ifltype =  1;
+    if ((ifl==-1)||(ifl==-3)||(ifl==-5))  ifltype = -1;
+    
+    if (ifl==0 || ifl==7 ) ifltype= 0;
+    flavourtype[ifl]=ifltype;
+  }
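+
+  /// i.e. flavours are collapsed onto generic types: up-type quarks map to
+  /// +/-2, down-type quarks to +/-1 and the gluon and photon to 0, which is
+  /// how evaluate() indexes the summed pdfA/pdfB arrays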
+
+  //this->MakeCkm();
+  //if (debug) PrintFlavourMap();
+  
+  const int nsub = GetSubProcessNumber();
+  
+  if (m_debug) 
+    std::cout << "generic_pdf::initialize nsub = " << nsub << std::endl;
+
+} 
+
+
+void  generic_pdf::evaluate(const double* fA, const double* fB, double* H) const {  
+  //  fill this in with tancredi's code ...
+  if ( !m_initialised ) {
+    std::cout << "  generic_pdf::evaluate not initialised " << std::endl;
+    return; 
+  }
+  
+  /// offset the input arrays so we can address them from -n .. n with 0 = gluon  
+  //  double* fA = _fA+m_nQuark; 
+  //  double* fB = _fB+m_nQuark; 
+  
+  fA += 6;
+  fB += 6;
+
+  if (m_debug) std::cout << "generic_pdf:evaluate " << std::endl;
+  
+  //this->PrintFlavourMap();
+  
+  /*
+    if (debug) {
+    for(int i = -m_nQuark; i <= m_nQuark; i++) {
+    std::cout << " fA[" << i << "]= " << fA[i+m_nQuark]
+    << " fB[" << i << "]= " << fB[i+m_nQuark] 
+    << std::endl;
+    }
+    }  
+  */
+
+  std::vector<double> pdfA(14,0); //hadron A
+  std::vector<double> pdfB(14,0); //hadron B
+
+
+  // reset pdf sums per flavour -2,-1,0,1,2 downbar, upbar, gluon, up, down
+  // these are the pdf weights x by the ckm matrix 
+  //  pdfA.clear();
+  //  pdfB.clear();
+  
+  for ( int i=-2; i<=2 ; i++ ) { 
+    pdfA[i]=0.; 
+    pdfB[i]=0.;
+  }
+  
+  pdfA[0]=fA[0]; // gluon
+  pdfB[0]=fB[0];
+  
+  /*
+    std::vector<double> myckmsum = std::vector<double>(myckm2.size(),0);
+    for ( unsigned i=0 ; i<myckm2.size() ; i++ ) { 
+    for ( unsigned j=0 ; j<myckm2[i].size() ; j++ ) myckmsum[i] += myckm2[i][j]; 
+    }
+  */
+  
+  /// offset so it can be addressed from -n .. n as with the quark ids
+  const double* _ckmsum = (&m_ckmsum[0])+m_nQuark;
+  
+  if ( m_ckmflag ) {
+    /// do we need the ckm matrix ??
+    for(int i=-6; i <=7; i++) {
+      int j=flavourtype.find(i)->second;
+      if (j==0) continue;
+      pdfA[j] += fA[i]*_ckmsum[j];
+      pdfB[j] += fB[i]*_ckmsum[j];
+      if (m_debug)
+	std::cout << " i= " << i 
+		  << " j= " << j
+		  << " fA[" << i << "]= " << fA[i]
+		  << " fB[" << i << "]= " << fB[i]
+		  << " pdfA[" << j << "]= " << pdfA[j] << " pdfB[" << j << "]= " << pdfB[j]
+		  << " m_ckmsum[" << j << "]= " << _ckmsum[j]
+		  << std::endl;
+    }
+  }
+  else { 
+    for(int i=-6; i <=7; i++) {
+      int j=flavourtype.find(i)->second;
+      if (j==0) continue;
+      pdfA[j] += fA[i];
+      pdfB[j] += fB[i];
+      if (m_debug)
+	std::cout<<" i= "<<i<<" j= "<<j
+		 <<" fA["<<i<<"]= "<<fA[i]
+		 <<" fB["<<i<<"]= "<<fB[i]
+		 <<" pdfA["<<j<<"]= "<<pdfA[j]<<" pdfB["<<j<<"]= "<<pdfB[j]
+		 <<std::endl;
+    }
+  }
+  
+  
+  for ( unsigned iproc=0 ; iproc<procname.size() ; iproc++ ) {
+    int ifl1=Flav1.find(iproc)->second;  
+    int ifl2=Flav2.find(iproc)->second;
+    
+    H[iproc]=pdfA[ifl1]*pdfB[ifl2];
+    if (ifl1==ifl2)   H[iproc]*=2.; // symmetric contributions are counted twice
+    
+    if (m_debug) std::cout<<iproc<<" ifl1= "<<ifl1<<" ifl2= "<<ifl2
+			  <<" pdfA["<<ifl1<<"]= "<<pdfA[ifl1]
+			  <<" pdfB["<<ifl2<<"]= "<<pdfB[ifl2]
+			  <<" H= "<<H[iproc]
+			  <<" name= "<<procname[iproc]
+			  <<std::endl;
+  }
+  
+  
+  return;
+  
+}
+
+
+
+
+
+
+void generic_pdf::ReadSubprocessSteering(const std::string& fname){
+  
+  // 
+  // read steering deciding on subprocesses
+  //
+  
+  if (m_debug)
+    std::cout << "generic_pdf::ReadSubprocessSteering: read subprocess configuration file: " << fname << std::endl; 
+  
+  /// use the search path to find config files
+  std::ifstream& infile = openpdf( fname );
+
+  //  if ( !infile ) { // Check open
+  //    //    std::cerr << "Can't open " << fname << std::endl;
+  //    infile.close(); /// why close it if it can't open?
+  //    throw exception( std::cerr << "generic_pdf() cannot open file " << fname << std::endl );  
+  //  } else {
+  //    std::cout <<" generic_pdf:ReadData: read data file: " << fname << std::endl;
+  //  }
+  
+  
+  char line[256];
+  int iproc=0;
+
+  while (infile.good()) {
+
+    //if (m_debug) std::cout << " good: " << infile.good() << " eof: " << infile.eof() << std::endl;
+
+    if (!infile.good()) break;
+
+    infile.getline(line,sizeof(line),'\n');
+    std::string mytest = std::string(line);
+    //std::cout<< "sizof test= "<< mytest.size() << std::endl;
+    
+    //if (m_debug) std::cout<< "line= "<< line << "\n";
+
+    if ( !m_ckmflag ) { 
+      if ( mytest=="Wplus" )  {
+   	std::cout << "generic_pdf::ReadSubprocessSteering() setting W+ ckm matrix" << std::endl;
+	make_ckm( true );
+	m_ckmflag = true;
+	continue;
+      }
+      else if ( mytest=="Wminus" ) { 
+   	std::cout << "generic_pdf::ReadSubprocessSteering() setting W- ckm matrix" << std::endl;
+	make_ckm( false );
+	m_ckmflag = true;
+	continue;
+      }
+    }
+    
+    if(line[0] != '%'  && mytest.size()!=0) {
+      char flav1[100];
+      char flav2[100];
+      sscanf(line,"%99s %99s",flav1,flav2); // bound the reads to the buffer size
+      std::cout << " ReadSubProcesses: flav1 = " << flav1 << " flav2 = " << flav2 << std::endl;
+      
+      int ifl1=iflavour[flav1];
+      int ifl2=iflavour[flav2];
+
+      if (m_debug)
+	std::cout<<"ReadSubProcesses: ifl1 = " << ifl1 << " ifl2 = " << ifl2 << std::endl;
+  
+      Flav1[iproc]=ifl1;
+      Flav2[iproc]=ifl2;
+      iproc++;
+      
+      std::string myprocname = std::string(flav1) + "-" + std::string(flav2);
+      if (m_debug) std::cout << iproc << " process name = " << myprocname << std::endl;
+      procname.push_back(myprocname);
+
+    }
+
+  }
+
+  m_Nproc = procname.size();
+
+}
+
+
+
+int generic_pdf::decideSubProcess(const int iflav1, const int iflav2) const
+{
+  // 
+  // iflav1 may change from 0 to 21 (convention for gluons in sherpa)
+  // assume that ckm comes with weights
+
+  int    iProcess = -1;
+ 
+  if (m_debug) std::cout << "generic_pdf::decideSubProces: " << std::endl; 
+  if (m_debug) std::cout << " iflav1 = " << iflav1 << " iflav2 = " << iflav2 << std::endl;
+  
+  std::map<int,int>::const_iterator flav1 = flavourtype.find(iflav1);
+  std::map<int,int>::const_iterator flav2 = flavourtype.find(iflav2);
+
+  if ( flav1==flavourtype.end() || flav2==flavourtype.end() ) return iProcess;
+
+  int ifl1=flav1->second;
+  int ifl2=flav2->second;
+  
+  for ( unsigned i=0 ; i<procname.size() ; i++ ) {
+    if (iProcess!=-1) continue;
+    if (m_debug) std::cout << " " << i << " name= " << procname[i]
+			 << " Flav1, Flav2 = " << (Flav1.find(i)->second) << " " << (Flav2.find(i)->second)
+			 << std::endl; 
+
+    if ( (Flav1.find(i)->second)==ifl1 && (Flav2.find(i)->second)==ifl2 ) {
+      iProcess=i;
+      // std::cout << " iProcess found " << iProcess << std::endl;
+    }
+  }
+  
+  if (m_debug) std::cout << "generic_pdf:decideSubprocess iProcess found " << iProcess << std::endl;
+  
+  if ( iProcess==-1 ) { 
+    std::cout << "generic_pdf:decideSubprocess " << iflav1 << " <> " << iflav2 << std::endl; 
+  }
+  // else
+  //    std::cout << " ***decideSubProcess " << iflav1 << " <> " << iflav2 << " iProcess = " << iProcess << std::endl; 
+  //  currentsubprocess=iProcess;
+
+  return iProcess;
+}
+
+
+
+// get ckm related information
+
+void generic_pdf::Print_ckm() {  
+  //
+  // print ckm matrix for W+ and W-
+  // 
+  
+  std::cout << "generic_pdf::Print_ckm = " << std::endl;
+  std::cout <<" ckm size= "<<m_ckm2.size() << std::endl;
+  if (m_ckm2.size()<=0) return;
+
+  //
+  for ( int i=0 ; i<14 ; i++ ) {
+    for ( int j=0 ; j<14 ; j++ ) {
+     if (m_ckm2[i][j]!=0)
+      std::cout << " ckm[" << i << "][" << j << "]\t =\t " << m_ckm2[i][j] << std::endl;
+    }
+  }
+  
+  return;
+}
+
+
+void generic_pdf::PrintSubprocess() { 
+  
+  std::cout << "generic_pdf::PrintSubprocess:" << std::endl;
+
+  // std::cout<<" size of Flav1: "<<  Flav1.size()<<endl;
+  // std::map<int,int>::iterator imap;
+  // for (imap = Flav1.begin(); imap!=Flav1.end(); ++imap){
+  //  std::cout<<" "<<imap->first
+  //           <<" flav1, flav2= "<<imap->second<<" "<<imap->second<<endl;
+  // }
+
+  std::cout << "\t Number of subprocesses: " << procname.size() << std::endl;
+  
+  for (unsigned i=0; i< procname.size(); i++) {
+    std::cout << "\t" << i << " Flav1, Flav2 = " << Flav1[i] << " " << Flav2[i]
+	      << "  " << procname[i]
+	      << std::endl;
+  };
+
+  return;
+
+}
+
+
+
+
+
+
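For reference, a minimal sketch of the plain-text steering format that ReadSubprocessSteering() above appears to expect; the layout and flavour pairs below are illustrative, not taken from a shipped configuration file. Lines beginning with '%' are skipped, an optional 'Wplus' or 'Wminus' line switches on the corresponding ckm weighting, and each remaining line names the two incoming flavours of one subprocess, using the labels registered in iflavour (e.g. "g", "u", "dbar"):

    % hypothetical subprocess steering file for generic_pdf
    Wplus
    g g
    u dbar
    d ubar
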
Index: applgrid/tags/applgrid-01-06-36/src/Makefile.am
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/Makefile.am	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/Makefile.am	(revision 2010)
@@ -0,0 +1,146 @@
+##########################################################################
+#
+#   File:         Makefile.am
+#
+#                 Copyright (C) M.Sutton (sutt@cern.ch) 
+#
+#   Description:  generate the Makefile for the appl_grid project
+#                 
+#
+#   Created:    M.Sutton (sutt@hep.ucl.ac.uk) Fri  5 Nov 2010 00:05:29 GMT
+#   
+#   $Id: makefile 20 2008-05-05 01:55:42Z sutt $                
+#                   
+#########################################################################
+
+
+if USE_ROOT
+  ROOTCFLAGS = $(shell root-config --cflags)
+  ROOTLIBS   = $(shell root-config --nonew --libs)
+  ROOTGLIBS  = $(shell root-config --nonew --glibs)
+  ROOTARCH   = $(shell root-config --ldflags)
+  ROOTCINT=rootcint
+else
+  ROOTCFLAGS = 
+  ROOTLIBS   = 
+  ROOTGLIBS  = 
+  ROOTARCH   = 
+  ROOTCINT   =
+endif
+
+
+# hoppet flags
+if USE_HOPPET 
+  HOPPETDIR    = $(shell hoppet-config --prefix)
+  HOPPETCFLAGS = $(shell hoppet-config --cxxflags)
+  HOPPETLIBS   = $(shell hoppet-config --libs) 
+else
+  HOPPETDIR    = 
+  HOPPETCFLAGS = 
+  HOPPETLIBS   = 
+endif
+
+
+# fortran RTL
+
+FRTLLIB =
+
+if USE_FRTLSO
+    FRTLIB   = $(shell gfortran -print-file-name=libgfortran.so)
+    FRTLLIB  = -L$(subst /libgfortran.so, ,$(FRTLIB) ) -lgfortran 
+else
+    FRTLIB   = $(shell gfortran -print-file-name=libgfortran.a)
+    FRTLLIB  = -L$(subst /libgfortran.a, ,$(FRTLIB) ) -lgfortran 
+endif
+
+
+# LHAPDF flags 
+
+if USE_LHAPDF
+  # in preparation for including the lhapdf version 6 interface
+  LHAPDFCFLAGS = $(shell lhapdf-config --cxxflags)
+else
+  LHAPDFCFLAGS = 
+endif
+
+AM_CXXFLAGS = $(ROOTARCH) -O3 -DDATADIR=\"@datadir@/$(PACKAGE)\" -Wall -Wextra -fPIC  -I. -I.. -I$(srcdir) $(LHAPDFCFLAGS) $(ROOTCFLAGS)  $(HOPPETCFLAGS)
+
+AM_LDFLAGS = $(ROOTARCH) -O3  $(ROOTLIBS) $(FCLIBS) $(HOPPETLIBS) $(FRTLLIB)  
+
+
+bin_PROGRAMS = applgrid-combine applgrid-convert
+
+applgrid_combine_SOURCES = combine.cxx
+applgrid_combine_LDADD   = libAPPLgrid.la
+applgrid_combine_CXXFLAGS =  $(AM_CXXFLAGS) 
+applgrid_combine_LDFLAGS = $(ROOTARCH) -L. -lAPPLgrid $(ROOTLIBS) $(HOPPETLIBS) $(FRTLLIB)
+
+applgrid_convert_SOURCES = convert.cxx
+applgrid_convert_LDADD   = libAPPLgrid.la
+applgrid_convert_CXXFLAGS =  $(AM_CXXFLAGS) 
+applgrid_convert_LDFLAGS = $(ROOTARCH) -L. -lAPPLgrid $(ROOTLIBS) $(HOPPETLIBS) $(FRTLLIB)
+
+
+lib_LTLIBRARIES= libAPPLgrid.la libfAPPLgrid.la 
+libAPPLgrid_la_SOURCES = \
+	appl_grid.cxx		appl_igrid.cxx       fastnlo.cxx \
+	appl_timer.cxx          appl_pdf.cxx         \
+	appl_file.cxx                                \
+	file_index.cxx                               \
+	histogram.cxx                                \
+	nlojet_pdf.cxx		nlojetpp_pdf.cxx     \
+	mcfmw_pdf.cxx		mcfmwjet_pdf.cxx     \
+	mcfmwc_pdf.cxx                               \
+	mcfmz_pdf.cxx mcfmzjet_pdf.cxx               \
+	mcfmQQ_pdf.cxx                               \
+	jetrad_pdf.cxx		dis_pdf.cxx          \
+	generic_pdf.cxx         basic_pdf.cxx        \
+	combination.cxx         lumi_pdf.cxx         \
+	SparseMatrix3d.cxx	hoppet_init.cxx       \
+	TFileString.cxx        \
+	TFileVector.cxx	\
+	integral.cxx \
+	*.h
+
+if USE_ROOT
+  libAPPLgrid_la_SOURCES += TFileStringDict.cxx TFileVectorDict.cxx 
+endif
+
+libfAPPLgrid_la_SOURCES = fappl_grid.cxx fappl_fitter.cxx
+
+library_includedir=$(includedir)/appl_grid
+
+library_include_HEADERS=../appl_grid/*.h
+
+
+# GSLLIBS=$(shell gsl-config --libs)
+# WDIR = $(shell pwd)
+
+
+# if WITHCLING
+### ROOT-6: ###
+
+# rootmapdir = $(libdir)
+# rootmap_DATA = .libs/*.pcm
+
+# endif
+
+
+AM_FCFLAGS = -c
+AM_FCLIBS  = -lg2c
+AM_SOFLAGS = -shared
+CINT       = rootcint
+
+clean-local:
+	rm -rf *.o *.lo *Dict*
+.cxx.o : 
+	$(CXX) $(AM_CXXFLAGS) -c $<
+.c.o : 
+	$(CC) $(AM_CFLAGS) -c $<
+
+if USE_ROOT
+%Dict.cxx : %.h %.cxx
+	$(CINT) -f $@ -c $< -I.. 
+endif
+
+#../appl_grid/$*LinkDef.h
Index: applgrid/tags/applgrid-01-06-36/src/nlojetpp_pdf.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/nlojetpp_pdf.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/nlojetpp_pdf.h	(revision 2010)
@@ -0,0 +1,71 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    nlojetpp_pdf.h
+ **
+ **     @brief   pdf transform functions for nlojet ppbar production                  
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: nlojetpp_pdf.h, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+#ifndef __NLOJETPP_PDF_H
+#define __NLOJETPP_PDF_H
+
+#include "appl_grid/appl_pdf.h" 
+
+
+
+// nlojet pdf combination class 
+
+class nlojetpp_pdf : public appl::appl_pdf {
+
+public:
+
+  nlojetpp_pdf() : appl_pdf("nlojetpp") { m_Nproc=7; } 
+
+  void evaluate(const double* fA, const double* fB, double* H) const;
+
+};  
+
+
+inline void  nlojetpp_pdf::evaluate(const double* fA, const double* fB, double* H) const {  
+  
+  fA += 6;  // offset internal pointers so can use fA[-6]..fA[6] for simplicity
+  fB += 6; 
+
+  double Q1=0, Q1bar=0, Q2=0, Q2bar=0, D=0, Dbar=0, G1=fA[0], G2=fB[0]; 
+
+  for ( int i=1  ; i<=6  ; i++ ) {  Q1    += fA[i];   Q2    += fB[i]; }
+  for ( int i=-1 ; i>=-6 ; i-- ) {  Q1bar += fA[i];   Q2bar += fB[i]; }  
+
+  // don't include the gluon (fA[0], fB[0])
+  for ( int i=-6 ; i<=6 ; i++ ) if ( i ) D += fA[i]*fB[i];
+
+  for ( int i=1 ; i<=6 ; i++ ) { 
+    Dbar += fA[i]  * fB[-i];  
+    Dbar += fA[-i] * fB[i]; 
+  }
+  
+  H[0] =  G1*G2 ;
+  H[1] =  (Q1+Q1bar)*G2 ;
+  H[2] =  G1*(Q2+Q2bar) ;
+
+  H[6] =  Q1*Q2+Q1bar*Q2bar-D ;
+  H[5] =  D ; 
+  H[4] =  Dbar ;
+  H[3] =  Q1*Q2bar+Q1bar*Q2-Dbar ;
+}
+
+
+extern "C" void fnlojet_pdf__(const double* fA, const double* fB, double* H);
+
+
+#endif  // __NLOJETPP_PDF_H
+
+
+
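A minimal usage sketch of the calling convention assumed by evaluate() above: each pdf argument points to 13 consecutive parton densities ordered from tbar to t with the gluon at the central entry, and H receives the 7 parton luminosity combinations. The toy pdf values and the bare include path are illustrative only.

    #include "nlojetpp_pdf.h"   // header shown above; include path is illustrative

    int main() {

      double xfA[13] = { 0 };   // parton densities for beam A: index 0 = tbar, 6 = gluon, 12 = top
      double xfB[13] = { 0 };   // parton densities for beam B

      xfA[6] = 0.5;  xfB[6] = 0.4;   // toy gluon densities
      xfA[8] = 0.3;  xfB[5] = 0.2;   // toy u and dbar densities

      double H[7];                   // the 7 luminosity combinations (m_Nproc = 7)

      nlojetpp_pdf pdf;
      pdf.evaluate( xfA, xfB, H );   // H[0] = gg, H[1] = qg, H[2] = gq, etc

      return 0;
    }
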
Index: applgrid/tags/applgrid-01-06-36/src/SparseMatrix3d.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/SparseMatrix3d.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/SparseMatrix3d.cxx	(revision 2010)
@@ -0,0 +1,231 @@
+/**
+ **     @file    SparseMatrix3d.cxx
+ **
+ **     @author  mark sutton
+ **     @date    Fri Nov 23 00:49:26 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: SparseMatrix3d.cxx, v1.0   Fri Nov 23 00:49:26 GMT 2007 sutt $
+ **
+ **/
+
+
+#include "SparseMatrix3d.h"
+
+#include "appl_grid/stream_grid.h"
+
+
+SparseMatrix3d::SparseMatrix3d( int Nx, double lx, double ux, 
+				int Ny, double ly, double uy, 
+				int Nz, double lz, double uz) :
+  sparse3d(Nx, Ny, Nz), 
+  m_xaxis(Nx, lx, ux),
+  m_yaxis(Ny, ly, uy),
+  m_zaxis(Nz, lz, uz),
+  m_fastindex(NULL) { 
+  setup_fast();
+} 
+
+
+SparseMatrix3d::SparseMatrix3d(const SparseMatrix3d& s) : 
+  // sparse3d(*(sparse3d*)(&s)), 
+  sparse3d(s), 
+  m_xaxis(s.m_xaxis),
+  m_yaxis(s.m_yaxis),
+  m_zaxis(s.m_zaxis),
+  m_fastindex(NULL) { 
+  setup_fast();
+} 
+
+
+#ifdef USEROOT 
+SparseMatrix3d::SparseMatrix3d(const TH3D* h) : 
+  sparse3d( h->GetNbinsX(), h->GetNbinsY(), h->GetNbinsZ() ),
+  m_fastindex(NULL) {
+  
+  TH1D* hx = (TH1D*)h->Project3D("x");
+  TH1D* hy = (TH1D*)h->Project3D("y");
+  TH1D* hz = (TH1D*)h->Project3D("z");
+  
+  m_xaxis = axis<double>(hx->GetNbinsX(), hx->GetBinCenter(1),  hx->GetBinCenter(hx->GetNbinsX() ) ); 
+  m_yaxis = axis<double>(hy->GetNbinsX(), hy->GetBinCenter(1),  hy->GetBinCenter(hy->GetNbinsX() ) ); 
+  m_zaxis = axis<double>(hz->GetNbinsX(), hz->GetBinCenter(1),  hz->GetBinCenter(hz->GetNbinsX() ) ); 
+  
+  delete hx;
+  delete hy;
+  delete hz;
+  
+  for ( int i=0 ; i<m_xaxis.N() ; i++ ) { 
+    for ( int j=0 ; j<m_yaxis.N() ; j++ ) { 
+      for ( int k=0 ; k<m_zaxis.N() ; k++ ) (*this)(i,j,k) = h->GetBinContent(i+1,j+1,k+1);
+    }  
+  }
+  setup_fast();
+}
+#endif
+
+
+
+
+ 
+SparseMatrix3d::SparseMatrix3d(const stream_grid& h) :
+  sparse3d( h.xaxis().size(), h.yaxis().size(), h.zaxis().size() ), 
+  m_fastindex(NULL) {
+  
+  m_xaxis = axis<double>( h.xaxis().size(), h.xaxis(0),  h.xaxis(h.xaxis().size()-1) );
+  m_yaxis = axis<double>( h.yaxis().size(), h.yaxis(0),  h.yaxis(h.yaxis().size()-1) );
+  m_zaxis = axis<double>( h.zaxis().size(), h.zaxis(0),  h.zaxis(h.zaxis().size()-1) );
+  
+  for ( int i=0 ; i<m_xaxis.N() ; i++ ) { 
+    for ( int j=0 ; j<m_yaxis.N() ; j++ ) { 
+      for ( int k=0 ; k<m_zaxis.N() ; k++ ) (*this)(i,j,k) = h(i,j,k);
+    }  
+  }
+
+  setup_fast();
+}
+
+
+
+
+
+SparseMatrix3d::~SparseMatrix3d() { empty_fast(); } 
+  
+
+/// check if the actual contents are the same
+bool SparseMatrix3d::operator==(const SparseMatrix3d& s) const { 
+
+  /// first if the axes are different, cannot be the same
+  if ( !compare_axes( s ) ) return false;
+
+  int nvalue = 0;
+
+  /// now do a deep comparison ...
+  for ( int i=0 ; i<m_xaxis.N() ; i++ ) { 
+    for ( int j=0 ; j<m_yaxis.N() ; j++ ) { 
+      for ( int k=0 ; k<m_zaxis.N() ; k++ ) { 
+
+	//	std::cout << i << " " << j << " " << k
+	//		  << " : " << (*this)(i,j,k) 
+	//		  << " " << s(i,j,k)  << "  : " << ((*this)(i,j,k)-s(i,j,k) ) << std::endl;
+
+	if ( (*this)(i,j,k) != s(i,j,k) ) return false;
+	if ( (*this)(i,j,k)!=0 ) nvalue++;
+      }  
+    }  
+  }
+
+  return true;
+}
+
+
+
+#ifdef USEROOT
+// utilities for file access and storage
+
+TH3D* SparseMatrix3d::getTH3D(const std::string& s) const { 
+  
+  double delx = xaxis().delta();
+  double dely = yaxis().delta();
+  double delz = zaxis().delta();
+  
+  TH3D* h=new TH3D(s.c_str(), s.c_str(), 
+		   xaxis().N(),  xaxis().min()-0.5*delx, xaxis().max()+0.5*delx, 
+		   yaxis().N(),  yaxis().min()-0.5*dely, yaxis().max()+0.5*dely, 
+		   zaxis().N(),  zaxis().min()-0.5*delz, zaxis().max()+0.5*delz);
+  
+  const SparseMatrix3d& sm = (*this);
+  
+  int N=0;
+  
+  // some printout ???
+  //  print();
+
+  for ( int i=lo() ; i<=hi() ; i++ ) { 
+    const sparse2d* s2d = sm[i]; 
+    if ( s2d==NULL ) continue;
+    for ( int j=s2d->lo() ; j<=s2d->hi() ; j++ ) { 
+      const sparse1d* s1d = (*s2d)[j];
+      if ( s1d==NULL ) continue;
+      for ( int k=s1d->lo() ; k<=s1d->hi() ; k++ ) {
+	N++;
+	// std::cout << "\tsm(" << i << "\t, " << j << "\t, " << k << ")=" << (*s1d)(k) << std::endl; 
+	// h->SetBinContent(i+1, j+1, k+1, sm(i,j,k) );
+	h->SetBinContent(i+1, j+1, k+1, (*s1d)(k) );
+      }
+    }
+  }
+  
+  //  std::cout << "SparseMatrix3d::getTH3D() s=" << s 
+  //       << "\tN=" << N << "\tfilled bins" << std::endl;
+  
+  return h;
+}
+#endif
+
+
+
+// utilities for file access and storage
+
+stream_grid* SparseMatrix3d::get(const std::string& s) const { 
+
+  stream_grid* weightgp = new stream_grid( s, xaxis().v(), yaxis().v(), zaxis().v() );
+  
+  stream_grid& weightg = *weightgp;
+    
+  const SparseMatrix3d& sm = (*this);
+  
+  int N=0;
+  
+  // some printout ???
+  //  print();
+
+  for ( int i=lo() ; i<=hi() ; i++ ) { 
+    const sparse2d* s2d = sm[i]; 
+    if ( s2d==NULL ) continue;
+    for ( int j=s2d->lo() ; j<=s2d->hi() ; j++ ) { 
+      const sparse1d* s1d = (*s2d)[j];
+      if ( s1d==NULL ) continue;
+      for ( int k=s1d->lo() ; k<=s1d->hi() ; k++ ) {
+	N++;
+	// std::cout << "\tsm(" << i << "\t, " << j << "\t, " << k << ")=" << (*s1d)(k) << std::endl; 
+	// h->SetBinContent(i+1, j+1, k+1, sm(i,j,k) );
+	weightg(i, j, k) =  (*s1d)(k);
+      }
+    }
+  }
+  
+  //  std::cout << "SparseMatrix3d::getTH3D() s=" << s 
+  //       << "\tN=" << N << "\tfilled bins" << std::endl;
+  
+  return weightgp;
+}
+
+
+
+
+void SparseMatrix3d::clear() { 
+  untrim();
+  for ( int i=0 ; i<m_xaxis.N() ; i++ ) { 
+    for ( int j=0 ; j<m_yaxis.N() ; j++ ) { 
+      for ( int k=0 ; k<m_zaxis.N() ; k++ ) { 
+	if ( (*this)(i,j,k)!=0 ) (*this)(i,j,k)=0;
+      }
+    }
+  }
+}
+
+
+
+
+
+std::ostream& operator<<(std::ostream& s, const SparseMatrix3d& sm) { 
+  const tsparse3d<double>* sp = &sm;
+  s << "x:" << sm.xaxis() << "\ny:" << sm.yaxis() << "\nz:" << sm.zaxis()
+    << "\n" << *sp;
+  return s;
+}
+
+
+
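A minimal usage sketch for the class above, assuming only what is visible in this file: the (Nx, lx, ux, ...) constructor, element access through operator(), the deep comparison and clear(). The include path is illustrative.

    #include "SparseMatrix3d.h"

    int main() {

      // 10 x-bins on [0,1], 10 y-bins on [0,1], 5 z-bins on [1,100]
      SparseMatrix3d m( 10, 0, 1,  10, 0, 1,  5, 1, 100 );

      m(3,4,2) = 1.5;               // fill a single element

      SparseMatrix3d copy( m );     // copy constructor also rebuilds the fast index

      bool same = ( copy==m );      // deep, element-by-element comparison

      m.clear();                    // untrim and zero all stored elements

      return ( same ? 0 : 1 );
    }
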
Index: applgrid/tags/applgrid-01-06-36/src/basic_pdf.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/basic_pdf.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/basic_pdf.cxx	(revision 2010)
@@ -0,0 +1,54 @@
+/**
+ **     @file    basic_pdf.cxx
+ **
+ **     @brief   pdf transform function for basic 121 ( 11 x 11 ) pdf combinations                   
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: basic_pdf.cxx, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+
+
+#include "appl_grid/basic_pdf.h"
+
+
+// fortran callable wrapper
+extern "C" void fbasic_pdf__(const double* fA, const double* fB, double* H) { 
+  static basic_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+
+
+
+
+void basic_pdf::evaluate(const double* _fA, const double* _fB, double* H) const {  
+
+  // remapping from pdg -6..6 convention to u..t ubar..tbar g internal
+  // basic convention
+
+  const double* f1 = _fA+6;
+  const double* f2 = _fB+6;
+
+  ///  double H[121]; /// do not include top: 11x11 rather than 13x13
+  int ih=0;
+  /// from -b to b rather than -t to t
+  for ( int i=-5 ; i<=5 ; i++ )  { 
+    for ( int j=-5 ; j<=5 ; j++ )  { 
+      /// f1 partons first !!! 
+      H[ih++] = f1[i]*f2[j];
+    }
+  }
+}
+  
+
+int  basic_pdf::decideSubProcess(const int iflav1, const int iflav2) const { 
+  if ( std::fabs(iflav1)>5 || std::fabs(iflav2)>5 ) return -1;
+  return (iflav1+5)*11+(iflav2+5);
+}
+
+
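A short worked example of the 11 x 11 indexing implemented above; the printed values follow directly from (iflav1+5)*11 + (iflav2+5), and the include path is illustrative.

    #include <iostream>
    #include "appl_grid/basic_pdf.h"

    int main() {

      basic_pdf pdf;

      std::cout << pdf.decideSubProcess(  0,  0 ) << std::endl;  // gluon-gluon:  (0+5)*11 + (0+5)  = 60
      std::cout << pdf.decideSubProcess(  2, -1 ) << std::endl;  // u dbar:       (2+5)*11 + (-1+5) = 81
      std::cout << pdf.decideSubProcess(  6,  0 ) << std::endl;  // top is excluded, so -1

      return 0;
    }
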
Index: applgrid/tags/applgrid-01-06-36/src/combine.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/combine.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/combine.cxx	(revision 2010)
@@ -0,0 +1,928 @@
+/**
+ **     @file    combine.cxx
+ **
+ **     @brief   increasingly complex code to add together many 
+ **              grids 
+ **
+ **     @author  mark sutton
+ **     @date    Sat 13 Jul 2013 09:54:51 CEST 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: combine.cxx, v0.0   Sat 13 Jul 2013 09:54:51 CEST sutt $
+ **
+ **/
+
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+#include <algorithm>
+#include <vector>
+#include <iterator>
+#include <string>
+#include <cstdlib>
+#include <sys/stat.h>
+
+
+#include <regex>
+#include <map>
+
+#include "appl_grid/simpletimer.h"
+
+#include "appl_grid/appl_root.h"
+
+#include "appl_grid/appl_grid.h"
+#include "appl_grid/appl_file.h"
+#include "amconfig.h"
+
+#include "appl_grid/appl_timer.h"
+
+
+#ifdef USEROOT
+#include "TFile.h"
+#include "TPad.h"
+#endif
+
+
+template<typename T>
+bool bdelete( std::vector<T>& v, const T& t ) {
+  for ( typename std::vector<T>::iterator itr=v.begin() ; itr!=v.end() ; ) { 
+    if ( *itr == t ) itr = v.erase( itr ); // erase invalidates the iterator, so use the returned one
+    else itr++;
+  }
+  return true;
+} 
+
+
+double integral( appl::TH1D* h ) { 
+  double d = 0;
+  for ( int i=0 ; i<h->GetNbinsX() ; i++ ) d += h->GetBinContent(i+1);
+  return d;
+}
+
+
+void print( appl::TH1D* h ) { 
+  for ( int i=1 ; i<=h->GetNbinsX() ; i++ ) std::cout << h->GetBinContent(i) << " ";
+  std::cout << std::endl;
+}
+
+
+int usage(std::ostream& s, int argc, char** argv) { 
+  if ( argc<1 ) return -1; /// should never be the case 
+  s << "Usage: " << argv[0] << " [OPTIONS] -o output_grid.root  input_grid.root [input_grid1.root ... input_gridN.root]\n\n";
+  s << "  APPLgrid \'" << argv[0] << "\' adds " << PACKAGE_STRING << " grid files together into a single grid\n\n"; 
+  s << "Configuration: \n";
+  s << "    -o filename   \t name of output grid (filename required)\n\n";
+  s << "Options: \n";
+  s << "    -g, --gscale   value\t rescale output grid by value, \n";
+  s << "    -r, --rscale   value\t rescale reference histogram by value, \n";
+  s << "    -s, --scale    value\t rescale both output grid and reference histogram by value\n";
+  s << "    -w, --wscale   value\t rescale the weight normalisation for the output grid\n";
+  s << "        --weight   value\t set the value of the weight normalisation for the output grid directly\n";
+  s << "    -n, --normfile value\t file containing bin-by-bin normalisations for adding grids\n";
+  s << "    -p, --pdf      value\t set the default pdf to value\n";
+  s << "    -i, --iset     value\t set the default set from the pdf group  to value\n";
+  s << "    -a, --all           \t add all grids (default)\n";
+  s << "        --optimise      \t optimise the output grid\n";
+  s << "        --compress value\t try to reduce the number of parton luminosity\n"
+    << "                        \t combinations\n";
+  s << "    -c, --chi2     value\t if set, exclude grids with a chi2 with respect\n"
+    << "                        \t to the median larger than value\n";
+  s << "        --verbose       \t display grid documentation during add\n";
+  s << "    -v, --version       \t displays the APPLgrid version\n";
+  s << "    -h, --help          \t display this help\n";
+  s << "\nSee " << PACKAGE_URL << " for more details\n"; 
+  s << "\nReport bugs to <" << PACKAGE_BUGREPORT << ">";
+  s << std::endl;
+  return 0;
+}
+
+
+
+
+
+/// bins 
+
+
+struct bin {
+
+  bin( double y=0, double ye=0, int n=1 ) : _x(0), _y(y), _ye(ye), _y2(_y*_y), _n(n) { 
+    _ys.push_back(_y);
+  } 
+
+  bin( appl::TH1D* h, int i ) : _x(h->GetBinCenter(i)), _y(h->GetBinContent(i)), _ye(h->GetBinError(i)), _y2(_y*_y), _n(1) { 
+    _ys.push_back(_y);
+  } 
+
+  bin( const bin& b) : _x(b._x), _y(b._y), _ye(b._ye), _y2(b._y2), _n(b._n), _ys(b._ys) { } 
+
+  unsigned size() const { return _ys.size(); }
+
+  bin& operator+=( const bin& b) { 
+    _y  += b._y;
+    _y2 += b._y2;
+    _ye  = std::sqrt( _ye*_ye + b._ye*b._ye );
+    _n  += b._n;
+    _ys.push_back(b._y);
+    return *this;
+  }
+
+  bin& operator-=( const bin& b) { 
+    _y  -= b._y;
+    _y2 -= b._y2;
+    _ye  = std::sqrt( _ye*_ye - b._ye*b._ye );
+    _n  -= b._n;
+    bdelete( _ys, b._y );
+    return *this;
+  }
+
+
+
+  bin& operator*=( double d ) { 
+    _y  *= d;
+    _ye *= d;
+    _y2 *= d*d;
+    //    _n  *= d;
+    for ( unsigned i=0 ; i<_ys.size() ; i++ ) _ys[i] *= d;
+    return *this;
+  }
+
+  bin& operator/=( double d ) { return operator*=(1/d); } 
+
+
+  bool operator<( const bin& b)  const { return _y<b._y; }
+  bool operator>( const bin& b)  const { return _y>b._y; }
+  bool operator<=( const bin& b) const { return _y<=b._y; }
+  bool operator>=( const bin& b) const { return _y>=b._y; }
+  bool operator==( const bin& b) const { return _y==b._y; }
+  bool operator!=( const bin& b) const { return _y!=b._y; }
+  
+
+  double _x;
+  double _y;
+  double _ye;
+
+  double _y2;
+
+  int    _n;
+
+  std::vector<double> _ys;
+
+
+  double mean()   const { return _y/_n; }  
+  double rms()    const { return std::sqrt( ( _y2 - _y*_y/_n )/_n ); }  
+  double mean_error()  const { return std::sqrt( ( _y2 - _y*_y/_n )/(_n*_n) ); }  
+
+
+  double median() const {
+    std::vector<double> v = _ys;
+    std::sort( v.begin(), v.end() );
+    if ( (v.size()&1) ) return v[v.size()/2];
+    return 0.5*(v[v.size()/2] + v[(v.size()-1)/2]);
+  }
+
+
+};
+
+
+
+
+bin operator+( const bin& b0, const bin& b1 ) { return bin( b0._y+b1._y, std::sqrt( b0._ye*b0._ye + b1._ye*b1._ye ), b0._n+b1._n ); }
+bin operator*( double d, const bin& b )       { return bin( d*b._y, d*b._ye, d*b._n ); }
+
+
+
+/// cross section 
+
+struct Xsection { // : public std::vector<bin> {
+  
+  Xsection() { }
+
+  Xsection(appl::TH1D* h) { // : _bins(*this) { 
+    add( h ); //for ( int i=0 ; i<h->GetNbinsX() ; i++ ) _bins.push_back( bin( h, i+1 ) );
+  }
+  
+  void add( bin& b ) {  _bins.push_back( b ); }
+
+  
+  void add( appl::TH1D* h ) { 
+    if ( _bins.empty() ) { 
+      for ( int i=0 ; i<h->GetNbinsX() ; i++ ) _bins.push_back( bin( h, i+1 ) );
+    }
+    else { 
+      if ( _bins.size()!=unsigned(h->GetNbinsX()) ) { 
+	std::cerr << "Xsection::add() bin mismatch " << _bins.size() << " " << h->GetNbinsX() << std::endl;
+	return;
+      }
+      for ( int i=0 ; i<h->GetNbinsX() ; i++ ) _bins[i] += bin( h, i+1 );
+    }
+  }
+
+
+  bin  operator[](int i) const { return _bins[i]; }
+  bin& operator[](int i)       { return _bins[i]; }
+
+  unsigned size()  const { return _bins.size(); };
+  bool     empty() const { return _bins.empty(); };
+
+  Xsection& operator+=( const Xsection& x) { 
+    for ( unsigned i=0 ; i<_bins.size() ; i++ ) _bins[i] += x._bins[i];
+    return *this;
+  }
+
+
+  Xsection& operator-=( const Xsection& x) { 
+    for ( unsigned i=0 ; i<_bins.size() ; i++ ) _bins[i] -= x._bins[i];
+    return *this;
+  }
+
+
+  Xsection& operator+=( appl::TH1D* h ) { 
+    add( h );
+    return *this;
+  }
+
+  Xsection& operator*=( double d ) { 
+    for ( unsigned i=0 ; i<_bins.size() ; i++ ) _bins[i] *= d;
+    return *this;
+  }
+
+
+  std::vector<bin>  mean() const {
+    std::vector<bin> _mean;
+    for ( unsigned i=0 ; i<_bins.size() ; i++ ) {
+      _mean.push_back( bin( _bins[i].mean(), _bins[i].mean_error(), _bins[i]._n ) ); 
+    }
+    return _mean;
+  }
+
+
+  std::vector<bin>  median() const {
+    std::vector<bin> _median;
+    for ( unsigned i=0 ; i<_bins.size() ; i++ ) {
+      _median.push_back( bin( _bins[i].median(), _bins[i].mean_error(), _bins[i]._n ) ); 
+    }
+    return _median;
+  }
+
+  /// data members 
+
+  std::vector<bin> _bins;
+
+}; 
+
+
+
+Xsection operator*( const Xsection& x, double d ) { 
+  Xsection xs(x);
+  return (xs *= d);
+}
+
+
+Xsection operator*( double d, const Xsection& x )  { return x*d; }
+
+
+Xsection operator-( const Xsection& x1, const Xsection& x2 ) { 
+  Xsection xs = x1;
+  xs += (-1*x2);
+  return xs;
+}
+
+
+
+
+
+
+double chi2( const Xsection& xs1, const Xsection& xs2 ) { 
+
+  if ( xs1.empty() ) return 0;
+
+  if ( xs1.size()!=xs2.size() ) return 0;
+  
+  double c2 = 0;
+
+  for ( unsigned i=0 ; i<xs1.size() ; i++ ) {
+ 
+    double d = xs1[i].mean() - xs2[i]._y;
+    
+    /// take fractional sigma and turn it to actual sigma
+    double s2 = ( xs1[i].mean_error()*xs1[i].mean_error() + xs2[i]._ye*xs2[i]._ye );
+
+    /// 5*"sigma on the mean" will be treated as our
+    /// ad hoc figure of merit
+    if ( s2>0 ) c2 += d*d/s2;
+    
+  }
+
+  /// return chi2 per dof
+  return c2/xs1.size();
+
+}
+
+
+
+
+double chi2_diff( Xsection& v1,  Xsection& v2, double chi2_limit=5 ) { 
+
+  if ( v1.size()!=v2.size() ) return 0;
+  
+  double chi2 = 0;
+  
+  bool neg = false;
+  bool pos = false;
+
+  double max_chi2 = 0;
+
+  for ( size_t i=0 ; i<v1.size() ; i++ ) { 
+    
+    //    double d  = v1[i]._y - v2[i]._y;
+    //    double s2 = v1[i]._ye*v1[i]._ye + v2[i]._ye*v2[i]._ye;
+
+    double d = v1[i].mean() - v2[i]._y;
+    /// take fractional sigma and turn it to actual sigma
+    double s2 = ( v1[i].mean_error()*v1[i].mean_error() + v2[i]._ye*v2[i]._ye );
+
+       
+    double c2 = 0;
+
+    if ( s2!=0 ) c2 = d*d/s2;
+   
+    chi2 +=  c2;
+    
+    //    std::cout << "diff " << i << " " << d << " " << c2 << std::endl;
+
+    /// ensure only large excursions in adjacent bins 
+    /// are counted
+    if ( c2 > chi2_limit ) { 
+      if ( d < 0 ) { 
+	neg = true;
+	if ( pos && c2>max_chi2 ) max_chi2 = c2;  
+      }
+      else if ( d > 0 ) { 
+	pos = true; 
+	if ( neg && c2>max_chi2 ) max_chi2 = c2;  
+      }
+    }
+    else { 
+      neg = false;
+      pos = false;
+    }
+
+  }
+  
+  return max_chi2;
+}
+
+
+
+
+
+
+
+
+
+/// streamers 
+
+std::ostream& operator<<( std::ostream& s, const bin& b ) { 
+  return s << "( " << b._x << " :\t" << b._y << " +- " << b._ye << " " << ( b._y!=0 ? 100*b._ye/b._y : 0 ) << "%   n: " << b._n << " )";
+}
+
+#if 0
+template<class T>
+std::ostream& operator<<( std::ostream& s, const std::vector<T>& v ) { 
+  for ( unsigned i=0 ; i<v.size() ; i++ ) s << " " << v[i] << "\n";
+  return s;
+}
+#endif
+
+std::ostream& operator<<( std::ostream& s, const Xsection& x ) {  return s << x._bins; }
+
+
+
+
+
+double exclude( Xsection xsec, appl::TH1D* ref, double chi2_limit=5 ) { 
+
+    Xsection txsec( ref );
+    Xsection diff = xsec;
+      
+    diff -= txsec;
+
+    /// chi2 of this xsec with respect to the mean *excluding* this one
+    double c2 = chi2_diff( diff, txsec, chi2_limit ); 
+
+    /// return it directly so the caller can compare against the limit
+    return c2;
+}
+
+
+std::vector<std::string> split( const std::string& input) { 
+    std::istringstream buffer(input);
+    std::vector<std::string> ret( (std::istream_iterator<std::string>(buffer)), 
+		          	   std::istream_iterator<std::string>());
+    return ret;
+}
+
+
+double purestod( const std::string& s ) { return std::stod(s); }
+
+class NormVal { 
+
+public:
+
+  typedef std::map< std::string, std::vector<double> > map_type;
+
+public:
+
+  NormVal( const std::string& f ) : mfile(f), mif(0) { 
+    if ( mfile=="" ) return; 
+    
+    /// first test if file actually exists ...
+    if ( !appl::file_exists( mfile)  ) {
+      std::cerr << "whoops, specified grid weight file does not exist" << std::endl;
+      return;
+    }
+    
+    /// then open it ...
+    mif = new std::ifstream(mfile.c_str());
+
+  } 
+  
+  
+
+ 
+ 
+  void read() { 
+
+    if ( mif==0 || mgridnames.size() ) return;
+
+    std::cout << "NormVal::read() " << mfile << std::endl;
+
+    mgridnames.clear();
+
+    std::string line = "";
+
+    int Ngrids = 0;
+
+    while ( std::getline( *mif, line ) ) { 
+
+      if ( line.find("#")==0 ) continue;
+
+      std::vector<std::string> vs = split( line );
+
+      if ( vs.size() ) { 
+	Ngrids++;
+	mgridnames.push_back(vs[0]);
+	std::vector<double> v; v.reserve( vs.size()-1 );
+	for ( size_t i=1 ; i<vs.size() ; i++ ) v.push_back( std::stod(vs[i]) );
+	mmap.insert( map_type::value_type( mgridnames.back(), v ) );
+      }
+    }
+
+  }
+
+    
+  std::ifstream* stream() { return mif; }
+
+  const std::vector<double>& weights( const std::string& s ) {
+    map_type::const_iterator it = mmap.find( s );
+    if ( it==mmap.end() ) throw appl::grid::exception( std::string( "NormVal::weights() cannot find weights for grid : ")+s ); 
+    return it->second;
+  } 
+
+  const std::vector<std::string>& grids() const { return mgridnames; }
+
+  map_type::const_iterator begin() const { return mmap.begin(); }
+  map_type::const_iterator end()   const { return mmap.end(); }
+
+  map_type gridmap() const { return mmap; }
+
+private:
+  
+  std::string                mfile;
+  std::ifstream*             mif;
+  std::vector<std::string>   mgridnames;
+  
+  //  std::vector< std::vector<double> > mnormval;
+  map_type  mmap;
+
+};
+
+
+std::ostream& operator<<( std::ostream& s, const NormVal& n ) { 
+  NormVal::map_type::const_iterator it = n.begin();
+  while ( it!=n.end() ) { 
+    s << "\t" << it->first << " : " << it->second.size() << "\n";
+    it++;
+  }
+  return s;
+}
+
+///  if requested, calculate the mean, median etc of all the cross sections
+///  from the grids, and exclude any (complete) grids where the cross section in 
+///  any bin differs by more than some chi2 limit from the mean ( or median ? ) 
+///  computed without including that grid
+///  - returns the updated ref vector, and fgrids vector with only the 
+///    accepted grids
+///
+///  NB: developing changes to exclude grids due to large, opposite sign 
+///      chi2 variations in *individual* adjacent bins only.
+
+bool thin_grids( double chi2_limit, std::vector<appl::TH1D*>& ref, std::vector<std::string>& fgrids ) { 
+
+  /// need at least 2 grids for an rms
+
+  if ( ref.size()<2 ) return false; 
+ 
+  /// calculate mean and median ...
+
+  Xsection xsec;
+  
+  for ( size_t i=0 ; i<ref.size() ; i++ ) xsec += ref[i];
+
+  //  std::cout << "median " << xsec.median() << std::endl;
+  //  std::cout << "mean   " << xsec.mean()   << std::endl;
+  
+  /// now find grids outside the relevant ranges ...
+
+  std::vector<std::string> fgrids_;
+
+  for ( size_t i=0 ; i<ref.size() ; i++ ) { 
+
+    double c2 = exclude( xsec, ref[i], chi2_limit );
+
+    std::cout << "grid: " << i << " " << c2 << std::endl;
+
+    if ( c2<chi2_limit ) fgrids_.push_back( fgrids[i] );
+
+  }
+  
+  if ( fgrids.size()>fgrids_.size() ) fgrids = fgrids_;
+
+  return true;
+}
+
+
+
+
+
+
+int main(int argc, char** argv) { 
+
+  /// check for the correct number of parameters
+  if ( argc<2 ) return usage( std::cerr, argc, argv );
+
+
+  std::string output_grid = "";
+
+  /// handle the "perform and exit" parameters 
+  for ( int i=1 ; i<argc ; i++ ) { 
+    if ( std::string(argv[i])=="-h" || std::string(argv[i])=="--help" )    return usage( std::cout, argc, argv ); 
+    if ( std::string(argv[i])=="-v" || std::string(argv[i])=="--version" ) {
+      std::cout << argv[0] << " APPLgrid version " << PACKAGE_VERSION << std::endl; 
+      return 0;
+    }
+  }
+
+  /// scaling factors
+  double dscale = 1; /// overall
+  double hscale = 1; /// histogram only
+  double rscale = 1; /// grid only
+
+  /// flags to see whether hscale or rscale have been set
+  bool hset = false;
+  bool rset = false;
+
+
+  bool verbose = false; 
+
+  /// by default add all histograms
+  bool addall = true;
+
+  /// if set, the following disables adding all histograms and 
+  /// excludes those with a chi2 larger than this value
+  double chi2_limit = 0;
+
+  /// the list of grids to process
+  std::vector<std::string> fgrids;
+
+  double weight = 0;
+  double wscale   = 1;
+
+  bool optimise = false;
+  bool shrink   = false;
+
+  std::string newpdfname = "";
+  std::string normfile   = "";
+
+  std::string pdfname = ""; 
+  int         ipdf    = 0;
+
+  /// handle configuration parameters
+  for ( int i=1 ; i<argc ; i++ ) { 
+    if      ( std::string(argv[i])=="--verbose" ) verbose = true;
+    else if ( std::string(argv[i])=="-o" ) { 
+      ++i;
+      if ( i<argc ) output_grid = argv[i];
+      else  return usage( std::cerr, argc, argv );
+    }
+    else if ( std::string(argv[i])=="-s" || std::string(argv[i])=="--scale" ) { 
+      ++i;
+      if ( i<argc ) dscale = std::atof(argv[i]);
+      else  return usage( std::cerr, argc, argv );
+    }
+    else if ( std::string(argv[i])=="-g" || std::string(argv[i])=="--gscale" ) { 
+      ++i;
+      if ( i<argc ) { 
+	rscale = std::atof(argv[i]);
+	rset = true;
+      }
+      else  return usage( std::cerr, argc, argv );
+    }
+    else if ( std::string(argv[i])=="--optimise" ) optimise = true;
+    else if ( std::string(argv[i])=="--compress" ) { 
+      ++i;
+      if ( i<argc ) { 
+	newpdfname = argv[i];
+	shrink = true;
+      }
+      else  return usage( std::cerr, argc, argv );
+    }
+    else if ( std::string(argv[i])=="--weight" ) {  
+      ++i;
+      if ( i<argc ) { 
+	weight = std::atof(argv[i]);
+      }
+      else  return usage( std::cerr, argc, argv );
+    }
+    else if ( std::string(argv[i])=="-w" || std::string(argv[i])=="--wscale" ) {  
+      ++i;
+      if ( i<argc ) { 
+	wscale = std::atof(argv[i]);
+      }
+      else  return usage( std::cerr, argc, argv );
+    }
+    else if ( std::string(argv[i])=="-r" || std::string(argv[i])=="--rscale" ) { 
+      ++i;
+      if ( i<argc ) { 
+	hscale = std::atof(argv[i]);
+	hset = true;
+      }
+      else  return usage( std::cerr, argc, argv );
+    }
+    else if ( std::string(argv[i])=="-c" || std::string(argv[i])=="--chi2" ) { 
+      ++i;
+      if ( i<argc ) { 
+	chi2_limit = std::atof(argv[i]);
+	addall = false;
+      }
+      else  return usage( std::cerr, argc, argv );
+    }
+    else if ( std::string(argv[i])=="-a" || std::string(argv[i])=="--all" ) addall = true;
+    else if ( std::string(argv[i])=="-n" || std::string(argv[i])=="--normfile" ) { 
+      ++i;
+      if ( i<argc ) normfile = argv[i];
+      else  return usage( std::cerr, argc, argv );
+    }
+    else if ( std::string(argv[i])=="-p" || std::string(argv[i])=="--pdf" ) { 
+      ++i;
+      if ( i<argc ) pdfname = argv[i];
+      else  return usage( std::cerr, argc, argv );
+    }
+    else if ( std::string(argv[i])=="-i" || std::string(argv[i])=="--ipdf" ) { 
+      ++i;
+      if ( i<argc ) ipdf = std::atoi(argv[i]);
+      else  return usage( std::cerr, argc, argv );
+    }
+    else if ( std::string(argv[i])[0]=='-' ) return usage( std::cerr, argc, argv ); 
+    else { 
+      if ( appl::file_exists(argv[i]) ) fgrids.push_back(argv[i]);
+    }
+  }
+
+  if ( dscale!=1 ) { 
+    if ( !hset ) hscale = dscale;
+    if ( !rset ) rscale = dscale; 
+  }
+
+  if ( output_grid=="" ) return usage(std::cerr, argc, argv);
+
+  /// if reading in normalisation file ...
+
+  NormVal* nv = 0;
+
+  std::cout << "normfile: " << normfile << std::endl;
+
+  if ( normfile!="" ) {
+    
+    /// read in all the coefficients and the grid file names ...
+    
+    if ( nv==0 ) nv = new NormVal( normfile );
+
+    nv->read();  //    std::cout << "NormVal:\n" << *nv << std::endl;
+
+    /// get all the grid files names to open ...
+
+    fgrids = nv->grids(); 
+
+  }
+
+
+  //  if ( fgrids.size()<1 && normfile!="" ) return usage(std::cerr, argc, argv);
+  if ( fgrids.size()<1 ) return usage(std::cerr, argc, argv);
+
+
+  /// before adding the grids together, we need to go through them to reject
+  /// the outliers
+
+  /// start by reading all the reference histograms and calculating the means etc
+
+  std::cout << "reading grids:\n" << fgrids << std::endl;
+  std::cout << "output to: "      << output_grid << std::endl;
+
+  std::vector<appl::TH1D*> ref;
+  ref.reserve(fgrids.size());
+
+  std::vector<std::string>::iterator gitr=fgrids.begin();
+
+  while ( gitr!=fgrids.end()  ) { 
+
+    appl::TH1D* h = 0;
+
+    if ( gitr->find(".root")==(gitr->size()-5) ) { 
+#ifdef USEROOT
+	TFile f(gitr->c_str());
+	TH1D* h_ = (TH1D*)f.Get("grid/reference");
+	h = convert( h_ );
+	delete h_;
+	f.Close();
+#else
+	std::cerr << "compiled without root - cannot read root files" << std::endl;
+	std::exit(-1);
+#endif
+    }
+    else { 
+      appl::file f( *gitr );
+      h = new appl::TH1D(f.Read<appl::TH1D>("reference"));
+      f.Close();
+    }
+
+    /// remove obvious non-grid files
+    if ( h ) { 
+      ref.push_back(h);
+      gitr++;
+    }
+    else {
+      std::cout << "skipping file: " << *gitr << std::endl;
+      fgrids.erase( gitr );
+    }
+  }
+
+  std::cout << "main() read reference histograms" << std::endl;
+
+
+  if ( ref.empty() ) { 
+    std::cerr << "grid list empty " << std::endl;
+    return 0;
+  }
+
+
+  if ( fgrids.empty() ) return 0;
+
+  if ( fgrids.size()==1 ) addall = true;
+
+  if ( !addall ) thin_grids( chi2_limit, ref, fgrids );
+
+  struct timeval tstart = appl_timer_start();
+
+
+  /// now add the grids together
+  
+  appl::grid*  gp = 0;
+
+  /// will use the rms of the different reference histograms to estimate the proper 
+  /// uncertainties
+
+#if 0  
+
+  int ig = 1;
+
+  g.getReference()->SetLineColor( ig++ );
+  g.getReference()->DrawCopy();
+
+  gPad->SetLogy(true);
+
+#endif
+
+  for ( unsigned i=0 ; i<fgrids.size() ; i++ ) { 
+
+    double t = appl_timer_stop( tstart )*0.001; 
+
+    double remaining = ( i>0 ? fgrids.size()*t/i - t : 0 );
+
+    std::cout << "applgrid-combine: adding grid " << i+1 << " of " << fgrids.size() 
+	      << "\ttime so far " << t << "s"  
+	      << "\testimated time remaining " << remaining << "s"  
+	      << std::endl;  
+
+    appl::grid*  _gp = new appl::grid( fgrids[i] );
+    
+    appl::grid& _g = *_gp;
+
+    _g.untrim();
+    if ( verbose ) std::cout << _g.getDocumentation() << std::endl;
+
+    if ( nv!=0 ) { 
+
+      std::vector<double> vc = nv->weights( fgrids[i] );
+
+      if ( vc.size() != size_t(_g.Nobs()) ) { 
+	std::cerr << "bin size mismatch: " << vc.size() << " " << _g.Nobs() << std::endl;
+	std::exit(-1);
+      }
+
+      if ( _g.run() ) { 
+	_g *= 1/_g.run();
+	_g.run() = 0;
+      }
+
+      //      std::cout << "vc.size: " << vc.size() << std::endl;
+      //      for ( size_t iv=0 ; iv<vc.size() ; iv++ ) vc[iv] *= fgrids.size(); 
+
+      std::cout << "scaling grid: " << fgrids[i] << std::endl;
+
+      _g *= vc;
+
+    }
+
+    if ( i==0 )  gp  = _gp;  
+    else         { 
+      (*gp) += _g;
+      delete _gp;
+    }
+
+  }
+
+  if ( gp==0 ) { std::cerr << "couldn't open grid " << std::endl; std::exit(-1); }
+
+  appl::grid& g = *gp;
+  
+  ///  for ( int i=0 ; i<h->GetNbinsX() ; i++ ) { std::cout << "h " << i << " " << h->GetBinContent(i+1) << std::endl; }   
+  ///  std::cout << "raw " << xsec << "\n" << xsec.mean() << "\n" << (xsec*fgrids.size()).mean() << std::endl;
+  
+  //  std::cout << "rintegral " << integral( g.getReference() ) << std::endl;
+
+
+  if ( rscale!=1 ) { 
+    g *= rscale;                         /// scales the grid *and* the reference histogram
+    g.getReference()->Scale( 1/rscale ); /// scale back down the reference histogram
+    //    g.Write(output_grid);
+  }
+
+
+  //  if ( hscale!=rscale ) g.getReference()->Scale( hscale/rscale );
+  if ( hscale!=1 ) { 
+    g.getReference()->Scale( hscale );
+  }
+
+  if      ( wscale!=1 ) g.run() *= wscale;
+  else if ( weight!=0 ) g.run()  = weight;
+
+
+  if ( shrink ) { 
+    struct timeval toptstart = appl_timer_start(); 
+    g.shrink( newpdfname ); 
+    double topt = appl_timer_stop( toptstart ); 
+    std::cout << argv[0] << ": compressed grid in " << topt << " ms" << std::endl;   
+  }
+
+
+  if ( optimise ) { 
+    struct timeval toptstart = appl_timer_start(); 
+    g.optimise();
+    double topt = appl_timer_stop( toptstart ); 
+    std::cout << argv[0] << ": optimised grid in " << topt << " ms" << std::endl;   
+  }
+
+
+  if ( pdfname!="" ) {
+    g.setGeneratedPDF( pdfname );
+    g.setGeneratediPDF( ipdf );
+  }
+
+  //  std::cout << "writing " << output_grid << std::endl;
+  g.Write(output_grid);
+
+  double t = appl_timer_stop( tstart )*0.001; 
+  
+  std::cout << argv[0] << ": added " << fgrids.size() << " grids in " << t << " s" << std::endl; 
+  std::cout << argv[0] << ": output to  " << output_grid << std::endl; 
+  
+
+  /// stupid threads don't always die properly
+  std::exit(0);
+  return 0;
+}
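For reference, a sketch of the plain-text layout that NormVal::read() above accepts for the -n/--normfile option; the file name and grid names here are hypothetical. Lines beginning with '#' are comments, and each remaining line gives a grid file name followed by one weight per observable bin:

    # hypothetical normalisation file for applgrid-combine -n
    # <grid file>       <weight per bin ...>
    grid-part1.root     1.0  1.0  0.5
    grid-part2.root     0.0  1.0  1.0
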
Index: applgrid/tags/applgrid-01-06-36/src/mcfmQQ_pdf.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/mcfmQQ_pdf.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/mcfmQQ_pdf.cxx	(revision 2010)
@@ -0,0 +1,21 @@
+
+
+#include "appl_grid/appl_pdf.h" 
+
+#include "mcfmQQ_pdf.h"
+
+
+// fortran callable wrapper
+extern "C" void fmcfmBB_pdf__(const double* fA, const double* fB, double* H) { 
+  static mcfmBB_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+extern "C" void fmcfmCC_pdf__(const double* fA, const double* fB, double* H) { 
+  static mcfmCC_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+extern "C" void fmcfmTT_pdf__(const double* fA, const double* fB, double* H) { 
+  static mcfmTT_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+
Index: applgrid/tags/applgrid-01-06-36/src/TFileString.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/TFileString.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/TFileString.h	(revision 2010)
@@ -0,0 +1,87 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    TFileString.h
+ **
+ **     @brief   root TObject class for writing std::string vectors
+ **              to root files               
+ **
+ **     @author  mark sutton
+ **     @date    Sat Mar 15 19:49:16 GMT 2008 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: TFileString.h, v0.0   Sat Mar 15 19:49:16 GMT 2008 sutt $
+ **
+ **/
+
+
+#ifndef __TFILESTRING_H
+#define __TFILESTRING_H
+
+#include "appl_grid/appl_root.h"
+
+#ifdef USEROOT
+
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include "TObjString.h"
+#include "TObject.h"
+
+
+class TFileString : public TObjString { 
+
+public:
+  
+  TFileString(const std::string& name="") : TObjString(name.c_str()) { } 
+
+  TFileString(const std::string& name, const std::string& tag) : 
+    TObjString(name.c_str())
+  { mstring.push_back(tag.c_str()); } 
+ 
+  std::vector<std::string>&       tags()       { return mstring; }
+  const std::vector<std::string>& tags() const { return mstring; }
+
+  // get the name
+  std::string  name() const { return GetName(); } 
+
+  // get a value 
+  std::string& operator[](int i)       { return mstring[i]; }
+  std::string  operator[](int i) const { return mstring[i]; }
+
+  std::string& at(int i)       { return mstring.at(i); }
+  std::string  at(int i) const { return mstring.at(i); }
+  
+  // get the size
+  size_t size()           const { return mstring.size(); } 
+
+  // add an element
+  void push_back(const std::string& s) { mstring.push_back(s); }
+  void add(const std::string& s)       { mstring.push_back(s); }
+
+private:
+  
+  std::vector<std::string> mstring;
+
+  ClassDef(TFileString, 1)
+
+}; 
+
+
+std::ostream& operator<<(std::ostream& s, const TFileString& fs);
+
+
+#endif
+
+#endif  // __TFILESTRING_H 
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/vrapz_pdf.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/vrapz_pdf.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/vrapz_pdf.h	(revision 2010)
@@ -0,0 +1,273 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    vrapz_pdf.h
+ **
+ **     @brief   vrapz_pdf.h        
+ **              pdf transform functions for vrap Z production                   
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: dis_pdf.h, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+#ifndef __VRAPZ_PDF_H
+#define __VRAPZ_PDF_H
+
+// #include "EWparams.h"
+
+#include "appl_grid/appl_pdf.h" 
+
+
+
+// LO vrap pdf combination class 
+
+class vrapzLO_pdf : public appl::appl_pdf {
+
+public:
+
+  vrapzLO_pdf() : appl_pdf("vrapzLO") { 
+    m_Nproc=2; 
+  } 
+
+  void evaluate(const double* fA, const double* fB, double* H) const ;
+
+};  
+
+
+inline void  vrapzLO_pdf::evaluate(const double* fA, const double* fB, double* H) const {  
+
+  fA += 6;  // offset internal pointers so can use fA[-6]..fA[6] for simplicity
+  fB += 6;  // offset internal pointers so can use fB[-6]..fB[6] for simplicity
+
+  // qqbar_lumi for LO tree level contributions
+  double U=0,  D=0;
+
+  for ( int i=1 ; i<=5 ; i+=2 ) D +=  fA[i]*fB[-i] + fA[-i]*fB[i] ;
+  for ( int i=2 ; i<6 ; i+=2 )  U +=  fA[i]*fB[-i] + fA[-i]*fB[i] ;
+
+  H[0] =  D ;  
+  H[1] =  U ;  
+
+}
+
+
+
+
+
+
+
+
+// NLO vrap pdf combination class 
+
+class vrapzNLO_pdf : public appl::appl_pdf {
+
+public:
+
+  vrapzNLO_pdf() : appl_pdf("vrapzNLO") { 
+    m_Nproc=6; 
+  } 
+
+  void evaluate(const double* fA, const double* fB, double* H) const;
+
+};  
+
+
+
+
+inline void  vrapzNLO_pdf::evaluate(const double* fA, const double* fB, double* H) const {  
+
+  fA += 6;  // offset internal pointers so can use fA[-6]..fA[6] for simplicity
+  fB += 6;  // offset internal pointers so can use fB[-6]..fB[6] for simplicity
+
+  double GA=fA[0];
+  double GB=fB[0]; 
+
+  // qqbar_lumi for LO tree level contributions
+  double U=0,  D=0;
+  double UG=0, DG=0;
+  double GU=0, GD=0;
+
+  for ( int i=1 ; i<=5 ; i+=2 ) D +=  fA[i]*fB[-i] + fA[-i]*fB[i] ;
+  for ( int i=2 ; i<6 ; i+=2 )  U +=  fA[i]*fB[-i] + fA[-i]*fB[i] ;
+
+  H[0] =  D ;  
+  H[1] =  U ;  
+
+  /// qqbar_ax_lumi same as for qqbar_lumi only multiplied by aaf_d, aadf_u rather 
+  /// than qpref_d and qpref_u
+
+  /// for gq_lumi
+  for ( int i=1 ; i<=5 ; i+=2 ) GD +=  GA*(fB[-i] + fB[i]) ;
+  for ( int i=2 ; i<6 ; i+=2 )  GU +=  GA*(fB[-i] + fB[i]) ;
+
+  H[2] = GD; 
+  H[3] = GU; 
+ 
+  /// for qg_lumi
+  for ( int i=1 ; i<=5 ; i+=2 ) DG +=  GB*(fA[i] + fA[-i]);
+  for ( int i=2 ; i<6 ; i+=2 )  UG +=  GB*(fA[i] + fA[-i]);
+
+  /// need the vsqasq_d and vsqasq_u terms
+  H[4] = DG; 
+  H[5] = UG; 
+
+#if 0
+  std::cout << "appl::vrpz fA ";  for ( int i=-6 ; i<=6 ; i++ ) std::cout << "\t" << fA[i];  std::cout << std::endl;
+  std::cout << "appl::vrpz fB ";  for ( int i=-6 ; i<=6 ; i++ ) std::cout << "\t" << fB[i];  std::cout << std::endl;
+  std::cout << "appl::vrpz  H ";  for ( int i=0  ; i<6 ; i++ ) std::cout << "\t" << H[i];   std::cout << std::endl;
+#endif
+
+
+}
+
+
+
+
+
+// vrap pdf combination class 
+
+class vrapzNNLO_pdf : public appl::appl_pdf {
+
+public:
+
+  vrapzNNLO_pdf() : appl_pdf("vrapzNNLO") { 
+    m_Nproc=19; 
+  } 
+
+  void evaluate(const double* fA, const double* fB, double* H) const;
+
+};  
+
+
+inline void  vrapzNNLO_pdf::evaluate(const double* fA, const double* fB, double* H) const {  
+
+  fA += 6;  // offset internal pointers so can use fA[-6]..fA[6] for simplicity
+  fB += 6;  // offset internal pointers so can use fB[-6]..fB[6] for simplicity
+
+  double GA=fA[0];
+  double GB=fB[0]; 
+
+  // qqbar_lumi for LO tree level contributions
+  double U=0,  D=0;
+  double UG=0, DG=0;
+  double GU=0, GD=0;
+
+  for ( int i=1 ; i<=5 ; i+=2 ) D +=  fA[i]*fB[-i] + fA[-i]*fB[i] ;
+  for ( int i=2 ; i<6 ; i+=2 )  U +=  fA[i]*fB[-i] + fA[-i]*fB[i] ;
+
+  H[0] =  D ;  
+  H[1] =  U ;  
+
+  /// qqbar_ax_lumi same as for qqbar_lumi only multiplied by aaf_d, aadf_u rather 
+  /// than qpref_d and qpref_u
+
+  /// for gq_lumi
+  for ( int i=1 ; i<=5 ; i+=2 ) GD +=  GA*(fB[-i] + fB[i]) ;
+  for ( int i=2 ; i<6 ; i+=2 )  GU +=  GA*(fB[-i] + fB[i]) ;
+
+  H[2] = GD; 
+  H[3] = GU; 
+ 
+  /// for qg_lumi
+  for ( int i=1 ; i<=5 ; i+=2 ) DG +=  GB*(fA[i] + fA[-i]);
+  for ( int i=2 ; i<6 ; i+=2 )  UG +=  GB*(fA[i] + fA[-i]);
+
+  /// need the vsqasq_d and vsqasq_u terms
+  H[4] = DG; 
+  H[5] = UG; 
+
+  /// qq_11_lumi
+  double UQ11 = 0;
+  double DQ11 = 0;
+
+  double QB = 0;
+
+  //  for ( int i=1 ; i<=5 ; i++ )    QB +=  fB[i] + fB[-i];
+  for ( int i=1 ; i<=5 ; i+=2 ) DQ11 +=  fA[i] + fA[-i];
+  for ( int i=2 ; i<=6 ; i+=2 ) UQ11 +=  fA[i] + fA[-i];
+  
+
+  /// need the vsqasq_d and vsqasq_u terms
+  H[6] = DQ11 * QB;
+  H[7] = UQ11 * QB;
+
+
+  /// qq_22_lumi
+  double UQ22 = 0;
+  double DQ22 = 0;
+
+  double QA = 0;
+
+  for ( int i=1 ; i<=5 ; i++ )    QA +=  fA[i] + fA[-i];
+  for ( int i=1 ; i<=5 ; i+=2 ) DQ22 +=  fB[i] + fB[-i];
+  for ( int i=2 ; i<=6 ; i+=2 ) UQ22 +=  fB[i] + fB[-i];
+  
+  /// need the vsqasq_d and vsqasq_u terms
+  H[8] = DQ22 * QA;
+  H[9] = UQ22 * QA;
+
+
+  /// qq_12_lumi NB: these *assume* that c=cbar, s=sbar etc.
+  double UU12 = 0;
+  double UD12 = 0;
+  double DD12 = 0;
+
+
+  /// need the vdvd, vuvd, vuvu terms
+  H[10] = DD12 = ( fA[1] - fA[-1] ) * ( fB[1] - fB[-1] ) ; 
+  H[11] = UD12 = ( fA[2] - fA[-2] ) * ( fB[1] - fB[-1] ) + ( fA[1] - fA[-1] ) * ( fB[2] - fB[-2] ) ;
+  H[12] = UU12 = ( fA[2] - fA[-2] ) * ( fB[2] - fB[-2] ) ;
+
+  /// NB: for ppbar H[10-12] -> - H[10-12] ??? 
+
+
+  /// qq_12_ax_lumi
+  /// need the adad, auad and auau terms
+  H[13] =  ( fA[1] + fA[-1] + fA[3] + fA[-3] + fA[5] - fA[-5] ) * ( fB[1] + fB[-1] + fB[3] + fB[-3] + fB[5] - fB[-5] ); 
+  H[14] =  ( fA[2] + fA[-2] + fA[4] + fA[-4] ) * ( fB[1] + fB[-1] + fB[3] + fB[-3] + fB[5] - fB[-5] ) + 
+           ( fB[2] + fB[-2] + fB[4] + fB[-4] ) * ( fA[1] + fA[-1] + fA[3] + fA[-3] + fA[5] - fA[-5] ); 
+  H[15] =  ( fA[2] + fA[-2] + fA[4] + fA[-4] ) * ( fB[2] + fB[-2] + fB[4] + fB[-4] ) ;
+
+
+  /// qq_CE1_lumi
+
+  double UUCE = 0;
+  double DDCE = 0;
+
+  for ( int i=1 ; i<=5 ; i+=2 )  DDCE +=  fA[i]*fB[i] + fA[-i]*fB[-i] ;
+  for ( int i=2 ; i<6  ; i+=2 )  UUCE +=  fA[i]*fB[i] + fA[-i]*fB[-i] ;
+
+  /// need the vsqasq_d and vsqasq_u terms
+  H[16] = DDCE;
+  H[17] = UUCE;
+
+  /// qq_CE2_lumi for Z production is the same as qq_CE1_lumi
+  /// qq_CF_lumi  for Z production is the same as qq_CE1_lumi
+  
+  /// qqbar_BC_lumi for Z production  is the same as 2 * qqbar_lumi 
+
+  /// gg_lumi
+  H[18] = GA*GB;
+
+#if 0
+  std::cout << "appl::vrpz fA ";  for ( int i=-6 ; i<=6 ; i++ ) std::cout << "\t" << fA[i];  std::cout << std::endl;
+  std::cout << "appl::vrpz fB ";  for ( int i=-6 ; i<=6 ; i++ ) std::cout << "\t" << fB[i];  std::cout << std::endl;
+  std::cout << "appl::vrpz  H ";  for ( int i=0  ; i<18 ; i++ ) std::cout << "\t" << H[i];   std::cout << std::endl;
+#endif
+
+}
+
+
+extern "C" void fvrapzLO_pdf__(const double* fA, const double* fB, double* H);
+extern "C" void fvrapzNLO_pdf__(const double* fA, const double* fB, double* H);
+extern "C" void fvrapzNNLO_pdf__(const double* fA, const double* fB, double* H);
+
+
+#endif  // __VRAPZ_PDF_H
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/Makefile.in
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/Makefile.in	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/Makefile.in	(revision 2010)
@@ -0,0 +1,1036 @@
+# Makefile.in generated by automake 1.16.5 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2021 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+##########################################################################
+#
+#   File:         Makefile.am
+#
+#                 Copyright (C) M.Sutton (sutt@cern.ch) 
+#
+#   Description:  generate the Makefile for the appl_grid project
+#                 
+#
+#   Created:    M.Sutton (sutt@hep.ucl.ac.uk) Fri  5 Nov 2010 00:05:29 GMT
+#   
+#   $Id: makefile 20 2008-05-05 01:55:42Z sutt $                
+#                   
+#########################################################################
+
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+  if test -z '$(MAKELEVEL)'; then \
+    false; \
+  elif test -n '$(MAKE_HOST)'; then \
+    true; \
+  elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+    true; \
+  else \
+    false; \
+  fi; \
+}
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+bin_PROGRAMS = applgrid-combine$(EXEEXT) applgrid-convert$(EXEEXT)
+@USE_ROOT_TRUE@am__append_1 = TFileStringDict.cxx TFileVectorDict.cxx 
+subdir = src
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(library_include_HEADERS) \
+	$(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/amconfig.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(libdir)" \
+	"$(DESTDIR)$(library_includedir)"
+PROGRAMS = $(bin_PROGRAMS)
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+  for p in $$list; do echo "$$p $$p"; done | \
+  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+    if (++n[$$2] == $(am__install_max)) \
+      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+    END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+  test -z "$$files" \
+    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+         $(am__cd) "$$dir" && rm -f $$files; }; \
+  }
+LTLIBRARIES = $(lib_LTLIBRARIES)
+libAPPLgrid_la_LIBADD =
+am__libAPPLgrid_la_SOURCES_DIST = appl_grid.cxx appl_igrid.cxx \
+	fastnlo.cxx appl_timer.cxx appl_pdf.cxx appl_file.cxx \
+	file_index.cxx histogram.cxx nlojet_pdf.cxx nlojetpp_pdf.cxx \
+	mcfmw_pdf.cxx mcfmwjet_pdf.cxx mcfmwc_pdf.cxx mcfmz_pdf.cxx \
+	mcfmzjet_pdf.cxx mcfmQQ_pdf.cxx jetrad_pdf.cxx dis_pdf.cxx \
+	generic_pdf.cxx basic_pdf.cxx combination.cxx lumi_pdf.cxx \
+	SparseMatrix3d.cxx hoppet_init.cxx TFileString.cxx \
+	TFileVector.cxx integral.cxx *.h TFileStringDict.cxx \
+	TFileVectorDict.cxx
+@USE_ROOT_TRUE@am__objects_1 = TFileStringDict.lo TFileVectorDict.lo
+am_libAPPLgrid_la_OBJECTS = appl_grid.lo appl_igrid.lo fastnlo.lo \
+	appl_timer.lo appl_pdf.lo appl_file.lo file_index.lo \
+	histogram.lo nlojet_pdf.lo nlojetpp_pdf.lo mcfmw_pdf.lo \
+	mcfmwjet_pdf.lo mcfmwc_pdf.lo mcfmz_pdf.lo mcfmzjet_pdf.lo \
+	mcfmQQ_pdf.lo jetrad_pdf.lo dis_pdf.lo generic_pdf.lo \
+	basic_pdf.lo combination.lo lumi_pdf.lo SparseMatrix3d.lo \
+	hoppet_init.lo TFileString.lo TFileVector.lo integral.lo \
+	$(am__objects_1)
+libAPPLgrid_la_OBJECTS = $(am_libAPPLgrid_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 = 
+libfAPPLgrid_la_LIBADD =
+am_libfAPPLgrid_la_OBJECTS = fappl_grid.lo fappl_fitter.lo
+libfAPPLgrid_la_OBJECTS = $(am_libfAPPLgrid_la_OBJECTS)
+am_applgrid_combine_OBJECTS = applgrid_combine-combine.$(OBJEXT)
+applgrid_combine_OBJECTS = $(am_applgrid_combine_OBJECTS)
+applgrid_combine_DEPENDENCIES = libAPPLgrid.la
+applgrid_combine_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
+	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
+	$(applgrid_combine_CXXFLAGS) $(CXXFLAGS) \
+	$(applgrid_combine_LDFLAGS) $(LDFLAGS) -o $@
+am_applgrid_convert_OBJECTS = applgrid_convert-convert.$(OBJEXT)
+applgrid_convert_OBJECTS = $(am_applgrid_convert_OBJECTS)
+applgrid_convert_DEPENDENCIES = libAPPLgrid.la
+applgrid_convert_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
+	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
+	$(applgrid_convert_CXXFLAGS) $(CXXFLAGS) \
+	$(applgrid_convert_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/depcomp
+am__maybe_remake_depfiles = depfiles
+am__depfiles_remade = ./$(DEPDIR)/SparseMatrix3d.Plo \
+	./$(DEPDIR)/TFileString.Plo ./$(DEPDIR)/TFileStringDict.Plo \
+	./$(DEPDIR)/TFileVector.Plo ./$(DEPDIR)/TFileVectorDict.Plo \
+	./$(DEPDIR)/appl_file.Plo ./$(DEPDIR)/appl_grid.Plo \
+	./$(DEPDIR)/appl_igrid.Plo ./$(DEPDIR)/appl_pdf.Plo \
+	./$(DEPDIR)/appl_timer.Plo \
+	./$(DEPDIR)/applgrid_combine-combine.Po \
+	./$(DEPDIR)/applgrid_convert-convert.Po \
+	./$(DEPDIR)/basic_pdf.Plo ./$(DEPDIR)/combination.Plo \
+	./$(DEPDIR)/dis_pdf.Plo ./$(DEPDIR)/fappl_fitter.Plo \
+	./$(DEPDIR)/fappl_grid.Plo ./$(DEPDIR)/fastnlo.Plo \
+	./$(DEPDIR)/file_index.Plo ./$(DEPDIR)/generic_pdf.Plo \
+	./$(DEPDIR)/histogram.Plo ./$(DEPDIR)/hoppet_init.Plo \
+	./$(DEPDIR)/integral.Plo ./$(DEPDIR)/jetrad_pdf.Plo \
+	./$(DEPDIR)/lumi_pdf.Plo ./$(DEPDIR)/mcfmQQ_pdf.Plo \
+	./$(DEPDIR)/mcfmw_pdf.Plo ./$(DEPDIR)/mcfmwc_pdf.Plo \
+	./$(DEPDIR)/mcfmwjet_pdf.Plo ./$(DEPDIR)/mcfmz_pdf.Plo \
+	./$(DEPDIR)/mcfmzjet_pdf.Plo ./$(DEPDIR)/nlojet_pdf.Plo \
+	./$(DEPDIR)/nlojetpp_pdf.Plo
+am__mv = mv -f
+CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \
+	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+	$(AM_CXXFLAGS) $(CXXFLAGS)
+AM_V_CXX = $(am__v_CXX_@AM_V@)
+am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@)
+am__v_CXX_0 = @echo "  CXX     " $@;
+am__v_CXX_1 = 
+CXXLD = $(CXX)
+CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
+	$(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CXXLD = $(am__v_CXXLD_@AM_V@)
+am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@)
+am__v_CXXLD_0 = @echo "  CXXLD   " $@;
+am__v_CXXLD_1 = 
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+	$(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo "  CC      " $@;
+am__v_CC_1 = 
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo "  CCLD    " $@;
+am__v_CCLD_1 = 
+SOURCES = $(libAPPLgrid_la_SOURCES) $(libfAPPLgrid_la_SOURCES) \
+	$(applgrid_combine_SOURCES) $(applgrid_convert_SOURCES)
+DIST_SOURCES = $(am__libAPPLgrid_la_SOURCES_DIST) \
+	$(libfAPPLgrid_la_SOURCES) $(applgrid_combine_SOURCES) \
+	$(applgrid_convert_SOURCES)
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+HEADERS = $(library_include_HEADERS)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPPFLAGS = @CPPFLAGS@
+CSCOPE = @CSCOPE@
+CTAGS = @CTAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ETAGS = @ETAGS@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+
+# fortran RTL
+FRTLLIB = 
+@USE_FRTLSO_FALSE@FRTLLIB = -L$(subst /libgfortran.a, ,$(FRTLIB) ) -lgfortran 
+@USE_FRTLSO_TRUE@FRTLLIB = -L$(subst /libgfortran.so, ,$(FRTLIB) ) -lgfortran 
+GREP = @GREP@
+HOPPETPATH = @HOPPETPATH@
+HOPPET_CXXFLAGS = @HOPPET_CXXFLAGS@
+HOPPET_LDFLAGS = @HOPPET_LDFLAGS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LHAPDFPATH = @LHAPDFPATH@
+LHAPDF_CXXFLAGS = @LHAPDF_CXXFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+ROOTPATH = @ROOTPATH@
+ROOT_CXXFLAGS = @ROOT_CXXFLAGS@
+ROOT_LDFLAGS = @ROOT_LDFLAGS@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+USEROOT = @USEROOT@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+@USE_ROOT_FALSE@ROOTCFLAGS = 
+@USE_ROOT_TRUE@ROOTCFLAGS = $(shell root-config --cflags)
+@USE_ROOT_FALSE@ROOTLIBS = 
+@USE_ROOT_TRUE@ROOTLIBS = $(shell root-config --nonew --libs)
+@USE_ROOT_FALSE@ROOTGLIBS = 
+@USE_ROOT_TRUE@ROOTGLIBS = $(shell root-config --nonew --glibs)
+@USE_ROOT_FALSE@ROOTARCH = 
+@USE_ROOT_TRUE@ROOTARCH = $(shell root-config --ldflags)
+@USE_ROOT_FALSE@ROOTCINT = 
+@USE_ROOT_TRUE@ROOTCINT = rootcint
+@USE_HOPPET_FALSE@HOPPETDIR = 
+
+# hoppet flags
+@USE_HOPPET_TRUE@HOPPETDIR = $(shell hoppet-config --prefix)
+@USE_HOPPET_FALSE@HOPPETCFLAGS = 
+@USE_HOPPET_TRUE@HOPPETCFLAGS = $(shell hoppet-config --cxxflags)
+@USE_HOPPET_FALSE@HOPPETLIBS = 
+@USE_HOPPET_TRUE@HOPPETLIBS = $(shell hoppet-config --libs) 
+@USE_FRTLSO_FALSE@FRTLIB = $(shell gfortran -print-file-name=libgfortran.a)
+@USE_FRTLSO_TRUE@FRTLIB = $(shell gfortran -print-file-name=libgfortran.so)
+@USE_LHAPDF_FALSE@LHAPDFCFLAGS = 
+@USE_LHAPDF_TRUE@LHAPDFCFLAGS = $(shell lhapdf-config --cxxflags)
+AM_CXXFLAGS = $(ROOTARCH) -O3 -DDATADIR=\"@datadir@/$(PACKAGE)\" -Wall -Wextra -fPIC  -I. -I.. -I$(srcdir) $(LHAPDFCXXFLAGS) $(ROOTCFLAGS)  $(HOPPETCFLAGS) -fPIC
+AM_LDFLAGS = $(ROOTARCH) -O3  $(ROOTLIBS) $(FCLIBS) $(HOPPETLIBS) $(FRTLLIB)  
+applgrid_combine_SOURCES = combine.cxx
+applgrid_combine_LDADD = libAPPLgrid.la
+applgrid_combine_CXXFLAGS = $(AM_CXXFLAGS) 
+applgrid_combine_LDFLAGS = $(ROOTARCH) -L. -lAPPLgrid $(ROOTLIBS) $(HOPPETLIBS) $(FRTLLIB)
+applgrid_convert_SOURCES = convert.cxx
+applgrid_convert_LDADD = libAPPLgrid.la
+applgrid_convert_CXXFLAGS = $(AM_CXXFLAGS) 
+applgrid_convert_LDFLAGS = $(ROOTARCH) -L. -lAPPLgrid $(ROOTLIBS) $(HOPPETLIBS) $(FRTLLIB)
+lib_LTLIBRARIES = libAPPLgrid.la libfAPPLgrid.la 
+libAPPLgrid_la_SOURCES = appl_grid.cxx appl_igrid.cxx fastnlo.cxx \
+	appl_timer.cxx appl_pdf.cxx appl_file.cxx file_index.cxx \
+	histogram.cxx nlojet_pdf.cxx nlojetpp_pdf.cxx mcfmw_pdf.cxx \
+	mcfmwjet_pdf.cxx mcfmwc_pdf.cxx mcfmz_pdf.cxx mcfmzjet_pdf.cxx \
+	mcfmQQ_pdf.cxx jetrad_pdf.cxx dis_pdf.cxx generic_pdf.cxx \
+	basic_pdf.cxx combination.cxx lumi_pdf.cxx SparseMatrix3d.cxx \
+	hoppet_init.cxx TFileString.cxx TFileVector.cxx integral.cxx \
+	*.h $(am__append_1)
+libfAPPLgrid_la_SOURCES = fappl_grid.cxx fappl_fitter.cxx
+library_includedir = $(includedir)/appl_grid
+library_include_HEADERS = ../appl_grid/*.h
+
+# GSLLIBS=$(shell gsl-config --libs)
+# WDIR = $(shell pwd)
+
+# if WITHCLING
+### ROOT-6: ###
+
+# rootmapdir = $(libdir)
+# rootmap_DATA = .libs/*.pcm
+
+# endif
+AM_FCFLAGS = -c
+AM_FCLIBS = -lg2c
+AM_SOFLAGS = -shared
+CINT = rootcint
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .cxx .lo .o .obj
+$(srcdir)/Makefile.in:  $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu src/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu src/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure:  $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4):  $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-binPROGRAMS: $(bin_PROGRAMS)
+	@$(NORMAL_INSTALL)
+	@list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
+	if test -n "$$list"; then \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \
+	fi; \
+	for p in $$list; do echo "$$p $$p"; done | \
+	sed 's/$(EXEEXT)$$//' | \
+	while read p p1; do if test -f $$p \
+	 || test -f $$p1 \
+	  ; then echo "$$p"; echo "$$p"; else :; fi; \
+	done | \
+	sed -e 'p;s,.*/,,;n;h' \
+	    -e 's|.*|.|' \
+	    -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
+	sed 'N;N;N;s,\n, ,g' | \
+	$(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
+	  { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+	    if ($$2 == $$4) files[d] = files[d] " " $$1; \
+	    else { print "f", $$3 "/" $$4, $$1; } } \
+	  END { for (d in files) print "f", d, files[d] }' | \
+	while read type dir files; do \
+	    if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+	    test -z "$$files" || { \
+	    echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \
+	    $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \
+	    } \
+	; done
+
+uninstall-binPROGRAMS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
+	files=`for p in $$list; do echo "$$p"; done | \
+	  sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
+	      -e 's/$$/$(EXEEXT)/' \
+	`; \
+	test -n "$$list" || exit 0; \
+	echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \
+	cd "$(DESTDIR)$(bindir)" && rm -f $$files
+
+clean-binPROGRAMS:
+	@list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \
+	echo " rm -f" $$list; \
+	rm -f $$list || exit $$?; \
+	test -n "$(EXEEXT)" || exit 0; \
+	list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
+	echo " rm -f" $$list; \
+	rm -f $$list
+
+install-libLTLIBRARIES: $(lib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
+	list2=; for p in $$list; do \
+	  if test -f $$p; then \
+	    list2="$$list2 $$p"; \
+	  else :; fi; \
+	done; \
+	test -z "$$list2" || { \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \
+	}
+
+uninstall-libLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
+	for p in $$list; do \
+	  $(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \
+	done
+
+clean-libLTLIBRARIES:
+	-test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES)
+	@list='$(lib_LTLIBRARIES)'; \
+	locs=`for p in $$list; do echo $$p; done | \
+	      sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+	      sort -u`; \
+	test -z "$$locs" || { \
+	  echo rm -f $${locs}; \
+	  rm -f $${locs}; \
+	}
+
+libAPPLgrid.la: $(libAPPLgrid_la_OBJECTS) $(libAPPLgrid_la_DEPENDENCIES) $(EXTRA_libAPPLgrid_la_DEPENDENCIES) 
+	$(AM_V_CXXLD)$(CXXLINK) -rpath $(libdir) $(libAPPLgrid_la_OBJECTS) $(libAPPLgrid_la_LIBADD) $(LIBS)
+
+libfAPPLgrid.la: $(libfAPPLgrid_la_OBJECTS) $(libfAPPLgrid_la_DEPENDENCIES) $(EXTRA_libfAPPLgrid_la_DEPENDENCIES) 
+	$(AM_V_CXXLD)$(CXXLINK) -rpath $(libdir) $(libfAPPLgrid_la_OBJECTS) $(libfAPPLgrid_la_LIBADD) $(LIBS)
+
+applgrid-combine$(EXEEXT): $(applgrid_combine_OBJECTS) $(applgrid_combine_DEPENDENCIES) $(EXTRA_applgrid_combine_DEPENDENCIES) 
+	@rm -f applgrid-combine$(EXEEXT)
+	$(AM_V_CXXLD)$(applgrid_combine_LINK) $(applgrid_combine_OBJECTS) $(applgrid_combine_LDADD) $(LIBS)
+
+applgrid-convert$(EXEEXT): $(applgrid_convert_OBJECTS) $(applgrid_convert_DEPENDENCIES) $(EXTRA_applgrid_convert_DEPENDENCIES) 
+	@rm -f applgrid-convert$(EXEEXT)
+	$(AM_V_CXXLD)$(applgrid_convert_LINK) $(applgrid_convert_OBJECTS) $(applgrid_convert_LDADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SparseMatrix3d.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TFileString.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TFileStringDict.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TFileVector.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TFileVectorDict.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/appl_file.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/appl_grid.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/appl_igrid.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/appl_pdf.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/appl_timer.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/applgrid_combine-combine.Po@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/applgrid_convert-convert.Po@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/basic_pdf.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/combination.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dis_pdf.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fappl_fitter.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fappl_grid.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fastnlo.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/file_index.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/generic_pdf.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/histogram.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hoppet_init.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/integral.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jetrad_pdf.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lumi_pdf.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mcfmQQ_pdf.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mcfmw_pdf.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mcfmwc_pdf.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mcfmwjet_pdf.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mcfmz_pdf.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mcfmzjet_pdf.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nlojet_pdf.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nlojetpp_pdf.Plo@am__quote@ # am--include-marker
+
+$(am__depfiles_remade):
+	@$(MKDIR_P) $(@D)
+	@echo '# dummy' >$@-t && $(am__mv) $@-t $@
+
+am--depfiles: $(am__depfiles_remade)
+
+.cxx.obj:
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.cxx.lo:
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $<
+
+applgrid_combine-combine.o: combine.cxx
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(applgrid_combine_CXXFLAGS) $(CXXFLAGS) -MT applgrid_combine-combine.o -MD -MP -MF $(DEPDIR)/applgrid_combine-combine.Tpo -c -o applgrid_combine-combine.o `test -f 'combine.cxx' || echo '$(srcdir)/'`combine.cxx
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/applgrid_combine-combine.Tpo $(DEPDIR)/applgrid_combine-combine.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='combine.cxx' object='applgrid_combine-combine.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(applgrid_combine_CXXFLAGS) $(CXXFLAGS) -c -o applgrid_combine-combine.o `test -f 'combine.cxx' || echo '$(srcdir)/'`combine.cxx
+
+applgrid_combine-combine.obj: combine.cxx
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(applgrid_combine_CXXFLAGS) $(CXXFLAGS) -MT applgrid_combine-combine.obj -MD -MP -MF $(DEPDIR)/applgrid_combine-combine.Tpo -c -o applgrid_combine-combine.obj `if test -f 'combine.cxx'; then $(CYGPATH_W) 'combine.cxx'; else $(CYGPATH_W) '$(srcdir)/combine.cxx'; fi`
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/applgrid_combine-combine.Tpo $(DEPDIR)/applgrid_combine-combine.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='combine.cxx' object='applgrid_combine-combine.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(applgrid_combine_CXXFLAGS) $(CXXFLAGS) -c -o applgrid_combine-combine.obj `if test -f 'combine.cxx'; then $(CYGPATH_W) 'combine.cxx'; else $(CYGPATH_W) '$(srcdir)/combine.cxx'; fi`
+
+applgrid_convert-convert.o: convert.cxx
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(applgrid_convert_CXXFLAGS) $(CXXFLAGS) -MT applgrid_convert-convert.o -MD -MP -MF $(DEPDIR)/applgrid_convert-convert.Tpo -c -o applgrid_convert-convert.o `test -f 'convert.cxx' || echo '$(srcdir)/'`convert.cxx
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/applgrid_convert-convert.Tpo $(DEPDIR)/applgrid_convert-convert.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='convert.cxx' object='applgrid_convert-convert.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(applgrid_convert_CXXFLAGS) $(CXXFLAGS) -c -o applgrid_convert-convert.o `test -f 'convert.cxx' || echo '$(srcdir)/'`convert.cxx
+
+applgrid_convert-convert.obj: convert.cxx
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(applgrid_convert_CXXFLAGS) $(CXXFLAGS) -MT applgrid_convert-convert.obj -MD -MP -MF $(DEPDIR)/applgrid_convert-convert.Tpo -c -o applgrid_convert-convert.obj `if test -f 'convert.cxx'; then $(CYGPATH_W) 'convert.cxx'; else $(CYGPATH_W) '$(srcdir)/convert.cxx'; fi`
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/applgrid_convert-convert.Tpo $(DEPDIR)/applgrid_convert-convert.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='convert.cxx' object='applgrid_convert-convert.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(applgrid_convert_CXXFLAGS) $(CXXFLAGS) -c -o applgrid_convert-convert.obj `if test -f 'convert.cxx'; then $(CYGPATH_W) 'convert.cxx'; else $(CYGPATH_W) '$(srcdir)/convert.cxx'; fi`
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+install-library_includeHEADERS: $(library_include_HEADERS)
+	@$(NORMAL_INSTALL)
+	@list='$(library_include_HEADERS)'; test -n "$(library_includedir)" || list=; \
+	if test -n "$$list"; then \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(library_includedir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(library_includedir)" || exit 1; \
+	fi; \
+	for p in $$list; do \
+	  if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+	  echo "$$d$$p"; \
+	done | $(am__base_list) | \
+	while read files; do \
+	  echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(library_includedir)'"; \
+	  $(INSTALL_HEADER) $$files "$(DESTDIR)$(library_includedir)" || exit $$?; \
+	done
+
+uninstall-library_includeHEADERS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(library_include_HEADERS)'; test -n "$(library_includedir)" || list=; \
+	files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+	dir='$(DESTDIR)$(library_includedir)'; $(am__uninstall_files_from_dir)
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+distdir: $(BUILT_SOURCES)
+	$(MAKE) $(AM_MAKEFLAGS) distdir-am
+
+distdir-am: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(PROGRAMS) $(LTLIBRARIES) $(HEADERS)
+install-binPROGRAMS: install-libLTLIBRARIES
+
+installdirs:
+	for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(libdir)" "$(DESTDIR)$(library_includedir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \
+	clean-libtool clean-local mostlyclean-am
+
+distclean: distclean-am
+		-rm -f ./$(DEPDIR)/SparseMatrix3d.Plo
+	-rm -f ./$(DEPDIR)/TFileString.Plo
+	-rm -f ./$(DEPDIR)/TFileStringDict.Plo
+	-rm -f ./$(DEPDIR)/TFileVector.Plo
+	-rm -f ./$(DEPDIR)/TFileVectorDict.Plo
+	-rm -f ./$(DEPDIR)/appl_file.Plo
+	-rm -f ./$(DEPDIR)/appl_grid.Plo
+	-rm -f ./$(DEPDIR)/appl_igrid.Plo
+	-rm -f ./$(DEPDIR)/appl_pdf.Plo
+	-rm -f ./$(DEPDIR)/appl_timer.Plo
+	-rm -f ./$(DEPDIR)/applgrid_combine-combine.Po
+	-rm -f ./$(DEPDIR)/applgrid_convert-convert.Po
+	-rm -f ./$(DEPDIR)/basic_pdf.Plo
+	-rm -f ./$(DEPDIR)/combination.Plo
+	-rm -f ./$(DEPDIR)/dis_pdf.Plo
+	-rm -f ./$(DEPDIR)/fappl_fitter.Plo
+	-rm -f ./$(DEPDIR)/fappl_grid.Plo
+	-rm -f ./$(DEPDIR)/fastnlo.Plo
+	-rm -f ./$(DEPDIR)/file_index.Plo
+	-rm -f ./$(DEPDIR)/generic_pdf.Plo
+	-rm -f ./$(DEPDIR)/histogram.Plo
+	-rm -f ./$(DEPDIR)/hoppet_init.Plo
+	-rm -f ./$(DEPDIR)/integral.Plo
+	-rm -f ./$(DEPDIR)/jetrad_pdf.Plo
+	-rm -f ./$(DEPDIR)/lumi_pdf.Plo
+	-rm -f ./$(DEPDIR)/mcfmQQ_pdf.Plo
+	-rm -f ./$(DEPDIR)/mcfmw_pdf.Plo
+	-rm -f ./$(DEPDIR)/mcfmwc_pdf.Plo
+	-rm -f ./$(DEPDIR)/mcfmwjet_pdf.Plo
+	-rm -f ./$(DEPDIR)/mcfmz_pdf.Plo
+	-rm -f ./$(DEPDIR)/mcfmzjet_pdf.Plo
+	-rm -f ./$(DEPDIR)/nlojet_pdf.Plo
+	-rm -f ./$(DEPDIR)/nlojetpp_pdf.Plo
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-library_includeHEADERS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-binPROGRAMS install-libLTLIBRARIES
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+		-rm -f ./$(DEPDIR)/SparseMatrix3d.Plo
+	-rm -f ./$(DEPDIR)/TFileString.Plo
+	-rm -f ./$(DEPDIR)/TFileStringDict.Plo
+	-rm -f ./$(DEPDIR)/TFileVector.Plo
+	-rm -f ./$(DEPDIR)/TFileVectorDict.Plo
+	-rm -f ./$(DEPDIR)/appl_file.Plo
+	-rm -f ./$(DEPDIR)/appl_grid.Plo
+	-rm -f ./$(DEPDIR)/appl_igrid.Plo
+	-rm -f ./$(DEPDIR)/appl_pdf.Plo
+	-rm -f ./$(DEPDIR)/appl_timer.Plo
+	-rm -f ./$(DEPDIR)/applgrid_combine-combine.Po
+	-rm -f ./$(DEPDIR)/applgrid_convert-convert.Po
+	-rm -f ./$(DEPDIR)/basic_pdf.Plo
+	-rm -f ./$(DEPDIR)/combination.Plo
+	-rm -f ./$(DEPDIR)/dis_pdf.Plo
+	-rm -f ./$(DEPDIR)/fappl_fitter.Plo
+	-rm -f ./$(DEPDIR)/fappl_grid.Plo
+	-rm -f ./$(DEPDIR)/fastnlo.Plo
+	-rm -f ./$(DEPDIR)/file_index.Plo
+	-rm -f ./$(DEPDIR)/generic_pdf.Plo
+	-rm -f ./$(DEPDIR)/histogram.Plo
+	-rm -f ./$(DEPDIR)/hoppet_init.Plo
+	-rm -f ./$(DEPDIR)/integral.Plo
+	-rm -f ./$(DEPDIR)/jetrad_pdf.Plo
+	-rm -f ./$(DEPDIR)/lumi_pdf.Plo
+	-rm -f ./$(DEPDIR)/mcfmQQ_pdf.Plo
+	-rm -f ./$(DEPDIR)/mcfmw_pdf.Plo
+	-rm -f ./$(DEPDIR)/mcfmwc_pdf.Plo
+	-rm -f ./$(DEPDIR)/mcfmwjet_pdf.Plo
+	-rm -f ./$(DEPDIR)/mcfmz_pdf.Plo
+	-rm -f ./$(DEPDIR)/mcfmzjet_pdf.Plo
+	-rm -f ./$(DEPDIR)/nlojet_pdf.Plo
+	-rm -f ./$(DEPDIR)/nlojetpp_pdf.Plo
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-binPROGRAMS uninstall-libLTLIBRARIES \
+	uninstall-library_includeHEADERS
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \
+	clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \
+	clean-libtool clean-local cscopelist-am ctags ctags-am \
+	distclean distclean-compile distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-binPROGRAMS \
+	install-data install-data-am install-dvi install-dvi-am \
+	install-exec install-exec-am install-html install-html-am \
+	install-info install-info-am install-libLTLIBRARIES \
+	install-library_includeHEADERS install-man install-pdf \
+	install-pdf-am install-ps install-ps-am install-strip \
+	installcheck installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-compile \
+	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+	tags tags-am uninstall uninstall-am uninstall-binPROGRAMS \
+	uninstall-libLTLIBRARIES uninstall-library_includeHEADERS
+
+.PRECIOUS: Makefile
+
+
+# LHAPDF flags 
+
+@USE_LHAPDF_TRUE@  # in preparation for including the lhapdf version 6 interface
+
+clean-local:
+	rm -rf *.o *.lo *Dict*
+.cxx.o : 
+	$(CXX) $(AM_CXXFLAGS) -c $<
+.c.o : 
+	$(CC) $(AM_CFLAGS) -c $<
+
+@USE_ROOT_TRUE@%Dict.cxx : %.h %.cxx
+@USE_ROOT_TRUE@	$(CINT) -f $@ -c $< -I.. 
+
+#../appl_grid/$*LinkDef.h
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
Index: applgrid/tags/applgrid-01-06-36/src/hoppet_init.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/hoppet_init.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/hoppet_init.cxx	(revision 2010)
@@ -0,0 +1,147 @@
+/**
+ **     @file    hoppet_init.cxx
+ **
+ **     @author  mark sutton
+ **     @date    Sat 26 Sep 2009 13:24:31 BST 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: hoppet_init.cxx, v0.0   Sat 26 Sep 2009 13:24:31 BST sutt $
+ **
+ **/
+
+#include "hoppet_init.h"
+
+#include <iostream>
+#include <vector>
+#include <cmath>
+#include <cstdlib>
+
+#include "amconfig.h"
+#ifdef HAVE_HOPPET
+#include "hoppet_v1.h"
+#endif
+
+
+appl::hoppet_init::hoppet_init( double _Qmax, double _ymax ) {
+
+  double dy = 0.1;   
+  int nloop = 2;         
+
+  double Qmax = _Qmax;
+
+  double ymax = _ymax; /// always make this an integer multiple of dy
+
+  std::cout << "appl::hoppet_init::hoppet_init()  dy = " << dy << "\tnloop = " << nloop << "\tQmax = " << Qmax << "\tymax = " << ymax << std::endl; 
+  // the internal grid spacing (smaller->higher accuracy)
+  // 0.1 should provide at least 10^{-3} accuracy
+  // the number of loops to initialise (max=3!)  
+  // ensure that ymax is always an integer multiple of dy
+
+#ifdef HAVE_HOPPET
+  //  hoppetstart_( dy, nloop );
+  /// NB: these parameters (except Qmax) should all be the default
+  ///     parameters from hoppet 
+  int MSbar = 1;
+  hoppetstartextended_( ymax, dy, 1.0, Qmax, 0.25*dy, nloop, -6,  MSbar );
+#else
+  std::cerr << "appl::hoppet_init::hoppet_init() called but hoppet not included" << std::endl;
+  exit(0);
+#endif
+}
+
+
+appl::hoppet_init::~hoppet_init() {
+  //  std::cout << "appl::hoppet_init::~hoppet_init()" << std::endl;
+}
+
+
+void appl::hoppet_init::fillCache( void (*pdf)(const double&, const double&, double* )  ) {
+
+  //  fill the cache
+
+  //  hoppetassign_( pdf );
+
+  //  std::cout << "appl::hoppet_init::fillCache()" << std::endl; 
+
+  clear();
+
+  for ( double lQ=1 ; lQ<=4 ; lQ+=2 ) { 
+    double Q = std::pow(10,lQ);
+    for ( double lx=-5 ; lx<0 ; lx+=1 ) { 
+      double x = std::pow(10,lx);
+      double xf[13];
+      pdf( x, Q, xf ); 
+      for ( int i=0 ; i<13 ; i++ ) push_back( xf[i] ); 
+    }
+  }  
+
+}
+
+
+
+bool appl::hoppet_init::compareCache( void (*pdf)(const double&, const double&, double* )  ) {
+ 
+  // fill a separate cache for comparison
+
+  // flag whether the cache has changed
+  bool changed = false;
+
+  //  std::cout << "appl::hoppet_init::compareCache()" << std::endl; 
+
+
+  // is the cache empty?
+  if ( size()==0 ) {
+#     ifdef HAVE_HOPPET 
+      hoppetassign_( pdf );
+#     endif
+      fillCache( pdf );
+      return changed = true;  
+  }
+
+  // fill the new cache for comparison
+  //  std::cout << "appl::hoppet_init::compareCache() ccache.size() = " << ccache.size() << "\tsize() = " << size() << std::endl;  
+
+  std::vector<double> ccache;
+    
+  for ( double lQ=1 ; lQ<=4 ; lQ+=2 ) { 
+    double Q = std::pow(10,lQ);
+    for ( double lx=-5 ; lx<0 ; lx+=1 ) { 
+      double x = std::pow(10,lx);
+      double xf[13];
+      pdf( x, Q, xf ); 
+      for ( int i=0 ; i<13 ; i++ ) ccache.push_back( xf[i] ); 
+    }
+  }
+  
+
+  // now compare with existing cache
+  
+  //  std::cout << "appl::hoppet_init::compareCache() ccache.size() = " << ccache.size() << "\tsize() = " << size() << std::endl;  
+  
+  if ( ccache.size()!= size() ) changed = true;  
+
+  for ( unsigned i=0 ; i<ccache.size() ; i++ ) if ( ccache[i]!=at(i) ) changed = true;
+  
+  // cache (and hence the pdf) has changed, so replace the old cached 
+  // values with the new and reinitialise hoppet with this new pdf 
+  if ( changed ) { 
+    
+    assign( pdf );
+    (*(std::vector<double>*)this) = ccache;
+  }
+  
+  //  std::cout << "appl::hoppet_init::compareCache() changed = " << changed << std::endl; 
+
+  return changed;
+  
+}
+
+
+
+void appl::hoppet_init::assign( void (*pdf)(const double&, const double&, double* )  ) { 
+  //  std::cout << "appl::hoppet_init::assign()" << std::endl; 
+#   ifdef HAVE_HOPPET 
+    hoppetassign_( pdf );
+#   endif
+}
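compareCache() above detects a change of pdf by sampling it on a fixed coarse grid of (x,Q) points (Q = 10 and 10^3, x = 10^-5 ... 10^-1) and comparing the 13 flavour values at each point with the previously cached samples; only when this fingerprint differs is hoppet re-assigned. A self-contained sketch of that idea, with a toy pdf in place of the real one (toy_pdf and fingerprint are illustrative names, not part of the interface):

    #include <cmath>
    #include <iostream>
    #include <vector>

    typedef void (*pdf_fn)(const double&, const double&, double*);

    // sample the pdf on the same coarse (x,Q) grid as fillCache() above
    static std::vector<double> fingerprint( pdf_fn pdf ) {
      std::vector<double> cache;
      for ( double lQ=1 ; lQ<=4 ; lQ+=2 ) {      // Q = 10, 1000
        double Q = std::pow(10,lQ);
        for ( double lx=-5 ; lx<0 ; lx+=1 ) {    // x = 1e-5 ... 1e-1
          double x = std::pow(10,lx);
          double xf[13];
          pdf( x, Q, xf );
          for ( int i=0 ; i<13 ; i++ ) cache.push_back( xf[i] );
        }
      }
      return cache;
    }

    // toy pdf, purely illustrative
    static void toy_pdf( const double& x, const double& Q, double* xf ) {
      for ( int i=0 ; i<13 ; i++ ) xf[i] = (1-x)*std::log(Q)/(i+1);
    }

    int main() {
      std::vector<double> a = fingerprint( toy_pdf );
      std::vector<double> b = fingerprint( toy_pdf );
      std::cout << "pdf changed: " << ( a!=b ) << std::endl;  // prints 0: same pdf, same samples
      return 0;
    }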
Index: applgrid/tags/applgrid-01-06-36/src/sparse.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/sparse.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/sparse.h	(revision 2010)
@@ -0,0 +1,38 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    sparse.h
+ **
+ **     @brief   define 1, 2 and 3d block sparse matrices for doubles                  
+ **
+ **     @author  mark sutton
+ **     @date    Thu Nov 22 15:46:00 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: sparse.h, v1.0   Thu Nov 22 15:46:00 GMT 2007 sutt $
+ **
+ **/
+
+
+#ifndef __SPARSE_H
+#define __SPARSE_H
+
+
+#include "tsparse3d.h"
+
+typedef tsparse1d<double> sparse1d;
+typedef tsparse2d<double> sparse2d;
+typedef tsparse3d<double> sparse3d;
+
+
+#endif  // __SPARSE_H 
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/nlojet_pdf.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/nlojet_pdf.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/nlojet_pdf.cxx	(revision 2010)
@@ -0,0 +1,29 @@
+/**
+ **     @file    nlojet_pdf.cxx
+ **
+ **     @brief   pdf transform function for nlojet                   
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: nlojet_pdf.cxx, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+
+
+#include "appl_grid/appl_pdf.h" 
+
+#include "nlojet_pdf.h"
+
+
+extern "C" void fnlojet_pdf__(const double* fA, const double* fB, double* H) { 
+  static nlojet_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+
+
+
+
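The wrapper above shows the pattern used throughout this directory for the Fortran-callable entry points: a function-local static instance of the combination class, constructed once on first use and shared by every subsequent call. A sketch of the same pattern for a hypothetical class (my_pdf and its single subprocess are invented for illustration; only the wrapper signature is taken from the declaration above):

    // hypothetical combination class: one subprocess, H[0] = fA[0]*fB[0]
    struct my_pdf {
      void evaluate(const double* fA, const double* fB, double* H) {
        H[0] = fA[0]*fB[0];
      }
    };

    // Fortran-callable wrapper, same shape as fnlojet_pdf__ above
    extern "C" void fmy_pdf__(const double* fA, const double* fB, double* H) {
      static my_pdf pdf;        // constructed once, on the first call
      pdf.evaluate(fA, fB, H);
    }

    int main() {
      double fA[1] = { 0.5 }, fB[1] = { 2.0 }, H[1];
      fmy_pdf__( fA, fB, H );   // H[0] == 1.0
      return 0;
    }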
Index: applgrid/tags/applgrid-01-06-36/src/appl_pdf.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/appl_pdf.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/appl_pdf.cxx	(revision 2010)
@@ -0,0 +1,351 @@
+/**
+ **     @file    appl_pdf.cxx
+ **
+ **     @brief   pdf transform functions                   
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: appl_pdf.cxx, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+#include <fstream>
+#include <cstdlib>
+
+
+#include "appl_grid/appl_pdf.h" 
+#include "appl_grid/basic_pdf.h"
+
+#include "mcfmz_pdf.h"
+#include "mcfmzjet_pdf.h"
+#include "mcfmw_pdf.h"
+#include "mcfmwjet_pdf.h"
+#include "mcfmwc_pdf.h"
+#include "mcfmQQ_pdf.h"
+#include "nlojet_pdf.h"
+#include "jetrad_pdf.h"
+#include "nlojetpp_pdf.h"
+#include "dis_pdf.h"
+#include "vrapz_pdf.h"
+
+// #include "generic_pdf.h"
+
+
+
+
+namespace appl { 
+
+// initialise the std::map with some default instances
+// although the user could create these themselves
+// if they wanted
+pdfmap appl_pdf::__pdfmap; 
+
+std::vector<std::string> appl_pdf::__pdfpath; 
+
+
+bool appl::appl_pdf::ALLOW_OVERWRITES = true;
+
+/// constructor and destructor
+appl_pdf::appl_pdf(const std::string& name, bool base ) : 
+  m_Nproc(0), m_name(name), m_ckmcharge(0) { 
+  if ( !base && m_name!="" ) addtopdfmap(m_name, this);
+}
+  
+appl_pdf::~appl_pdf() { 
+  // when I'm destroyed, remove my entry from the std::map 
+  pdfmap::iterator mit = __pdfmap.find(m_name);
+  if ( mit!=__pdfmap.end() ) __pdfmap.erase(mit);
+} 
+
+
+/// retrieve an instance from the std::map 
+appl_pdf* appl_pdf::getpdf(const std::string& s, bool ) {
+  /// initialise the factory
+  if ( __pdfmap.size()==0 ) appl::appl_pdf::create_map(); 
+  pdfmap::iterator itr = __pdfmap.find(s);
+  if ( itr!=__pdfmap.end() ) return itr->second; 
+ 
+  /// not found in std::map - used to throw (and catch) an exception 
+  return 0;
+  //  throw exception( std::cerr << "getpdf() " << s << " not instantiated in std::map " );
+}
+
+
+
+
+
+std::ifstream& appl_pdf::openpdf( const std::string& filename ) { 
+
+  /// if not set up yet, set up the search path for 
+  /// the pdf config files
+  if ( __pdfpath.size()==0 ) { 
+    __pdfpath.push_back("");
+    __pdfpath.push_back(std::string(DATADIR)+"/");
+    char* pdfpath = std::getenv( "PDFPATH" );
+    if ( pdfpath!=0 ) { 
+      __pdfpath.push_back(std::string(pdfpath)+"/");
+    } 
+  }
+
+  static std::ifstream infile;
+
+  for ( unsigned i=0 ; i<__pdfpath.size() ; i++ ) { 
+    /// try file
+    infile.open( (__pdfpath[i]+filename).c_str() );
+    /// if found return
+    if ( !infile.fail() ) { 
+      std::cout << "appl_pdf::openpdf()  opening " << __pdfpath[i]+filename << std::endl;
+      return infile;
+    }
+  }
+
+  /// haven't found the config file, so throw an exception ...
+  throw exception( std::string("appl_pdf::appl_pdf() cannot open file ") + filename );
+	
+  /// this should never be executed - just here so the function doesn't fall off the end
+  return infile;
+} 
+
+
+int appl_pdf::decideSubProcess( const int , const int  ) const { return -1; }
+
+
+
+bool appl_pdf::create_map() { 
+
+#ifdef DBG
+  std::cout << "appl_pdf::create_map() creating pdf combination factory" << std::endl;
+#endif
+
+  if ( __pdfmap.size()==0 ) { 
+    
+    /// the appl_pdf instances add their own pointers to the 
+    /// pdf std::map so we don't need to remember their 
+    /// pointers ourselves
+    new  mcfmz_pdf;
+    new  mcfmzjet_pdf;
+    new  mcfmwp_pdf;
+    new  mcfmwm_pdf;
+    new  mcfmwpjet_pdf;
+    new  mcfmwmjet_pdf;
+    
+    new  mcfmwpc_pdf;
+    new  mcfmwmc_pdf;
+    
+    new  mcfmCC_pdf;
+    new  mcfmBB_pdf;
+    new  mcfmTT_pdf;
+
+    new nlojet_pdf;
+    new nlojetpp_pdf;
+    new jetrad_pdf;
+    new dis_pdf;
+
+    new vrapzLO_pdf;
+    new vrapzNLO_pdf;
+    new vrapzNNLO_pdf;
+
+    new basic_pdf;
+
+    //    printmap( std::cerr );
+
+  }
+
+  return true;
+}
+
+
+
+
+/// get ckm related information 
+
+
+void appl_pdf::make_ckm( bool Wp ) { 
+  
+  // std::cout << "make_ckm() initialising" << std::endl;
+  std::vector<std::vector<double> > _ckm2(14, std::vector<double>(14,0));
+  
+  if ( Wp ) { 
+    
+    m_ckmcharge = +1;
+
+    //  std::cout << "creating ckm matrix terms for Wplus production" << std::endl;
+    
+    _ckm2[3][8]  =   0.049284000000000001417976847051249933 ;
+    _ckm2[8][3]  =   0.049284000000000001417976847051249933 ;
+    
+    _ckm2[5][8]  =   0.950624999999999942268402719491859898 ;
+    _ckm2[8][5]  =   0.950624999999999942268402719491859898 ;
+    
+    _ckm2[5][10] =   0.049284000000000001417976847051249933 ;
+    _ckm2[10][5] =   0.049284000000000001417976847051249933 ;
+    
+    _ckm2[3][10] =   0.950624999999999942268402719491859898 ;
+    _ckm2[10][3] =   0.950624999999999942268402719491859898 ;
+    
+  }
+  else { 
+
+    m_ckmcharge = -1;
+    
+    //    std::cout << "creating ckm matrix terms for Wminus production" << std::endl;
+    
+    _ckm2[4][9] =   0.049284000000000001417976847051249933 ;
+    _ckm2[9][4] =   0.049284000000000001417976847051249933 ;
+        
+    _ckm2[7][4] =   0.950624999999999942268402719491859898 ;
+    _ckm2[4][7] =   0.950624999999999942268402719491859898 ;
+    
+    _ckm2[7][2] =   0.049284000000000001417976847051249933 ;
+    _ckm2[2][7] =   0.049284000000000001417976847051249933 ;
+    
+    _ckm2[9][2] =   0.950624999999999942268402719491859898 ;
+    _ckm2[2][9] =   0.950624999999999942268402719491859898 ;
+    
+  }  
+ 
+  setckm2( _ckm2 );
+
+}
+
+
+
+void appl_pdf::setckm( const std::vector<std::vector<double> >& ckm ) { 
+
+  if ( m_ckm==ckm) return; 
+
+  if ( m_ckm.size()>0 ) { 
+    std::cerr << name() << "\tWARNING: writing CKM matrix" << std::endl;
+    if ( !ALLOW_OVERWRITES ) return;
+  }
+
+  m_ckm = ckm; 
+  
+  /// calculate ckm2 and pass into setckm2  
+
+  if ( m_ckmcharge==0 ) return;
+  
+  std::vector<std::vector<double> > ckm2( 14, std::vector<double>(14,0) );
+  for ( int iU=0 ; iU<3 ; iU++ ) {
+    for ( int iD=0 ; iD<3 ; iD++ ) {
+
+      /// lhapdf codes ...
+      int utype = 2*iU+2;
+      int dtype = 2*iD+1;
+
+      /// W- 
+      if ( m_ckmcharge<0 ) utype *= -1;
+
+      /// W+ 
+      if ( m_ckmcharge>0 ) dtype *= -1;
+
+      /// translate to array indices
+      utype += 6;
+      dtype += 6;
+
+      double V2 = ckm[iU][iD]*ckm[iU][iD];
+
+      ckm2[utype][dtype] = V2;
+      ckm2[dtype][utype] = V2;
+      
+    }
+  }
+  
+  //  std::cout << std::endl;
+
+  setckm2( ckm2 );
+} 
+
+
+
+
+void appl_pdf::setckm2( const std::vector<std::vector<double> >& ckm2 ) { 
+
+  if ( m_ckm2==ckm2 ) return;
+
+  if ( m_ckm2.size()>0 ) { 
+    std::cerr << name() << "\tWARNING: writing to CKM matrix squares: " << std::endl;
+    if ( !ALLOW_OVERWRITES ) return;
+  }
+
+  m_ckm2 = ckm2; 
+
+  m_ckmsum = std::vector<double>(m_ckm2.size(),0);
+  for ( unsigned i=0 ; i<m_ckm2.size() ; i++ ) { 
+    for ( unsigned j=0 ; j<m_ckm2[i].size() ; j++ ) m_ckmsum[i] += m_ckm2[i][j]; 
+  }  
+
+  /// reconstruct the ckm matrix from the squares
+  
+  m_ckm = std::vector<std::vector<double> > (3,std::vector<double>(3,0));
+  
+  for ( int i=1 ; i<=6 ; i++ ) { 
+    for ( int j=1 ; j<=6 ; j++ ) {
+      if ( m_ckm2[i+6][6-j]!=0 ) { 
+	int jp = j/2; 
+	int ip = (i-1)/2;
+	if ( i%2!=0 ) { 
+	  jp = i/2;
+	  ip = (j-1)/2;
+	}
+	m_ckm[ip][jp] = std::sqrt( std::fabs(m_ckm2[i+6][6-j]) );
+      }
+    }
+  }
+
+} 
+
+
+
+};
+
+
+
+std::ostream& operator<<( std::ostream& s, const appl::appl_pdf& p ) { 
+  s << "[ appl_pdf: name=" << p.name() << "\tsize=" << p.size() << "\tckmcharge=" << p.getckmcharge() << " ]";
+
+  if ( p.getckmcharge()!=0 ) { 
+    s << "\nckmsum: " << p.ckmsum() << "\n"; 
+    s << "\nckm2:\n";
+
+    std::string labels[14] = { "t-", "b-", "c-", "s-", "u-", "d-", "g ", "d ", "u ", "s ", "c ", "b ", "t ", "ga" };
+
+    if ( p.ckm2().size()>0 ) {
+      std::cout << "    ";
+      for ( size_t i=0 ; i<p.ckm2().size() ; i++ )  std::printf( "     %s     ", labels[i].c_str() );
+      std::cout << std::endl;
+      for ( size_t i=0 ; i<p.ckm2().size() ; i++ ) {
+	std::cout << labels[i] << "  ";
+	for ( size_t j=0 ; j<p.ckm2().size() ; j++ ) {
+	  // s << "\t" << 0.0001*(int(p.ckm2()[i][j]*10000));
+	  if ( i==j ) std::printf( "            " );
+	  else if ( p.ckm2()[i][j]==0 ) std::printf( "     --     " );
+          else  std::printf( "     %7.5lf", p.ckm2()[i][j] );
+	}
+	std::cout << "\n";
+      }
+    }
+
+    if ( p.ckm().size() ) { 
+      s << "\nckm: " << "\n";
+      for ( size_t i=0 ; i<3 ; i++ ) {
+	for ( size_t j=0 ; j<3 ; j++ ) {
+	  //	  s << "\t" << 0.0001*(int(p.ckm()[i][j]*10000); 
+	  std::printf( "     %6.4lf", p.ckm()[i][j] );
+	}
+	std::cout << "\n";
+      }
+    }
+  }
+
+  return s;
+}
+
+
+
+
+
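setckm() above converts generation indices to positions in the 14x14 ckm2 array via the LHAPDF codes (up-type 2*iU+2, down-type 2*iD+1), flips the sign of one code according to the W charge, and then shifts both by +6 to obtain array indices. A small sketch that just prints the resulting index pairs; as a cross-check, for W+ the (u,s) entry lands at ckm2[8][3], which is where make_ckm() hard-codes 0.049284 ( = 0.222^2, the Cabibbo-suppressed |Vus|^2 ):

    #include <cstdio>

    int main() {
      const int charge = +1;              // +1 for W+, -1 for W-
      for ( int iU=0 ; iU<3 ; iU++ ) {
        for ( int iD=0 ; iD<3 ; iD++ ) {
          int utype = 2*iU+2;             // LHAPDF codes: u=2, c=4, t=6
          int dtype = 2*iD+1;             //               d=1, s=3, b=5
          if ( charge<0 ) utype *= -1;    // W-: the up-type quark is an antiquark
          if ( charge>0 ) dtype *= -1;    // W+: the down-type quark is an antiquark
          std::printf( "V[%d][%d]^2 -> ckm2[%d][%d]\n", iU, iD, utype+6, dtype+6 );
        }
      }
      return 0;
    }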
Index: applgrid/tags/applgrid-01-06-36/src/TFileVector.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/TFileVector.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/TFileVector.cxx	(revision 2010)
@@ -0,0 +1,35 @@
+/**
+ **     @file    TFileVector.cxx
+ **
+ **     @brief   root TObject std::vector class for writing std::vectors of
+ **              std::strings to root files
+ **
+ **     @author  mark sutton
+ **     @date    Sat Mar 15 19:50:15 GMT 2008 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: TFileString.cxx, v0.0   Sat Mar 15 19:50:15 GMT 2008 sutt $
+ **
+ **/
+
+#include "appl_grid/appl_root.h"
+
+#ifdef USEROOT
+
+#include "TFileVector.h"
+
+ClassImp(TFileVector)
+
+
+std::ostream& operator<<(std::ostream& s, const TFileVector& fs) { 
+  for ( unsigned i=0 ; i<fs.size() ; i++ ) { 
+    for ( unsigned j=0 ; j<fs[i].size() ; j++ ) s << "\t" << fs[i][j]; 
+    s << std::endl;
+  }
+  return s;
+}
+
+#endif
+
+
Index: applgrid/tags/applgrid-01-06-36/src/threadManager.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/threadManager.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/threadManager.h	(revision 2010)
@@ -0,0 +1,226 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    threadManager.h
+ **
+ **     @author  mark sutton
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **/
+
+
+#ifndef __THREADMANAGER_H
+#define __THREADMANAGER_H
+
+#include <pthread.h>
+#include <stdio.h>
+#include <iostream>
+
+#include <string>
+using std::string;
+
+    
+
+
+class threadManager { 
+
+public:
+
+  threadManager(const string& s) : 
+    mname(s), mrunning(false), mprocessing(false), mterminate(false) { init_mutex(); } 
+  
+  threadManager(const threadManager& t) : 
+    mname(t.mname), mrunning(false), mprocessing(false), mterminate(false) { init_mutex(); }   
+
+  void init_mutex() {  
+    pthread_mutex_t tmpproc_mux = PTHREAD_MUTEX_INITIALIZER;
+    proc_mux = tmpproc_mux;
+
+    pthread_cond_t tmpproc_cv = PTHREAD_COND_INITIALIZER;
+    proc_cv  = tmpproc_cv;
+
+
+    pthread_mutex_t tmprun_mux = PTHREAD_MUTEX_INITIALIZER;
+    run_mux = tmprun_mux;
+    
+    pthread_cond_t tmprun_cv = PTHREAD_COND_INITIALIZER;
+    run_cv  = tmprun_cv;
+    
+      
+    pthread_mutex_t tmpmu = PTHREAD_MUTEX_INITIALIZER;  
+    mlock  = tmpmu;
+  }
+
+  virtual void start_thread() {
+    mrunning = true;
+    //    pthread_cond_t tmpcd = PTHREAD_COND_INITIALIZER;
+    //   std::cout << "starting thread" ;
+    mstatus = pthread_create( &mthread, 0, manage, this ); 
+    //  std::cout << "thread started " << this << "  " << mname << std::endl;
+  }
+  
+  virtual ~threadManager() { 
+    if ( mrunning ) { 
+      //      std::cout << "cancelling thread " << mname << " " << this << " ..." << std::endl;
+      terminate();
+      
+      if ( this->ready() ) {  
+	/// do we need to cancel the thread if it has been told to 
+	/// finish and it has completed the processing?  
+	pthread_cancel(mthread); 
+	//	std::cout << "cancelled " << std::endl;
+      }
+    }
+  }
+  
+
+  void    name(const string& name) { mname = name; }  
+  string  name() const             { return mname; }  
+
+  
+  void lock()   { 
+    //   return;
+    pthread_mutex_lock(&mlock);
+    //   printf("locked\n");
+  }
+
+  void unlock() {
+    //  return;
+    pthread_mutex_unlock(&mlock);
+    //  printf("unlocked\n");
+  }
+
+
+
+  void suspend(bool _wait=true) { 
+    lock_proc();
+
+    lock_run(); 
+    if ( mprocessing ) { 
+      //  printf("sending signal\n");
+      pthread_cond_signal(&run_cv);
+      mprocessing = false;
+    }
+    unlock_run(); 
+
+    if ( _wait ) wait_proc();
+
+    unlock_proc();
+  } 
+  
+
+  void process() {
+    lock_proc(); 
+    if ( mprocessing ) { 
+      /// should never happen - any attempt to start the thread 
+      /// processing should immediately be followed by waiting for 
+      /// the thread to return itself to the suspended state   
+      std::cerr << "error: thread already processing" << std::endl;
+      unlock_proc(); 
+      return;
+    }
+    mprocessing=true; 
+    pthread_cond_signal(&proc_cv);
+    unlock_proc();
+  } 
+
+  void terminate() {
+    lock_proc(); 
+    if ( mprocessing ) { 
+      /// should never happen - any attempt to start the thread 
+      /// processing should immediately be followed by waiting for 
+      /// the thread to return itself to the suspended state   
+      std::cerr << "error: thread already processing" << std::endl;
+      unlock_proc(); 
+      return;
+    }
+    mprocessing = true;
+    mterminate  = true; 
+    pthread_cond_signal(&proc_cv);
+    unlock_proc();
+  } 
+
+
+  bool ready() {
+
+    lock_proc();
+    /// printf("waiting on thread 0x%lx (mprocessing %d)\n", (unsigned long)this, mprocessing );
+    if ( !mprocessing ) { 
+      unlock_proc();
+      return true;
+    }    
+    unlock_proc();
+
+    lock_run(); 
+    /// printf("waiting ...\n");
+    wait_run();
+    ///    pthread_cond_signal(&run_cv);
+    ///    printf("finished\n");
+    unlock_run();
+    return true;
+  } 
+
+
+  bool running() const { return mrunning; } 
+
+  bool processing() const { return mprocessing; } 
+    
+  //  virtual void run() { std::cout << "don't call this" << std::endl; } ;
+  virtual void run_thread() = 0;
+
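+  /// A minimal sketch of how a derived worker class appears intended to be 
+  /// used (the class name and the "unit of work" are illustrative only, not 
+  /// part of this code):
+  ///
+  ///   class worker : public threadManager {
+  ///   public:
+  ///     worker() : threadManager("worker") { start_thread(); }
+  ///     virtual void run_thread() {
+  ///       while ( true ) {
+  ///         suspend();                /// wait here until process() is called
+  ///         if ( mterminate ) return;
+  ///         /// ... perform one unit of work ...
+  ///       }
+  ///     }
+  ///   };
+  ///
+  /// the managing code then calls process() to wake the thread, and ready() 
+  /// to block until the thread has suspended itself again
+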
+protected:
+  
+  // the pthread entry point must be a static function taking a single void* 
+  // argument, so we pass in the this pointer and use it to call the 
+  // non-static run_thread() method of the derived class
+  static void* manage(void* _this) {
+    threadManager* me = (threadManager*)_this;
+    me->run_thread();
+    ///    std::cout << "thread completed " << me << std::endl; 
+    return 0;
+  }
+
+protected:
+
+  int             mstatus;
+  string          mname;
+
+  pthread_t       mthread;
+
+  pthread_mutex_t mlock;
+
+protected:
+
+  bool            mrunning;
+
+protected:
+
+  bool            mprocessing;
+  bool            mterminate;
+
+  pthread_mutex_t proc_mux;
+  pthread_cond_t  proc_cv;
+  
+  void lock_proc()   {  pthread_mutex_lock(&proc_mux); }
+  void unlock_proc() {  pthread_mutex_unlock(&proc_mux); }
+  
+  void wait_proc()   { pthread_cond_wait(&proc_cv, &proc_mux); }
+  
+
+  pthread_mutex_t run_mux;
+  pthread_cond_t  run_cv;
+
+  void lock_run()   {  pthread_mutex_lock(&run_mux); }
+  void unlock_run() {  pthread_mutex_unlock(&run_mux); }
+  
+  void wait_run()   { pthread_cond_wait(&run_cv, &run_mux); }
+
+
+  pthread_mutex_t cache_mux;
+  void lock_cache()   {  pthread_mutex_lock(&cache_mux); }
+  void unlock_cache() {  pthread_mutex_unlock(&cache_mux); }
+  
+
+};
+
+#endif //__THREADMANAGER_H
Index: applgrid/tags/applgrid-01-06-36/src/Cache.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/Cache.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/Cache.h	(revision 2010)
@@ -0,0 +1,209 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    Cache.h
+ **
+ **     @brief   evaluate a pdf function and store the values in a cache.
+ **              if this x, Q2 node has been requested before, then 
+ **              retrieve the values and return them; if not, generate 
+ **              them, add them to the cache and then return them
+ **
+ **              the new LHAPDF (version 6) may implement something 
+ **              like this internally, but a retrieval from the cache 
+ **              is still around 20-40 times faster than LHAPDF 6                   
+ **
+ **     @author  mark sutton
+ **     @date    Sat 19 Oct 2013 14:12:42 CEST 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: Cache.h, v0.0   Sat 19 Oct 2013 14:12:42 CEST sutt $
+ **
+ **/
+
+
+#pragma  once
+
+#include <iostream>
+#include <map>
+#include <utility>
+
+/// Pass an LHAPDF (version 5) style function pointer into the cache, 
+/// then call the evaluate() method instead of calling 
+/// the pdf directly. 
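+///
+/// A minimal usage sketch (the routine name mypdf is only an illustrative 
+/// placeholder for whatever LHAPDF-style wrapper is actually in use):
+///
+///   void mypdf( const double& x, const double& Q2, double* xf );  /// fills the parton densities
+///
+///   Cache<std::pair<double,double> > cache( mypdf );   /// ie the NodeCache typedef below
+///   std::vector<double> xf( 14, 0 );
+///   cache.evaluate( 0.01, 100., xf );   /// first call generates and stores the values
+///   cache.evaluate( 0.01, 100., xf );   /// identical node - returned from the cache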
+
+
+#include "pthread.h"
+
+
+template<typename T>
+class Cache : public std::map<T, std::vector<double> > {
+ 
+private: 
+
+  /// actual typedef of the map for convenience
+  typedef std::map<T,std::vector<double> > _map;
+
+  /// function pointer type
+  typedef void (*pdffunction)(const double& , const double&, double* );
+
+  /// for fast copy 
+  struct partons { double p[14]; };
+
+public:
+
+  /// give it the pdf function to use
+  Cache( pdffunction pdf=0, unsigned mx=20000 ) : _pdf(pdf), _max(mx), _ncalls(0), _ncached(0), _reset(0), _disabled(false), _printstats(false) {
+#   ifdef PDFTHREAD
+    pthread_mutex_t tmpcache_mux = PTHREAD_MUTEX_INITIALIZER;
+    cache_mux = tmpcache_mux;
+#   endif
+  } 
+  
+  virtual ~Cache() { } 
+
+  
+  /// evaluate the pdf function
+  /// if this node has been requested before, retrieve values 
+  /// from the cache, if not, generate and then add to cache
+  /// for next time 
+  void evaluate( const double& x, const double& Q2, double*  xf ) { 
+    
+    if ( _pdf==0 ) { 
+      /// should really throw an exception here
+      std::cerr << "whoops, pdf cache has no pdf!!" << std::endl; 
+      return; 
+    }
+
+    _ncalls++;
+
+    /// if we don't want to use the cache for some reason (it can get quite large)     
+    if ( _disabled ) { 
+      //  lock_cache();
+      _pdf( x, Q2, xf ); 
+      //   unlock_cache();
+      return;
+    }
+
+    //    _reset++;
+
+    T t(x, Q2);
+
+    /// find out if this x, Q2 node is in the cache ...
+       
+    typename _map::const_iterator itr = this->find( t );
+
+    
+    if ( itr!=this->end() ) { 
+      /// in the cache, simply copy to output ...
+      //  lock_cache();
+      (*(partons*)xf) = (*(partons*)(&itr->second[0])); 
+      _ncached++;
+      //  unlock_cache();
+    } 
+    else { 
+      /// not in cache, call pdf function 
+      std::vector<double> _xf(14,0);
+
+      _pdf( x, Q2, &_xf[0] ); 
+
+      /// add to cache if enough room
+      //    lock_cache();
+      if ( this->size()<_max ) this->insert( typename _map::value_type( t, _xf ) );
+
+      /// copy to output 
+      (*(partons*)xf) = ( *((partons*)&_xf[0]) ); 
+      //  unlock_cache();
+    }
+
+
+    //    static int i=0; 
+
+    //    i++;
+
+    //    if ( i>10000 ) { 
+    //      std::cout << "(Cache stats : called " << ncalls() << "\tcached " << ncached() << std::endl;
+    //      i=0;
+    //    }
+  }
+  
+
+  void evaluate( const double& x, const double& Q2, std::vector<double>&  xf ) { evaluate( x, Q2, &xf[0] ); } 
+
+
+  /// print some useful stats
+  void stats() const { if (_printstats) std::cout << *this << std::endl; }
+
+  /// return the actual stored pdf - very handy 
+  pdffunction pdf() { return _pdf; }
+
+  unsigned max()        const { return _max; } 
+  unsigned ncalls()     const { return _ncalls; } 
+  unsigned ncached()    const { return _ncached; } 
+  unsigned ngenerated() const { return _ncalls-_ncached; } 
+
+  double   fraction()  const { 
+    if ( _ncalls>0 ) return this->size()*1.0/_ncalls; 
+    return 0;
+  }
+
+  void reset() { this->clear(); _reset=_ncalls=_ncached=0; }
+
+  void disable() { _disabled = true; }
+  void enable()  { _disabled = false; }
+
+
+private:
+
+  //  void (*_pdf)(const double& , const double&, double* );
+  pdffunction _pdf;
+
+  /// maximum cache size
+  unsigned _max;
+
+  /// some stats - how many nodes generated and how many from the cache
+  unsigned _ncalls;
+  unsigned _ncached;
+  unsigned _ngenerated;
+
+  unsigned _reset;
+
+  bool     _disabled;
+
+  bool     _printstats;
+
+# ifdef PDFTHREAD
+  pthread_mutex_t cache_mux;
+  void lock_cache()   {  pthread_mutex_lock(&cache_mux); }
+  void unlock_cache() {  pthread_mutex_unlock(&cache_mux); }
+#else
+  void lock_cache()   {  }
+  void unlock_cache() {  }
+# endif
+
+  
+};
+
+
+
+
+template<typename T>
+inline std::ostream& operator<<( std::ostream& s, const Cache<T>& _c ) { 
+  s << "Cache:: " 
+    << "\tgenerated "  << _c.ngenerated() 
+    << "\tfrom cache " << _c.ncached() 
+    << "\tsize "       << _c.size() <<  " ( " << int(_c.fraction()*1000)*0.1 << "% )\t"
+    << " :maximum "    << _c.max();
+    return s;
+}
+
+/// useful typedef
+typedef Cache<std::pair<double,double> > NodeCache;
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/axis.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/axis.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/axis.h	(revision 2010)
@@ -0,0 +1,138 @@
+/** emacs: this is -*- c++ -*- **/
+
+//
+//   axis.h        
+//
+//                   
+// 
+//   Copyright (C) 2007 M.Sutton (sutt@hep.ucl.ac.uk)    
+//
+//   $Id: axis.h, v   Wed Nov 14 02:13:30 GMT 2007 sutt
+
+
+#ifndef __AXIS_H
+#define __AXIS_H
+
+#include <iostream>
+#include <iomanip>
+#include <vector>
+#include <cmath>
+
+
+
+template<class T> 
+class axis {
+
+public:
+
+  axis() : m_N(0), m_min(0), m_max(0) { }
+
+  axis(int N, T mn, T mx) : m_N(N), m_min(mn), m_max(mx) { 
+    if ( m_N>1 )   m_delta  = (m_max-m_min)/(m_N-1);
+    else           m_delta  = 0;
+    if ( m_delta ) m_idelta = 1/m_delta;
+    else           m_idelta = 0;
+    for ( int i=0 ; i<m_N ; i++ ) {
+      double x2 = 0;
+      if ( m_N>1 ) x2 = ((m_N-1-i)*m_min + i*m_max)/(m_N-1);
+      else         x2 = m_min;
+      m_v.push_back(x2);    
+    }
+  }
+
+  ~axis() {  }
+  
+  int N()      const { return m_N; } 
+  int size()   const { return m_N; } 
+  T   min()    const { return m_min; }
+  T   max()    const { return m_max; }
+  T   delta()  const { return m_delta; }
+  T   idelta() const { return m_idelta; }
+
+  axis(const axis& ax) 
+    : m_N(ax.m_N), m_min(ax.m_min), m_max(ax.m_max), 
+      m_delta(ax.m_delta), m_idelta(ax.m_idelta) { 
+    for ( int i=0 ; i<m_N ; i++ ) m_v.push_back(ax.m_v[i]);    
+  } 
+
+
+  axis& operator=(const axis& ax) { 
+    m_N   = ax.m_N;
+    m_min = ax.m_min;
+    m_max = ax.m_max;
+    
+    m_delta  = ax.m_delta;
+    m_idelta = ax.m_idelta;
+    
+    m_v = ax.m_v;
+
+    return *this;
+  }
+  
+  const std::vector<T>& v() const { return m_v; }  
+  
+  T&       operator()(int i)       { return m_v[i]; }
+  const T& operator()(int i) const { return m_v[i]; }
+  
+  T&       operator[](int i)       { return m_v[i]; }  
+  const T& operator[](int i) const { return m_v[i]; }  
+  
+  //  int bin(T x) const { return (x-m_min)*m_idelta; }
+  int bin(T x) const { return int((x-m_min)*m_idelta); }
+  
+  void print(T (*f)(T)) const {
+    for ( int i=0 ; i<m_N ; i++ ) { 
+      std::cout << "\t" << std::setprecision(3) << std::setw(5) << f(m_v[i]); 
+    }
+    std::cout << std::endl;
+  }
+
+  /// NB: to avoid rounding errors, allow differences in the bin 
+  ///     limits of up to 1e-10 times the distance between the nodes   
+  bool operator==(const axis& ax) const { 
+  
+    //    std::cerr << "axis::operator== m_N " << m_N << "\tax.m_N " << ax.m_N 
+    //		<< "\tdmin " << (m_min-ax.m_min) << " " << m_min << " " << ax.m_min 
+    //		<< "\tdmax " << (m_max-ax.m_max) << " " << m_max << " " << ax.m_max << std::endl;
+    
+    return  ( m_N==ax.m_N 
+	      && (std::fabs(m_min-ax.m_min)<=m_delta*1e-10) 
+	      && (std::fabs(m_max-ax.m_max)<=m_delta*1e-10) );   
+  }
+  
+private:
+  
+  int m_N;
+  
+  T   m_min;
+  T   m_max;
+  T   m_delta;
+  T   m_idelta;
+  
+  std::vector<T>  m_v;
+};
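+
+/// A minimal usage sketch (the numbers are purely illustrative): the nodes 
+/// run linearly from min to max in (N-1) equal steps, and bin(x) returns the 
+/// index of the nearest node at or below x
+///
+///   axis<double> ax( 11, 0., 10. );   /// nodes at 0, 1, 2, ..., 10
+///   int    i = ax.bin( 3.2 );         /// i = 3
+///   double x = ax[3];                 /// x = 3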
+
+
+template<class T>
+std::ostream& operator<<(std::ostream& s, const axis<T>& a) { 
+  s << "\t[ N=" << a.N(); 
+  // for ( int i=0 ; i<a.N() ; i++ )  s << "\t" << std::setprecision(3) << std::setw(5) << a[i]; 
+  s << ",\t"  << std::setprecision(3) << std::setw(5) << a[0];
+  s << " .. " << std::setprecision(3) << std::setw(5) << a[a.N()-1];
+  s << " ]";
+  return s;
+} 
+
+
+#endif  // __AXIS_H 
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/mcfmwc_pdf.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/mcfmwc_pdf.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/mcfmwc_pdf.h	(revision 2010)
@@ -0,0 +1,175 @@
+/** emacs: this is -*- c++ -*- **/
+
+#ifndef __MCFMWC_PDF_H
+#define __MCFMWC_PDF_H
+
+
+#include "appl_grid/appl_pdf.h" 
+
+//
+// MCFM W+Charm production
+//
+
+
+//
+//  W+
+//
+class mcfmwpc_pdf : public appl::appl_pdf { 
+  
+public: 
+  
+ mcfmwpc_pdf(const std::string& s="mcfm-wpc") : appl_pdf(s) 
+  { m_Nproc = 10; make_ckm( true ); } 
+  
+  ~mcfmwpc_pdf() { } 
+  
+  virtual void evaluate(const double* fA, const double* fB, double* H) const;
+
+
+};
+
+
+
+//
+//  W-
+//
+class mcfmwmc_pdf : public appl::appl_pdf { 
+
+public: 
+
+  mcfmwmc_pdf(const std::string& s="mcfm-wmc") : appl_pdf(s) 
+     { m_Nproc = 10; make_ckm( false ); } 
+
+  ~mcfmwmc_pdf() {   } 
+
+  virtual void evaluate(const double* fA, const double* fB, double* H) const;
+
+};
+
+// actual function to evaluate the pdf combinations 
+// for the W+ Cbar
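+//
+// (subprocess layout, as read from the code below: H[0] and H[1] are the LO 
+//  gluon x ckm-weighted dbar/sbar combinations, one for each beam ordering; 
+//  H[2]..H[9] are the additional NLO gluon-gluon, quark and antiquark pieces)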
+
+inline  void mcfmwpc_pdf::evaluate(const double* fA, const double* fB, double* H) const { 
+  
+  const int nQuark = 6;
+//  const int iQuark = 5; 
+  
+  // offset so we can use fA[-6]
+  // fA += 6;
+  // fB += 6;
+  
+  double GA=fA[6];
+  double GB=fB[6];
+  double QA=0., QB=0.; 
+  double DbA_c = 0., DbB_c = 0., DbADbB = 0. , UbA =0. , UbB = 0., dbAsbB = 0., sbAdbB = 0.;
+  
+  for(int i = -3; i <= -1; i += 2) 
+    {
+      DbA_c += fA[nQuark + i]*m_ckm2[nQuark + i][nQuark + 4];
+      DbB_c += fB[nQuark + i]*m_ckm2[nQuark + 4][nQuark + i];
+    }
+ 
+  for(int i = 1; i <= 3; i+=1) 
+    {
+      QA += fA[nQuark + i];
+      QB += fB[nQuark + i];
+    }
+  
+  for(int i = -3; i <= -1; i+=2) 
+    {
+      DbADbB += fA[nQuark + i]*fB[nQuark + i] * m_ckm2[nQuark + i][nQuark + 4] ;
+    }
+
+  for(int i = -2; i <= -2; i +=2) 
+    {
+      UbA += fA[nQuark + i];
+      UbB += fB[nQuark + i];
+    }
+  
+  dbAsbB = fA[nQuark - 1]*fB[nQuark - 3];
+  sbAdbB = fA[nQuark - 3]*fB[nQuark - 1];
+
+  //LO
+  H[0] = GA*DbB_c;
+  H[1] = DbA_c*GB;
+  //NLO
+  H[2] = GA  * GB;
+  
+  H[3] = DbA_c    * QB  ;
+  H[4] = QA     * DbB_c ;
+  H[5] = UbA      * DbB_c ;
+  H[6] = DbA_c    * UbB   ;
+  H[7] = DbADbB   ;
+  H[8] = dbAsbB   ;
+  H[9] = sbAdbB   ;
+
+}
+
+//
+// actual function to evaluate the pdf combinations 
+//   for the W- + C 
+//
+inline  void mcfmwmc_pdf::evaluate(const double* fA, const double* fB, double* H) const { 
+  
+  const int nQuark = 6;
+//  const int iQuark = 5; 
+  
+  // offset so we can use fA[-6]
+  // fA += 6;
+  // fB += 6;
+  
+  double GA=fA[6];
+  double GB=fB[6];
+  double DA_c=0., DB_c=0.;
+  double QbA=0., QbB=0., UA =0. , UB = 0., DADB = 0., dAsB = 0., sAdB = 0.;
+  
+  for(int i = 1; i <= 3; i+=2) 
+    {
+      DA_c += fA[nQuark + i]*m_ckm2[nQuark + i][nQuark - 4];
+      DB_c += fB[nQuark + i]*m_ckm2[nQuark - 4][nQuark + i];
+    }
+
+  for(int i = -3; i <= -1; i+=1) 
+    {
+      QbA += fA[nQuark + i];
+      QbB += fB[nQuark + i];
+    }
+  
+  for(int i = 1; i <= 3; i+=2) 
+    {
+      DADB += fA[nQuark + i]*fB[nQuark + i] * m_ckm2[nQuark + i][2] ;
+    }
+
+  for(int i = 2; i <= 2; i +=2) 
+    {
+      UA += fA[nQuark + i];
+      UB += fB[nQuark + i];
+    }
+  
+  dAsB = fA[nQuark + 1]*fB[nQuark + 3];
+  sAdB = fA[nQuark + 3]*fB[nQuark + 1];
+
+  //LO
+  H[0] = GA*DB_c;
+  H[1] = DA_c*GB;
+  //NLO
+  H[2] = GA  * GB;
+  
+  H[3] = DA_c    * QbB  ;
+  H[4] = QbA     * DB_c ;
+  H[5] = UA      * DB_c ;
+  H[6] = DA_c    * UB   ;
+  H[7] = DADB   ;
+  H[8] = dAsB   ;
+  H[9] = sAdB   ;
+
+}
+  
+
+// fortran callable wrappers
+extern "C" void fmcfmwpc_pdf__(const double* fA, const double* fB, double* H);
+extern "C" void fmcfmwmc_pdf__(const double* fA, const double* fB, double* H);
+
+
+
+#endif //  __MCFMWC_PDF_H
Index: applgrid/tags/applgrid-01-06-36/src/mcfmwjet_pdf.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/mcfmwjet_pdf.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/mcfmwjet_pdf.h	(revision 2010)
@@ -0,0 +1,171 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    mcfmwjet_pdf.h
+ **
+ **     @brief   pdf transform functions for W production                   
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: mcfmw_pdf.h, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+
+
+#ifndef __MCFMWJET_PDF_H
+#define __MCFMWJET_PDF_H
+
+
+#include "appl_grid/appl_pdf.h" 
+
+//
+// MCFM W production
+//
+
+
+//
+//  W+
+//
+class mcfmwpjet_pdf : public appl::appl_pdf { 
+
+public: 
+
+  mcfmwpjet_pdf(const std::string& s="mcfm-wpjet") : appl_pdf(s) { 
+    m_Nproc = 7; 
+    make_ckm( true ); 
+  } 
+
+  ~mcfmwpjet_pdf() { }
+
+  virtual void evaluate(const double* fA, const double* fB, double* H) const;
+
+};
+
+
+
+//
+//  W-
+//
+class mcfmwmjet_pdf : public appl::appl_pdf { 
+
+public: 
+
+  mcfmwmjet_pdf(const std::string& s="mcfm-wmjet") : appl_pdf(s) { 
+    m_Nproc = 7; 
+    make_ckm( false ); 
+  } 
+
+  ~mcfmwmjet_pdf() { } 
+
+  virtual void evaluate(const double* fA, const double* fB, double* H) const;
+
+};
+
+
+// actual function to evaluate the pdf combinations 
+// for the W+
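+//
+// (subprocess layout, as read from the code below:
+//   H[0], H[1] : ckm2-weighted antidown-type x up-type quark combinations, 
+//                one for each beam ordering
+//   H[2]..H[5] : ckmsum-weighted (anti)quark-gluon combinations
+//   H[6]       : gluon-gluon)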
+
+inline  void mcfmwpjet_pdf::evaluate(const double* fA, const double* fB, double* H) const { 
+  
+  const int nQuark = 6;
+  const int iQuark = 5; 
+  
+  // offset so we can use fA[-6]
+  // fA += 6;
+  // fB += 6;
+  
+  double GA=fA[6];
+  double GB=fB[6];
+  double QA=0; double QB=0; double QbA=0; double QbB=0;
+  
+  for(int i = 1; i <= iQuark; i++) 
+    {
+      QA += fA[nQuark + i]*m_ckmsum[nQuark + i];
+      QB += fB[nQuark + i]*m_ckmsum[nQuark + i];
+    }
+  for(int i = -iQuark; i < 0; i++) 
+    {
+      QbA += fA[nQuark + i]*m_ckmsum[nQuark + i];
+      QbB += fB[nQuark + i]*m_ckmsum[nQuark + i];
+    }
+  
+  H[2]=QbA * GB;
+  H[3]= QA * GB;
+  H[4]= GA * QbB;
+  H[5]= GA * QB;
+  H[6]= GA * GB;
+
+  // have to zero H[0..1] first
+  H[0]=H[1]=0; 
+  
+  for (int i1 = 3; i1 <= 5; i1 += 2)
+    {
+      for(int i2 = 8; i2 <= 10; i2 += 2)
+	{
+	  H[0] += fA[i1]*fB[i2]*m_ckm2[i1][i2];
+	  H[1] += fA[i2]*fB[i1]*m_ckm2[i2][i1];
+	}
+    }  
+}
+
+
+
+
+//
+// actual function to evaluate the pdf combinations 
+//   for the W- 
+//
+inline  void mcfmwmjet_pdf::evaluate(const double* fA, const double* fB, double* H) const { 
+  
+  const int nQuark = 6;
+  const int iQuark = 5; 
+  
+  // offset so we can use fA[-6]
+  // fA += 6;
+  // fB += 6;
+  
+  double GA=fA[6];
+  double GB=fB[6];
+  double QA=0; double QB=0; double QbA=0; double QbB=0;
+  
+  for(int i = 1; i <= iQuark; i++) 
+    {
+      QA += fA[nQuark + i]*m_ckmsum[nQuark + i];
+      QB += fB[nQuark + i]*m_ckmsum[nQuark + i];
+    }
+  for(int i = -iQuark; i < 0; i++) 
+    {
+      QbA += fA[nQuark + i]*m_ckmsum[nQuark + i];
+      QbB += fB[nQuark + i]*m_ckmsum[nQuark + i];
+    }
+  
+  H[2]=  QA * GB;
+  H[3]= QbA * GB;
+  H[4]= GA  * QB;
+  H[5]= GA  * QbB;
+  H[6]= GA  * GB;
+
+  // have to zero H[0..1] first
+  H[0]=H[1]=0; 
+  
+  for (int i1 = 7; i1 <= 9; i1 += 2)
+    {
+      for(int i2 = 2; i2 <= 4; i2 += 2)
+	{
+	  H[0] += fA[i1]*fB[i2]*m_ckm2[i1][i2];
+	  H[1] += fA[i2]*fB[i1]*m_ckm2[i2][i1];
+	}
+    }  
+}
+  
+
+// fortran callable wrappers
+extern "C" void fmcfmwpjet_pdf__(const double* fA, const double* fB, double* H);
+extern "C" void fmcfmwmjet_pdf__(const double* fA, const double* fB, double* H);
+
+
+
+#endif //  __MCFMWJET_PDF_H
Index: applgrid/tags/applgrid-01-06-36/src/tsparse2d.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/tsparse2d.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/tsparse2d.h	(revision 2010)
@@ -0,0 +1,364 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    tsparse2d.h
+ **
+ **     @brief   very basic 2d sparse matrix class, needs to be improved, 
+ **              and extended to 3d.
+ **
+ **              NB: be VERY CAREFUL about the use of the (i,j) index operator, 
+ **                  since it checks whether the given element exists,
+ **                  and if not, it creates it, so even just reading an element, 
+ **                  such as
+ **
+ **                     tsparse2d<...> d(10,10); std::cout << d(2,3) << std::endl;                    
+ **
+ **                  will create the element if it doesn't exist, *unless* you 
+ **                  are looking at a const tsparse2d<...>
+ **
+ **     @author  mark sutton
+ **     @date    Fri Nov  9 00:17:31 CET 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: tsparse2d.h, v1.0   Fri Nov  9 00:17:31 CET 2007 sutt $
+ **
+ **/
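+
+/// A short sketch of the point above (the variable names are illustrative 
+/// only): use a const reference when only reading, so that elements are not 
+/// created as a side effect of the lookup
+///
+///   tsparse2d<double>        d(10,10);
+///   const tsparse2d<double>& cd = d;
+///   double v = cd(2,3);   /// const access - returns 0, does not grow the matrix
+///   d(2,3)   = 1;         /// non-const access - creates the element and assigns it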
+
+
+#ifndef __TSPARSE2D_H
+#define __TSPARSE2D_H
+
+#include <iostream>
+
+#include <iomanip>
+
+#include <cmath>
+
+
+
+#include "tsparse1d.h"
+
+
+template<class T> 
+class tsparse2d : public tsparse_base {
+
+#if 0
+public:
+
+  class exception { 
+  public:
+    exception(const std::string& s) { cerr << s << std::endl; }; 
+    exception(ostream& s) { cerr << s << std::endl; }; 
+  };
+#endif
+
+public:
+
+  tsparse2d(int nx, int ny) : tsparse_base(nx), m_Ny(ny), m_v(NULL) {  
+    m_v = new tsparse1d<T>*[m_Nx];
+    for ( int i=0 ; i<Nx() ; i++ ) m_v[i] = new tsparse1d<T>(m_Ny);
+  }
+
+  tsparse2d(int nx, int ny, int lx, int ly, int _size=1, int _sizey=1) : tsparse_base(nx, lx, _size), m_Ny(ny), m_v(NULL) {  
+    if ( _size ) m_v = new tsparse1d<T>*[_size];
+    for ( int i=0 ; i<_size ; i++ ) m_v[i] = new tsparse1d<T>(m_Ny,ly, _sizey);
+  }
+
+
+  
+  ~tsparse2d() { 
+    if ( m_v ) { 
+      for ( int i=0 ; i<m_ux-m_lx+1 ; i++ )  if ( m_v[i] ) delete m_v[i];
+      delete[] m_v;
+    }
+  }
+  
+  
+
+
+
+  void trim() { 
+    
+    // trim each column down 
+    for ( int i=0 ; i<m_ux-m_lx+1 ; i++ ) if ( m_v[i] ) m_v[i]->trim();
+
+    int xmin = m_lx;
+    int xmax = m_ux;
+    
+     // now find new limits
+    for ( ; xmin<xmax+1 && (*m_v[xmin-m_lx])==0 ; xmin++ )  delete m_v[xmin-m_lx]; 
+    for ( ; xmin<xmax   && (*m_v[xmax-m_lx])==0 ; xmax-- )  delete m_v[xmax-m_lx];
+
+    // don't need to change anything
+    if ( xmin==m_lx && xmax==m_ux ) return;
+ 
+     // trimmed matrix is empty
+    if ( xmax<xmin ) { 
+      m_lx = xmin;  
+      m_ux = xmax;
+      delete[] m_v; 
+      m_v = NULL;
+      return; 
+    }
+
+    // copy to trimmed matrix
+    tsparse1d<T>** m_vnew = new tsparse1d<T>*[xmax-xmin+1]; 
+    tsparse1d<T>** mnp = m_vnew;
+    tsparse1d<T>** mvp = m_v+xmin-m_lx;
+    for ( int j=xmin ; j<=xmax ; j++ )  (*mnp++) = (*mvp++);
+    delete[] m_v;
+
+    // update the sparse data and corresponding limits 
+    m_lx = xmin;
+    m_ux = xmax;
+    m_v  = m_vnew;
+  }
+  
+
+  const tsparse1d<T>* operator[](int i) const {
+    //    range_check(i);
+    if ( i<m_lx || i>m_ux ) return NULL;
+    return m_v[i-m_lx];
+  } 
+
+  void untrim() { 
+    grow(0);
+    grow(Nx()-1);
+    tsparse1d<T>** mvp = m_v;
+    for ( int i=m_lx ; i<=m_ux ; i++ ) (*mvp++)->untrim();
+  }
+
+
+  
+  // NB: for some reason, even though this is a const function, 
+  //     it allows non const member functions to be called on
+  //     non const members so we cast the member to const to enforce
+  //     constness.
+  T operator()(int i, int j) const {
+    //    range_check(i);
+    if ( i<m_lx || i>m_ux ) return 0;
+    return (*((const tsparse1d<T>**)m_v)[i-m_lx])(j);
+  } 
+
+
+  T& operator()(int i, int j) {
+    //  range_check(i);
+    grow(i);
+    //    if ( m_v[i-m_lx] ) 
+    return (*m_v[i-m_lx])(j);
+    //    else throw exception( std::cerr << "no element with index " << i << ", " << j << std::endl );               
+  } 
+
+
+  int Ny() const { return m_Ny; } 
+
+
+  bool trimmed(int i, int j) const { 
+    if ( i<m_lx || i>m_ux ) return false;
+    return m_v[i-m_lx]->trimmed(j);
+  } 
+
+  tsparse1d<T>* trimmed(int i) const { 
+    if ( i<m_lx || i>m_ux ) return NULL;
+    return m_v[i-m_lx];
+  } 
+
+
+  int size() const { 
+    int N=0;
+    for ( int i=0 ; i<m_ux-m_lx+1 ; i++ )  if ( m_v[i] ) N += m_v[i]->size();
+    return N; //  + 3*sizeof(int);
+  }
+ 
+
+  int ymin() {
+    int minx = m_Ny;
+    tsparse1d<T>** mvp = m_v;
+    for ( int i=m_lx ; i<=m_ux ; i++ ) { 
+      int _minx=(*mvp++)->xmin();
+      if ( _minx<=minx ) minx=_minx;
+    }
+    return minx;
+  }
+
+
+  int ymax() {
+    int maxx = -1;
+    tsparse1d<T>** mvp = m_v;
+    for ( int i=m_lx ; i<=m_ux ; i++ ) { 
+      int _minx=(*mvp)->xmin(); 
+      int _maxx=(*mvp++)->xmax(); 
+      if ( _minx<=_maxx &&_maxx>maxx ) maxx=_maxx;
+    }
+    if ( maxx==-1 ) maxx=m_Ny-1;
+    return maxx;
+  }
+
+
+  void print() const { 
+    
+    //   std::cout << "\t\t+";
+    //   for ( int j=0 ; j<Ny() ; j++ ) std::cout << "-";
+    //   std::cout << "+\n" << std::endl; 
+   
+    std::cout << "\t\t+";
+    for ( int j=0 ; j<Ny() ; j++ ) std::cout << "--";
+    std::cout << "+\n";
+
+    for ( int i=0 ; i<Nx() ; i++ ) {
+      std::cout << std::setw(9) << trimmed(i) << "\t";
+      
+      std::cout << "|";
+
+      for ( int j=0 ; j<Ny() ; j++ ) {
+#if 1
+      	if ( trimmed(i,j) ) { 
+	  //	  std::cout << std::setprecision(2) << std::setw(4) << mant((*this)(i,j)) << "\t";
+	  std::cout << "oo";
+	}
+      	else  if ( i==j ) std::cout << ". "; 
+	else              std::cout << "  "; 
+#else
+      	if ( trimmed(i,j) ) { 
+	  std::cout << std::setprecision(1) << std::setw(1) << std::mant(std::fabs((*this)(i,j)));
+	}
+      	else  if ( i==j ) std::cout << "  "; 
+	else              std::cout << "  "; 
+#endif
+      }
+      //      std::cout << "|\n";
+      std::cout << "|\n";
+    }
+
+#if 0
+    ////////////////////////////////////////////////////////////////////
+    
+    std::cout << "\nPAW null 0 " << Nx() <<  "0 " << Ny() << std::endl;
+
+    for ( int i=0 ; i<Nx() ; i++ ) {
+      std::cout << std::setw(9) << trimmed(i) << "\t";
+      
+      for ( int j=0 ; j<Ny() ; j++ ) {
+
+      	if ( trimmed(i,j) ) { 
+	  //	  std::cout << std::setprecision(2) << std::setw(4) << mant((*this)(i,j)) << "\t";
+	  std::cout << "\nPAW box " << i-1 << " " << i << " " << j-1 << " " << j << std::endl;
+	}
+	//      	else  if ( i==j ) std::cout << ". "; 
+	//	else              std::cout << "  "; 
+      }
+      //      std::cout << "|\n";
+      // std::cout << "|\n";
+    }
+    //////////////////////////////////////////////////////////////
+    std::cout << "\nPAW wait" << std::endl;
+
+#endif
+
+    std::cout << "\t\t+";
+    for ( int j=0 ; j<Ny() ; j++ ) std::cout << "--";
+    std::cout << "+\n";
+   
+    //   std::cout << "\t\t+";
+    //   for ( int j=0 ; j<Ny() ; j++ ) std::cout << "-";
+    //   std::cout << "+\n" << std::endl; 
+    
+  }
+
+  
+  tsparse1d<T>**  v() { return m_v; }
+
+
+  tsparse2d& operator*=(const double& d) { 
+    for ( int i=0 ; i<Nx() ; i++ ) {     
+      if ( tsparse_base::trimmed(i) ) { 
+	if ( m_v[i-m_lx] ) (*m_v[i-m_lx])*=d;
+      }
+    }
+    return *this;
+  }
+  
+
+  tsparse2d& operator+=(const tsparse2d& t) {
+    if ( Nx()!=t.Nx() || Ny()!=t.Ny() ) throw out_of_range("bin mismatch");
+    for ( int i=0 ; i<Nx() ; i++ ) {
+      for ( int j=0 ; j<Ny() ; j++ ) (*this)(i,j) += t(i,j);
+    }
+    return *this;
+  }
+
+private:
+
+
+ 
+  void grow(int i) { 
+     
+    // nothing needs to be done; 
+    if ( i>=m_lx && i<=m_ux ) return;
+
+    // is it empty? add a single row
+    if ( m_lx>m_ux ) { 
+      m_v    = new tsparse1d<T>*[1];
+      (*m_v) = new tsparse1d<T>(m_Ny, m_Ny, 0);
+      m_lx = m_ux = i;
+      return;
+    }
+
+    tsparse1d<T>** newv = new tsparse1d<T>*[ ( i<m_lx ? m_ux-i+1 : i-m_lx+1 ) ];
+    tsparse1d<T>** newp = newv;
+    tsparse1d<T>** mvp  = m_v;
+
+    int lx = m_lx;
+    int ux = m_ux;
+
+    for ( ; m_lx>i ; m_lx-- )  (*newp++) = new tsparse1d<T>(m_Ny, m_Ny, 0);
+    for ( ; lx<=ux ; lx++ )    (*newp++) = (*mvp++);
+    for ( ; m_ux<i ; m_ux++ )  (*newp++) = new tsparse1d<T>(m_Ny, m_Ny, 0);
+
+    delete[] m_v;
+    m_v = newv;
+  }
+
+
+private:
+    
+  int m_Ny;
+
+  tsparse1d<T>**  m_v;
+
+};
+
+
+// stream IO template
+template<class T>std::ostream& operator<<(std::ostream& s, const tsparse2d<T>& sp) { 
+#if 0
+  for ( int i=0 ; i<sp.Nx() ; i++ ) { 
+    for ( int j=0 ; j<sp.Ny() ; j++ ) { 
+      s << sp(i,j) << "\t"; 
+    }
+    s << "\n";
+  }
+  return s;
+#else
+  for ( int i=0 ; i<sp.Nx() ; i++ ) { 
+    for ( int j=0 ; j<sp.Ny() ; j++ ) { 
+      if ( sp(i,j) ) s << "X";
+      else           s << ".";
+    }
+    s << "\n";
+  }
+  return s;
+#endif
+}
+
+
+#endif  // __TSPARSE2D_H 
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/SparseMatrix3d.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/SparseMatrix3d.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/SparseMatrix3d.h	(revision 2010)
@@ -0,0 +1,195 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    SparseMatrix3d.h
+ **
+ **     @author  mark sutton
+ **     @date    Wed Nov 14 14:23:49 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: SparseMatrix3d.h, v1.0   Wed Nov 14 14:23:49 GMT 2007 sutt $
+ **
+ **/
+
+
+#ifndef __SPARSEMATRIX3D_H
+#define __SPARSEMATRIX3D_H
+
+#include <iostream>
+
+#include "appl_grid/appl_root.h"
+
+#include "axis.h"
+#include "sparse.h"
+
+#ifdef USEROOT
+#include "TH1D.h"
+#include "TH3D.h"
+#endif
+
+
+
+#include "appl_grid/stream_grid.h"
+
+
+class SparseMatrix3d : public tsparse3d<double> {
+
+public:
+
+  // constructors and destructor
+
+  SparseMatrix3d( int Nx, double lx, double ux, 
+		  int Ny, double ly, double uy, 
+		  int Nz, double lz, double uz);
+
+  SparseMatrix3d(const SparseMatrix3d& s); 
+  
+#ifdef USEROOT
+  SparseMatrix3d(const TH3D* h);   
+#endif
+
+  SparseMatrix3d(const stream_grid& h);   
+
+  ~SparseMatrix3d();
+  
+
+  // axis accessors
+  const axis<double>& xaxis() const { return m_xaxis; } 
+  const axis<double>& yaxis() const { return m_yaxis; } 
+  const axis<double>& zaxis() const { return m_zaxis; } 
+
+
+  // trim to sparse structure 
+  void trim() { empty_fast(); sparse3d::trim(); }
+    
+  // set up fast lookup table into the (untrimmed) 3d array.
+  void setup_fast() { 
+    if ( m_fastindex ) return;
+    m_fastindex = new double*[Nx()*Ny()*Nz()];
+
+    for ( int i=0 ; i<Nx() ; i++ ) { 
+      for ( int j=0 ; j<Ny() ; j++ ) { 
+	for ( int k=0 ; k<Nz() ; k++ ) { 
+	  m_fastindex[(i*Ny()+j)*Nz()+k] = &(m_v[i]->v()[j])->v()[k];
+	}
+      }
+    }
+  }
+  
+  // and clean up
+  void empty_fast() { 
+    if ( m_fastindex ) delete[] m_fastindex;
+    m_fastindex = NULL;
+  }
+
+  // access using the fast (dangerous) methods
+  double& fill_fast(int i, int j, int k)       { return *m_fastindex[(i*Ny()+j)*Nz()+k]; }
+  double  fill_fast(int i, int j, int k) const { return *m_fastindex[(i*Ny()+j)*Nz()+k]; }
+
+  void fill(double x, double y, double z, double w) { 
+
+    int i=xaxis().bin(x);
+    int j=yaxis().bin(y);
+    int k=zaxis().bin(z);
+
+    if ( i<0 || i>=Nx() || j<0 || j>=Ny() || k<0 || k>=Nz() ) return; 
+   
+    if ( m_fastindex ) fill_fast(i,j,k) += w; 
+    else                 (*this)(i,j,k) += w;
+  }
+  
+
+  /// print out
+  void print() const {
+    //sparse3d::print();
+    std::cout << m_xaxis << "\n"; 
+    std::cout << m_yaxis << "\n"; 
+    std::cout << m_zaxis << "\n"; 
+  }
+
+  
+  void print_axes() const { 
+    std::cout << "SparseMatrix: x: " 
+	      << "\tx " << m_xaxis 
+	      << "\ty " << m_yaxis 
+	      << "\tz " << m_zaxis << std::endl; 
+  }
+
+
+  /// check if the axes are all the same
+  bool compare_axes(const SparseMatrix3d& s) const { 
+
+#if 0
+
+    std::cout << "\tSparseMatrix: " 
+	      << "\tx " << m_xaxis 
+	      << "\ty " << m_yaxis 
+	      << "\tz " << m_zaxis << std::endl; 
+
+    std::cout << "\tSparseMatrix: " 
+	      << "\tx " << s.m_xaxis 
+	      << "\ty " << s.m_yaxis 
+	      << "\tz " << s.m_zaxis << std::endl; 
+
+    std::cout << "\tSparseMatrix: " 
+	      << "\tx " << ( m_xaxis == s.m_xaxis ) 
+	      << "\ty " << ( m_yaxis == s.m_yaxis )
+	      << "\tz " << ( m_zaxis == s.m_zaxis ) << std::endl; 
+
+
+    std::cout << "\tSparseMatrix: " 
+	      << "\tdNx " << ( m_xaxis.N() - s.m_xaxis.N() ) 
+	      << "\tmin "   << m_xaxis.min()   << " (" << ( m_xaxis.min() - s.m_xaxis.min() )     << ") " 
+	      << "\tmax "   << m_xaxis.max()   << " (" << ( m_xaxis.max() - s.m_xaxis.max() )    << ") " 
+	      << "\tdelta " << m_xaxis.delta() << " (" << ( m_xaxis.delta() - s.m_xaxis.delta() ) << ") " 
+	      << std::endl; 
+
+#endif
+
+    return ( m_xaxis == s.m_xaxis &&  
+	     m_yaxis == s.m_yaxis &&
+	     m_zaxis == s.m_zaxis );
+   
+  }
+
+
+  /// checks if the actual contents are the same
+  bool operator==(const SparseMatrix3d& s) const;
+
+  bool operator!=(const SparseMatrix3d& s) const { 
+    return !( *this == s );
+  }
+
+  // utilities for file access and storage
+#ifdef USEROOT
+  TH3D*    getTH3D(const std::string& s) const; 
+#endif
+
+  stream_grid* get(const std::string& s) const; 
+
+  void clear(); 
+
+private:
+
+  axis<double> m_xaxis;
+  axis<double> m_yaxis;
+  axis<double> m_zaxis;
+  
+  double** m_fastindex;
+
+};
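+
+/// A minimal filling sketch (the bin counts and ranges are illustrative only):
+///
+///   SparseMatrix3d m( 10, 0., 1.,   10, 0., 1.,   10, 0., 1. );
+///   m.setup_fast();                  /// optional fast lookup into the untrimmed array
+///   m.fill( 0.15, 0.25, 0.35, 1. );  /// accumulate a weight at (x,y,z)
+///   m.trim();                        /// compact the storage (also clears the fast table)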
+
+
+std::ostream& operator<<(std::ostream& s, const SparseMatrix3d& sm); 
+
+#endif  // __SPARSEMATRIX3D_H 
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/lumi_pdf.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/lumi_pdf.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/lumi_pdf.cxx	(revision 2010)
@@ -0,0 +1,646 @@
+/**
+ **     @file    lumi_pdf.cxx
+ **
+ **     @author  mark sutton
+ **     @date    Tue  9 Jul 2013 08:14:47 CEST 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: lumi_pdf.cxx, v0.0   Tue  9 Jul 2013 08:14:47 CEST sutt $
+ **
+ **/
+
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+#include <vector>
+#include <string>
+#include <algorithm>
+
+#include "appl_grid/lumi_pdf.h"
+
+
+std::string str_replace( std::string s ) { 
+  std::string a = s;
+  for ( int i=a.size() ; i-- ; ) if ( a[i]=='_' ) a[i]='-';
+  return a;
+}
+
+
+void latex( const lumi_pdf& p, const std::string& d);
+
+bool lumi_pdf::m_runlatex = false;
+
+lumi_pdf::lumi_pdf(const std::string& s, const std::vector<int>& combinations ) : // , int Wcharge ) :  //, bool amcflag ) : 
+  appl_pdf(s, true), m_filename(s), 
+  m_lookup( std::vector<std::vector<std::vector<int> > >(0) ) 
+  //,  m_amcflag(amcflag)
+{
+
+  /// need to decode the input std::vector
+
+  if ( combinations.size() ) { 
+
+    /// std::vector initialised from serialised std::vector
+    unsigned iv = 0;
+
+    unsigned nproc = combinations[iv++];
+
+    for ( unsigned i=0 ; i<nproc && iv<combinations.size() ; i++ ) {
+      int index  = combinations[iv++];
+      int npairs = combinations[iv++];
+
+      std::vector<int> v(npairs*2+2);
+      v[0] = index;
+      v[1] = npairs;
+      
+      for ( int j=0 ; j<npairs ; j++ ) { 
+	v[j*2+2] = combinations[iv++];
+	v[j*2+3] = combinations[iv++];
+      }
+
+      combination c(v);
+      if ( c.size() ) add(c); 
+    } 
+
+    /// extra value on the end for the W+- charge (if required) - flags that 
+    /// ckm matrix is to be used    
+    if ( iv<combinations.size() ) m_ckmcharge = combinations[iv];
+
+  }
+  else if ( m_filename!="" ) {  
+    /// else read from file ...
+  
+    std::ifstream& infile = openpdf( m_filename  );
+
+    /// will never fail, appl_pdf::open() would have thrown already
+    ///  if ( infile.fail() ) throw exception( std::cerr << "lumi_pdf::lumi_pdf() cannot open file " << m_filename << std::endl ); 
+ 
+    std::string   line;
+
+    infile >> m_ckmcharge;
+    
+    ///    std::cout << "ckmcharge " << m_ckmcharge << std::endl;  
+
+    while (std::getline(infile, line)) {
+      //    std::cout << "line: " << line << std::endl;
+      combination c( line );
+      if ( c.size() ) add(c); 
+    }
+    
+    infile.close();
+
+  }
+
+  if ( m_ckmcharge>0 ) { 
+    std::cout << "lumi_pdf::lumi_pdf() setting W+ cmk matrix" << std::endl;
+    make_ckm(true);
+  }
+  else if ( m_ckmcharge<0  ) { 
+    std::cout << "lumi_pdf::lumi_pdf() setting W- cmk matrix" << std::endl;
+    make_ckm(false);
+  }
+
+  
+  // some checking
+  
+  //  for ( int i=0 ; i<m_combinations.size() ; i++ ) { 
+  //    if ( m_combinations[i].
+  //  }
+
+  m_Nproc = m_combinations.size();
+
+
+  // create the reverse lookup 
+
+  create_lookup();
+
+  //  std::cout << "decideSuprocess " << decideSubProcess( 0, 0 ) << std::endl;
+  //  std::cout << "lumi_pdf::lumi_pdf() " << s << "\tv size " << m_combinations.size() << " lookup size " << m_lookup.size() << std::endl; 
+  //  std::cout << *this << std::endl;
+
+  //  lumi_pdf* _pdf = dynamic_cast<lumi_pdf*>(appl::appl_pdf::getpdf(name()));
+  //  std::cout << "done " << _pdf << _pdf->decideSubProcess( 0, 0 ) << std::endl;
+
+  //  std::cout << *this << std::endl;
+
+  if ( m_name!="" ) addtopdfmap(m_name, this);
+
+  if ( m_runlatex ) latex( *this, ".pdf" );
+
+}
+
+
+
+
+
+lumi_pdf::lumi_pdf(const std::string& s, const std::vector<combination>& combinations, int ckmcharge ) : 
+  appl_pdf(s), m_filename(s), m_combinations(combinations),
+  m_lookup( std::vector<std::vector<std::vector<int> > >(0) ) 
+{
+  
+  //  std::cout << "lumi_pdf::lumi_pdf() " << s << "\tv size " << combinations.size() << " lookup size " << m_lookup.size() << " " << this << std::endl; 
+
+  /// no need to decode the input std::vector
+
+  /// the W+- charge (if required) - flags that 
+  /// ckm matrix is to be used    
+  m_ckmcharge = ckmcharge;
+
+  if ( m_ckmcharge>0 ) { 
+    std::cout << "lumi_pdf::lumi_pdf() setting W+ cmk matrix" << std::endl;
+    make_ckm(true);
+  }
+  else if ( m_ckmcharge<0  ) { 
+    std::cout << "lumi_pdf::lumi_pdf() setting W- cmk matrix" << std::endl;
+    make_ckm(false);
+  }
+
+  m_Nproc = m_combinations.size();
+
+  create_lookup();
+
+  //  std::cout << *this << std::endl; 
+
+  if ( m_runlatex ) latex( *this, ".pdf" );
+
+}
+
+
+
+void lumi_pdf::create_lookup() { 
+  if ( m_lookup.size()==0 ) { 
+    /// create a 14 x 14 lookup table (including a photon contribution!)
+    m_lookup = std::vector<std::vector<std::vector<int> > >(14, std::vector<std::vector<int> >(14) ); 
+    for ( unsigned i=size() ; i-- ; ) { 
+      const combination& c = m_combinations[i];
+      for ( unsigned j=c.size() ; j-- ; ) m_lookup[ c[j].first+6 ][ c[j].second+6 ].push_back(i);
+    } 
+  }
+
+  m_proclookup.clear();
+
+  for ( unsigned i=size() ; i-- ; ) { 
+    const combination& c = m_combinations[i];
+    for ( unsigned j=c.index().size() ; j-- ; ) { 
+      std::map<int,int>::iterator itr = m_proclookup.find(c.index()[j]);
+      if ( itr==m_proclookup.end() ) m_proclookup.insert( std::map<int,int>::value_type( c.index()[j], i ) );
+    }
+  } 
+  
+}
+
+
+
+int lumi_pdf::decideSubProcess(const int iproc ) const {
+  std::map<int,int>::const_iterator itr = m_proclookup.find(iproc);
+  if ( itr==m_proclookup.end() ) return -1;
+  else return itr->second;
+}
+
+
+void lumi_pdf::evaluate(const double* xfA, const double* xfB, double* H) const { 
+  /// if need to include the ckm matrix ...
+  if ( m_ckmcharge==0 )  {
+    for ( unsigned i=size() ; i-- ; ) { 
+      H[i] = m_combinations[i].evaluate( xfA, xfB ); 
+    }
+  }
+  else { 
+   for ( unsigned i=size() ; i-- ; ) { 
+     H[i] = m_combinations[i].evaluate( xfA, xfB, m_ckmsum, m_ckm2 ); 
+    }
+  }
+}
+
+
+int  lumi_pdf::decideSubProcess(const int iflav1, const int iflav2) const { 
+  //  std::cout << "lumi_pdf::decideSubProcess() " << name() << " " << m_lookup.size() << std::endl;
+  if ( m_lookup[iflav1+6][iflav2+6].size()==1 ) return m_lookup[iflav1+6][iflav2+6][0];
+  return -1;
+}
+
+
+size_t  lumi_pdf::nSubProcesses(const int iflav1, const int iflav2) const { 
+  //  std::cout << "lumi_pdf::decideSubProcess() " << name() << " " << m_lookup.size() << std::endl;
+  return m_lookup[iflav1+6][iflav2+6].size();
+}
+
+
+std::vector<int> lumi_pdf::decideSubProcesses(const int iflav1, const int iflav2) const { 
+  //  std::cout << "lumi_pdf::decideSubProcess() " << name() << " " << m_lookup.size() << std::endl;
+  return m_lookup[iflav1+6][iflav2+6];
+}
+
+#if 0
+std::vector<int> lumi_pdf::decideSubProcesses(const int iflav ) const { 
+  //  std::cout << "lumi_pdf::decideSubProcess() " << name() << " " << m_lookup.size() << std::endl;
+  return m_lookup[iflav+6][iflav+6];
+}
+
+int  lumi_pdf::decideSubProcess(const int iflav ) const { 
+  //  std::cout << "lumi_pdf::decideSubProcess() " << name() << " " << m_lookup.size() << std::endl;
+  if ( m_lookup[iflav+6][iflav+6].size()==1 ) return m_lookup[iflav+6][iflav+6][0];
+  return -1;
+}
+#endif
+
+
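+/// flatten the subprocess combinations into a single std::vector<int>; as 
+/// written below (and decoded again in the constructor above) the layout is
+///
+///   [ Nproc,
+///     index, Npairs, pair1.first, pair1.second, ... ,  (repeated per combination)
+///     ckmcharge flag (+1, -1 or 0) ]
+///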
+std::vector<int> lumi_pdf::serialise() const  { 
+
+  std::vector<int> v;
+
+  v.push_back( Nproc() );
+
+  for ( int i=0 ; i<Nproc() ; i++ ) { 
+
+    const combination& c = m_combinations[i];
+
+    v.push_back( c.index()[0] );
+
+    v.push_back( c.size() );
+    for ( unsigned j=0 ; j<c.size() ; j++ ) { 
+      v.push_back( c[j].first );
+      v.push_back( c[j].second );
+    }
+  }  
+  
+  if      ( m_ckmcharge>0 ) v.push_back(1);
+  else if ( m_ckmcharge<0 ) v.push_back(-1);
+  else                      v.push_back(0);
+			 
+  return v;
+}
+
+
+
+std::vector<std::vector<int> > lumi_pdf::vectorise() const  { 
+
+  std::vector<std::vector<int> > v;
+
+  for ( int i=0 ; i<Nproc() ; i++ ) { 
+
+    std::vector<int> v0;
+    v0.push_back( i );
+
+    const combination& c = m_combinations[i];
+    for ( unsigned j=0 ; j<c.size() ; j++ ) { 
+      v0.push_back( c[j].first );
+      v0.push_back( c[j].second );
+    }
+    v.push_back( v0 );
+  }  
+  
+  return v;
+}
+
+
+
+
+
+void lumi_pdf::write(std::ostream& s) const { 
+
+  std::cout << "lumi_pdf::write() " << name() << std::endl;
+  
+  s << m_ckmcharge << "\n";
+
+  for ( unsigned i=0 ; i<m_combinations.size() ; i++ ) { 
+
+    //    std::cout << "swrite: " << m_combinations[i].index()[0] << std::endl;
+
+    s << m_combinations[i].index()[0] << " ";
+    s << m_combinations[i].size()  << " ";
+
+    for ( unsigned j=0 ; j<m_combinations[i].size() ; j++ ) { 
+      s << "  " << m_combinations[i][j].first << " " << m_combinations[i][j].second;
+    }
+
+    s << "\n";
+
+  }
+
+}
+
+
+void lumi_pdf::write(const std::string& filename) const {  
+  std::ofstream s(filename.c_str());
+  write(s);
+}
+
+
+// std::string lumi_pdf::summary(std::ostream& s=std::cout) const { 
+std::string lumi_pdf::summary() const { 
+  std::stringstream s_;
+  s_ << "lumi_pdf::lumi_pdf()\t" << name() << "\tcombinations " << m_combinations.size() << "\tlookup size " << m_lookup.size() << "\taddr: " << this; 
+  return s_.str();
+}
+
+
+
+
+bool lumi_pdf::contains( int i ) const { 
+  for ( int ic=0 ; ic<Nproc() ; ic++ ) { 
+    const combination& c = m_combinations[ic];
+    for ( size_t ip=0 ; ip<c.size() ; ip++ ) { 
+      if ( c.pair(ip).first==i || c.pair(ip).second==i ) return true; 
+    }
+  }
+  return false;
+}
+ 
+
+
+void lumi_pdf::removeDuplicates() { 
+
+  size_t original_size = size();
+
+  std::vector<combination> combinations;
+  
+  for ( unsigned i=0 ; i<size() ; i++ ) {
+    bool unique = true;
+    for ( unsigned j=0 ; j<combinations.size() ; j++ ) {
+      if ( at(i) == combinations[j] ) {
+	unique = false;
+	// std::cout << "duplicate:\n" << at(i) << "\n" << combinations[j] << std::endl;
+	combinations[j].add_index( at(i).index()[0] );
+      }
+    }
+    if ( unique ) combinations.push_back( at(i) );
+  }
+
+  m_combinations = combinations;
+
+  m_Nproc = m_combinations.size();
+
+  create_lookup();
+
+  std::cout << "lumi_pdf::removeDuplicates() " << name() << "\tsize " << original_size << " -> " << size() << std::endl; 
+
+}
+   
+
+
+
+
+void lumi_pdf::restoreDuplicates() { 
+
+  std::vector<combination> combinations;
+
+  for ( unsigned i=0 ; i<size() ; i++ ) {
+
+    std::vector<int> indices = at(i).index();
+
+    for ( unsigned j=0 ; j<indices.size() ; j++ ) {
+
+      combination c = at(i);
+
+      c.index().clear();
+      c.index().push_back(indices[j]);
+
+      combinations.push_back( c );
+    }
+  }
+
+  std::sort( combinations.begin(), combinations.end() );
+
+  m_combinations = combinations;
+
+  m_Nproc = m_combinations.size();
+
+  create_lookup();
+  
+}
+
+
+/// delete a combination
+void lumi_pdf::remove( int i ) {   
+
+    std::cout << "lumi_pdf::remove() remving combination: " << i << "\t" << m_combinations[i] << std::endl;
+
+    std::vector<combination>::iterator itr = m_combinations.begin();
+    for ( int ic=0 ; itr!=m_combinations.end() ; itr++, ic++ ) {
+      std::cout << *itr << std::endl;
+      if ( ic==i ) break;
+    }
+   
+    if ( itr!=m_combinations.end() ) { 
+      m_combinations.erase( itr );
+      m_Nproc = m_combinations.size();
+      create_lookup();
+    }
+
+}
+
+
+   
+
+
+
+#include <fstream>
+#include <cstdlib>
+
+/// decode as a latex table, and create pdf file
+
+void latex( const lumi_pdf& p, const std::string& d) {
+
+  std::cout << "latex() : " << d << "\t" << p << std::endl; 
+
+  std::ofstream zj( (d+p.name()+".tex").c_str() );
+ 
+  std::string _f[14] = { 
+    "\\bar{t}", 
+    "\\bar{b}", 
+    "\\bar{c}", 
+    "\\bar{s}", 
+    "\\bar{u}", 
+    "\\bar{d}",
+    "g",
+    "d",
+    "u", 
+    "s", 
+    "c", 
+    "b", 
+    "t",
+    "\\gamma" };
+
+  const std::string* f = _f+6; 
+
+
+  //  std::cout << "\\bigskip\n";
+
+  std::cout << "Contribution:   " << p.name() << "\t processes " << p.Nproc() << "\n";
+
+  bool landscape = false;
+
+  size_t maxind  = 20;
+  size_t maxindc = 15;
+
+  if ( landscape ) { 
+    zj << "\\documentclass[7pt,a4paper,landscape]{article}\n\n";
+        
+    zj << "\\usepackage{makecell}\n\n";
+    zj << "\\usepackage[a4paper,landscape]{geometry}\n\n";
+        
+    /// WTF ? 
+    //  zj << "\\usepackage{atbegshi}% http://ctan.org/pkg/atbegshi\n";
+    //  zj << "\\AtBeginDocument{\\AtBeginShipoutNext{\\AtBeginShipoutDiscard}}\n";
+    
+    zj << "\\topmargin=-3cm\n";
+    zj << "\\textheight=18cm\n";
+    zj << "\\textwidth=28cm\n";
+    zj << "\\oddsidemargin=-1.5cm\n";
+
+    zj << "\\begin{document}\n";
+
+    zj << "\\ \\\\ \\ \\\\ \\ \\\\\n";
+
+  }
+  else { 
+    //  zj << "\\documentclass[7pt,a4paper]{article}\n\n";
+    
+    //  zj << "\\usepackage{makecell}\n\n";
+    //  zj << "\\usepackage[a4paper]{geometry}\n\n";
+    
+    //  zj << "\\topmargin=-3cm\n";
+    //  zj << "\\textheight=28cm\n";
+    //  zj << "\\textwidth=18cm\n";
+    //  zj << "\\oddsidemargin=-1.5cm\n";
+
+    maxind  = 3;
+    maxindc = 4;
+
+  }
+
+
+
+
+  zj << "\\begin{table}\n" << std::endl;
+  zj << "\\caption{Mapping between grid ID And NNLOJET ID for parton contributions at " << d << ".}\n" << std::endl;
+
+  zj << "\\label{tab:" << d << "}" << std::endl;
+
+  zj << "{\\footnotesize\n";
+
+
+  size_t maxproc = 0;
+
+  size_t totalproc = 0;
+
+  //  std::cout << "Nproc: " << p.Nproc() << " " << p.size() << std::endl; 
+
+  for ( int i=0 ; i<p.Nproc() ; i++ ) { 
+    const combination& c = p[i];
+
+    const std::vector<int>& ind = c.index();
+
+    if ( ind.size()>maxproc ) maxproc = ind.size();
+
+    totalproc += ind.size();
+  }  
+
+  //  size_t maxpairs = 20-maxproc;
+
+  //  if ( maxpairs<1 ) maxpairs = maxproc;
+
+  //  zj << "\\hspace{-12cm}";
+  //  zj << "\\begin{minipage}[t]{18cm}\n";
+  //  zj << "pdf : " << str_replace(p.name()) << "\tnprocesses: " << totalproc  << "\\\\" << std::endl; 
+  zj << "\\begin{tabular}[t]{cll}\\\\\n";
+
+  zj << "grid PID & NNLOJET PID & parton contributions\\\\\n\\hline\\\\[-2mm]" << std::endl;
+
+  int irow = 0;
+
+  for ( int i=0 ; i<p.Nproc() ; i++ ) { 
+
+    const combination& c = p[i];
+
+    const std::vector<int>& ind = c.index();
+
+    int nrows = (ind.size()+27)/28;
+
+    zj << "\\makecell[t]{ " << i;
+    for ( int ig=1 ; ig<nrows ; ig++ ) zj << " \\\\ \\ ";
+    zj << "}\t&\t";
+
+    zj << "\\makecell[lt]{";
+    // zj << "\\thead[l]{";
+
+    for ( unsigned j=0 ; j<ind.size() ; j++ ) {
+      //    if ( ind[j]<100 ) zj << "~";
+      //    if ( ind[j]<10  ) zj << "~";
+      zj << ind[j] << " ";
+      if ( j>0 && (j+1)%maxind==0 ) zj << "\\\\";
+    }
+
+    zj << "}\t&\t";
+
+    zj << "\\makecell[lt]{ ";
+
+    for ( unsigned j=0 ; j<c.size() ; j++ ) {
+
+      int p0 = c[j].first;
+      int p1 = c[j].second;
+
+      if ( j>0 ) zj << " + ";
+
+      if ( j>0 && j%maxindc==0 ) zj << " \\\\ \\ ";
+      // if ( j%maxindc==0 ) zj << " \\\\ \\ ";
+
+      zj << "($" << f[p0] << "$, $" << f[p1] << "$)\t";  
+
+    }
+
+    for ( int ig=1 ; ig<nrows ; ig++ ) zj << " \\\\ \\ ";
+    zj << "}";
+
+    zj << "\\\\\n";
+    zj << "\\\\[-2mm]\n";
+    
+    if ( irow==46 ) { 
+
+      irow=0;
+
+      zj << "\\\\\n";
+      zj << "\\hline\n";
+      zj << "\\end{tabular}\n";
+      //      zj << "\\end{minipage}\n";
+
+      zj << "\\pagebreak[4];\n\n\n";
+
+      //      zj << "\\hspace{-12cm}";
+      //      zj << "\\begin{minipage}[t]{18cm}\n";
+      //    zj << "pdf : " << str_replace(p.name()) << "\tnprocesses: " << totalproc  << " (continued)\\\\" << std::endl; 
+      zj << "\\begin{tabular}[t]{cll}\\hline\\\\\n";
+
+    }
+    else irow++;
+  }
+
+  zj << "\\\\\n";
+  zj << "\\hline\n";
+  zj << "\\end{tabular}\n";
+  //  zj << "\\end{minipage}\n";
+
+  zj << "}\n";
+
+  zj << "\\end{table}\n" << std::endl;
+
+  if ( landscape ) { 
+    zj << "\\end{document}\n";
+  }
+
+  zj.close();
+
+  std::cout << "running latex to file : " << d+p.name() << std::endl;
+
+  std::string cmd = std::string("pdflatex ") + d+p.name()+".tex > " + d+p.name() + ".log ";
+
+  //  std::system( cmd.c_str() );
+
+}
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/tsparse_base.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/tsparse_base.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/tsparse_base.h	(revision 2010)
@@ -0,0 +1,99 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    tsparse_base.h
+ **
+ **     @brief   base class for common range checking and utilities for generic 
+ **              sparse matrix classes
+ **
+ **     @author  mark sutton
+ **     @date    Thu Nov 22 01:28:27 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: tsparse_base.h, v0.0   Thu Nov 22 01:28:27 GMT 2007 sutt $
+ **
+ **/
+
+
+#ifndef __TSPARSE_BASE_H
+#define __TSPARSE_BASE_H
+
+#include <iostream>
+
+
+#include <cmath>
+
+
+class tsparse_base {
+
+public: 
+
+  // range exception
+  class out_of_range { 
+  public: 
+    out_of_range(const std::string& s="out of range") { std::cerr << s << std::endl; }
+  };
+
+public:
+  
+  // constructors etc
+
+  tsparse_base()       : m_Nx(0),  m_lx(1),  m_ux(0), m_empty(true)    { }
+  tsparse_base(const int& nx) : m_Nx(nx), m_lx(0),  m_ux(nx-1), m_empty(true) { } 
+  tsparse_base(const int& nx, const int& lx, const int& _size=1)  
+    : m_Nx(nx), m_lx(lx), m_ux(lx+_size-1), m_empty(true) { } 
+
+  virtual ~tsparse_base() { } 
+
+
+  // access functions
+
+  int Nx() const { return m_Nx; } 
+  int lo() const { return m_lx; }
+  int hi() const { return m_ux; }
+
+  virtual int size() const { return m_ux-m_lx+1; } //  + 3*sizeof(int); } 
+
+  bool trimmed(int i) const { return ( i<m_lx || i>m_ux ? false : true );  } 
+
+  // make sure index is not out of range
+  void range_check(int i) const { if ( i<0 || i>m_Nx-1 ) { 
+      std::cerr << "index i=" << i << std::endl;
+      throw out_of_range(); 
+    }    
+  } 
+
+  bool operator==(int i) const { return size()==i; } 
+
+  int xmin() const { return m_lx; } 
+  int xmax() const { return m_ux; } 
+
+  bool empty() const { return ( m_empty || m_ux<m_lx ); }
+
+  // shouldn't really be in here, it's just for printing 
+  static double mant(double x) { 
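+    /// the +128/-128 offset makes the int() truncation act as a floor of 
+    /// log10(|x|) even for |x|<1, so x is rescaled to a magnitude in [1,10)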
+    return  x/std::pow(10.,int(std::log10(std::fabs(x))+128)-128);
+  }
+
+protected:
+
+  int m_Nx; // number of elements
+  int m_lx; // indices of lowest and highest occupied 
+  int m_ux; // elements : (u)pper and (l)ower
+
+  bool m_empty; /// is the grid empty - need this to test in case the grid is untrimmed
+
+};
+
+
+#endif  // __TSPARSE_BASE_H 
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/combination.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/combination.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/combination.cxx	(revision 2010)
@@ -0,0 +1,174 @@
+/**
+ **     @file    combination.cxx
+ **
+ **     @brief   class to evaluate the sum over a std::vector of 
+ **              pairs of parton-parton initial states  
+ **
+ **     @author  mark sutton
+ **     @date    Thu  4 Jul 2013 23:20:23 CEST 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: combination.cxx, v0.0   Thu  4 Jul 2013 23:20:23 CEST sutt $
+ **
+ **/
+
+
+/// automatically keeps count of how many subprocess combinations 
+/// there have been 
+ 
+/// read each of the pairs to be summed for a specific 
+/// subprocess combination 
+
+
+#include "appl_grid/appl_pdf.h"
+#include "appl_grid/combination.h"
+
+
+combination::combination() {} 
+
+
+combination::combination(const std::vector<int>& v) { 
+  //  std::cout << "combination::combination() std::vector" << std::endl;
+  construct(v); 
+}
+
+
+combination::combination(const std::string& line) : m_size(0) { 
+
+  //  std::cout << "combination::combination() std::string " << line << std::endl;
+
+  std::istringstream iss(line);
+  
+  std::vector<int> v;
+  v.reserve(11);
+  
+  int i;
+  
+  while (iss >> i) v.push_back(i);
+
+  construct(v);
+}
+
+
+combination::combination(const combination& c) : 
+  m_index(c.m_index), 
+  m_size(c.m_size), 
+  m_pairs(c.m_pairs) 
+{ }
+
+
+
+combination& combination::operator=(const combination& c) {
+  m_index = c.m_index; 
+  m_size  = c.m_size; 
+  m_pairs = c.m_pairs; 
+  return *this;
+}
+
+
+
+void combination::construct(const std::vector<int>& v) { // : m_index(v.at(0)), m_size(v.at(1)) { 
+
+  //  std::cout << "combination::construct()" << std::endl;
+
+  if ( v.size()==0 ) return; 
+
+  //  if ( v.size()<4 || v.size()%2!=0 ) throw appl::appl_pdf::exception("not enough entries for combination");
+
+  m_index.push_back( v[0] );
+  m_size  = v[1];
+
+  for ( size_t i=2 ; i<v.size() ; i+=2 ) { 
+    int v1 = remap(v[i]);
+    int v2 = remap(v[i+1]);
+    if ( v2==99 ) v2 = v1;
+    m_pairs.push_back( cpair( v1, v2 ) ); 
+  }
+
+
+
+  /// check there are no duplicated pairs in this combination ...
+
+  bool duplicates = false;
+
+  if ( m_pairs.size()>0 ) {  
+    for ( unsigned i=0 ; i<m_pairs.size()-1 ; i++ ) { 
+      //  std::cout << i << "\tindex " << m_index << " " << m_pairs[i] << std::endl; 
+      for ( unsigned j=i+1 ; j<m_pairs.size() ; j++ ) { 
+	if ( m_pairs[i]==m_pairs[j] ) { 
+	  duplicates = true;
+	  std::cerr << "index " << m_index << "\t duplicated entry" << m_pairs[i] << " in " << *this << std::endl;
+	}
+      }  
+    }
+
+    if ( duplicates ) throw appl::appl_pdf::exception("duplicated entries for this combination");
+    
+  }
+  
+}
+
+
+/// evaluate the sum over all the (paired) subprocesses
+
+double combination::evaluate( const double* xfA, const double* xfB,  
+			      const std::vector<double>& _ckmsum, 
+			      const std::vector<std::vector<double> >& _ckm2 ) const {  
+
+  xfA += 6;  /// offset the pointers for xfA and xfB so that the 
+             /// lhapdf [-6..6] flavour indexing can be used
+
+  if ( xfB ) xfB += 6;
+
+  double H = 0; 
+
+  if ( _ckmsum.size()==0 ) {
+    /// without using the ckm matrix 
+    /// for DIS pass in a null vector as the PDF components
+    if ( xfB ) for ( unsigned i=size() ; i-- ; ) H += xfA[m_pairs[i].first]*xfB[m_pairs[i].second]; 
+    else       for ( unsigned i=size() ; i-- ; ) H += xfA[m_pairs[i].first]; 
+  }
+  else {
+    /// using the ckm matrix 
+    if ( xfB ) { 
+      for ( unsigned i=size() ; i-- ; ) { 
+	if ( m_pairs[i].first!=0  ) {
+	  if ( m_pairs[i].second!=0  )  H += xfA[m_pairs[i].first]*xfB[m_pairs[i].second]*_ckm2[m_pairs[i].first+6][m_pairs[i].second+6]; //q-q 
+	  else 	                        H += xfA[m_pairs[i].first]*xfB[m_pairs[i].second]*_ckmsum[m_pairs[i].first+6]; /// q-gluon
+	}
+	else {
+	  if ( m_pairs[i].second!=0  )  H += xfA[m_pairs[i].first]*xfB[m_pairs[i].second]*_ckmsum[m_pairs[i].second+6]; // gluon-q 
+	  else                          H += xfA[m_pairs[i].first]*xfB[m_pairs[i].second]; /// gluon-gluon
+	}
+      }  /// loop over pairs
+    }
+    else { 
+      throw appl::appl_pdf::exception("use of CKM matrix for DIS contributions not yet implemented" );
+    }
+  } /// use ckm matrix 
+  
+  return H;
+}
+
+
+
+bool combination::operator==( const combination& c ) const { 
+  if ( size()!=c.size() ) return false;
+  for ( int i=size() ; i-- ; ) { 
+    if ( (*this)[i]!=c[i] ) return false; 
+  }
+  return true;
+} 
+
+
+
+std::vector<int> combination::serialise() const { 
+  std::vector<int> v;
+  v.push_back(size());
+  for ( unsigned i=0 ; i<size() ; i++ ) { 
+    v.push_back( pair(i).first );
+    v.push_back( pair(i).second );
+  }
+  return v;
+}
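+
+
+/// Illustrative usage sketch (compiled out); the combination string below is 
+/// hypothetical - the format is: index, number of contributions, then the 
+/// parton pairs to be summed:
+#if 0
+void combination_example() {
+  /// subprocess 0, 2 contributing pairs, eg (gluon,gluon) and (up,anti-up)
+  combination c( "0  2    0 0    2 -2" );
+
+  /// evaluate with pdf arrays xfA[13], xfB[13] (flavours -6..6) and 
+  /// no ckm weighting (empty ckmsum vector)
+  double xfA[13] = { 0 }, xfB[13] = { 0 };
+  std::vector<double> ckmsum;
+  std::vector<std::vector<double> > ckm2;
+  double H = c.evaluate( xfA, xfB, ckmsum, ckm2 );
+  std::cout << H << std::endl;
+}
+#endif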
Index: applgrid/tags/applgrid-01-06-36/src/jetrad_pdf.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/jetrad_pdf.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/jetrad_pdf.cxx	(revision 2010)
@@ -0,0 +1,26 @@
+/**
+ **     @file    jetrad_pdf.cxx
+ **
+ **     @brief   pdf transform function for jetrad                   
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: jetrad_pdf.cxx, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+
+
+#include "appl_grid/appl_pdf.h"
+
+#include "jetrad_pdf.h"
+
+
+// fortran callable wrapper
+extern "C" void fjetrad_pdf__(const double* fA, const double* fB, double* H) { 
+  static jetrad_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
Index: applgrid/tags/applgrid-01-06-36/src/mcfmQQ_pdf.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/mcfmQQ_pdf.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/mcfmQQ_pdf.h	(revision 2010)
@@ -0,0 +1,117 @@
+/** emacs: this is -*- c++ -*- **/
+#ifndef __MCFMQQ_PDF_H
+#define __MCFMQQ_PDF_H
+
+#include <cmath>
+
+#include "appl_grid/appl_pdf.h" 
+
+
+//
+// MCFM Q - Qbar production
+//
+class mcfmQQ_pdf : public appl::appl_pdf { 
+
+public: 
+
+  mcfmQQ_pdf(std::string name) : appl_pdf(name) { m_nFlavours = 5;  m_Nproc = 7 ;}
+  ~mcfmQQ_pdf() { } 
+
+  virtual void evaluate(const double* fA, const double* fB, double* H) const;
+
+  int m_nFlavours;
+
+};
+
+
+// actual function to evaluate the pdf combinations 
+
+inline  void mcfmQQ_pdf::evaluate(const double* fA, const double* fB, double* H) const 
+{
+
+  //  const int nQuark = 6;
+//  const int iQuark = 5; 
+  
+  // offset pdf pointers so we can use [-6..6] indexing rather than [nQuark+..] indexing
+
+  fA += 6;
+  fB += 6;
+
+  //  double GA=fA[6];
+  //  double GB=fB[6];
+  double G1=fA[0];
+  double G2=fB[0];
+  
+  double Q1=0, Q2=0, Q1bar=0, Q2bar=0;
+  //double B1 = 0 , B2 = 0, B1bar = 0 , B2bar = 0;
+  
+  for ( int i=1  ; i<= m_nFlavours ; i++ ) {  Q1    += fA[i];   Q2    += fB[i]; }
+  for ( int i=-1 ; i>=-m_nFlavours ; i-- ) {  Q1bar += fA[i];   Q2bar += fB[i]; }  
+
+  //  B1 = 	fA[5]; B2 = fB[5];
+  // B1bar = fA[-5]; B2bar = fB[-5];
+
+  // don't include the gluon (fA[0], fB[0])
+//  for ( int i=-6 ; i<=6 ; i++ ) if ( i ) D += fA[i]*fB[i];
+//
+
+  double QQb1 = 0, QQb2 = 0.;
+
+  for ( int i=1 ; i<=m_nFlavours ; i++ ) { 
+    QQb1 += fA[i]  * fB[-i];  
+    QQb2 += fA[-i] * fB[i]; 
+  }
+
+//  double QQbHeavy = 0;
+//  for ( int i=5 ; i<=6 ; i++ ) { 
+//    QQbHeavy += fA[i]  * fB[-i];  
+//    QQbHeavy += fA[-i] * fB[i]; 
+//  }
+  
+  H[0] =  G1*G2 ;
+  H[1] =  (Q1 )*G2 ;
+  H[2] =  G1*(Q2 ) ;
+  H[3] =  ( Q1bar)*G2 ;
+  H[4] =  G1*(Q2bar) ;
+  H[5] =  QQb1 ;
+  H[6] =  QQb2 ;
+//  H[4] =  QQbHeavy;
+//  H[4] =  D ; 
+//  H[5] =  Dbar ;
+//  H[6] =  Q1*Q2bar+Q1bar*Q2-Dbar ;
+    
+  
+  return;     
+}
+
+
+class mcfmTT_pdf : public mcfmQQ_pdf { 
+
+public: 
+
+  mcfmTT_pdf() : mcfmQQ_pdf("mcfm-TT") { m_nFlavours = 5 ;}
+  ~mcfmTT_pdf() { } 
+
+};
+
+class mcfmBB_pdf : public mcfmQQ_pdf { 
+
+public: 
+
+  mcfmBB_pdf() : mcfmQQ_pdf("mcfm-BB") { m_nFlavours = 4 ;}
+  ~mcfmBB_pdf() { } 
+
+};
+
+class mcfmCC_pdf : public mcfmQQ_pdf { 
+
+public: 
+
+  mcfmCC_pdf() : mcfmQQ_pdf("mcfm-CC") { m_nFlavours = 3 ;}
+  ~mcfmCC_pdf() { } 
+
+};
+
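+/// Illustrative sketch (compiled out) of how the H[] weights produced by 
+/// evaluate() pair up with the parton luminosities; the toy pdf arrays 
+/// here are hypothetical:
+#if 0
+inline void mcfmQQ_example() {
+  double fA[13] = { 0 }, fB[13] = { 0 };
+  fA[6] = fB[6] = 1;                   // gluon (index 0 after the +6 offset)
+  double H[7];
+  mcfmBB_pdf pdf;                      // b-bbar production: 4 light flavours
+  pdf.evaluate( fA, fB, H );
+  /// H[0]: gg,  H[1]: qg,  H[2]: gq,  H[3]: qbar-g,  H[4]: g-qbar, 
+  /// H[5]: q-qbar,  H[6]: qbar-q    (here only H[0] = 1 is non zero)
+}
+#endif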
+#endif  
+
+
Index: applgrid/tags/applgrid-01-06-36/src/mcfmw_pdf.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/mcfmw_pdf.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/mcfmw_pdf.cxx	(revision 2010)
@@ -0,0 +1,38 @@
+/**
+ **     @file    mcfmw_pdf.cxx
+ **
+ **     @brief   pdf transform functions                   
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: mcfmw_pdf.cxx, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+
+
+#include "appl_grid/appl_pdf.h" 
+
+#include "mcfmw_pdf.h"
+
+// fortran callable wrapper
+
+extern "C" void fmcfmwp_pdf__(const double* fA, const double* fB, double* H) { 
+  static mcfmwp_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+
+
+// fortran callable wrapper
+
+extern "C" void fmcfmwm_pdf__(const double* fA, const double* fB, double* H) { 
+  static mcfmwm_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/mcfmz_pdf.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/mcfmz_pdf.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/mcfmz_pdf.cxx	(revision 2010)
@@ -0,0 +1,27 @@
+/**
+ **     @file    mcfmz_pdf.cxx
+ **
+ **     @brief   pdf transform functions for z production                  
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: mcfmz_pdf.cxx, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+
+
+#include "appl_grid/appl_pdf.h" 
+
+#include "mcfmz_pdf.h"
+
+
+// fortran callable wrapper
+extern "C" void fmcfmz_pdf__(const double* fA, const double* fB, double* H) { 
+  static mcfmz_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+
Index: applgrid/tags/applgrid-01-06-36/src/file_index.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/file_index.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/file_index.cxx	(revision 2010)
@@ -0,0 +1,108 @@
+/* emacs: this is -*- c++ -*- */
+/**
+ **   @file    file_index.cxx        
+ **                   
+ **   @author  sutt
+ **   @date    Thu 31 Dec 2020 14:42:12 GMT
+ **
+ **   $Id: file_index.cxx, v0.0   Thu 31 Dec 2020 14:42:12 GMT sutt $
+ **
+ **   Copyright (C) 2020 sutt (sutt@cern.ch)    
+ **
+ **/
+
+
+#include <iostream>
+#include <vector>
+#include <map>
+#include <string>
+
+#include "appl_grid/file_index.h"
+
+
+
+file_index::file_index() : mname("index"), mrunning(0) { } 
+
+file_index::~file_index() { } 
+
+
+void file_index::add( const std::string& _s, int bytes, int offset, int counter ) { 
+    std::string s = _s;
+    check_for_duplicates( s, counter );
+    if ( counter>0 ) std::cerr << "index::add() duplicated key: " << _s << "\tusing: " << s << std::endl;  
+    mkeys.push_back( s );
+    mmap.insert( map_t::value_type( s, entry( bytes, mrunning ) ) );
+    mrmap.insert( rmap_t::value_type( mrunning, s ) );
+    if ( offset!=-1 && mrunning!=offset ) std::cerr << "index::add() offset mismatch: " << s << std::endl;
+    mrunning += bytes;
+} 
+
+
+void file_index::check_for_duplicates( std::string& s, int& counter ) { 
+    size_t pos = s.find_last_of(";");
+    if ( counter==0 && pos!=std::string::npos ) check_for_duplicates( s.erase(pos), counter );      
+    else if ( mmap.find(s)!=mmap.end() ) { 
+      if ( counter==0 ) s += ";"+std::to_string(++counter);
+      else s.replace( pos+1, s.size(), std::to_string(++counter) );
+      check_for_duplicates( s, counter );
+    }
+}
+
+
+#if 0
+/// different treatment of duplicated keys and whether ";" is 
+/// allowed in the names
+void check_for_duplicates( std::string& s, int& counter ) { 
+    if ( mmap.find(s)!=mmap.end() ) { 
+      size_t pos = s.find_last_of(";");
+      if ( pos==std::string::npos ) s += ";"+std::to_string(++counter);
+      else s.replace( pos+1, s.size(), std::to_string(++counter) );
+      check_for_duplicates( s, counter );
+    }
+}
+  
+#endif
+
+
+/// serialisation and deserialisation ...
+
+void file_index::serialise_internal( std::vector<SB::TYPE>& s ) const {  
+
+    SB::serialise( s, name() );
+
+    s.push_back( mkeys.size() );
+
+    for ( size_t i=0 ; i<mkeys.size() ; i++ ) { 
+      SB::serialise( s, mkeys[i] );
+      entry e = find( mkeys[i] );
+      s.push_back( e.size );
+      s.push_back( e.offset );
+    }
+
+}
+
+
+void file_index::deserialise_internal( std::vector<SB::TYPE>::const_iterator& itr ) { 
+
+    SB::deserialise( itr, mname );
+
+    size_t nkeys = (*itr++);
+
+    for ( size_t i=0 ; i<nkeys ; i++ ) {
+      std::string key;
+      SB::deserialise( itr, key );
+      SB::TYPE esize  = (*itr++);
+      SB::TYPE offset = (*itr++);
+      add( key, esize, offset, -1 );
+    }
+      
+} 
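+
+/// Illustrative sketch (compiled out) of the duplicate key handling above: a 
+/// repeated key is stored with a ";N" suffix so that all entries remain 
+/// addressable - the key names and sizes here are hypothetical:
+#if 0
+void file_index_example() {
+  file_index idx;
+  idx.add( "grid", 128, -1, 0 );   // stored as "grid",   offset 0
+  idx.add( "grid",  64, -1, 0 );   // duplicate: stored as "grid;1", offset 128
+  idx.add( "grid",  32, -1, 0 );   // duplicate: stored as "grid;2", offset 192
+}
+#endif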
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/fastnlo.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/fastnlo.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/fastnlo.cxx	(revision 2010)
@@ -0,0 +1,465 @@
+/**
+ **     @file    fastnlo.cxx
+ **
+ **     @brief   Implementation of the fastnlo - applgrid interface
+ **
+ **     @author  mark sutton
+ **     @date    Sun 27 Sep 2009 22:39:32 BST 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: fastnlo.cxx, v0.0   Sun 27 Sep 2009 22:39:32 BST sutt $
+ **
+ **/
+
+
+#include "appl_grid/fastnlo.h"
+#include "appl_igrid.h"
+
+
+#include <iostream>
+#include <fstream>
+#include <sstream>
+#include <vector>
+#include <cmath>
+#include <sys/stat.h>
+
+
+std::ofstream sout( "/dev/null" );
+// std::ostream& sout = std::cout;
+
+template<class A>
+void out( A& a ) { sout << a << std::endl; }
+
+template<class A>
+void out( const std::string& s, A& a ) { sout << s << ":\t " << a << std::endl; }
+
+
+
+
+
+
+fastnlo::fastnlo( const std::string& filename ) : m_manage_grids(true) {
+  
+  std::cout << "fastnlo::fastnlo() reading fastnlo grid file " << filename << std::endl;
+
+  // can we open the fastnlo grid file
+  struct stat stfileinfo;
+  if ( stat(filename.c_str(),&stfileinfo) )   {
+    std::cerr << "fastnlo::fastnlo() cannot open fastnlo grid file " << filename << std::endl;
+    return;
+  }
+
+  // read the fastnlo grid
+  std::ifstream faststream( filename.c_str() );
+
+  std::string dummy;
+
+  std::string seperator = "1234567890";
+
+
+  faststream >> dummy;  
+
+  long long tabversion=0;   faststream >> tabversion; out( "tabversion", tabversion);
+
+  faststream >> dummy; 
+
+  long long ireaction=0;   faststream >> ireaction;   out( "reaction", ireaction );
+  double    Ecms=0;        faststream >> Ecms;        out( "Ecms", Ecms );
+  
+  double ixsecunits = 0;  faststream >> ixsecunits;   out( "ixsecunits", ixsecunits );
+  std::string label[5];   
+
+  for ( int i=0 ; i<5 ; i++ ) { faststream >> label[i]; out( "label", label[i] ); } 
+
+  std::string docstring;
+  for ( int i=0 ; i<5 ; i++ ) if ( label[i]!="-" ) docstring += label[i] + " ";
+
+  long long iproc;       faststream >> iproc;    out( "iproc", iproc );
+  long long ialgo;       faststream >> ialgo;    out( "ialgo", ialgo );
+  double    jetres1;     faststream >> jetres1;  out( "jetres1", jetres1 );
+  double    jetres2;     faststream >> jetres2;  out( "jetres2", jetres2 );
+  long long Norder = 0;  faststream >> Norder;   out( "Norder", Norder );
+
+  std::vector<int> Npow(Norder); 
+  for ( int i=0 ; i<Norder ; i++ ) { faststream >> Npow[i]; out( "\t", Npow[i] ); }
+
+  std::vector<std::string> powlabel(Norder);
+  std::vector<std::string> codelabel(Norder);
+  for ( int i=0 ; i<Norder ; i++ ) { 
+    faststream >> powlabel[i];   out( "\tpowlabel:",  powlabel[i] ); 
+    faststream >> codelabel[i];  out( "\tcodelabel:", codelabel[i] ); 
+  }
+
+  faststream >> dummy;  
+
+  std::vector<long long> Nevt(Norder);   
+ 
+  for ( int i=0 ; i<Norder ; i++ ) { faststream >> Nevt[i]; out( "\t:", Nevt[i] ); }
+
+  int Nxtot    = 0;   faststream >> Nxtot;     out( "Nxtot", Nxtot );
+  int ixscheme = 0;   faststream >> ixscheme;  out( "scheme", ixscheme );
+  int ipdfwgt  = 0;   faststream >> ipdfwgt;   out( "ipdfwgt", ipdfwgt );
+  int iref     = 0;   faststream >> iref;      out( "iref", iref );
+
+  faststream >> dummy; 
+
+  int Nbintot = 0;     faststream >> Nbintot;     out( "Nbintot", Nbintot );
+  int Ndimension = 0;  faststream >> Ndimension;  out( "Ndimension", Ndimension );
+
+  std::vector<std::string> dimlabel(Ndimension);
+  for ( int i=0 ; i<Ndimension ; i++ ) { faststream >> dimlabel[i]; out( "dimlabel: ", dimlabel[i] ); }
+
+  int Nrapidity = 0;  faststream >> Nrapidity; out( "Nrapidity", Nrapidity );
+
+  // rapidity bins
+
+  std::vector<double> rapbin(Nrapidity+1);
+  for ( int i=0 ; i<Nrapidity+1 ; i++ ) { faststream >> rapbin[i]; out( "\trapbin:", rapbin[i] ); }
+
+  std::vector<int>  Npt(Nrapidity); out( "\nNpt:", Nrapidity );
+  for ( int i=0 ; i<Nrapidity ; i++ ) {  faststream >> Npt[i]; out( "\tNpt:", Npt[i] );  }
+
+
+  // pt bins   
+  std::vector<std::vector<double> > ptbin(Nrapidity);
+  for ( int i=0 ; i<Nrapidity ; i++ ) {
+    ptbin[i] = std::vector<double>(Npt[i]+1);
+    for ( int j=0 ; j<Npt[i]+1 ; j++ )  { 
+      faststream >> ptbin[i][j]; 
+      //      std::cout << "\t\tptbin " << ptbin[i][j] << std::endl;
+    }
+  }
+
+  faststream >> dummy; 
+  
+  // xlimits 
+  std::vector<std::vector<double> > xlimit(Nrapidity);
+  for ( int i=0 ; i<Nrapidity ; i++ ) {
+    xlimit[i]=std::vector<double>(Npt[i]);
+    for ( int j=0 ; j<Npt[i] ; j++ ) faststream >> xlimit[i][j]; 
+  }
+
+  
+  faststream >> dummy; 
+
+  std::string scalelabel;  faststream >> scalelabel; out( "scalelabel: ", scalelabel );  
+  int Nscalebin = 0;       faststream >> Nscalebin;  out( "Nscalebin: ", Nscalebin );  
+
+  std::vector<std::vector<std::vector<double> > > murval(Nrapidity);
+  //  murval.reserve(Nrapidity);
+  for ( int i=0 ; i<Nrapidity ; i++ ) {
+    murval[i] = std::vector<std::vector<double> >(Npt[i]);
+    for ( int j=0 ; j<Npt[i] ; j++ ) { 
+      murval[i][j]=std::vector<double>(Nscalebin);
+      //      std::cout << "rapidity bin " << i << "\n\tpt " << j << "\tNscalebin " << Nscalebin << "\n\t";
+      for ( int k=0 ; k<Nscalebin ; k++ ) { 
+	faststream >> murval[i][j][k];
+	//	std::cout << "\t" << murval[i][j][k];
+      }
+      //      std::cout << std::endl;
+    }
+  }
+  
+  faststream >> dummy; 
+
+  std::vector<std::vector<std::vector<double> > > mufval(Nrapidity);
+  for ( int i=0 ; i<Nrapidity ; i++ ) {
+    mufval[i] = std::vector<std::vector<double> >(Npt[i]);
+    for ( int j=0 ; j<Npt[i] ; j++ ) { 
+      mufval[i][j] = std::vector<double>(Nscalebin);
+      for ( int k=0 ; k<Nscalebin ; k++ ) faststream >> mufval[i][j][k];
+    }
+  }
+  
+  faststream >> dummy; 
+
+  int Nscalevar = 0;  faststream >> Nscalevar;  out( "Nscalevar: ", Nscalevar );
+
+  std::vector<double> murscale(Nscalevar);
+  for ( int i=0 ; i<Nscalevar ; i++ ) { faststream >> murscale[i]; out( "mur: ", murscale[i] ); }
+
+  std::vector<double> mufscale(Nscalevar);
+  for ( int i=0 ; i<Nscalevar ; i++ ) { faststream >> mufscale[i]; out ( "muf: ", mufscale[i] ); }
+
+  faststream >> dummy;  
+  
+
+  int Nxsum = 0;
+  int Nsubproc = 0;
+
+  std::string pdfname; 
+  if ( ireaction==1 ) pdfname = "dis";
+  if ( ireaction==2 ) pdfname = "nlojet";
+  if ( ireaction==3 ) pdfname = "nlojetpp";
+ 
+  if ( (ireaction==2) || (ireaction==3) ) { //  pp or ppbar
+    Nxsum = (Nxtot*Nxtot+Nxtot)/2;
+    Nsubproc = 7;
+  }
+  else { 
+    if (ireaction==1) { //  ! DIS
+      Nxsum = Nxtot;
+      Nsubproc = 3;
+    }
+  }
+
+
+  //  appl::grid** m_grid = new appl::grid*[Nrapidity];
+  m_grid = std::vector<appl::grid*>(Nrapidity);
+
+  // need to implement other schemes if need be...
+  std::string transform;
+  std::string qtransform = "h0";
+
+
+  //  static double (*fy)(double x) = NULL;
+  //  static double (*fx)(double x) = NULL;
+
+  //  appl::igrid::transform_t fx = 0;
+  //  appl::igrid::transform_t fy = 0;
+
+  out( "scheme: ", ixscheme ); 
+
+  switch ( ixscheme ) { 
+  case 1:  
+    // dis
+    transform = "f4";
+    //   fy = &appl::igrid::_fy4;
+    //   fx = &appl::igrid::_fx4;
+    break;
+  case 2: 
+    // pp and ppbar  
+    transform = "f3";
+    //   fy = appl::igrid::_fy3;
+    //   fx = appl::igrid::_fx3;
+    break;
+  default:
+    std::cerr << "fastnlo::fastnlo() transform not defined" << std::endl;
+    return;
+  }
+
+  out( "transform:  ",  transform ); 
+  out( "qtransform: ", qtransform ); 
+
+
+  bool DISgrid = false;
+  if ( ireaction==1 )  DISgrid = true;
+ 
+
+  out("Nxsum ", Nxsum );
+  out("Nsubproc ", Nsubproc );
+
+
+  out( "Nrapidity: ", Nrapidity );
+  
+  int Nbins = 0;
+  for ( int i=0 ; i<Nrapidity ; i++ ) Nbins += Npt[i];
+  
+  std::vector<  std::vector<  std::vector<  std::vector<  std::vector<double> > > > > array(Nbins);
+
+
+  int ibin = 0;
+  for ( int irap=0 ; irap<Nrapidity ; irap++ ) { 
+    // out( "Npt: ", Npt[irap] );
+    for ( int ipt=0 ; ipt<Npt[irap] ; ipt++ ) {
+      // out( "Nxsum: ", Nxsum );
+      
+      array[ibin] = std::vector<  std::vector<  std::vector<  std::vector<double> > > >(Nxsum);
+
+      int ix = 0;
+      for ( int ix1=0 ; ix1<Nxtot ; ix1++ ) {
+
+	int ix2max = ix1;
+	if ( ireaction==1 ) ix2max=0;
+ 
+	for ( int ix2=0 ; ix2<=ix2max ; ix2++ ) { 
+	  
+	  // out( "Nsubproc: ", Nsubproc );
+
+	  array[ibin][ix] = std::vector<  std::vector<  std::vector<double> > >(Nsubproc);
+      
+	  for ( int isub=0 ; isub<Nsubproc ; isub++ ) {
+	    long long ns=1+Nscalevar*(Norder-1);
+	    //	  out( "Nscales: ", ns );
+	    
+	    array[ibin][ix][isub] = std::vector<  std::vector<double> >(ns);
+	    
+	    for ( int iscale=0 ; iscale<ns ; iscale++ ) { 
+	      // out ( "Nscalebin: ", Nscalebin );
+	      
+	      array[ibin][ix][isub][iscale] = std::vector<double>(Nscalebin);
+	      
+	      for ( int i=0 ; i<Nscalebin ; i++ ) { 
+		faststream >> array[ibin][ix][isub][iscale][i];
+
+#if 0
+		std::cout << "array " 
+			  << ibin << " " << ix << " " << isub << " " << iscale << " " << i << " " 
+			  << " : " << array[ibin][ix][isub][iscale][i] << std::endl;
+	
+#endif	
+
+	      }
+	    }
+	  }
+
+	  ix++;
+	}
+      }
+      ibin++;
+    }
+  }
+    
+  faststream >> dummy; 
+
+
+  std::cout << "making grids..." << std::endl;
+  
+  // get the central scale bin - since we use our own scale variations
+  // we don't have separate grids for each different scale
+  
+  int icentral = 0;
+  for ( int imu=0 ; imu<Nscalevar ; imu++ ) { 
+    if ( std::fabs(murscale[imu]-1)<1e-5 ) icentral = imu;
+  } 
+  
+  
+  int iposition[3] = { 0, 1+icentral, 1+icentral+Nscalevar };
+  ibin = 0;
+  
+  /// create dummy igrid, purely so we can call the fx and fy functions! I ask you! 
+  appl::igrid grid_dummy(   5, 10, 100, 0,   5, 0.01, 0.1, 0,  transform, qtransform, Nsubproc, DISgrid );
+
+  
+  for ( int irap=0 ; irap<Nrapidity ; irap++ ) { 
+
+    //    std::cout << "rap " << irap << std::endl;
+    
+    std::vector<double> ptlims;
+    
+    for ( int ipt=0 ; ipt<Npt[irap]+1 ; ipt++ ) {
+      ptlims.push_back(ptbin[irap][ipt]);
+    }
+
+    std::stringstream ss;
+    ss << " - bin " << irap;
+    std::string _docstring = docstring + ss.str(); 
+       
+    m_grid[irap] = new appl::grid( ptlims, pdfname, Npow[0], Npow.back()-Npow[0], transform, qtransform );
+    m_grid[irap]->symmetrise(true);
+    m_grid[irap]->setCMSScale(Ecms);
+    m_grid[irap]->setDocumentation(_docstring);
+    //   m_grid[irap]->disable_threads(true);
+
+    std::cout << "reading fastnlo grid: " << _docstring << std::endl; 
+
+    for ( int ipt=0 ; ipt<Npt[irap] ; ipt++ ) {
+      
+      // need this bin width: the fastnlo grid is already divided by it, 
+      // so the weights are multiplied back by the width below, otherwise 
+      // the applgrid normalisation by the bin width would be applied twice
+      double width = ptbin[irap][ipt+1]-ptbin[irap][ipt];
+
+      //      std::cout << "width " << ipt << "\t" << width << std::endl;
+
+      // need to generalise this??
+      double hylim = -grid_dummy.fy( xlimit[irap][ipt] );
+      
+      double hyl = hylim;
+      double hyu = hylim/Nxtot;
+      double xl = grid_dummy.fx(-hyl);
+      double xu = grid_dummy.fx(-hyu);
+      
+      //      std::cout << "ipt " << ipt << "\txl " << xl << "\txu " << xu << std::endl; 
+      
+      // this has made the x values - only need to make the limits, 
+      // using appropriate fx/fy transforms 
+      
+      for ( int iord=0 ; iord<Norder ; iord++ ) { 
+
+	//	std::cout << "Rap " << irap << "  pt " << ipt << "  order " << iord << std::endl;
+
+	double Q2min = murval[irap][ipt][0];            Q2min *= Q2min;
+	double Q2max = murval[irap][ipt][Nscalebin-1];  Q2max *= Q2max;
+
+#if 0
+	std::cout << "Q2 " << Q2min 
+		  << "\t"  << Q2max 
+		  << "\tNscale "  << Nscalebin 
+		  << "\tDISgrid " << DISgrid << std::endl;  
+#endif
+
+	appl::igrid* g = new appl::igrid( Nscalebin, Q2min, Q2max, 0, 
+					  Nxtot, xl, xu, 0, 
+					  transform, qtransform, Nsubproc, DISgrid );
+	
+	g->symmetrise(true);
+	if ( ipdfwgt ) g->reweight(true);
+
+	m_grid[irap]->add_igrid(ipt, iord, g);
+
+	//	std::cout << "grid ";
+	for ( int isub=0 ; isub<Nsubproc ; isub++ ) { 
+	  
+	  
+	  SparseMatrix3d& sgrid = *g->weightgrid()[isub];
+	  
+	  
+ 	  // now need to fill the grid;
+	  
+	  int ix=0;
+	  
+	  //	  int NQ  = g->weightgrid()[isub]->Nx();
+	  int Nx1 = g->weightgrid()[isub]->Ny();
+	  int Nx2 = g->weightgrid()[isub]->Nz();
+	  if ( DISgrid ) Nx2 = 1;
+
+	  // std::cout << "ord " << iord << "\tipos " << iposition[iord] << std::endl; 
+	  
+	  for ( int ix1=0 ; ix1<Nxtot ; ix1++ ) { 
+
+	    int ix2max = ix1;
+	    if ( ireaction==1 ) ix2max=0;
+
+	    for ( int ix2=0 ; ix2<=ix2max ; ix2++ ) { 
+	      for ( int iQbin=0 ; iQbin<Nscalebin ; iQbin++ ) {       
+		// leading order
+		//		double d = array[ibin][ix][isub][iposition[iord]][iQbin];
+		double d = array[ibin][ix][isub][iposition[iord]][iQbin];
+#if 0
+		std::cout << "\taray[" << ibin << "][" << ix << "][" << isub << "][" << iposition[iord] << "]" << std::endl;
+		std::cout << "  Q " << iQbin << "  x1 " <<  ix1 << "  x1 " << ix2 << "  ix " << ix 
+			  << "  g " << d << std::endl;
+		
+		std::cout << "array " 
+			  << ibin << " " << ix << " " << isub << " " << iposition[iord] << " " << iQbin << " " 
+			  << " : " << array[ibin][ix][isub][iposition[iord]][iQbin] << std::endl;
+#endif
+		
+		sgrid(iQbin, Nx1-1-ix1, Nx2-1-ix2) = d*width;
+	      }
+	      ix++;
+	    }
+
+	  }
+	  // loops over x1, x2 and Q2 bins
+	  
+	} // Nsubproc
+
+	g->setlimits();
+
+      }
+
+
+      ibin++;	
+      
+    }
+  }
+  
+}
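+
+/// Illustrative usage sketch (compiled out); the table file name is 
+/// hypothetical:
+#if 0
+void fastnlo_example() {
+  fastnlo f( "fnl0001.tab" );                     // parse a fastnlo table
+  std::vector<appl::grid*> grids = f.grids();     // one applgrid per rapidity bin
+  for ( unsigned i=0 ; i<grids.size() ; i++ ) { 
+    std::cout << grids[i]->getDocumentation() << std::endl;
+  }
+}
+#endif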
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/fappl_grid.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/fappl_grid.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/fappl_grid.cxx	(revision 2010)
@@ -0,0 +1,443 @@
+/**
+ **     @file    fappl_grid.cxx
+ **
+ **     @brief   fortran callable wrapper functions for the c++  
+ **              appl grid project.
+ **
+ **     @author  mark sutton
+ **     @date    Wed May 21 14:31:36 CEST 2008 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: fappl.cxx, v1.0   Wed May 21 14:31:36 CEST 2008 sutt $
+ **
+ **/
+
+#include <map>
+#include <iostream>
+#include <sstream>
+
+#include "appl_grid/appl_grid.h"
+#include "appl_grid/fastnlo.h"
+
+
+#include "fappl_grid.h"
+
+/// externally defined alpha_s and pdf routines for fortran 
+/// callable convolution wrapper
+extern "C" double fnalphas_(const double& Q); 
+extern "C" void   fnpdf_(const double& x, const double& Q, double* f);
+
+
+
+static int idcounter = 0;
+static std::map<int,appl::grid*> _grid;
+
+
+
+void throw_exception( const std::string& msg, int id, const std::string& s="" ) {  
+    std::stringstream s_;
+    s_ << msg << id << s;
+    throw appl::grid::exception( s_.str() );
+}
+
+
+/// grid std::map management
+
+void ngrids_(int& n) { n=_grid.size(); }
+
+void gridids_(int* ids) { 
+  std::map<int,appl::grid*>::iterator gitr = _grid.begin();
+  for ( int i=0 ; gitr!=_grid.end() ; gitr++, i++ ) ids[i] = gitr->first;
+}
+
+
+
+void bookgrid_(int& id, const int& Nobs, const double* binlims) 
+{
+  id = idcounter++;
+
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+
+  if ( gitr==_grid.end() ) {
+    std::cout << "bookgrid_() creating grid with id " << id << std::endl; 
+    _grid.insert(  std::map<int,appl::grid*>::value_type( id, new appl::grid( Nobs, binlims,
+									      2,    10, 1000, 1,
+									      12,  1e-5, 1, 3, 
+									      "nlojet", 1, 3, "f3") ) ) ;									 
+    //  _grid->symmetrise(true);
+  }
+  else throw_exception( "grid with id ", id, " already exists" );
+}
+
+
+
+void readgrid_( int& id, const char* _s, int _len) {
+  std::string s(_s,0,_len);
+  id = idcounter++;
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr==_grid.end() ) { 
+    appl::grid* g = new appl::grid(s);
+    std::cout << "readgrid: " << id << " " << g->getDocumentation() << std::endl;
+    _grid.insert(  std::map<int,appl::grid*>::value_type( id, g ) ); 
+  }
+  else throw_exception( "grid with id ", id, " already exists" ); 
+}
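+
+
+/// Illustrative sketch (compiled out) of driving the id-keyed grid map from 
+/// C++ rather than fortran; the file name and bin limits are hypothetical:
+#if 0
+void fappl_example() {
+  int id = 0;
+  double binlims[4] = { 0, 25, 50, 100 };
+  bookgrid_( id, 3, binlims );            // creates the grid, returns its id
+  printgriddoc_( id );                    // look it up again by id
+  writegrid_( id, "example.root", 12 );   // string length passed explicitly, as fortran would
+  releasegrid_( id );                     // delete it and remove it from the map
+}
+#endif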
+
+
+  
+void printgrid_(const int& id) { 
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr!=_grid.end() ) { 
+    std::cout << "grid id " << id << "\n" << *gitr->second << std::endl;
+  }
+  else throw_exception( "No grid with id ", id ); 
+}
+
+
+void printgrids_() { 
+  std::map<int,appl::grid*>::iterator gitr = _grid.begin();
+  for ( ; gitr!=_grid.end() ; gitr++ ) { 
+    std::cout << "grid id " << gitr->first << "\n" << *gitr->second << std::endl;
+  }
+}
+
+  
+void printgriddoc_(const int& id) { 
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr!=_grid.end() ) { 
+    std::cout << "grid id " << id << "\n" << gitr->second->getDocumentation() << std::endl;
+  }
+  else throw_exception( "No grid with id ", id ); 
+}
+
+
+void releasegrid_(const int& id) { 
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr!=_grid.end() )     { 
+    delete gitr->second; 
+    _grid.erase(gitr);
+  }
+  else throw_exception( "No grid with id ", id ); 
+}
+
+
+void releasegrids_() { 
+  std::map<int,appl::grid*>::iterator gitr = _grid.begin();
+  while ( gitr!=_grid.end() ) { 
+    delete gitr->second; 
+    _grid.erase(gitr++);  // post-increment before erase, so the iterator is not invalidated
+  }
+}
+
+void setckm_( const int& id, const double* ckm ) { 
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr!=_grid.end() ) {
+    gitr->second->setckm( ckm );
+  }
+  else  throw_exception( "No grid with id ", id );  
+}
+
+
+void getckm_( const int& id, double* ckm ) { 
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr!=_grid.end() ) { 
+    std::vector<std::vector<double> > __ckm = gitr->second->getckm();
+    for ( unsigned i=0 ; i<__ckm.size() ; i++ ) {
+      for ( unsigned j=0 ; j<__ckm[i].size() ; j++ ) {
+	ckm[i*3+j] = __ckm[i][j];
+      }
+    }
+  }
+  else  throw_exception( "No grid with id ", id );  
+}
+
+
+
+void redefine_(const int& id, 
+	       const int& iobs, const int& iorder, 
+	       const int& NQ2,  const double& Q2min, const double& Q2max, 
+	       const int& Nx,   const double&  xmin, const double&  xmax) 
+{
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr!=_grid.end() ) {
+    gitr->second->redefine(iobs, iorder, 
+			   NQ2, Q2min, Q2max, 
+			   Nx,   xmin,  xmax); 
+  }
+  else  throw_exception( "No grid with id ", id );    
+} 
+
+
+
+void getreference_(const int& id, double* data, double* dataerror ) {
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr==_grid.end() ) throw_exception( "No grid with id ", id );  
+  appl::TH1D* h = gitr->second->getReference();
+  for ( size_t i=0 ; i<h->size() ; i++ ) { 
+    data[i]      = h->y(i);
+    dataerror[i] = h->ye(i);
+  }
+}
+
+
+void getfracerror_(const int& id, double* f ) {
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr==_grid.end() ) throw_exception( "No grid with id ", id );  
+  appl::TH1D* h = gitr->second->getReference();
+  for ( size_t i=0 ; i<h->size() ; i++ ) { 
+    if ( h->y(i)!=0 ) f[i] = h->ye(i)/h->y(i);
+  }
+}
+
+
+int getnbins_(const int& id) { 
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr==_grid.end() ) throw_exception( "No grid with id ", id );  
+  return gitr->second->Nobs();
+}
+
+int getbinnumber_(const int& id, double& x) { 
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr==_grid.end() ) throw_exception( "No grid with id ", id );  
+  return gitr->second->obsbin(x);
+}
+
+double getbinlowedge_(const int& id, int& bin) { 
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr==_grid.end() ) throw_exception( "No grid with id ", id );  
+  return gitr->second->obslow(bin);
+}
+
+double getbinwidth_(const int& id, const int& bin) { 
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr==_grid.end() ) throw_exception( "No grid with id ", id );
+  return gitr->second->deltaobs(bin);
+}
+
+
+
+
+void convolute_(const int& id, double* data) { 
+  convolutewrap_(id, data, fnpdf_, fnalphas_); 
+}
+
+
+void convolutewrap_(const int& id, double* data, 
+		    void (*pdf)(const double& , const double&, double* ),  
+		    double (*alphas)(const double& ) ) {  
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr!=_grid.end() ) { 
+    appl::grid*    g = gitr->second;
+    std::vector<double> v = g->vconvolute( pdf, alphas);
+    for ( unsigned i=0 ; i<v.size() ; i++ ) data[i] = v[i];      
+  }
+  else throw_exception( "No grid with id ", id );  
+}
+
+
+
+
+/// for the pdf of an anti-particle given a pdf
+void (*_pdf)(const double& x, const double& Q, double* f) = 0;
+
+extern "C" void  antipdf(const double& x, const double& Q, double* f) { 
+  if ( _pdf!=0 ) { 
+    double xf[13];
+    _pdf( x, Q, xf );
+    for ( int i=0 ; i<13 ; i++ ) f[i] = xf[12-i]; 
+  }
+}
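+
+/// Quick compiled-out check of the flavour reversal above: with the lhapdf 
+/// convention f[0..12] for flavours -6..6, entry i of the antiparticle pdf 
+/// is entry 12-i of the particle pdf, so eg the d quark (i=7) maps onto 
+/// the dbar (i=5):
+#if 0
+void antipdf_check( void (*pdf)(const double&, const double&, double*) ) { 
+  double xf[13], xfbar[13];
+  _pdf = pdf;
+  pdf( 0.1, 100., xf );
+  antipdf( 0.1, 100., xfbar );
+  _pdf = 0;
+  std::cout << ( xfbar[5]==xf[7] ? "ok" : "mismatch" ) << std::endl;
+}
+#endif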
+
+
+
+void convoluteppbar_(const int& id, double* data) { 
+  convoluteppbarwrap_(id, data, fnpdf_, fnalphas_); 
+}
+
+
+void convoluteppbarwrap_(const int& id, double* data, 
+			 void (*pdf)(const double& , const double&, double* ),  
+			 double (*alphas)(const double& ) ) {  
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr!=_grid.end() ) { 
+    _pdf = pdf;
+    appl::grid* g = gitr->second;
+    std::vector<double> v = g->vconvolute( pdf, antipdf, alphas, g->nloops() );
+    for ( unsigned i=0 ; i<v.size() ; i++ ) data[i] = v[i];
+    _pdf = 0;
+  }
+  else throw_exception( "No grid with id ", id );  
+}
+
+
+
+
+void convoluteorder_(const int& id, const int& nloops, double* data) { 
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr!=_grid.end() ) { 
+    appl::grid*    g = gitr->second;
+    std::vector<double> v = g->vconvolute(fnpdf_, fnalphas_, nloops);
+    for ( unsigned i=0 ; i<v.size() ; i++ ) data[i] = v[i];      
+  }
+  else throw_exception( "No grid with id ", id );  
+}
+
+
+
+
+void fullconvolutewrap_(const int& id, double* data, 
+			void (*pdf)(const double& , const double&, double* ),  
+			double (*alphas)(const double& ),
+			const int& nloops,
+			const double& rscale, const double& fscale  ) {  
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr!=_grid.end() ) { 
+    appl::grid*    g = gitr->second;
+    std::vector<double> v = g->vconvolute( pdf, alphas, nloops, rscale, fscale);
+    for ( unsigned i=0 ; i<v.size() ; i++ ) data[i] = v[i];      
+  }
+  else throw_exception( "No grid with id ", id );  
+}
+
+
+void fullconvolute_(const int& id, double* data, 
+		    const int& nloops,
+		    const double& rscale, const double& fscale  ) {  
+  fullconvolutewrap_( id, data, fnpdf_, fnalphas_, nloops, rscale, fscale);
+}
+
+
+
+/// convolute functions with beam energy scaling 
+
+void escaleconvolute_(const int& id, double* data, const double& Escale) { 
+  escaleconvolutewrap_(id, data, fnpdf_, fnalphas_, Escale); 
+}
+
+void escaleconvolutewrap_(const int& id, double* data, 
+			  void (*pdf)(const double& , const double&, double* ),  
+			  double (*alphas)(const double& ), 
+			  const double& Escale ) {  
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr!=_grid.end() ) { 
+    appl::grid*    g = gitr->second;
+    std::vector<double> v = g->vconvolute( pdf, alphas, g->nloops(), 1, 1, Escale );
+    for ( unsigned i=0 ; i<v.size() ; i++ ) data[i] = v[i];      
+  }
+  else throw_exception( "No grid with id ", id );  
+}
+
+
+
+void escalefullconvolutewrap_(const int& id, double* data, 
+			      void (*pdf)(const double& , const double&, double* ),  
+			      double (*alphas)(const double& ),
+			      const int& nloops,
+			      const double& rscale, const double& fscale,
+			      const double& Escale ) {  
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr!=_grid.end() ) { 
+    appl::grid*    g = gitr->second;
+    std::vector<double> v = g->vconvolute( pdf, alphas, nloops, rscale, fscale, Escale);
+    for ( unsigned i=0 ; i<v.size() ; i++ ) data[i] = v[i];      
+  }
+  else throw_exception( "No grid with id ", id );  
+}
+
+
+void escalefullconvolute_(const int& id, double* data, 
+			  const int& nloops,
+			  const double& rscale, const double& fscale,
+			  const double& Escale ) {  
+  escalefullconvolutewrap_(id, data, 
+			   fnpdf_, fnalphas_, nloops, rscale, fscale, Escale );
+  
+}
+
+
+void writegrid_(const int& id, const char* _s, int _len) {
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr!=_grid.end() ) { 
+    std::string s(_s,0,_len);
+    std::cout << "writegrid_() writing " << s << "\tid " << id << std::endl;
+    appl::grid* g = gitr->second;
+    g->trim();
+    //   g->print();
+    g->Write(s);
+  }
+  else throw_exception( "No grid with id ", id );  
+}
+
+
+
+void fillgrid_(const int& id, 
+	       const int& ix1, const int& ix2, const int& iQ,  
+	       const int& iobs, 
+	       const double* w, 
+	       const int& iorder ) { 
+  //  std::cout << "ix " << ix1 << " " << ix2 << "  iQ" << iQ << " " << iobs << "  " << iorder << std::endl;  
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr!=_grid.end() ) { 
+    gitr->second->fill_index(ix1, ix2, iQ, iobs, w, iorder);
+  }  
+  else throw_exception( "No grid with id ", id );  
+}
+
+
+void readfastnlogrids_( int* ids, const char* _s, int _len ) { 
+
+  std::string s(_s,0,_len);
+
+  /// create the fastnlo grids
+  fastnlo f(s);
+
+  /// don't want the grids managed by the fastnlo object, 
+  /// manage them in fortran with the std::map
+  f.manageGrids(false);
+
+  /// copy to the fortran accessible grid std::map
+  std::vector<appl::grid*> grids = f.grids();
+
+  //  std::cout << "hooray!" << std::endl;
+  
+  for ( unsigned i=0 ; i<grids.size() ; i++ ) { 
+    int id = idcounter++;
+    std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+    if ( gitr==_grid.end() )  { 
+      _grid.insert(  std::map<int,appl::grid*>::value_type( id, grids[i] ) );
+      // std::cout << grids[i]->getDocumentation() << std::endl;
+    }
+    else throw_exception( "grid with id ", id, " already exists" );  
+
+    ids[i] = id;
+  }  
+
+}
+
+
+void getrun_( const int& id, double& run ) { 
+  std::map<int,appl::grid*>::iterator gitr = _grid.find(id);
+  if ( gitr!=_grid.end() ) { 
+    appl::grid*    g = gitr->second;
+    run = g->run();
+  }
+  else throw_exception( "No grid with id ", id );  
+}
+
+
+void lockckm_( const int& i ) { 
+  if ( i==0 ) { 
+    std::cout << "enable ckm overwrites" << std::endl;
+    appl::appl_pdf::overwrites( true );
+  }
+  else { 
+    std::cout << "disable ckm overwrites" << std::endl;
+    appl::appl_pdf::overwrites( false );
+  }
+
+}
+
+  
+
+
Index: applgrid/tags/applgrid-01-06-36/src/hoppet_init.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/hoppet_init.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/hoppet_init.h	(revision 2010)
@@ -0,0 +1,57 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    hoppet_init.h
+ **
+ **     @author  mark sutton
+ **     @date    Sat 26 Sep 2009 13:24:26 BST 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: hoppet_init.h, v0.0   Sat 26 Sep 2009 13:24:26 BST sutt $
+ **
+ **/
+
+
+#ifndef __HOPPETINIT_H
+#define __HOPPETINIT_H
+
+#include <iostream>
+#include <vector>
+
+namespace appl { 
+
+class hoppet_init : public std::vector<double> {
+
+public:
+
+  /// default maximum scale for hoppet initialisation
+  hoppet_init( double _Qmax=15000, double _ymax=12 );
+
+  virtual ~hoppet_init();
+
+  void fillCache( void (*pdf)(const double&, const double&, double* )  );
+  
+  bool compareCache( void (*pdf)(const double&, const double&, double* )  );
+
+  static void assign( void (*pdf)(const double&, const double&, double* )  );
+
+};
+
+}
+
+inline std::ostream& operator<<( std::ostream& s, const appl::hoppet_init& _h ) { 
+  return s << "appl::hoppet_init size: " << _h.size();
+}
+
+
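+/// Illustrative usage sketch (compiled out); the pdf routine name is 
+/// hypothetical, and it is assumed that compareCache() returns true when 
+/// the cached pdf values still match:
+#if 0
+extern "C" void mypdf( const double& x, const double& Q, double* xf );
+
+inline void hoppet_init_example() { 
+  appl::hoppet_init cache;           // default Qmax = 15000, ymax = 12
+  cache.fillCache( mypdf );          // remember the pdf values at the cache nodes
+  if ( !cache.compareCache( mypdf ) ) { 
+    /// the pdf has changed - the hoppet evolution would need reinitialising
+  }
+}
+#endif
+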
+#endif  // __HOPPETINIT_H 
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/dis_pdf.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/dis_pdf.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/dis_pdf.cxx	(revision 2010)
@@ -0,0 +1,29 @@
+/**
+ **     @file    dis_pdf.cxx
+ **
+ **     @brief   pdf transform function for nlojet                   
+ **
+ **     @author  mark sutton
+ **     @date    Sat 10 11:27:04 GMT 2009 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: dis_pdf.cxx, v1.0   Sat 10 11:27:04 GMT 2009 sutt $
+ **
+ **/
+
+
+
+#include "appl_grid/appl_pdf.h" 
+
+#include "dis_pdf.h"
+
+
+extern "C" void fdis_pdf__(const double* fA, const double* fB, double* H) { 
+  static dis_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/mcfmzjet_pdf.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/mcfmzjet_pdf.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/mcfmzjet_pdf.cxx	(revision 2010)
@@ -0,0 +1,27 @@
+/**
+ **     @file    mcfmzjet_pdf.cxx
+ **
+ **     @brief   pdf transform functions for z production                  
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: mcfmzjet_pdf.cxx, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+
+
+#include "appl_grid/appl_pdf.h" 
+
+#include "mcfmzjet_pdf.h"
+
+
+// fortran callable wrapper
+extern "C" void fmcfmzjet_pdf__(const double* fA, const double* fB, double* H) { 
+  static mcfmzjet_pdf pdf;
+  pdf.evaluate(fA, fB, H);
+}
+
Index: applgrid/tags/applgrid-01-06-36/src/Sparse3d.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/Sparse3d.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/Sparse3d.h	(revision 2010)
@@ -0,0 +1,84 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    Sparse3d.h        
+ **
+ **     @brief   This uses the root TMatrixDSparse class
+ ** 
+ **              NB: THIS SHOULD NOT BE USED - it is included  
+ **                  only to allow benchmarking against the 
+ **                  root class
+ **
+ **     @author  mark sutton
+ **     @date    Thu Nov 29 19:28:51 CET 2007 sutt                
+ ** 
+ **     @copyright (C) 2007-2019 M.Sutton (sutt ! cern.ch)    
+ **
+ **     $Id: Sparse3d.h, v0.0   Thu Nov 29 19:28:51 CET 2007 sutt $
+ **/
+
+#ifndef __SPARSE3D_H
+#define __SPARSE3D_H
+
+#ifdef USEROOT
+
+#include <iostream>
+
+
+#include "TMatrixDSparse.h"
+#include "tsparse1d.h"
+#include "axis.h"
+
+
+class Sparse3d : public tsparse_base {
+
+public:
+
+  Sparse3d(int nx, double lx, double ux, 
+	   int ny, double ly, double uy, 
+	   int nz, double lz, double uz) :
+    tsparse_base(nx), 
+    mv(m_Nx), 
+    mxaxis(nx, lx, ux), 
+    myaxis(ny, ly, uy), 
+    mzaxis(nz, lz, uz) { 
+    
+    for ( int i=0 ; i<m_Nx ; i++ ) { 
+      mv(i) = new TMatrixDSparse(ny, nz);
+    }
+  }
+
+
+  ~Sparse3d() { 
+    for ( int i=m_ux-m_lx+1 ; i-- ; ) delete mv(i);
+  }
+
+  double& operator()(int i, int j, int k) { 
+    return (*mv(i))(j,k);
+  }
+
+  double operator()(int i, int j, int k) const { 
+    return (*mv(i))(j,k);
+  }
+
+private:
+  
+  tsparse1d<TMatrixDSparse*> mv;
+  
+  axis<double> mxaxis;
+  axis<double> myaxis;
+  axis<double> mzaxis;
+
+};
+
+#endif
+
+#endif  // __SPARSE3D_H 
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/nlojet_pdf.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/nlojet_pdf.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/nlojet_pdf.h	(revision 2010)
@@ -0,0 +1,70 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    nlojet_pdf.h
+ **
+ **     @brief   pdf transform functions for nlojet                   
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: nlojet_pdf.h, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+#ifndef __NLOJET_PDF_H
+#define __NLOJET_PDF_H
+
+#include "appl_grid/appl_pdf.h" 
+
+
+
+// nlojet pdf combination class 
+
+class nlojet_pdf : public appl::appl_pdf {
+
+public:
+
+  nlojet_pdf() : appl_pdf("nlojet") { m_Nproc=7; } 
+
+  void evaluate(const double* fA, const double* fB, double* H) const;
+
+};  
+
+
+inline void  nlojet_pdf::evaluate(const double* fA, const double* fB, double* H) const {  
+  
+  fA += 6;  // offset internal pointers so we can use fA[-6]..fA[6] for simplicity
+  fB += 6; 
+
+  double Q1=0, Q1bar=0, Q2=0, Q2bar=0, D=0, Dbar=0, G1=fA[0], G2=fB[0]; 
+
+  for ( int i=1  ; i<=6  ; i++ ) {  Q1    += fA[i];   Q2    += fB[i]; }
+  for ( int i=-1 ; i>=-6 ; i-- ) {  Q1bar += fA[i];   Q2bar += fB[i]; }  
+
+  // don't include the gluon (fA[0], fB[0])
+  for ( int i=-6 ; i<=6 ; i++ ) if ( i ) D += fA[i]*fB[i];
+
+  for ( int i=1 ; i<=6 ; i++ ) { 
+    Dbar += fA[i]  * fB[-i];  
+    Dbar += fA[-i] * fB[i]; 
+  }
+  
+  H[0] =  G1*G2 ;
+  H[1] =  (Q1+Q1bar)*G2 ;
+  H[2] =  G1*(Q2+Q2bar) ;
+  H[3] =  Q1*Q2+Q1bar*Q2bar-D ;
+  H[4] =  D ; 
+  H[5] =  Dbar ;
+  H[6] =  Q1*Q2bar+Q1bar*Q2-Dbar ;
+}
+
+
+extern "C" void fnlojet_pdf__(const double* fA, const double* fB, double* H);
+
+
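+/// Illustrative sketch (compiled out): with a pure-gluon toy pdf only the 
+/// gg weight H[0] survives - the values here are hypothetical:
+#if 0
+inline void nlojet_pdf_example() { 
+  double fA[13] = { 0 }, fB[13] = { 0 };
+  fA[6] = fB[6] = 1;     // gluon only (index 0 after the +6 offset in evaluate)
+  double H[7];
+  nlojet_pdf pdf;
+  pdf.evaluate( fA, fB, H );
+  /// H[0] = G1*G2 = 1,  H[1]..H[6] = 0
+}
+#endif
+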
+#endif  // __NLOJET_PDF_H
+
+
+
Index: applgrid/tags/applgrid-01-06-36/src/appl_igrid.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/appl_igrid.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/appl_igrid.cxx	(revision 2010)
@@ -0,0 +1,2790 @@
+/**
+ **     @file    appl_igrid.cxx
+ **
+ **     @brief   grid class - all the functions needed to create and 
+ **              fill the grid from an NLO calculation program. 
+ **
+ **     @author  mark sutton
+ **     @date    2007/10/16 17:01:39 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: appl_igrid.cxx, v1.00 2007/10/16 17:01:39 sutt $
+ **
+ **/
+
+#include <stdlib.h>
+#include <cstdio>
+#include <iostream>
+#include <iomanip>
+#include <cmath>
+
+#include "amconfig.h"
+
+#include "appl_igrid.h"
+#include "appl_grid/appl_grid.h"
+#include "appl_grid/appl_pdf.h"
+#include "appl_grid/appl_timer.h"
+#include "appl_grid/vector_stream.h"
+#include "appl_grid/stream_grid.h"
+
+#include "hoppet_init.h"
+#include "threadManager.h"
+
+#ifdef USEROOT
+#include "TFile.h"
+#include "TObject.h"
+#include "TObjString.h"
+#include "TH3D.h"
+#include "TVectorT.h"
+#include "TFileString.h"
+#endif
+
+#include "Splitting.h"
+
+
+#include "appl_grid/lumi_pdf.h"
+
+#include "appl_grid/stream_vector.h"
+
+// splitting function code
+
+
+// pdf reweighting
+// bool   igrid::m_reweight   = false;
+// bool   igrid::m_symmetrise = false;
+
+// variable transformation parameters
+double appl::igrid::transvar = 5;
+bool   appl::igrid::threads_disabled = true;
+
+double appl::igrid::lambda = 0.0625;
+
+int appl::igrid::range_count     = 0;
+int appl::igrid::range_count_max = 100;
+
+
+
+#if __cplusplus <= 199711L
+// #warning Not using C++11 compilation - using substitute functions
+std::string std::to_string( int i );
+std::string std::to_string( double f );
+#endif
+
+
+static int ithread = 0;
+
+std::string label( int i ) { 
+  char lab[64];
+  std::sprintf( lab, "thread-%d", i );
+  return lab;
+}
+
+
+double (*fuserscale)( double ) = 0;
+
+
+// static bool dbg = true;
+
+appl::igrid::igrid() : 
+  threadManager( label(ithread++) ),
+  mfy(0),   mfx(0),
+  m_parent(0),
+  m_Ny1(0),   m_y1min(0),   m_y1max(0),   m_deltay1(0),
+  m_Ny2(0),   m_y2min(0),   m_y2max(0),   m_deltay2(0),
+  m_yorder(0),   
+  m_Ntau(0), m_taumin(0), m_taumax(0), m_deltatau(0),   m_tauorder(0), 
+  m_Nproc(0),
+  m_transform(""), 
+  m_qtransform(""), 
+  m_transvar(transvar),
+  m_lambda(lambda),
+  m_reweight(false),
+  m_symmetrise(false),
+  m_optimised(false),
+  m_weight(0),
+  m_fg1(0),     m_fg2(0),
+  m_fsplit1(0), m_fsplit2(0),
+  m_fsplit12(0), m_fsplit22(0),
+  m_alphas(0),
+  m_taufilledmin(-1),  m_taufilledmax(-1),
+  m_y1filledmin(-1),   m_y1filledmax(-1),
+  m_y2filledmin(-1),   m_y2filledmax(-1),
+  m_partons(13)
+{ 
+  init_fmap();
+  //  std::string qtransform = "h0";
+  //  if ( m_lambda==0.25 ) qtransform = "h1";
+  set_transforms( m_transform, mfx, mfy );
+  set_transforms( m_qtransform,  mfQ2, mftau );
+
+  // std::cout << "igrid() (default) Ntau=" << m_Ntau << "\t" << fQ2(m_taumin) << " - " << fQ2(m_taumax) << std::endl;
+
+} 
+
+
+
+
+
+// standard constructor
+appl::igrid::igrid(int NQ2, double Q2min, double Q2max, int Q2order, 
+		   int Nx,  double xmin,  double xmax,  int xorder,  
+		   std::string transform, std::string qtransform,  
+		   int Nproc, bool disflag ):
+  threadManager( label(ithread++) ),
+  mfy(0),   mfx(0),
+  m_parent(0),
+  m_Ny1(Nx),   m_Ny2( disflag ? 1 : Nx ),  m_yorder(xorder), 
+  m_Ntau(NQ2), m_tauorder(Q2order), 
+  m_Nproc(Nproc), 
+  m_transform(transform), 
+  m_qtransform(qtransform), 
+  m_transvar(transvar),
+  m_lambda(lambda),
+  m_reweight(false),
+  m_symmetrise(false), 
+  m_optimised(false),
+  m_weight(0),
+  m_fg1(0),     m_fg2(0),  
+  m_fsplit1(0), m_fsplit2(0),
+  m_fsplit12(0), m_fsplit22(0),
+  m_alphas(0),
+  m_DISgrid(disflag),   
+  m_taufilledmin(-1),  m_taufilledmax(-1),
+  m_y1filledmin(-1),   m_y1filledmax(-1),
+  m_y2filledmin(-1),   m_y2filledmax(-1),
+  m_partons(13)
+{
+  //  std::cout << "igrid::igrid() transform=" << m_transform << std::endl;
+  init_fmap();
+
+  //  std::string qtransform = "h0";
+  //  if ( m_lambda==0.25 ) qtransform = "h1";
+  set_transforms( m_transform,   mfx,  mfy );
+  set_transforms( m_qtransform,  mfQ2, mftau );
+
+
+  double ymin1 = fy(xmax);
+  double ymax1 = fy(xmin);
+
+  m_y1min  = ymin1 ;
+  m_y1max  = ymax1 ;
+  m_y2min  = ymin1 ;
+  m_y2max  = ymax1 ;
+
+  if ( m_DISgrid ) m_y2min = m_y2max = 1;
+
+  if ( m_Ny1>1 ) m_deltay1 = (m_y1max-m_y1min)/(m_Ny1-1);
+  else           m_deltay1 = 0;
+  
+  if ( m_Ny2>1 ) m_deltay2 = (m_y2max-m_y2min)/(m_Ny2-1);
+  else           m_deltay2 = 0;
+  
+  double taumin=ftau(Q2min);
+  double taumax=ftau(Q2max);
+  m_taumin   = taumin;
+  m_taumax   = taumax;
+  if ( m_Ntau>1 ) m_deltatau = (taumax-taumin)/(m_Ntau-1);
+  else            m_deltatau = 0;
+
+  if ( m_Ny1-1<m_yorder ) { 
+    std::cerr << "igrid() not enough nodes for this interpolation order Ny1=" << m_Ny1 
+	      << "\tyorder=" << m_yorder << std::endl;
+ 
+    while ( m_Ny1-1<m_yorder ) m_yorder--;
+  } 
+
+  if ( !m_DISgrid ) { 
+    if ( m_Ny2-1<m_yorder ) { 
+      std::cerr << "igrid() not enough nodes for this interpolation order Ny2=" << m_Ny2 
+		<< "\tyorder=" << m_yorder << std::endl;
+      
+      while ( m_Ny2-1<m_yorder ) m_yorder--;
+    } 
+  }
+  
+  if ( m_Ntau-1<m_tauorder ) { 
+    std::cerr << "igrid() not enough nodes for this interpolation order Ntau=" << m_Ntau 
+	      << "\ttauorder=" << m_tauorder << std::endl;
+    
+    while ( m_Ntau-1<m_tauorder ) m_tauorder--;
+  } 
+  
+  m_weight = new SparseMatrix3d*[m_Nproc];
+  construct();
+
+  if ( !threads_disabled ) this->start_thread();
+}
+
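+/// Compiled-out sketch of the node layout set up by the constructor above: 
+/// the nodes are uniform in the transformed variables y = fy(x) and 
+/// tau = ftau(Q2), with spacing (max-min)/(N-1); the grid parameters used 
+/// here are hypothetical:
+#if 0
+void igrid_node_sketch() { 
+  appl::igrid g( 10, 100., 1e6, 1,   30, 1e-5, 1., 3,   "f3", "h0", 7, false );
+  /// internally: m_y1min = fy(xmax), m_y1max = fy(xmin), 
+  ///             m_deltay1  = ( m_y1max-m_y1min )/( Ny-1 ), and similarly 
+  ///             m_deltatau = ( taumax-taumin )/( Ntau-1 ) for tau = ftau(Q2)
+}
+#endif
+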
+
+
+
+
+// copy constructor
+appl::igrid::igrid(const appl::igrid& g) :
+  threadManager( label(ithread++) ), 
+  mfy(0),  mfx(0),  
+  m_parent(0),
+  m_Ny1(g.m_Ny1),     
+  m_y1min(g.m_y1min),     m_y1max(g.m_y1max),     m_deltay1(g.m_deltay1),   
+  m_Ny2(g.m_Ny2),     
+  m_y2min(g.m_y2min),     m_y2max(g.m_y2max),     m_deltay2(g.m_deltay2),   
+  m_yorder(g.m_yorder),   
+  m_Ntau(g.m_Ntau), 
+  m_taumin(g.m_taumin), m_taumax(g.m_taumax), m_deltatau(g.m_deltatau), m_tauorder(g.m_tauorder), 
+  m_Nproc(g.m_Nproc),
+  m_transform(g.m_transform), 
+  m_qtransform(g.m_qtransform), 
+  m_transvar(g.m_transvar),
+  m_lambda(g.m_lambda),
+  m_reweight(g.m_reweight),
+  m_symmetrise(g.m_symmetrise),
+  m_optimised(g.m_optimised),
+  m_weight(0),
+  m_fg1(0),     m_fg2(0),
+  m_fsplit1(0), m_fsplit2(0),
+  m_fsplit12(0), m_fsplit22(0),
+  m_alphas(0),
+  m_taufilledmin(g.m_taufilledmin),  m_taufilledmax(g.m_taufilledmax),
+  m_y1filledmin(g.m_y1filledmin),    m_y1filledmax(g.m_y1filledmax),
+  m_y2filledmin(g.m_y2filledmin),    m_y2filledmax(g.m_y2filledmax),
+  m_partons(g.m_partons)
+{
+  init_fmap();
+
+  //  std::string qtransform = "h0";
+  //  if ( m_lambda==0.25 ) qtransform = "h1";
+  set_transforms( m_transform, mfx, mfy );
+  set_transforms( m_qtransform,  mfQ2, mftau );
+
+
+  m_weight = new SparseMatrix3d*[m_Nproc];
+
+
+  for( int ip=0 ; ip<m_Nproc ; ip++ )   m_weight[ip] = new SparseMatrix3d(*g.m_weight[ip]);
+  //  construct();
+
+  if ( !threads_disabled ) this->start_thread();
+}
+
+
+
+
+void _setlimits( int& _min, int& _max, const int _mint, const int _maxt ) {  
+  if ( _mint<=_maxt ) { 
+    if ( _min==-1 || _min>_mint ) _min = _mint;
+    if ( _max==-1 || _max<_maxt ) _max = _maxt;
+  }
+}
+
+
+
+
+#ifdef USEROOT
+// read from a file 
+appl::igrid::igrid(TFile& f, const std::string& s) :
+  threadManager( label(ithread++) ), 
+  mfy(0),  mfx(0),  
+  m_parent(0),
+  m_Ny1(0),   m_y1min(0),   m_y1max(0),   m_deltay1(0),   
+  m_Ny2(0),   m_y2min(0),   m_y2max(0),   m_deltay2(0),   
+  m_yorder(0),   
+  m_Ntau(0), m_taumin(0), m_taumax(0), m_deltatau(0), m_tauorder(0), 
+  m_Nproc(0),
+  m_transform(""), 
+  m_qtransform(""), 
+  m_transvar(transvar),
+  m_lambda(lambda),
+  m_reweight(false),
+  m_symmetrise(false),
+  m_optimised(false),
+  m_weight(0), 
+  m_fg1(0),     m_fg2(0),
+  m_fsplit1(0), m_fsplit2(0),    
+  m_fsplit12(0), m_fsplit22(0),    
+  m_alphas(0), 
+  m_taufilledmin(-1),  m_taufilledmax(-1),
+  m_y1filledmin(-1),   m_y1filledmax(-1),
+  m_y2filledmin(-1),   m_y2filledmax(-1),
+  m_partons(13)
+{ 
+  //  std::cout << "igrid::igrid()" << std::endl;
+  
+  // get the name of the transform pair
+  TFileString _tag = *(TFileString*)f.Get((s+"/Transform").c_str());
+  m_transform = _tag[0];
+
+  TFileString* _qtag = (TFileString*)f.Get((s+"/QTransform").c_str());
+
+  if ( _qtag != 0 ) m_qtransform = _qtag->at(0);
+  else              m_qtransform = "h0";
+
+
+  init_fmap();
+
+  //  std::string qtransform = "h0";
+  //  TFileString* _qtag = (TFileString*)f.Get((s+"/TransformQ").c_str());
+  //  if ( _qtag ) qtransform = (*_qtag)[0];
+  
+  //  if ( m_lambda==0.25 ) qtransform = "h1";
+  set_transforms( m_transform, mfx, mfy );
+  set_transforms( m_qtransform,  mfQ2, mftau );
+
+
+  // delete _transform;
+  
+  // retrieve setup parameters 
+  TVectorT<double>* setup=(TVectorT<double>*)f.Get((s+"/Parameters").c_str());
+  //  f.GetObject((s+"/Parameters").c_str(), setup);
+
+  // NB: round integer variables to nearest integer 
+  //     in case (unlikely) truncation error during 
+  //     conversion to double when they were stored
+  m_Ny1      = int((*setup)(0)+0.5);  
+  m_y1min    = (*setup)(1);
+  m_y1max    = (*setup)(2);
+
+  m_Ny2      = int((*setup)(3)+0.5);  
+  m_y2min    = (*setup)(4);
+  m_y2max    = (*setup)(5);
+
+  m_yorder   = int((*setup)(6)+0.5);
+
+  m_Ntau     = int((*setup)(7)+0.5);
+  m_taumin   = (*setup)(8);
+  m_taumax   = (*setup)(9);
+  m_tauorder = int((*setup)(10)+0.5);
+
+  m_transvar = (*setup)(11);
+
+  m_Nproc    = int((*setup)(12)+0.5);
+
+  m_reweight   = ((*setup)(13)!=0 ? true : false );
+  m_symmetrise = ((*setup)(14)!=0 ? true : false );
+  m_optimised  = ((*setup)(15)!=0 ? true : false );
+  m_DISgrid    =  ( setup->GetNoElements()>16 && (*setup)[16]!=0 ) ;
+
+  if ( setup->GetNoElements()>17 ) m_lambda = (*setup)(17); 
+
+  if ( m_lambda==0 ) m_lambda = lambda;
+
+  delete setup;
+
+  //  std::cout << "igrid::igrid() read setup" << std::endl;
+
+  // create grids
+  m_deltay1   = (m_y1max-m_y1min)/(m_Ny1-1);
+  m_deltay2   = (m_y2max-m_y2min)/(m_Ny2-1);
+
+  m_deltatau = (m_taumax-m_taumin)/(m_Ntau-1);
+  
+  //  int rawsize=0;
+  //  int trimsize=0;
+
+  m_weight = new SparseMatrix3d*[m_Nproc];
+
+  for( int ip=0 ; ip<m_Nproc ; ip++ ) {
+    char _name[128];  sprintf(_name,"/weight[%i]", ip );
+    // get storage histogram
+    TH3D* htmp = (TH3D*)f.Get((s+_name).c_str()); 
+ 
+    //    std::cout << "igrid::igrid() read " << _name << std::endl;
+
+    // create grid
+    m_weight[ip]=new SparseMatrix3d(htmp);
+
+    // save some space
+    m_weight[ip]->trim();
+
+    // delete storage histogram
+    delete htmp;
+
+    // DON'T trim on reading unless the user wants it!!
+    // they can trim it themselves if need be!! 
+    // rawsize += m_weight[ip]->size();
+    // m_weight[ip]->trim(); // trim the grid and do some book keeping
+    // trimsize += m_weight[ip]->size();
+  }
+
+  /// now calculate the actual limits of this grid
+  
+  //  static double _ctime = 0;
+
+  setlimits();
+
+  if ( !threads_disabled ) this->start_thread();
+
+}
+#endif
+
+
+
+
+
+
+// read from a file 
+appl::igrid::igrid( appl::file& file, const std::string& s) :
+  threadManager( label(ithread++) ), 
+  mfy(0),  mfx(0),  
+  m_parent(0),
+  m_Ny1(0),   m_y1min(0),   m_y1max(0),   m_deltay1(0),   
+  m_Ny2(0),   m_y2min(0),   m_y2max(0),   m_deltay2(0),   
+  m_yorder(0),   
+  m_Ntau(0), m_taumin(0), m_taumax(0), m_deltatau(0), m_tauorder(0), 
+  m_Nproc(0),
+  m_transform(""), 
+  m_qtransform(""), 
+  m_transvar(transvar),
+  m_lambda(lambda),
+  m_reweight(false),
+  m_symmetrise(false),
+  m_optimised(false),
+  m_weight(0), 
+  m_fg1(0),     m_fg2(0),
+  m_fsplit1(0), m_fsplit2(0),    
+  m_fsplit12(0), m_fsplit22(0),    
+  m_alphas(0), 
+  m_taufilledmin(-1),  m_taufilledmax(-1),
+  m_y1filledmin(-1),   m_y1filledmax(-1),
+  m_y2filledmin(-1),   m_y2filledmax(-1),
+  m_partons(13)
+{ 
+  // get the name of the transform pair
+  
+  stream_vector<std::string> tag = file.Read<stream_vector<std::string> >( s+"/Transform" );
+  // stream_vector<std::string> tag; file.Read(tag);
+
+  m_transform = tag[0];
+
+  stream_vector<std::string> qtag = file.Read<stream_vector<std::string> >( s+"/QTransform" );
+  // stream_vector<std::string> qtag; file.Read( qtag );
+
+  if ( qtag.size()>0 ) m_qtransform = qtag.at(0);
+  else                 m_qtransform = "h0";
+
+  init_fmap();
+  
+  //  if ( m_lambda==0.25 ) qtransform = "h1";
+  set_transforms( m_transform,  mfx,  mfy );
+  set_transforms( m_qtransform, mfQ2, mftau );
+
+
+  // delete _transform;
+  
+  // retrieve setup parameters 
+
+  stream_vector<double> setup = file.Read<stream_vector<double> >( s+"/Parameters" );
+  // stream_vector<double> setup; file.Read( setup );
+  
+  // NB: round integer variables to the nearest integer 
+  //     in case of (unlikely) truncation errors during the 
+  //     conversion to double when they were stored
+  m_Ny1      = int(setup[0]+0.5);  
+  m_y1min    = setup[1];
+  m_y1max    = setup[2];
+  
+  m_Ny2      = int(setup[3]+0.5);  
+  m_y2min    = setup[4];
+  m_y2max    = setup[5];
+  
+  m_yorder   = int(setup[6]+0.5);
+  
+  m_Ntau     = int(setup[7]+0.5);
+  m_taumin   = setup[8];
+  m_taumax   = setup[9];
+  m_tauorder = int(setup[10]+0.5);
+  
+  m_transvar = setup[11];
+  
+  m_Nproc    = int(setup[12]+0.5);
+  
+  m_reweight   = (setup[13]!=0 ? true : false );
+  m_symmetrise = (setup[14]!=0 ? true : false );
+  m_optimised  = (setup[15]!=0 ? true : false );
+  m_DISgrid    = ( setup.size()>16 && setup[16]!=0 ) ;
+  
+  m_lambda = setup[17]; 
+  
+  if ( m_lambda==0 ) m_lambda = lambda;
+  
+  //  std::cout << "igrid::igrid() read setup" << std::endl;
+  
+  // create grids
+  m_deltay1   = (m_y1max-m_y1min)/(m_Ny1-1);
+  m_deltay2   = (m_y2max-m_y2min)/(m_Ny2-1);
+  
+  m_deltatau = (m_taumax-m_taumin)/(m_Ntau-1);
+  
+  //  int rawsize=0;
+  //  int trimsize=0;
+ 
+  m_weight = new SparseMatrix3d*[m_Nproc];
+  
+  for( int ip=0 ; ip<m_Nproc ; ip++ ) {
+    char _name[128];  sprintf(_name,"%s/weight[%i]", s.c_str(), ip );
+
+    // get storage histogram
+ 
+    stream_grid htmp = file.Read<stream_grid>( _name );
+    // stream_grid htmp; file.Read( htmp );
+
+    // create grid
+    m_weight[ip]=new SparseMatrix3d(htmp);
+
+    // save some space
+    m_weight[ip]->trim();
+
+    // delete storage histogram
+
+    // DON'T trim on reading unless the user wants it!!
+    // they can trim it themselves if need be!! 
+    // rawsize += m_weight[ip]->size();
+    // m_weight[ip]->trim(); // trim the grid and do some book keeping
+    // trimsize += m_weight[ip]->size();
+  }
+
+  /// now calculate the actual limits of this grid
+  
+  //  static double _ctime = 0;
+
+  setlimits();
+
+  if ( !threads_disabled ) this->start_thread();
+
+
+}
+
+
+
+
+
+
+
+void appl::igrid::setlimits() { 
+  if ( m_weight ) { 
+    //   struct timeval _tstart = appl_timer_start(); 
+    for ( int i=0 ; i<m_Nproc ; i++ ) {
+      const SparseMatrix3d* _weight = m_weight[i];
+      if ( _weight ) { 
+	if ( _weight->empty() ) continue;
+	_setlimits( m_taufilledmin, m_taufilledmax,  _weight->xmin(),  _weight->xmax() ); 
+	_setlimits( m_y1filledmin,  m_y1filledmax,   _weight->ymin(),  _weight->ymax() ); 
+	_setlimits( m_y2filledmin,  m_y2filledmax,   _weight->zmin(),  _weight->zmax() ); 
+      }
+    }
+  }
+}    
+
+
+
+// constructor common internals 
+void appl::igrid::construct() 
+{
+  // Initialize histograms representing the weight grid
+  for( int ip=0 ; ip<m_Nproc ; ip++ ) {
+    m_weight[ip] = new SparseMatrix3d(m_Ntau, m_taumin,   m_taumax,    
+    				      m_Ny1,  m_y1min,    m_y1max, 
+    				      m_Ny2,  m_y2min,    m_y2max ); 
+    
+  }  
+}
+
+
+// destructor
+appl::igrid::~igrid() {
+  deletepdftable();
+  deleteweights();
+}
+
+
+
+
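+// helper: delete a 3d array allocated as Nx blocks of Ny rows of doubles 
+// (each level allocated with new[]); returns 0 so that the caller can 
+// reset the pointer in one go, eg  a = delete_array( a, Nx, Ny );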
+double*** delete_array( double*** a, unsigned Nx, unsigned Ny ) { 
+  if ( a ) { 
+    for ( unsigned i=0 ; i<Nx ; i++ ) {
+      if ( a[i] ) for ( unsigned j=0 ; j<Ny ; j++ )  delete[] a[i][j];
+      delete[] a[i];
+    }
+    delete[] a;
+  }
+  return 0;
+}
+
+
+// clean up internal pdf table (could be moved to local variable)
+void appl::igrid::deletepdftable() { 
+
+}
+
+
+
+
+
+// delete internal grids
+void appl::igrid::deleteweights() { 
+  if ( m_weight ) { 
+    for ( int ip=0 ; ip<m_Nproc ; ip++ ) if (m_weight[ip]) delete m_weight[ip];
+    delete[] m_weight;
+    // m_weight=0;
+  }
+}
+
+
+
+#ifdef USEROOT
+
+// write to file
+void appl::igrid::write( const std::string& _name ) { 
+  Directory d(_name);
+  d.push();
+
+  // using a TH1D to store the transform pair tag since I don't know how 
+  // to write a TString to a root file
+  // TH1D* _transform = new TH1D("Transform", m_transform.c_str(), 1, 0, 1);
+  //  _transform->Write();
+
+  // write the name of the transform pair
+  TFileString("Transform", m_transform).Write();
+  TFileString("QTransform", m_qtransform).Write();
+
+
+  TVectorT<double>* setup=new TVectorT<double>(20); // a few spare
+
+  (*setup)(0)  = m_Ny1;
+  (*setup)(1)  = m_y1min;
+  (*setup)(2)  = m_y1max;
+
+  (*setup)(3)  = m_Ny2;
+  (*setup)(4)  = m_y2min;
+  (*setup)(5)  = m_y2max;
+
+  (*setup)(6)  = m_yorder;
+
+  (*setup)(7)  = m_Ntau;
+  (*setup)(8)  = m_taumin;
+  (*setup)(9)  = m_taumax;
+  (*setup)(10) = m_tauorder;
+
+  (*setup)(11) = m_transvar;
+
+  (*setup)(12) = m_Nproc;
+ 
+  (*setup)(13) = ( m_reweight   ? 1 : 0 );
+  (*setup)(14) = ( m_symmetrise ? 1 : 0 );
+  (*setup)(15) = ( m_optimised  ? 1 : 0 );
+  (*setup)(16) = ( m_DISgrid    ? 1 : 0 );
+
+  (*setup)(17) = m_lambda;
+
+  setup->Write("Parameters");
+
+  int igridsize     = 0;
+  int igridtrimsize = 0;
+
+  for ( int ip=0 ; ip<m_Nproc ; ip++ ) { 
+
+    char hname[128];
+    //    sprintf(hname,"%s[%d]", _name.c_str(), ip);
+    sprintf(hname,"weight[%d]", ip);
+
+    //    int oldsize = m_weight[ip]->size();
+
+    igridsize += m_weight[ip]->size();
+ 
+    // trim it so that it's quicker to copy into the TH3D
+    m_weight[ip]->trim();
+
+    igridtrimsize += m_weight[ip]->size();
+
+    TH3D* h=m_weight[ip]->getTH3D(hname);
+    h->SetDirectory(0);
+    h->Write();
+    delete h; // is this dangerous??? will root try to delete it later? I guess not if we SetDirectory(0)
+  }
+
+#if 0
+  //    std::cout << _name << " trimmed" << std::endl;
+  std::cout << _name << "\tsize=" << igridsize << "\t-> " << igridtrimsize;
+  if ( igridsize ) std::cout << "\t( " << igridtrimsize*100/igridsize << "% )";
+  std::cout << std::endl;
+#endif
+
+  d.pop();
+}
+
+#endif
+
+
+
+
+
+// write to file
+void appl::igrid::write( appl::file& _file, const std::string& _name) { 
+
+  // write the name of the transform pair
+
+  _file.Write( stream_vector<std::string>( _name+"/Transform",  std::vector<std::string>(1,m_transform) ) );
+  _file.Write( stream_vector<std::string>( _name+"/QTransform", std::vector<std::string>(1,m_qtransform) ) );
+
+  std::vector<double> setup(20); // a few spare
+
+  setup[0]  = m_Ny1;
+  setup[1]  = m_y1min;
+  setup[2]  = m_y1max;
+
+  setup[3] = m_Ny2;
+  setup[4]  = m_y2min;
+  setup[5]  = m_y2max;
+
+  setup[6]  = m_yorder;
+
+  setup[7]  = m_Ntau;
+  setup[8]  = m_taumin;
+  setup[9]  = m_taumax;
+  setup[10] = m_tauorder;
+
+  setup[11] = m_transvar;
+
+  setup[12] = m_Nproc;
+ 
+  setup[13] = ( m_reweight   ? 1 : 0 );
+  setup[14] = ( m_symmetrise ? 1 : 0 );
+  setup[15] = ( m_optimised  ? 1 : 0 );
+  setup[16] = ( m_DISgrid    ? 1 : 0 );
+
+  setup[17] = m_lambda;
+
+  _file.Write( stream_vector<double>( _name+"/Parameters", setup ) );
+
+  int igridsize     = 0;
+  int igridtrimsize = 0;
+
+  for ( int ip=0 ; ip<m_Nproc ; ip++ ) { 
+
+    char hname[128];
+    //    sprintf(hname,"%s[%d]", _name.c_str(), ip);
+    sprintf(hname,"%s/weight[%d]", _name.c_str(), ip);
+
+    //    int oldsize = m_weight[ip]->size();
+
+    igridsize += m_weight[ip]->size();
+ 
+    // trim it so that it's quicker to copy into the TH3D
+    m_weight[ip]->trim();
+
+    igridtrimsize += m_weight[ip]->size();
+
+    stream_grid* h = m_weight[ip]->get(hname);
+
+    //    std::cout << *h << std::endl;
+
+    
+    _file.Write( *h );
+
+    delete h; 
+  }
+
+#if 0
+  //    std::cout << _name << " trimmed" << std::endl;
+  std::cout << _name << "\tsize=" << igridsize << "\t-> " << igridtrimsize;
+  if ( igridsize ) std::cout << "\t( " << igridtrimsize*100/igridsize << "% )";
+  std::cout << std::endl;
+#endif
+
+  //  d.pop();
+
+
+}
+
+
+
+
+
+void appl::igrid::fill_DIS(const double x1, const double Q2, const double* weight) 
+{  
+
+  // find preferred node for low end of interpolation range
+  int k1=fk1(x1);
+  int k3=fkappa(Q2);
+  
+  double u_y1  = ( fy(x1)-gety1(k1) )/deltay1();
+  double u_tau = ( ftau(Q2)-gettau(k3))/deltatau();
+  
+  double fillweight, fI_factor;
+  
+  // cache the interpolation coefficients so we only 
+  // have to calculate each one once 
+  // NB: a (very naughty) hardcoded maximum interpolation order 
+  //     of 16 for code simplicity.
+  double _fI1[16];
+  double _fI3[16];
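+  // a minimal guard (not in the original code), assuming <cassert> were included:
+  //   assert( m_yorder < 16 && m_tauorder < 16 );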
+  
+  for( int i1=0 ; i1<=m_yorder   ; i1++ )  _fI1[i1] = fI(i1, m_yorder, u_y1);
+  for( int i3=0 ; i3<=m_tauorder ; i3++ )  _fI3[i3] = fI(i3, m_tauorder, u_tau);
+  
+  double invwfun = 1/(weightfun(x1));
+	
+  //  double fI_tmp0 = 0;
+  //  double fI_tmp1 = 0;
+
+  for( int i3=0 ; i3<=m_tauorder ; i3++ ) { // "interpolation" loop in Q2
+
+    //	  (*m_weight[ip])(k3+i3, k1+i1, k2+i2) += fillweight;	  
+
+    //    if ( m_reweight ) fI_tmp0 = _fI3[i3]*invwfun;
+    //    else              fI_tmp0 = _fI3[i3];
+
+    for( int i1=0 ; i1<=m_yorder ; i1++ )   { // "interpolation" loop in x1      
+
+      //      fI_tmp1 = _fI1[i1] * fI_tmp0;
+      
+      fI_factor = _fI1[i1] * _fI3[i3];
+
+      if ( m_reweight ) fI_factor *= invwfun;
+						 						 
+      for( int ip=0 ; ip<m_Nproc ; ip++ ) {
+	  
+	fillweight = weight[ip] * fI_factor;
+
+	// this method only works for grids where the x1, x2 axes have identical 
+	// ranges and binning 
+	//  if ( m_symmetrise ) { 
+	//	// symmetrise the grid
+	//    if ( abs(k1+i1)<abs(k2+i2) )  (*m_weight[ip])(k3+i3, k2+i2, k1+i1) += fillweight;   
+	//    else                          (*m_weight[ip])(k3+i3, k1+i1, k2+i2) += fillweight;  
+	//  } 
+	
+	//	  std::cout << " (" << (*m_weight[ip])(k3+i3, k1+i1, k2+i2) << " -> ";
+	
+	(*m_weight[ip])(k3+i3, k1+i1, 0) += fillweight;	  
+	
+	//	  std::cout << (*m_weight[ip])(k3+i3, k1+i1, k2+i2) << ")";
+	
+	//	  m_weight[ip]->print();	  
+	//	  m_weight[ip]->fill_fast(k3+i3, k1+i1, k2+i2) += fillweight/wfun;	  
+      }
+      
+    }
+  }
+}
+
+
+
+
+
+void appl::igrid::fill(const double x1, const double x2, const double Q2, const double* weight) 
+{  
+  if ( isDISgrid() ) return fill_DIS( x1, Q2, weight );
+
+  // find preferred node for low end of interpolation range
+  int k1=fk1(x1);
+  int k2=fk2(x2);
+  int k3=fkappa(Q2);
+  
+  double u_y1  = ( fy(x1)-gety1(k1) )/deltay1();
+  double u_y2  = ( fy(x2)-gety2(k2) )/deltay2();
+  double u_tau = ( ftau(Q2)-gettau(k3))/deltatau();
+  
+  double fillweight, fI_factor;
+  
+  // cache the interpolation coefficients so we only 
+  // have to calculate each one once 
+  // NB: a (very naughty) hardcoded maximum interpolation order 
+  //     of 16 for code simplicity.
+  double _fI1[16];
+  double _fI2[16];
+  double _fI3[16];
+  
+  for( int i1=0 ; i1<=m_yorder   ; i1++ )  _fI1[i1] = fI(i1, m_yorder, u_y1);
+  for( int i2=0 ; i2<=m_yorder   ; i2++ )  _fI2[i2] = fI(i2, m_yorder, u_y2);
+  for( int i3=0 ; i3<=m_tauorder ; i3++ )  _fI3[i3] = fI(i3, m_tauorder, u_tau);
+  
+  double invwfun = 1/(weightfun(x1)*weightfun(x2));  
+	
+  //  double fI_tmp0 = 0;
+  //  double fI_tmp1 = 0;
+
+  for( int i3=0 ; i3<=m_tauorder ; i3++ ) { // "interpolation" loop in Q2
+
+    //	  (*m_weight[ip])(k3+i3, k1+i1, k2+i2) += fillweight;	  
+
+    //    if ( m_reweight ) fI_tmp0 = _fI3[i3]*invwfun;
+    //    else              fI_tmp0 = _fI3[i3];
+
+    for( int i1=0 ; i1<=m_yorder ; i1++ )   { // "interpolation" loop in x1      
+
+      //      fI_tmp1 = _fI1[i1] * fI_tmp0;
+      
+      for( int i2=0 ; i2<=m_yorder ; i2++ ) { // "interpolation" loop in x2
+	
+	// fI_factor = fI_tmp1 * _fI2[i2];
+
+	fI_factor = _fI1[i1] * _fI2[i2] * _fI3[i3];
+
+	if ( m_reweight ) fI_factor *= invwfun;
+						 						 
+	for( int ip=0 ; ip<m_Nproc ; ip++ ) {
+	  
+	  fillweight = weight[ip] * fI_factor;
+
+	  // this method only works for grids where the x1, x2 axes have identical 
+	  // ranges and binning 
+	  //  if ( m_symmetrise ) { 
+	  //	// symmetrise the grid
+	  //    if ( abs(k1+i1)<abs(k2+i2) )  (*m_weight[ip])(k3+i3, k2+i2, k1+i1) += fillweight;   
+	  //    else                          (*m_weight[ip])(k3+i3, k1+i1, k2+i2) += fillweight;  
+	  //  } 
+
+	  //	  std::cout << " (" << (*m_weight[ip])(k3+i3, k1+i1, k2+i2) << " -> ";
+
+	  (*m_weight[ip])(k3+i3, k1+i1, k2+i2) += fillweight;	  
+	 	  
+	  //	  std::cout << (*m_weight[ip])(k3+i3, k1+i1, k2+i2) << ")";
+
+	  //	  m_weight[ip]->print();	  
+	  //	  m_weight[ip]->fill_fast(k3+i3, k1+i1, k2+i2) += fillweight/wfun;	  
+	}
+
+	//	std::cout << std::endl;
+      }
+    }
+  }
+}
+
+
+void appl::igrid::fill_phasespace(const double x1, const double x2, const double Q2, const double* weight) { 
+
+  if ( isDISgrid() ) return fill_DIS_phasespace( x1, Q2, weight ); 
+
+  int k1=fk1(x1);
+  int k2=fk2(x2);
+  int k3=fkappa(Q2);
+
+  //  std::cout << "appl::igrid::fill_phasespace() k: " << k1 << " " << k2 << " " << k3 << std::endl;
+  //  std::cout << "\tweights: ";
+
+  //  for ( int ip=0 ; ip<m_Nproc ; ip++ ) std::cout << " " << weight[ip];
+  //  std::cout << std::endl;
+
+  for ( int ip=0 ; ip<m_Nproc ; ip++ ) (*m_weight[ip])(k3, k1, k2) += weight[ip];
+
+  //  for ( int ip=0 ; ip<m_Nproc ; ip++ ) std::cout << "\t" << ip << "\tsparse size " << m_weight[ip]->size() << std::endl; 
+
+  //  for ( int ip=0 ; ip<m_Nproc ; ip++ ) std::cout << "\t" << ip << "\tweight " << (*m_weight[ip])(k3, k1, k2) << std::endl; 
+
+} 
+
+
+
+void appl::igrid::fill_DIS_phasespace(const double x1, const double Q2, const double* weight) { 
+
+  int k1=fk1(x1);
+  int k3=fkappa(Q2);
+
+  //  std::cout << "appl::igrid::fill_phasespace() k: " << k1 << " " << k2 << " " << k3 << std::endl;
+  //  std::cout << "\tweights: ";
+
+  //  for ( int ip=0 ; ip<m_Nproc ; ip++ ) std::cout << " " << weight[ip];
+  //  std::cout << std::endl;
+
+  for ( int ip=0 ; ip<m_Nproc ; ip++ ) (*m_weight[ip])(k3, k1, 0) += weight[ip];
+
+  //  for ( int ip=0 ; ip<m_Nproc ; ip++ ) std::cout << "\t" << ip << "\tsparse size " << m_weight[ip]->size() << std::endl; 
+
+  //  for ( int ip=0 ; ip<m_Nproc ; ip++ ) std::cout << "\t" << ip << "\tweight " << (*m_weight[ip])(k3, k1, k2) << std::endl; 
+
+} 
+
+
+
+
+
+void appl::igrid::fill_index(const int ix1, const int ix2, const int iQ2, const double* weight) { 
+
+  //  int k1=ix1;
+  //  int k2=ix2;
+  //  int k3=iQ2;
+
+  //  for ( int ip=0 ; ip<m_Nproc ; ip++ ) (*m_weight[ip])(i3, k1, k2) += weight[ip];
+
+  for ( int ip=0 ; ip<m_Nproc ; ip++ ) (*m_weight[ip])(iQ2, ix1, ix2) += weight[ip];
+
+} 
+
+
+
+
+void appl::igrid::setuppdf(double (*alphas)(const double&),
+			   NodeCache* pdf0,
+			   NodeCache* pdf1,
+			   int _nloop,
+			   double rscale_factor,
+			   double fscale_factor,
+			   double beam_scale0, 
+			   double beam_scale1 ) 
+{
+
+  /// if this igrid is empty, don't do anything 
+
+  //  std::cout << m_taufilledmin << " " << m_taufilledmax << " " 
+  //	        << m_y1filledmin  << " " << m_y1filledmax  << " " 
+  //	        << m_y2filledmin  << " " << m_y2filledmax  << std::endl;
+
+  if ( ( m_taufilledmin==-1 || m_taufilledmax==-1 ) ||
+       (  m_y1filledmin==-1 ||  m_y1filledmax==-1 ) ) return;
+
+  if ( !isDISgrid() && (  m_y2filledmin==-1 ||  m_y2filledmax==-1 ) ) return; 
+  
+
+  int nloop = std::fabs(_nloop);
+
+  if ( pdf1==0 ) pdf1 = pdf0;
+
+  bool initialise_hoppet = false;
+
+
+  bool use_split = (  ( fscale_factor != 1 )  || fuserscale );
+
+#ifdef HAVE_HOPPET
+  // check if we need to use the splitting function, and if so see if we 
+  // need to initialise it again, and do so if required ???
+  if ( use_split ) {
+    if ( pdf1 && pdf1!=pdf0 ) initialise_hoppet = true;
+  }
+#endif
+
+
+  void (*splitting)(const double& , const double&, double*, int ) = Splitting;
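+  // the final integer argument passed to the splitting function selects which 
+  // convolution is returned:  1 -> P_LO x pdf,  2 -> P_NLO x pdf, 
+  // 11 -> P_LO x P_LO x pdf  (see the calls further down)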
+		     
+  //  static const double twopi = 2*M_PI;
+
+  // static const int nc = 3;
+  //// TC   const int nf = 6;
+  //  static const int nf = 5;
+  //  static double beta0=(11.*nc - 2.*nf)/(6*twopi);
+  //  static double beta0=(11.*nc - 2.*nf)/6.;
+  
+  /// not needed here ?? 
+  //  static double beta1=(34.*nc*nc - 3.*(nc-1./nc)*nf - 10.*nc*nf)/(6.*twopi);
+
+  double logf_save = std::log( fscale_factor*fscale_factor );
+  //  double logr = std::log( rscale_factor*rscale_factor );
+
+  double logf = logf_save;
+
+  // pdf table
+
+  const double invtwopi = 0.5/(M_PI);
+
+  m_alphas = std::vector<double>( Ntau(), 0 );
+  
+  m_fg1 = std::vector< std::vector< std::vector<double> > >( Ntau(), std::vector<std::vector<double> >(Ny1(), std::vector<double>(14,0) ) );  
+  if ( !isSymmetric() && !isDISgrid() ) { 
+    m_fg2 = std::vector< std::vector< std::vector<double> > >( Ntau(), std::vector<std::vector<double> >(Ny2(), std::vector<double>(14,0) ) );  
+  }
+  
+  if ( nloop>=1 && use_split ) { 
+    m_fsplit1 = std::vector< std::vector< std::vector<double> > >( Ntau(), std::vector<std::vector<double> >(Ny1(), std::vector<double>(14,0) ) );  
+    if ( !isSymmetric() && !isDISgrid() ) { 
+      m_fsplit2 = std::vector< std::vector< std::vector<double> > >( Ntau(), std::vector<std::vector<double> >(Ny2(), std::vector<double>(14,0) ) );  
+    }
+    if ( nloop==2 ) { 
+      m_fsplit12 = std::vector< std::vector< std::vector<double> > >( Ntau(), std::vector<std::vector<double> >(Ny1(), std::vector<double>(14,0) ) );  
+      if ( !isSymmetric() && !isDISgrid() ) { 
+	m_fsplit22 = std::vector< std::vector< std::vector<double> > >( Ntau(), std::vector<std::vector<double> >(Ny2(), std::vector<double>(14,0) ) );  
+      }
+    }
+  }
+
+
+  const int n_y1  = Ny1();
+  const int n_y2  = Ny2();
+
+  
+  bool scale_beams = false;
+  if ( beam_scale0!=1 || beam_scale1!=1 ) scale_beams = true;
+
+  if ( initialise_hoppet ) hoppet_init::assign( pdf0->pdf() );
+  
+  // set up pdf grid, splitting function grid 
+  // and alpha_s grid
+  //  for ( int itau=m_Ntau ; itau-- ; ) {
+  for ( int itau=0 ; itau<m_Ntau ; itau++  ) {
+    
+    if ( itau<m_taufilledmin || itau>m_taufilledmax ) continue;
+
+    double tau = gettau(itau);
+    double Q2  = fQ2(tau);
+    double fQ  = std::sqrt(Q2); 
+    
+    double Q = fQ;
+
+#if 1
+    /// not implemented yet ...
+    if ( fuserscale ) { 
+      Q = fuserscale(fQ);
+      double scale_factor = Q/fQ;
+      logf = logf_save + 2*std::log(scale_factor);  
+    }
+#endif
+
+    // alpha_s table 
+    m_alphas[itau] = alphas(rscale_factor*Q)*invtwopi;
+
+#if 0
+
+    std::cout << itau << "\ttau " << tau 
+	      << "\tQ2 " << Q2 << "\tQ " << Q 
+	      << "\talphas " << m_alphas[itau] << std::endl; 
+
+
+    int iymin1 = m_weight[0]->ymin();
+    int iymax1 = m_weight[0]->ymax();
+    
+    if ( isSymmetric() ) { 
+      int iymin2 = iymin1;
+      int iymax2 = iymax1;
+    }
+    else {
+      int iymin2 = m_weight[0]->zmin();
+      int iymax2 = m_weight[0]->zmax();
+    }  
+#endif
+
+    // grid not filled for iy<iymin || iy>iymax so no need to 
+    // consider outside this range
+    
+    // y1 tables
+    for ( int iy=n_y1 ; iy-- ;  ) { 
+      
+      if ( iy<m_y1filledmin || iy>m_y1filledmax ) continue;
+
+      double y = gety1(iy);
+      double x = fx(y);
+      double fun = 1;
+
+#if 0      
+      static bool ff = true;
+
+      if ( ff ) { std::cout << "ff: " << m_reweight << " " << fun << std::endl; ff = false; } 
+
+      if ( itau==0 ) { 
+	std::cout << iy << "  y: " << y << "  x: " << x << std::endl; 
+      }
+#endif
+
+      if ( scale_beams ) x *= beam_scale0;
+
+      if ( x>=1 ) { 
+	for ( int ip=0 ; ip<m_partons ; ip++ ) m_fg1[itau][iy][ip]=0; 
+	if ( nloop>=1 && use_split ) { 
+	  for ( int ip=0 ; ip<m_partons ; ip++ ) m_fsplit1[itau][iy][ip] = 0;
+	  if (nloop==2)
+	    for ( int ip=0 ; ip<m_partons ; ip++ ) m_fsplit12[itau][iy][ip] = 0;
+	}
+	continue; 
+      }    
+
+      if ( m_reweight ) fun = weightfun(x);
+
+      pdf0->evaluate(x, fscale_factor*Q, m_fg1[itau][iy]);
+      
+      //      std::cout << "PDF1: itau " << itau << "   Q: " << Q << " ( " << fscale_factor*Q << " )    iy1: " << iy << "   x: " << x << "  g-pdf: " << m_fg1[itau][iy][6] << " - " << (m_fg1[itau][iy][6]/x) << std::endl; 
+     
+      double invx = 1/x;
+      for ( int ip=0 ; ip<m_partons ; ip++ ) m_fg1[itau][iy][ip] *= invx;
+      if ( m_reweight ) for ( int ip=0 ; ip<m_partons ; ip++ ) m_fg1[itau][iy][ip] *= fun;
+      
+      // splitting function table
+      if ( nloop>=1 && use_split ) { 
+
+	double PLO[14]; for ( int ip=14 ; ip-- ; ) PLO[ip] = 0;  
+
+	splitting( x, fscale_factor*Q, PLO, 1 ); /// PLO x pdf	
+
+	for ( int ip=m_partons ; ip-- ; ) m_fsplit1[itau][iy][ip] = PLO[ip]*invx;
+
+	if ( m_reweight ) for ( int ip=m_partons ; ip-- ; ) m_fsplit1[itau][iy][ip] *= fun;
+
+	if (nloop>1) {
+	  //	splitting( x, fscale_factor*Q, m_fsplit12[itau][iy], 1 ); placeholder for hoppet call
+
+	  double P2LO[14]; for ( int ip=14 ; ip-- ; ) P2LO[ip] = 0;  
+
+	  double PNLO[14]; for ( int ip=14 ; ip-- ; ) PNLO[ip] = 0;  
+
+	  splitting( x, fscale_factor*Q, P2LO, 11 ); /// PLO x PLO x pdf
+	  splitting( x, fscale_factor*Q, PNLO, 2 );  /// PNLO x pdf
+
+	  //	  for ( int ip=0 ; ip<13 ; ip++ ) m_fsplit12[itau][iy][ip] = logf*(beta0*(0.5*logf - logr)*PLO[ip] + 0.5*logf*P2LO[ip] - PNLO[ip] )*invx ;
+	  //	  for ( int ip=0 ; ip<13 ; ip++ ) m_fsplit12[itau][iy][ip] = logf*(beta0*(0.5*logf - logr)*PLO[ip] + 0.5*logf*P2LO[ip] - PNLO[ip] )*invx ;
+
+	  for ( int ip=0 ; ip<m_partons ; ip++ ) m_fsplit12[itau][iy][ip] = logf*( 0.5*logf*P2LO[ip] - PNLO[ip] )*invx ;
+
+	  if ( m_reweight ) for ( int ip=0 ; ip<m_partons ; ip++ ) m_fsplit12[itau][iy][ip] *= fun;
+
+	}
+      }
+    }
+  }
+
+  if ( initialise_hoppet ) hoppet_init::assign( pdf1->pdf() );
+  
+  if ( ( !isSymmetric() && !isDISgrid() ) || ( pdf1 && pdf1!=pdf0 ) ) {
+    
+    for ( int itau=0 ; itau<m_Ntau ; itau++  ) {
+ 
+      if ( itau<m_taufilledmin || itau>m_taufilledmax ) continue;
+
+      double tau = gettau(itau);
+      double Q2  = fQ2(tau);
+      double fQ  = std::sqrt(Q2); 
+    
+      double Q = fQ;
+
+#if 1     
+      /// not implemented yet ...
+      if ( fuserscale ) { 
+	Q = fuserscale(fQ);
+	double scale_factor = Q/fQ;
+	logf = logf_save + 2*std::log(scale_factor);  
+      }
+#endif     
+ 
+      /// alpha_s table has already been filled  
+      /// m_alphas[itau] = alphas(rscale_factor*Q)*invtwopi;
+      
+      // y2 tables
+      //    for ( int iy=iymin2 ; iy<=iymax2 ; iy++ ) { 
+      for ( int iy=n_y2 ; iy-- ;  ) { 
+
+	if ( iy<m_y2filledmin || iy>m_y2filledmax ) continue;
+		
+	double y = gety2(iy);
+	double x = fx(y);
+	double fun = 1;
+
+	if ( scale_beams ) x *= beam_scale1;
+	
+	if ( x>=1 ) { 
+	  for ( int ip=0 ; ip<m_partons ; ip++ ) m_fg2[itau][iy][ip]=0; 
+	  if ( nloop>=1 && use_split ) { 
+	    for ( int ip=0 ; ip<m_partons ; ip++ ) m_fsplit2[itau][iy][ip] = 0;
+	    if (nloop==2)
+	      for ( int ip=0 ; ip<m_partons ; ip++ ) m_fsplit22[itau][iy][ip] = 0;
+	  }
+	  continue; 
+	}
+      
+	if (m_reweight) fun = weightfun(x);
+	      
+	pdf1->evaluate(x, fscale_factor*Q, m_fg2[itau][iy]);
+
+	//	std::cout << "PDF2: itau " << itau << "   Q: " << Q << " ( " << fscale_factor*Q << " )    iy1: " << iy << "   x: " << x << "  g-pdf: " << m_fg2[itau][iy][6] << " - " << (m_fg2[itau][iy][6]/x) << std::endl; 
+	
+	double invx = 1/x;
+	for ( int ip=0 ; ip<m_partons ; ip++ ) m_fg2[itau][iy][ip] *= invx;
+	if ( m_reweight ) for ( int ip=0 ; ip<m_partons ; ip++ ) m_fg2[itau][iy][ip] *= fun;      
+	
+	// splitting functions
+	if ( nloop>=1 && use_split ) { 
+
+	  double PLO[14]; for ( int ip=14 ; ip-- ; ) PLO[ip] = 0;  
+	  splitting( x, fscale_factor*Q, PLO, 1 ); /// PLO x pdf
+	  for ( int ip=m_partons ; ip-- ; ) m_fsplit2[itau][iy][ip] = PLO[ip]*invx;
+	  if ( m_reweight ) for ( int ip=m_partons ; ip-- ; ) m_fsplit2[itau][iy][ip] *= fun;
+	  
+	  if (nloop > 1) {
+	    //splitting( x, fscale_factor*Q, m_fsplit22[itau][iy], 1 );
+ 
+	    double P2LO[14]; for ( int ip=14 ; ip-- ; ) P2LO[ip] = 0;  
+	    double PNLO[14]; for ( int ip=14 ; ip-- ; ) PNLO[ip] = 0;  
+	    
+	    splitting( x, fscale_factor*Q, P2LO, 11 ); /// PLO x PLO x pdf
+	    splitting( x, fscale_factor*Q, PNLO, 2 );  /// PNLO x pdf
+	    
+	    //	    for ( int ip=0 ; ip<13 ; ip++ ) m_fsplit22[itau][iy][ip] = 0.5*logf*(beta0*(logf - 2*logr)*PLO[ip] + logf*P2LO[ip] - 2*PNLO[ip] )*invx ;
+
+  	    for ( int ip=0 ; ip<m_partons ; ip++ ) m_fsplit22[itau][iy][ip] = logf*( 0.5*logf*P2LO[ip] - PNLO[ip] )*invx ;
+ 
+	    
+	    if ( m_reweight ) for ( int ip=0 ; ip<m_partons ; ip++ ) m_fsplit22[itau][iy][ip] *= fun;
+
+	  }
+	}
+      }
+
+
+    } // loop over itau
+
+  } // isSymmetric()
+
+  if ( isSymmetric() && !isDISgrid() ) { 
+    m_fg2      = m_fg1;
+    m_fsplit2  = m_fsplit1;
+    m_fsplit22 = m_fsplit12;
+  }
+
+}
+
+
+
+
+#if 0
+void igrid::pdfinterp(double x, double Q2, double* f)
+{
+  int k1=fk1(x);
+  int k3=fkappa(Q2);
+
+  double u_y1  = ( fy(x)-gety1(k1) )/deltay1();
+  double u_tau = ( ftau(Q2)-gettau(k3))/deltatau();
+
+  //  double rint = 0.;
+
+  for ( int i=0 ; i<14 ; i++ ) f[i]=0.;
+
+  double fI_factor;
+
+  for( int i3=0 ; i3<=m_tauorder ; i3++ ) { // interpolation loop in Q2
+   
+    //    double fI3 = fI(i3, m_tauorder, u_tau);
+
+    for( int i1=0 ; i1<=m_yorder ; i1++ ) { // interpolation loop in x1
+      
+      //      double fI1 = fI(i1, m_yorder, u_y1); 
+
+      fI_factor=fI(i1, m_yorder, u_y1) * fI(i3, m_tauorder, u_tau);
+                    
+      for ( int ip1=0 ; ip1<14 ; ip1++ ) { 
+	f[ip1] += fI_factor*m_fg1[k3+i3][std::abs(k1+i1)][ip1];     
+      }
+    }
+  }
+   
+  double fun = weightfun(x); 
+
+  if ( m_reweight ) for ( int ip1=0 ; ip1<14 ; ip1++ ) f[ip1] /= fun;
+
+}
+#endif
+
+
+
+
+// takes pdf0 (and optionally pdf1) as the pdf lib wrapper(s) for the pdf set(s) 
+// used in the convolution.
+// takes genpdf as a function to form the generalised parton distribution.
+// alphas is a function for the calculation of alpha_s (surprisingly).
+// lo_order is the order of the calculation, ie for inclusive or two jets 
+// it is O(alpha_s) (ie lo_order=1) and for 3 jets it would be O(alpha_s^2)
+// (ie lo_order=2).
+// nloop is the number of loops, ie for the leading order component it is
+// 0; for the nlo component, interestingly, it is also 0, since the leading order
+// grids are separate from the nlo grids. if nloop=1 then the scale variation 
+// terms are also calculated, with the scales multiplied by the corresponding 
+// {r,f}scale_factor, ie mu -> f*mu with fscale_factor=f.
+// splitting is the splitting function.
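+//
+// a usage sketch (illustrative only - here ig points to a filled igrid, and 
+// pdf0, genpdf and alphas_fn stand for whatever pdf cache, luminosity 
+// combination and alpha_s routine the calling grid actually provides):
+//
+//   double xsec = ig->convolute( pdf0, 0, genpdf, alphas_fn,
+//                                1,          // lo_order: O(alpha_s) born level
+//                                0,          // nloop: no scale variation terms
+//                                1.0, 1.0,   // mu_R, mu_F scale factors
+//                                1.0, 1.0 ); // beam energy scale factors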
+
+double appl::igrid::convolute(NodeCache* pdf0,
+			      NodeCache* pdf1,
+			      appl_pdf*  genpdf,
+			      double (*alphas)(const double& ), 
+			      int     lo_order,  
+			      int     _nloop, 
+			      double  rscale_factor,
+			      double  fscale_factor,
+			      double Escale0, 
+			      double Escale1 ) 
+{ 
+
+  //  double Escale = Escale0;
+
+  //  m_transvar = m_transvarlocal;
+
+  if ( pdf1==0 ) pdf1 = pdf0; 
+
+
+  m_conv_param.pdf0 = pdf0;
+  m_conv_param.pdf1 = pdf1;
+  m_conv_param.alphas = alphas;
+
+
+  m_conv_param.lo_order =  lo_order;
+  m_conv_param._nloop   = _nloop;
+  m_conv_param.Escale0   =  Escale0;
+  m_conv_param.Escale1   =  Escale1;
+
+  m_conv_param.rscale_factor = rscale_factor;
+  m_conv_param.fscale_factor = fscale_factor;
+
+  m_conv_param.genpdf = genpdf;
+
+  m_conv_param.dsigma     = 0;
+  m_conv_param.dsigmaNLO  = 0;
+  m_conv_param.dsigmaNNLO = 0;
+
+  double dsigma     = 0; 
+  //  double dsigmaNLO  = 0; 
+  //  double dsigmaNNLO = 0; 
+
+  // std::cout << name() << "\n" << *dynamic_cast<const lumi_pdf*>(genpdf) << std::endl;
+  
+#ifndef PDFTHREAD
+
+  // is the grid empty
+  int size=0;
+  for ( int ip=0 ; ip<m_Nproc ; ip++ ) {
+    //    std::cout << "\t" << name() << " " << ip << " :: " << m_weight[ip]->empty() << std::endl; 
+    if ( m_weight[ip]->empty() ) continue;
+    if ( !m_weight[ip]->trimmed() ) m_weight[ip]->trim();
+    //    size += m_weight[ip]->xmax() - m_weight[ip]->xmin() + 1;
+    size++;
+  }
+
+  //  std::cout << "\t" << name() << "  :: size: " << size << std::endl; 
+    
+  // grid is empty
+  if ( size==0 )  return 0;
+
+  int nloop = std::fabs(_nloop); 
+
+  setuppdf( alphas, pdf0, pdf1, nloop, rscale_factor, fscale_factor, Escale0, Escale1 );
+
+#endif
+
+  if ( threads_disabled ) convolute_internal();
+  else                    process();            
+
+  dsigma     = m_conv_param.dsigma; 
+  //  dsigmaNLO  = m_conv_param.dsigmaNLO; 
+  //  dsigmaNNLO = m_conv_param.dsigmaNNLO; 
+
+  return dsigma;
+}
+
+
+
+
+
+
+
+void appl::igrid::convolute_internal() { 
+
+  //  struct timeval mytimer = appl_timer_start();
+
+  int     lo_order = m_conv_param.lo_order;  
+  int     _nloop   = m_conv_param._nloop;  
+
+  int      nloop = std::fabs(_nloop); 
+
+  double  rscale_factor = m_conv_param.rscale_factor;  
+  double  fscale_factor = m_conv_param.fscale_factor;  
+
+  appl_pdf* genpdf = m_conv_param.genpdf;
+
+  //  static const double twopi = 2*M_PI;
+  //  static const int nc = 3;
+  //  //TC   const int nf = 6;
+  //  static const int nf = 5;
+  //  static double beta0=(11.*nc-2.*nf)/(6.*twopi);
+  //  const bool debug=false;  
+
+
+  double dsigma     = 0.; //, xsigma = 0.;
+  double dsigmaNLO  = 0.; //, xsigma = 0.;
+  double dsigmaNNLO = 0.; //, xsigma = 0.;
+
+  int size=0;
+  for ( int ip=0 ; ip<m_Nproc ; ip++ ) {
+    if ( m_weight[ip]->empty() ) continue;
+    if ( !m_weight[ip]->trimmed() ) m_weight[ip]->trim();
+    //    size += m_weight[ip]->xmax() - m_weight[ip]->xmin() + 1;
+    size++;
+  }
+
+  // grid is empty
+  if ( size==0 )  return;
+
+#ifdef PDFTHREAD
+
+  double (*alphas)(const double& ) = m_conv_param.alphas;   
+
+  NodeCache* pdf0 = m_conv_param.pdf0;   
+  NodeCache* pdf1 = m_conv_param.pdf1;   
+  
+  double   Escale0 =  m_conv_param.Escale0;   
+  double   Escale1 =  m_conv_param.Escale1;   
+
+  //  double _alphasn = 1.;
+  //  double  alphanplus1 = 0.;
+  // do the convolution  
+  // if (debug) std::cout<<name<<" nloop= "<<nloop<<endl;
+  //  std::cout << "\torder=" << lo_order << "\tnloop=" << nloop << std::endl;
+  // is the grid empty
+
+  size = 0;  // reset the count declared above
+  for ( int ip=0 ; ip<m_Nproc ; ip++ ) {
+    
+    if ( m_weight[ip]->empty() ) continue;
+
+    if ( !m_weight[ip]->trimmed() )  {
+      /// std::cerr << "igrid::convolute() naughty, naughty!" << std::endl;
+      m_weight[ip]->trim();
+    }
+    size += m_weight[ip]->xmax() - m_weight[ip]->xmin() + 1;
+  }
+
+  //  std::cout << "\t" << name() << " " << ip << " :: size " << size << std::endl; 
+
+  // grid is empty
+  if ( size==0 )  return;
+
+
+  setuppdf( alphas, pdf0, pdf1, nloop, rscale_factor, fscale_factor, Escale0, Escale1 );
+
+
+#endif
+
+  double* sig = new double[m_Nproc];  // weights from grid
+
+  double* H   = new double[m_Nproc];  // generalised pdf  
+
+  double* HA  = 0;  // generalised splitting functions
+  double* HB  = 0;  // generalised splitting functions
+
+  double* HA2 = 0;  // generalised splitting functions at NNLO
+  double* HB2 = 0;  // generalised splitting functions at NNLO
+  double* HAB = 0;  // generalised splitting functions at NNLO
+
+  if ( nloop>=1 && fscale_factor!=1 ) { 
+    HA  = new double[m_Nproc];  // generalised splitting functions
+    HB  = new double[m_Nproc];  // generalised splitting functions
+    if (nloop > 1 ) {
+      HA2  = new double[m_Nproc];  // generalised splitting functions at NNLO
+      HB2  = new double[m_Nproc];  // generalised splitting functions at NNLO
+      HAB  = new double[m_Nproc];  // generalised splitting functions at NNLO
+    }
+  }
+  
+  // cross section for this igrid  
+
+  //char name[]="appl_grid:igrid::convolute(): ";
+  //  static const double twopi = 2*M_PI;
+  static const int nc = 3;
+  //TC   const int nf = 6;
+  static const int nf = 5;
+  //  static double beta0=(11.*nc - 2.*nf)/(6.*twopi);
+  //  static double beta0=(11.*nc - 2.*nf)/(6.*twopi);
+  static double beta0_twopi=(11.*nc - 2.*nf)/6;
+
+  //  static double beta1=(34.*nc*nc - 3.*(nc-1./nc)*nf - 10.*nc*nf)/(12.*twopi*twopi);  
+  static double beta1_twopi_twopi=(34.*nc*nc - 3.*(nc-1./nc)*nf - 10.*nc*nf)/12.;  
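+  // for reference: with nc=3 and nf=5 these evaluate to beta0_twopi = 23/6 ~ 3.83 
+  // and beta1_twopi_twopi = 29/3 ~ 9.67 (ie beta0*2pi and beta1*(2pi)^2)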
+ 
+
+  double alphas_tmp = 0.;  
+
+  double _alphasn = 1.;
+  double  alphanplus1 = 0.;
+  //  double  alphanplus2 = 0.;
+
+
+  m_conv_param.dsigma     = 0;
+  m_conv_param.dsigmaNLO  = 0;
+  m_conv_param.dsigmaNNLO = 0;
+
+
+  // loop over the grid 
+  // 
+
+  double logr2 = std::log(rscale_factor*rscale_factor);
+  double logf2 = std::log(fscale_factor*fscale_factor);
+
+  //  std::vector<int>    ccks( m_Nproc, 0 );
+
+
+  double rlocal = rscale_factor;
+  double flocal = fscale_factor;
+
+  if ( genpdf->size() != m_Nproc ) { 
+    throw exception( "igrid::convolute() parton luminosity mismatch: "+
+		     std::to_string(genpdf->size())+" "+
+		     std::to_string(m_Nproc) );
+  }
+  
+  for ( int itau=0 ; itau<Ntau() ; itau++  ) {
+
+    _alphasn  = 1;    
+
+    alphas_tmp = m_alphas[itau];
+
+    for ( int iorder=0 ; iorder<lo_order ; iorder++ ) _alphasn *= alphas_tmp;
+    alphanplus1 = _alphasn*alphas_tmp;
+    //    alphanplus2 =  alphanplus1*alphas_tmp;
+
+#if 1
+    if ( fuserscale ) { 
+
+      double tau = gettau(itau);
+      double Q2  = fQ2(tau);
+      double Q   = std::sqrt(Q2); 
+    
+      double fQ = fuserscale(Q);
+      
+      double scale_factor = fQ/Q;
+      
+      logr2 = 2*std::log(scale_factor) + std::log(rscale_factor*rscale_factor);
+      logf2 = 2*std::log(scale_factor) + std::log(fscale_factor*fscale_factor);
+
+      rlocal = scale_factor * rscale_factor;
+      flocal = scale_factor * fscale_factor;
+
+    }
+#endif
+
+    //    for ( int iy1=0 ; iy1<Ny1() ; iy1++ ) {            
+    //      for ( int iy2=0 ; iy2<Ny2() ; iy2++ ) { 
+    for ( int iy1=Ny1() ; iy1-- ;  ) {            
+
+      for ( int iy2=Ny2() ; iy2-- ;  ) { 
+ 	// test if this element is actually filled
+	// if ( !m_weight[0]->trimmed(itau,iy1,iy2) ) continue; 
+	bool nonzero = false;
+	// basic convolution order component for either the born level
+	// or the convolution of the nlo grid with the pdf 
+	
+	for ( int ip=0 ; ip<m_Nproc ; ip++ ) {
+	  if ( (sig[ip] = (*(const SparseMatrix3d*)m_weight[ip])(itau,iy1,iy2)) ) nonzero = true;
+	}
+	
+	if ( nonzero ) { 	
+	  // build the generalised pdfs from the actual pdfs
+
+	  if ( !isDISgrid() ) genpdf->evaluate( &m_fg1[itau][iy1][0],  &m_fg2[itau][iy2][0], H );
+	  else                genpdf->evaluate( &m_fg1[itau][iy1][0],  0, H );
+
+	  //	  std::cout << "genpdf: " << *genpdf << "\tH: " << H[0] << " " << H[1] <<  " " << H[2] << std::endl; 
+
+	  //	  std::cout << iy1 << " " << iy2 << " " << itau << " " << std::endl;
+	  
+	  //	  for ( int ip=0 ; ip<m_Nproc ; ip++ ) std::cout << "\t" << "  ip " << ip << "  y1 " << iy1 << "  y2 " << iy2 << "  t " << itau << "  s " << sig[ip]*H[ip] << " : " <<  sig[ip] << " " << H[ip] << std::endl;
+
+	  //	  std::cout << "pdf1: " << m_fg1[itau][iy1][6] << "   pdf2: " << m_fg2[itau][iy2][6] << "   H: " << H[0] << std::endl;
+
+	  //	  std::cout << *dynamic_cast<const lumi_pdf*>(genpdf) << std::endl;
+	  
+	  // do the convolution
+          double xsigma=0.;
+
+	  if ( m_parent && m_parent->subproc()!=-1 ) { 
+	    int ip=m_parent->subproc();
+	    xsigma+= sig[ip]*H[ip];
+	  }
+	  else { 
+	    for ( int ip=0 ; ip<m_Nproc ; ip++ ) xsigma+= sig[ip]*H[ip];
+	    //  for ( int ip=0 ; ip<m_Nproc ; ip++ ) std::cout << "\t" << name() << "  ip " << ip << "  y1 " << iy1 << "  y2 " << iy2 << "  t " << itau << "  s " << sig[ip]*H[ip] << " : " <<  sig[ip] << " " << H[ip] << std::endl;
+	    //  for ( int ip=0 ; ip<1 ; ip++ ) xsigma+= sig[ip]*H[ip];
+	    //  for ( int ip=0 ; ip<1 ; ip++ ) std::cout << "\t" << name() << "  ip " << ip << "  y1 " << iy1 << "  y2 " << iy2 << "  t " << itau << "  s " << sig[ip]*H[ip] << " : " <<  sig[ip] << " " << H[ip] << std::endl;
+	    //	for ( int ip=0 ; ip<1 ; ip++ ) std::cout << "\t" << name() << "  ip " << ip << " " << iy1 << " " << iy2 << " " << itau << "  s " << sig[ip] << " : H " <<  H[ip] << "  sH " << sig[ip]*H[ip] << std::endl;
+	  }
+
+	  /// if we want the NLO or NNLO parts only, don't add in the born term
+	  if ( _nloop >= 0 ) dsigma += _alphasn*xsigma;
+
+	  // std::cout << "\t\tdsigma: " << dsigma << "   ( alphasn: " << _alphasn << " )" << std::endl;
+
+	  // now do the convolution for the variation of factorisation and 
+	  // renormalisation scales, proportional to the leading order weights
+	  if ( nloop>=1 ) { 
+
+	    // renormalisation scale dependent bit
+	    
+	    if ( rlocal!=1 ) { 
+	      // nlo relative ln mu_R^2 term 
+
+	      double fac = alphanplus1*lo_order*logr2*xsigma; 
+	      
+	      dsigmaNLO += fac*beta0_twopi;
+	   
+	      if (nloop>1) { 
+		dsigmaNNLO += fac*alphas_tmp*( beta1_twopi_twopi + 0.5*beta0_twopi*beta0_twopi*(1+lo_order)*logr2 );
+	      }
+
+  	    }
+
+	    
+	    // factorisation scale dependent bit
+            
+	    // nlo relative ln mu_F^2 term 
+	    if ( flocal!=1 ) {
+
+	      if ( !isDISgrid() ) { 
+		   genpdf->evaluate(     &m_fg1[itau][iy1][0],  &m_fsplit2[itau][iy2][0], HA);
+		   genpdf->evaluate( &m_fsplit1[itau][iy1][0],      &m_fg2[itau][iy2][0], HB);
+	      }
+	      else { 
+		   for ( int ip=0 ; ip<m_Nproc ; ip++ ) HA[ip] = 0;
+		   genpdf->evaluate( &m_fsplit1[itau][iy1][0],  0, HB);
+	      }
+
+	      xsigma=0.; /// setting this to 0 ??? maybe this is ok because we have already used it for the rscale part
+
+	      if ( m_parent && m_parent->subproc()!=-1 ) { 
+		int ip=m_parent->subproc();
+		xsigma += sig[ip]*(HA[ip]+HB[ip]);
+	      }
+	      else { 
+		for ( int ip=0 ; ip<m_Nproc ; ip++ ) xsigma += sig[ip]*(HA[ip]+HB[ip]);
+	      }
+
+	      double fac = alphanplus1*logf2*xsigma;
+
+	      dsigmaNLO -= fac;
+
+	      if ( nloop>1 ) {
+
+		/// this is most strange - NNLOJET needs logr2*(lo_order+1)
+		/// whereas an additional calculation suggests it should be 
+		/// logr2*lo_order
+		/// hmmm
+		dsigmaNNLO += fac*alphas_tmp*beta0_twopi*( 0.5*logf2 - logr2*(lo_order+1) );
+
+		/// plus the double splitting function components ...
+		/// PDFA2 PDFB0 alphas2 +PDFA1 PDFB1 alphas2 + PDFA0 PDFB2 alphas2 
+
+		if ( !isDISgrid() ) { 
+		  genpdf->evaluate( &m_fsplit12[itau][iy1][0],        &m_fg2[itau][iy2][0], HA2 );
+		  genpdf->evaluate(      &m_fg1[itau][iy1][0],   &m_fsplit22[itau][iy2][0], HB2 );
+		  genpdf->evaluate(  &m_fsplit1[itau][iy1][0],    &m_fsplit2[itau][iy2][0], HAB );
+		}
+		else { 
+		  for ( int ip=0 ; ip<m_Nproc ; ip++ ) { HAB[ip] = HB2[ip] = 0; }
+		  genpdf->evaluate( &m_fsplit12[itau][iy1][0],   0, HA2 );
+		}
+
+		double fac2 = 0;
+		double fac3 = 0;
+
+	        if ( m_parent && m_parent->subproc()!=-1 ) { 
+		    int ip=m_parent->subproc();
+		    fac2 += sig[ip]*( HA2[ip] + HB2[ip] );
+		    fac3 += sig[ip]*( HAB[ip] );
+		}
+		else { 
+		  for ( int ip=0 ; ip<m_Nproc ; ip++ ) { 
+		    fac2 += sig[ip]*( HA2[ip] + HB2[ip] );
+		    fac3 += sig[ip]*( HAB[ip] );
+		  }
+		}
+		 
+		dsigmaNNLO += alphanplus1*alphas_tmp*(logf2*logf2*fac3 + fac2); 
+
+	      }
+
+	      //if (debug) 
+              //cout <<name<<" fscale= " << fscale_factor << " dsigma= "<<dsigma << std::endl;
+	    }
+	  }
+	}  // nonzero
+      }  // iy2
+    }  // iy1
+
+  }  // itau
+  
+  //  if ( dbg ) std::cout << name() << "\tccks: " << ccks << std::endl; 
+
+  //if (debug)  std::cout << name<<"     convoluted dsigma=" << dsigma << std::endl; 
+
+  delete[] sig;
+  delete[] H;
+  delete[] HA;
+  delete[] HB;
+
+  if ( HA2 ) { 
+    delete[] HA2;
+    delete[] HB2;
+    delete[] HAB;
+  }    
+
+  deletepdftable();
+  
+  // NB!!! the return value dsigma must be scaled by Escale*Escale, which 
+  // is done in grid::vconvolute. It would be better done here, but doing 
+  // it in grid reduces the number of operations. 
+
+  m_conv_param.dsigma     = dsigma;
+  m_conv_param.dsigmaNLO  = dsigmaNLO;
+  m_conv_param.dsigmaNNLO = dsigmaNNLO;
+
+  //  double mytime = appl_timer_stop(mytimer);
+
+  //  std::printf("thread done: param: %lf internal time %lf ms\n", m_conv_param.dsigma, mytime );
+
+}
+
+
+
+
+
+
+
+/// this is the convolute routine for the amcatnlo convolution - essentially it 
+/// is the same as for the standard calculation, but the amcatnlo calculation
+/// stores weights for the NLO born contribution and counterterms, so we need
+/// more grids than the usual two.
+double appl::igrid::amc_convolute(NodeCache* pdf0,
+				  NodeCache* pdf1,
+				  appl_pdf*  genpdf,
+				  double (*alphas)(const double& ), 
+				  int     lo_order,  
+				  int     nloop, 
+				  double  rscale_factor,
+				  double  fscale_factor,
+				  double  Escale0, 
+				  double  Escale1) 
+{ 
+
+  m_conv_param.pdf0 = pdf0;
+  m_conv_param.pdf1 = pdf1;
+
+  m_conv_param.alphas = alphas;
+
+  m_conv_param.lo_order =  lo_order;
+
+  //  m_conv_param._nloop   =  nloop;
+  //  m_conv_param.Escale   =  Escale;
+
+  //  m_conv_param.rscale_factor = rscale_factor;
+  //  m_conv_param.fscale_factor = fscale_factor;
+
+  m_conv_param.genpdf = genpdf;
+
+  m_conv_param.dsigma = 0;
+
+  // is the grid empty
+  int size=0;
+   for ( int ip=0 ; ip<m_Nproc ; ip++ ) { 
+    if ( !m_weight[ip]->trimmed() )  {
+      //  std::cout << "igrid::convolute() naughty, naughty!" << std::endl;
+      m_weight[ip]->trim();
+    }
+    size += m_weight[ip]->xmax() - m_weight[ip]->xmin() + 1;
+  }
+
+  // grid is empty
+  if ( size==0 )  return 0;
+
+  setuppdf( alphas, pdf0, pdf1, nloop, rscale_factor, fscale_factor, Escale0, Escale1 );  
+
+  if ( threads_disabled ) amc_convolute_internal();
+  else                    process();
+
+  return m_conv_param.dsigma;
+}
+
+
+
+
+
+
+
+void appl::igrid::amc_convolute_internal() { 
+  
+  int     lo_order = m_conv_param.lo_order;  
+
+  //  int     _nloop    = m_conv_param._nloop;  
+  
+  //  int      nloop = std::fabs(_nloop); 
+  
+  //  double  rscale_factor = m_conv_param.rscale_factor;  
+  //  double  fscale_factor = m_conv_param.fscale_factor;  
+  
+  appl_pdf* genpdf = m_conv_param.genpdf;
+  
+  static const double eightpisquared = 8*M_PI*M_PI;
+  
+  double alphas_tmp = 0.;  
+  double dsigma  = 0.; //, xsigma = 0.;
+  double _alphasn = 1.;
+
+  double* sig = new double[m_Nproc];  // weights from grid
+  double* H   = new double[m_Nproc];  // generalised pdf  
+  double* HA  = 0;  // generalised splitting functions
+  double* HB  = 0;  // generalised splitting functions
+  //  if ( nloop==1 && fscale_factor!=1 ) { 
+  //    HA  = new double[m_Nproc];  // generalised splitting functions
+  //    HB  = new double[m_Nproc];  // generalised splitting functions
+  //  }
+
+  // cross section for this igrid  
+
+  // loop over the grid 
+  // 
+
+  for ( int itau=0 ; itau<Ntau() ; itau++  ) {
+    _alphasn  = 1;    
+    alphas_tmp = m_alphas[itau]*eightpisquared;
+    for ( int iorder=0 ; iorder<lo_order ; iorder++ ) _alphasn *= alphas_tmp;
+    //   alphanplus1 = _alphasn*alphas_tmp;
+
+    for ( int iy1=Ny1() ; iy1-- ;  ) {            
+      for ( int iy2=Ny2() ; iy2-- ;  ) { 
+ 	// test if this element is actually filled
+	// if ( !m_weight[0]->trimmed(itau,iy1,iy2) ) continue; 
+	bool nonzero = false;
+	// basic convolution order component for either the born level
+	// or the convolution of the nlo grid with the pdf 
+	for ( int ip=0 ; ip<m_Nproc ; ip++ ) {
+	  if ( (sig[ip] = (*(const SparseMatrix3d*)m_weight[ip])(itau,iy1,iy2)) ) nonzero = true;
+	}
+	
+	//	for ( int ip=0 ; ip<m_Nproc ; ip++ ) std::cout << "\t" << sig[ip]; 
+	//	std::cout << std::endl;
+
+	if ( nonzero ) { 	
+
+	  // build the generalised pdfs from the actual pdfs
+	  genpdf->evaluate( &m_fg1[itau][iy1][0],  &m_fg2[itau][iy2][0], H );
+
+	  // do the convolution
+
+          double xsigma=0.;
+	  for ( int ip=0 ; ip<m_Nproc ; ip++ ) xsigma+=sig[ip]*H[ip];
+	  dsigma += _alphasn*xsigma;
+
+	}  // nonzero
+      }  // iy2
+    }  // iy1
+  }  // itau
+  
+  //if (debug)  std::cout << name<<"     convoluted dsigma=" << dsigma << std::endl; 
+  
+  delete[] sig;
+  delete[] H;
+  delete[] HA;
+  delete[] HB;
+  
+  deletepdftable();
+  
+  //  std::cout << "dsigma " << dsigma << std::endl;
+
+  m_conv_param.dsigma = dsigma;
+
+  // NB!!! the return value dsigma must be scaled by Escale*Escale, which 
+  // is done in grid::vconvolute. It would be better done here, but doing 
+  // it in grid reduces the number of operations. 
+  return; 
+}
+
+
+
+
+
+
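+/// keep only the subprocess grids listed in "keep" (in the given order) 
+/// and delete the rest - usage sketch (illustrative only):
+///   std::vector<int> keep;
+///   keep.push_back(0); keep.push_back(3); keep.push_back(7);
+///   ig->shrink( keep );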
+bool appl::igrid::shrink( const std::vector<int>& keep ) {
+ 
+  /// save the old grids
+  int          Nproc = m_Nproc;
+  SparseMatrix3d** w = m_weight;
+
+  /// resize
+  m_Nproc  = keep.size();
+  m_weight = new SparseMatrix3d*[m_Nproc];
+
+  /// copy across the grids we want to save
+  for ( unsigned ip=0 ; ip<keep.size() ; ip++ ) {
+ 
+    //    std::cout << "\tcopy " << keep[ip] << " -> " << ip << std::endl; 
+
+    m_weight[ip] = w[keep[ip]]; /// move across the processes we want to keep
+    w[keep[ip]] = 0; /// now flag as 0 
+  }
+
+  for ( int ip=0 ; ip<Nproc ; ip++ ) if ( w[ip]!=0 ) delete w[ip];
+
+  delete[] w; 
+  
+  return true;
+}
+
+
+
+
+bool appl::igrid::combine_proc( int i, int j ) { 
+  (*m_weight[i]) += (*m_weight[j]);  
+  return true;
+}
+
+
+/// rebuild the weight grids, eliminating the 
+/// requested subprocess
+
+bool appl::igrid::remove( int i ) {
+
+  if ( i>=m_Nproc ) return false; 
+
+  SparseMatrix3d** mw0 = m_weight;
+  SparseMatrix3d** mw  = new SparseMatrix3d*[m_Nproc-1];
+
+  int ir=0;
+  int ip0=0;
+  for ( int ip=0 ; ip<m_Nproc ; ip++ ) { 
+    if ( ip==i ) { 
+      delete m_weight[ip];
+      ir++;
+    }
+    else { 
+      mw[ip0++] = m_weight[ip];
+    }
+  }
+
+  m_Nproc -= ir; 
+  
+  m_weight = mw;
+
+  delete[] mw0;  // the old array was allocated with new[]
+
+  return true;
+}
+
+
+
+
+
+// FIXME: this has lots of legacy code and stuff commented out which
+// is no longer needed or even correct, so it should be tidied.
+// the point of the algorithm here is to find the extent of the filled
+// bins + 1 on either side and create a new grid with these limits
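+//
+// eg if only tau nodes 5..12 of the old axis were filled, the new tau range 
+// would span roughly old nodes 4..13 (widened further by the interpolation 
+// order if the grid had only been filled in phase-space mode), remapped onto 
+// the requested NQ2 new nodes - an illustrative reading of the code below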
+   
+void appl::igrid::optimise(int NQ2, int Nx1, int Nx2, int extrabins) {     
+
+  //  std::cout << "\tsize(untrimmed)=" << m_weight[0]->size();
+
+  //  std::cout << "ymin=" << gety(0) << "\tymax=" << gety(m_Ny-1) 
+  //       << "\txmin=" << fx(gety(m_Ny-1)) << "\txmax=" << fx(gety(0)) << std::endl;
+
+  trim();
+
+  //  std::cout << "\tsize(trimmed)=" << m_weight[0]->size() << std::endl;
+
+#if 1
+
+  // overall igrid optimisation limits
+
+  int _y1setmin = Ny1();
+  int _y1setmax = -1;
+  
+  int _y2setmin = Ny2();
+  int _y2setmax = -1;
+  
+  int _tausetmin = Ntau(); 
+  int _tausetmax = -1; 
+  
+  //  double oldy1min = m_y1min;
+  //  double oldy1max = m_y1max;
+  
+  //  double oldy2min = m_y2min;
+  //  double oldy2max = m_y2max;
+
+  // go through all the subprocesses to get the limits
+  // FIXME: this is actually redundant at the moment - it assumes all the subprocesses 
+  //        occupy the same phase space, so only the first needs to be tested; if
+  //        they don't all occupy the same phase space, then they should each be
+  //        optimised separately
+
+  // find limits for this grid
+  // initialise to original grid limits
+  int y1setmin = _y1setmin;
+  int y1setmax = _y1setmax;
+  
+  int y2setmin = _y2setmin;
+  int y2setmax = _y2setmax;
+  
+  int tausetmin = _tausetmin; 
+  int tausetmax = _tausetmax; 
+  
+  bool firstdebug = true;
+  bool firstempty = true;
+
+  for ( int ip=0 ; ip<m_Nproc ; ip++ ) { 
+    
+    if ( firstdebug ) std::cout << "optimise() proc " << ip 
+				<< "\tfilled Q2 weights " << m_weight[ip]->xmax() << " - " << m_weight[ip]->xmin()
+				<< "\tx1 " << m_weight[ip]->ymax() << " - " << m_weight[ip]->ymin()
+				<< "\tx2 " << m_weight[ip]->zmax() << " - " << m_weight[ip]->zmin();
+
+    // is it empty?
+    if ( m_weight[ip]->xmax()-m_weight[ip]->xmin()+1 == 0 ) { 
+      if ( firstdebug && firstempty ) std::cout << "\tempty" << std::endl;
+      firstempty = false;
+      firstdebug = false;
+      continue;
+    }
+    
+
+    firstdebug = false;
+
+    //    m_weight[ip]->print();
+
+    //    m_weight[ip]->yaxis().print(fx);
+
+    //    std::cout << "initial ip=" << ip 
+    //	       << "\tm_y2min=" << m_y2min << "\tm_y2max=" << m_y2max
+    //         << "\tm_y1min=" << m_y1min << "\tm_y1max=" << m_y1max  
+    //         << std::endl;  
+
+    //    m_weight[ip]->print();
+
+    // find actual limits
+
+    // y1 optimisation
+    int ymin1 = m_weight[ip]->ymin();
+    int ymax1 = m_weight[ip]->ymax();
+    
+    if ( ymin1<y1setmin )                 y1setmin = ymin1;
+    if ( ymin1<=ymax1 && y1setmax<ymax1 ) y1setmax = ymax1; 
+
+    //    std::cout << "ip=" << ip << std::endl; // << "\tymin1=" << ymin1 << "\tymax1=" << ymax1 << std::endl;
+    
+    // y2 optimisation
+    int ymin2 = m_weight[ip]->zmin();    
+    int ymax2 = m_weight[ip]->zmax();
+
+    if ( ymin2<y2setmin )                 y2setmin = ymin2;
+    if ( ymin2<=ymax2 && y2setmax<ymax2 ) y2setmax = ymax2; 
+    
+    // tau optimisation
+    int taumin = m_weight[ip]->xmin();    
+    int taumax = m_weight[ip]->xmax();
+
+    if ( taumin<tausetmin )                   tausetmin = taumin;
+    if ( taumin<=taumax && taumax>tausetmax ) tausetmax = taumax;
+
+  }
+  
+  // if grid is empty, do "nothing" ie create the grid with the same
+  // limits as before but with the new required number of bins 
+  if (  y1setmax==-1 || y2setmax==-1 || tausetmax==-1 ) { 
+    m_Ny1  = Nx1;
+    m_Ny2  = Nx2;
+    m_Ntau = NQ2;
+    m_deltay1  = (m_y1max-m_y1min)/(m_Ny1-1);
+    m_deltay2  = (m_y2max-m_y2min)/(m_Ny2-1);
+    m_deltatau = (m_taumax-m_taumin)/(m_Ntau-1);
+  }
+  else { 
+ 
+
+    
+    // y1 optimisation
+    //    double oldy1min = m_y1min;
+    //    double oldy1max = m_y1max;
+   
+
+    if ( isOptimised() ) { 
+      // add a bit on each side
+      y1setmin -= extrabins;
+      y1setmax += extrabins;
+    }
+    else { 
+      // not optimised yet, so only filled with phase space points; also add 
+      // the interpolation order to the max filled grid element position 
+      y1setmin -= extrabins;
+      y1setmax += m_yorder+extrabins;
+    }
+    
+    if ( y1setmin<0 )       y1setmin = 0;
+    if ( y1setmax>m_Ny1-1 ) y1setmax = m_Ny1-1;
+    
+    
+    double _min = gety1(y1setmin); 
+    double _max = gety1(y1setmax); 
+    
+    m_Ny1     = Nx1;
+    m_y1min   = _min;
+    m_y1max   = _max;
+    m_deltay1 = (m_y1max-m_y1min)/(m_Ny1-1);
+    
+    
+    
+    // y2 optimisation
+    //   double oldy2min = m_y2min;
+    //   double oldy2max = m_y2max;
+    
+    if ( isOptimised() ) { 
+      // add a bit on each side
+      y2setmin -= extrabins;
+      y2setmax += extrabins;
+    }
+    else { 
+      // not optimised yet, so also add the interpolation order to the  
+      // max filled grid element position 
+      y2setmin -= extrabins;
+      y2setmax += m_yorder+extrabins;
+    }
+    
+    if ( y2setmin<0 )      y2setmin = 0;
+    if ( y2setmax>=m_Ny2 ) y2setmax = m_Ny2-1; 
+
+
+    //  m_Ny2 =  y2setmax-y2setmin+1;
+    _min = gety2(y2setmin); 
+    _max = gety2(y2setmax); 
+    
+    m_Ny2     = Nx2;
+    m_y2min   = _min;
+    m_y2max   = _max;
+    m_deltay2 = (m_y2max-m_y2min)/(m_Ny2-1);
+    
+    std::cout << "\t-> " 
+	      << y1setmin  << " - " << y1setmax << " : " 
+	      << y2setmin  << " - " << y2setmax 
+	      << std::endl;
+    
+    // tau optimisation
+    //   double oldtaumin = m_taumin;
+    //   double oldtaumax = m_taumax;
+    
+    // add a bit on each side
+    if ( isOptimised() ) { 
+      // add a bit on each side
+      tausetmin--;
+      tausetmax++;
+    }
+    else { 
+      // not optimised yet, so also add the interpolation order to the  
+      // max filled grid element position 
+      tausetmax += m_tauorder+1;
+      tausetmin -= 1;
+    }  
+
+    if ( tausetmin<0 )         tausetmin = 0;
+    if ( tausetmax>=m_Ntau-1 ) tausetmax = m_Ntau-1;
+
+    
+    _min   = gettau(tausetmin); 
+    _max   = gettau(tausetmax);
+    
+    m_Ntau     = NQ2;
+    m_taumin   = _min;
+    m_taumax   = _max;
+    m_deltatau = (m_taumax-m_taumin)/(m_Ntau-1);
+    
+    //    std::cout << "done ip=" << ip << std::endl; 
+    
+  }
+  
+#endif
+
+  // now create the new subprocess grids with optimised limits
+  for ( int ip=0 ; ip<m_Nproc ; ip++ ) { 
+    delete m_weight[ip];
+    m_weight[ip] = new SparseMatrix3d(m_Ntau, m_taumin,   m_taumax,    
+				      m_Ny1,   m_y1min,   m_y1max, 
+				      m_Ny2,   m_y2min,   m_y2max ); 
+  }   
+  
+  m_optimised = true;
+}
+
+
+
+
+
+// numerical operators
+appl::igrid& appl::igrid::operator=(const appl::igrid& g) {
+ 
+  m_Ny1     = g.m_Ny1;
+  m_y1min   = g.m_y1min;
+  m_y1max   = g.m_y1max;
+  m_deltay1 = g.m_deltay1;
+
+  m_Ny2     = g.m_Ny2;
+  m_y2min   = g.m_y2min;
+  m_y2max   = g.m_y2max;
+  m_deltay2 = g.m_deltay2;
+
+  m_yorder = g.m_yorder;
+   
+  m_Ntau     = g.m_Ntau;
+  m_taumin   = g.m_taumin;
+  m_taumax   = g.m_taumax;
+  m_deltatau = g.m_deltatau;
+
+  m_tauorder = g.m_tauorder;
+
+
+  m_fg1.clear();
+  m_fg2.clear();
+
+  //  construct();
+
+  //  m_weight = new SparseMatrix3d*[m_Nproc];
+
+  for( int ip=0 ; ip<m_Nproc ; ip++ ) { 
+    // delete old sparse matrix
+    delete m_weight[ip];
+    // create new
+    m_weight[ip] = new SparseMatrix3d(*g.m_weight[ip]);
+  }
+
+  return *this;
+}
+
+
+// should really check all the limits and *everything* is the same
+appl::igrid& appl::igrid::operator+=(const igrid& g) { 
+  for ( int ip=0 ; ip<m_Nproc ; ip++ ) {
+    if ( m_weight[ip] && g.m_weight[ip] ) { 
+      /// this is a complicated conditional:
+      /// do nothing if the grid to be added is empty ...
+      if ( !g.m_weight[ip]->empty() ) { 
+	if (  m_weight[ip]->empty() ) { 
+	  delete m_weight[ip];
+	  m_weight[ip] = new SparseMatrix3d( *g.m_weight[ip] );
+	}
+	else if ( m_weight[ip]->compare_axes( *g.m_weight[ip] ) ) (*m_weight[ip]) += (*g.m_weight[ip]); 
+	else { 
+	  throw exception("igrid::operator+=() grids do not match");
+	}
+      }
+    }
+  }
+  return *this;
+} 
+
+
+
+void appl::igrid::add( igrid* g ) { 
+
+    int nx1 = g->Ny1();
+    int nx2 = g->Ny2();
+    int nQ2 = g->Ntau();
+
+    /// map each grid node (i1, i2, it) of the grid being added back to 
+    /// x1, x2 and Q2, then just call fill for each !!!
+
+    igrid* ig[2] = { this, g };
+
+    std::cout << "appl::igrid::add() combining bins: " << std::endl; 
+
+    for ( int i=0 ; i<2 ; i++ ) std::cout << i << "\t" << *ig[i] << std::endl;
+
+    //    for ( int i=0 ; i<m_Nproc ; i++ ) g->m_weight[i]->untrim();
+
+    std::vector<SparseMatrix3d*> w(m_Nproc);
+
+    for ( int i=0 ; i<m_Nproc ; i++ ) w[i] = g->m_weight[i];
+ 
+    for ( int i1=0 ; i1<nx1 ; i1++ ) { 
+      
+      double y1 = g->gety1(i1);
+      double x1 = g->fx(y1);
+
+      for ( int i2=0 ; i2<nx2 ; i2++ ) { 
+
+	double y2 = g->gety2(i2);
+	double x2 = g->fx(y2);
+
+	for ( int it=0 ; it<nQ2 ; it++ ) { 
+	  //	  std::cout <<  i1 << " " << i2 << " " << it << " : " << (*m_weight[i])(i1,i2,it) << std::endl; 
+
+	  std::vector<double> weights(m_Nproc,0);
+	  
+	  double nuffin = 0;
+	  
+	  for ( int i=0 ; i<m_Nproc ; i++ ) nuffin += weights[i] = (*w[i])(it,i1,i2);
+
+	  if ( nuffin ) { 
+	    //	    std::cout << "weight: " << i1 << " " << i2 << " " << it << " : " << (*w[0])(it,i1,i2) << std::endl; 
+
+	    /// need to fill this one and we're done !!!, but better 
+	    /// to loop over Nproc here to avoid doing the 
+	    /// interpolations many times ?
+
+	    /// now get x1, x2 and Q2 
+
+	    /// remember to use the correct x and Q2 transforms for the grid we 
+	    /// are remapping ...
+	    double tau = g->gettau(it);
+	    double Q2  = g->fQ2(tau);
+	    
+	    /// now fill this grid !!!
+	    fill( x1, x2, Q2, &weights[0] ); 
+
+	  } 
+	
+	} /// tau
+      } /// y2      
+    } /// y1
+        
+}
+
+#include <cstdlib>
+
+void nodes( const std::string& s, double a, double b, int n ) { 
+  double d = (b-a)/n;
+  std::printf( "%s\t", s.c_str() );
+  for ( int i=0 ; i<n+1 && i<6 ; i++ ) std::printf( "\t%8.6lf", a+i*d );
+  std::printf("\n");
+}
+
+
+
+void appl::igrid::remap( appl::igrid* g ) const {  
+
+  std::cout << "remap() in" << std::endl;
+ 
+  std::cout << "Ntau: " << Ntau() << "\tNx: " << Ny1() << " " << Ny2() << "\tNproc: " << m_Nproc << std::endl;  
+
+  std::vector<double> sig(m_Nproc);  
+
+  for ( int itau=0 ; itau<Ntau() ; itau++  ) {
+
+    double Q2 = fQ2(gettau(itau));
+
+    for ( int iy1=Ny1() ; iy1-- ;  ) {                  
+      
+      double x1 = fx(gety1(iy1));
+
+      for ( int iy2=Ny2() ; iy2-- ;  ) { 
+	bool nonzero = false;
+	for ( int ip=0 ; ip<m_Nproc ; ip++ ) {
+	  if ( (sig[ip] = (*(const SparseMatrix3d*)m_weight[ip])(itau,iy1,iy2)) ) nonzero = true;
+	}
+	if ( nonzero ) { 
+	  
+	  double x2 = fx(gety2(iy2));
+
+	  //	  std::cout << "Q2: " << Q2 << "\tx: " << x1 << "\t" << x2 << std::endl;
+
+	  if ( !isDISgrid() ) g->fill( x1, x2, Q2, &sig[0] );
+	  else                g->fill_DIS( getx1(iy1), getQ2(itau), &sig[0] );
+	}
+      }
+    }
+  }
+
+  std::cout << "remap() out" << std::endl;
+
+}
+
+
+
+
+void appl::igrid::merge( igrid* g ) { 
+
+
+  for ( int ip=0 ; ip<m_Nproc ; ip++ ) std::cout << "grids: " << m_weight[ip] << std::endl;
+
+    /// map nx1, nx2 and nQ2 to x1, x2 and Q2, then just 
+    /// call fill for each !!!
+    
+    igrid* ig[2] = { this, g };
+
+    igrid* g0 = this;
+    igrid* g1 = g;
+
+    std::cout << *g0 << std::endl;
+    std::cout << *g1 << std::endl;
+
+
+    std::cout << "appl::igrid::merge() combining bins: " << std::endl; 
+
+
+    int nx1[2];
+    int nx2[2];
+    int nQ2[2];
+    
+    double y1[2];
+    double y1m[2];
+    double dx1[2]; 
+    
+    double y2[2]; 
+    double y2m[2];
+    double dx2[2]; 
+    
+    double tau[2]; 
+    double taum[2];
+    double dt[2];  
+    
+    for ( int i=0 ; i<2 ; i++ ) { 
+
+      //      std::cout << i << "\t" << *ig[i] << std::endl;
+
+      igrid* g = ig[i];
+
+      nx1[i] = g->Ny1();
+      nx2[i] = g->Ny2();
+      nQ2[i] = g->Ntau();
+
+      y1[i]  = g->gety1(0);
+      y1m[i] = g->gety1(nx1[i]-1);
+      dx1[i]  = (y1m[i]-y1[i])/nx1[i];
+
+      y2[i]  = g->gety2(0);
+      y2m[i] = g->gety2(nx2[i]-1);
+      dx2[i]  = (y2m[i]-y2[i])/nx2[i];
+
+      tau[i]  = g->gettau(0);
+      taum[i] = g->gettau(nQ2[i]-1);
+      dt[i]   = (taum[i]-tau[i])/nQ2[i];
+
+      std::cout << "grid:" << i;
+      std::cout << "\t" << nx1[i] << "\tx1: " << "\t" << y1[i] << "\t " << y1m[i];
+      std::cout << "\t" << nx2[i] << "\tx2: " << "\t" << y2[i] << "\t " << y2m[i]; 
+      std::cout << "\t" << nQ2[i] << "\tQ2: " << "\t" << tau[i] << "\t " << taum[i];
+      std::cout << std::endl; 
+ 	
+      nodes( "y1:  ", y1[i], y1m[i], nx1[i] );
+      // nodes( "y2:  ", y2[i], y2m[i], nx2[i] );
+      // nodes( "tau: ", tau[i], taum[i], nQ2[i] );
+
+    }
+    
+    bool rebuild1 = false; 
+    bool rebuild2 = false; 
+    bool rebuildt = false; 
+
+    if (  y1[1]<y1[0]  ) rebuild1 = true;
+    if ( y1m[1]>y1m[0] )  rebuild1 = true;
+
+    if (  y2[1]<y2[0]  )  rebuild2 = true;
+    if ( y2m[1]>y2m[0] )  rebuild2 = true;
+
+    if (  tau[1]<tau[0]  )  rebuildt = true;
+    if ( taum[1]>taum[0] )  rebuildt = true;
+    
+    bool rebuild = rebuild1 || rebuild2 || rebuildt;
+
+    /// got the limits, now create the new igrid
+
+    std::cout << "rebuild: " << rebuild << std::endl;
+
+    m_yorder = 5;
+    m_tauorder = 5;
+
+    if ( rebuild ) { 
+
+      m_y1min = std::min(  y1[0],  y1[1] );
+      m_y1max = std::max( y1m[0], y1m[1] );
+
+      double d1 = std::min( dx1[0], dx1[1] ); 
+
+      m_y2min = std::min(  y2[0],  y2[1] );
+      m_y2max = std::max( y2m[0], y2m[1] );
+
+      double d2 = std::min( dx2[0], dx2[1] ); 
+      
+      m_taumin = std::min(  tau[0],  tau[1] );
+      m_taumax = std::max( taum[0], taum[1] );
+
+      double ndt = std::min( dt[0], dt[1] ); 
+
+      std::cout << "d: " << ndt    << "\t" << d1    << "\t" << d2    << std::endl;
+      std::cout << "N: " << m_Ntau << "\t" << m_Ny1 << "\t" << m_Ny2 << std::endl;
+
+      if ( rebuildt ) m_Ntau = int((m_taumax-m_taumin)/ndt + 1);
+      if ( rebuild1 ) m_Ny1  = int((m_y1max-m_y1min)/d1 + 1);
+      if ( rebuild2 ) m_Ny2  = int((m_y2max-m_y2min)/d2 + 1);
+
+
+      nodes( "y1 ", m_y1min, m_y1max, m_Ny1 ); 
+      // nodes( "y2 ", m_y2min, m_y2max, m_Ny2 ); 
+      // nodes( "tau", m_taumin, m_taumax, m_Ntau ); 
+
+      std::cout << "N: " << m_Ntau << "\t" << m_Ny1 << "\t" << m_Ny2 << std::endl;
+      
+      std::cout << "xorder: " << m_yorder << "\tQorder: " << m_tauorder << std::endl;
+
+      std::cout << "copy grid" << std::endl;
+
+      igrid* gsave = new igrid( *g0 );
+
+      std::cout << "new grids" << std::endl;
+      
+      // now create the new subprocess grids with optimised limits
+      for ( int ip=0 ; ip<m_Nproc ; ip++ ) {
+
+	std::cout << "out with the old ... " << m_weight[ip] << std::endl;          
+	
+	delete m_weight[ip];
+	
+	std::cout << "and in with the new" << std::endl;          
+	
+	m_weight[ip] = new SparseMatrix3d( m_Ntau, m_taumin,   m_taumax,    
+					   m_Ny1,   m_y1min,   m_y1max, 
+					   m_Ny2,   m_y2min,   m_y2max ); 
+      }   
+      
+      std::cout << "add saved" << std::endl;
+      add( gsave );
+
+      delete gsave;
+
+    }
+    
+    std::cout << "add next" << std::endl;
+    //    add( g1 );
+    
+    
+
+#if 0
+
+
+    //    for ( int i=0 ; i<m_Nproc ; i++ ) g->m_weight[i]->untrim();
+
+    std::vector<SparseMatrix3d*> w(m_Nproc);
+
+    for ( int i=0 ; i<m_Nproc ; i++ ) w[i] = g->m_weight[i];
+ 
+    for ( int i1=0 ; i1<nx1 ; i1++ ) { 
+      
+      double y1 = g->gety1(i1);
+      double x1 = g->fx(y1);
+
+      for ( int i2=0 ; i2<nx2 ; i2++ ) { 
+
+	double y2 = g->gety2(i2);
+	double x2 = g->fx(y2);
+
+	for ( int it=0 ; it<nQ2 ; it++ ) { 
+	  //	  std::cout <<  i1 << " " << i2 << " " << it << " : " << (*m_weight[i])(i1,i2,it) << std::endl; 
+
+	  std::vector<double> weights(m_Nproc,0);
+	  
+	  double nuffin = 0;
+	  
+	  for ( int i=0 ; i<m_Nproc ; i++ ) nuffin += weights[i] = (*w[i])(it,i1,i2);
+
+	  if ( nuffin ) { 
+	    //	    std::cout << "weight: " << i1 << " " << i2 << " " << it << " : " << (*w[0])(it,i1,i2) << std::endl; 
+
+	    /// need to fill this one and we're done !!!, but better 
+	    /// to loop over Nproc here to avoid doing the 
+	    /// interpolations many times ?
+
+	    /// now get x1, x2 and Q2 
+
+	    /// remember to use the correct x and Q2 transforms for the grid we 
+	    /// are remapping ...
+	    double tau = g->gettau(it);
+	    double Q2  = g->fQ2(tau);
+	    
+	    /// now fill this grid !!!
+	    fill( x1, x2, Q2, &weights[0] ); 
+
+	  } 
+	
+	} /// tau
+      } /// y2      
+    } /// y1
+
+#endif
+        
+}
+
+
+
+
+
+
+// debug print
+std::ostream&  appl::igrid::debug(std::ostream& s) const {
+    header(s);
+    for ( int i=0 ; i<m_Nproc ; i++ ) { 
+      s << "sub process " << i << std::endl;
+      s << "Nx1: " << Ny1() << "\tNx2: " << Ny2() << "\tNs: " << Ntau() << std::endl;        
+
+      double xmin = m_weight[i]->xaxis().min();
+      double xmax = m_weight[i]->xaxis().max();
+
+      double ymin = m_weight[i]->yaxis().min();
+      double ymax = m_weight[i]->yaxis().max();
+
+      double zmin = m_weight[i]->zaxis().min();
+      double zmax = m_weight[i]->zaxis().max();
+
+      s << "ranges: Q2  : " << fQ2(xmin) << " " << fQ2(xmax) << "   : " << xmin << " " << xmax << std::endl;
+      s << "ranges: x1  : " <<  fx(ymin) << " " <<  fx(ymax) << "   : " << ymin << " " << ymax << std::endl;
+      s << "ranges: x2  : " <<  fx(zmin) << " " <<  fx(zmax) << "   : " << zmin << " " << zmax << std::endl;
+
+    }
+    return s;
+}
+  
+
+std::ostream& appl::igrid::header(std::ostream& s) const { 
+  //  s << "interpolation orders: x=" << g.yorder() << "\tQ2=" << g.tauorder() << std::endl;  
+  s << "\t x:  [ "  << std::setw(2) 
+    //    << Ny1() << " ;\t" << std::setw(7) << fx(y1max())  << " - " << std::setw(7) << std::setprecision(6) << fx(y1min()) << "\t : " 
+    //    << Ny2() << " ;\t" << std::setw(7) << fx(y2max())  << " - " << std::setw(7) << std::setprecision(6) << fx(y2min()) 
+    << Ny1() << " :\t " << std::setw(7) << std::setprecision(6) << fx(y1max())  << " - " << std::setw(7) << std::setprecision(6) << fx(y1min()) << "\t : " 
+    << Ny2() << " :\t " << std::setw(7) << std::setprecision(6) << fx(y2max())  << " - " << std::setw(7) << std::setprecision(6) << fx(y2min()) 
+    << "\t : " << "\t( order=" << yorder()   << " ) ]"; 
+  s << "\t Q2: [ " 
+    << Ntau() << " :\t "  << std::setw(7) << std::setprecision(6) << fQ2(taumin()) << " - " << std::setw(7) << std::setprecision(6) << fQ2(taumax()) 
+    << "\t( order=" << tauorder() << "  - reweight " << ( m_reweight ? "on " : "off" ) << ") ]";
+  return s;
+}
+
+
+std::ostream& operator<<(std::ostream& s, const appl::igrid& g) {
+  g.header(s);
+  //  g.print(s);
+  return s;
+}
+
+
+
+
+void appl::igrid::run_thread() {
+ 
+  lock_proc();
+  mprocessing = false;
+  unlock_proc();
+
+
+  while( true ) { 
+
+    //    std::cout << "thread " << this << " " << mname << " waiting ... " << std::endl; 
+
+    /// suspend immediately
+    suspend();
+
+    if ( mterminate ) break;
+
+    //    std::cout << "thread " << this << " " << mname << " running ... " << std::endl; 
+
+    //    struct timeval mytimer = appl_timer_start();
+
+    /// put the actual convolution steps in here;
+    /// once round the convolution step, it puts itself to 
+    /// sleep again
+    
+    if ( parent()->calculation()==grid::AMCATNLO )  amc_convolute_internal();
+    else                                            convolute_internal();
+
+    //    std::printf("thread done: param: %lf internal\n", m_conv_param.dsigma );
+ 
+    /// after starting the processing step, the calling thread must 
+    /// wait for all the threads to finish
+
+    //    double mytime = appl_timer_stop(mytimer);
+
+    //    std::printf("thread done: param: %lf internal time %lf ms\n", m_conv_param.dsigma, mytime );
+
+    /// signal computation has finished
+
+    //    std::cout << "thread " << this << " " << mname << " done [" << mytime << " ms" << "]" << std::endl; 
+  }
+
+  suspend(false);
+  
+}
+
+
+
+
+void appl::igrid::clear() { 
+  for ( int ip=0 ; ip<m_Nproc ; ip++ ) {
+    if ( m_weight[ip]->empty() ) continue;
+    m_weight[ip]->clear();
+  }
+}
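
The add() and remap() methods above share one pattern: loop over every (tau, y1, y2) node of the source grid, map the node back to (x1, x2, Q2) using the source grid's own transforms, and refill the destination at those coordinates, so that grids with different binnings can be combined. A minimal self-contained sketch of that pattern, using plain std::vector storage and placeholder transforms rather than the appl API, is:

    #include <cmath>
    #include <cstdio>
    #include <vector>

    // placeholder transforms, standing in for igrid::fx() and igrid::fQ2()
    double fx ( double y )   { return std::exp(-y); }
    double fQ2( double tau ) { return std::exp(std::exp(tau)); }

    // evenly spaced node -> coordinate lookup (illustrative only)
    double node( double lo, double hi, int n, int i ) { return lo + i*(hi-lo)/(n-1); }

    int main() {
      const int Ntau = 3, Ny1 = 4, Ny2 = 4;

      // source weights indexed [itau][iy1][iy2], with a single non-empty node
      std::vector< std::vector< std::vector<double> > >
          w( Ntau, std::vector< std::vector<double> >( Ny1, std::vector<double>( Ny2, 0 ) ) );
      w[1][2][3] = 0.5;

      for ( int itau=0 ; itau<Ntau ; itau++ ) {
        double Q2 = fQ2( node( 0.5, 1.5, Ntau, itau ) );
        for ( int iy1=0 ; iy1<Ny1 ; iy1++ ) {
          double x1 = fx( node( 0.1, 5.0, Ny1, iy1 ) );
          for ( int iy2=0 ; iy2<Ny2 ; iy2++ ) {
            double weight = w[itau][iy1][iy2];
            if ( weight==0 ) continue;                   // skip empty nodes
            double x2 = fx( node( 0.1, 5.0, Ny2, iy2 ) );
            // the real code would call destination->fill( x1, x2, Q2, weights ) here
            std::printf( "refill x1=%g x2=%g Q2=%g w=%g\n", x1, x2, Q2, weight );
          }
        }
      }
      return 0;
    }
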
Index: applgrid/tags/applgrid-01-06-36/src/TFileVector.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/TFileVector.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/TFileVector.h	(revision 2010)
@@ -0,0 +1,82 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    TFileVector.h
+ **
+ **     @brief   root TObject class for writing std::vectors of std::vector<double>
+ **              to root files               
+ **
+ **     @author  mark sutton
+ **     @date    Sat Mar 15 19:49:16 GMT 2008 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: TFileVector.h, v0.0   Sat Mar 15 19:49:16 GMT 2008 sutt $
+ **
+ **/
+
+
+#ifndef  TFILEVECTOR_H
+#define  TFILEVECTOR_H
+
+#include "appl_grid/appl_root.h"
+
+#ifdef USEROOT
+
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include "TObjString.h"
+#include "TObject.h"
+
+
+// template<class T>
+class TFileVector : public TObjString { 
+
+public:
+  
+  TFileVector(const std::string& name="") : TObjString(name.c_str()) { } 
+
+  std::vector<std::vector<double> >&       histos() { return mv; }
+  const std::vector<std::vector<double> >& histos() const { return mv; }
+
+  // get the name
+  std::string  name() const { return GetName(); } 
+
+  // get a value 
+  std::vector<double>&       operator[](int i) { return mv[i]; }
+  const std::vector<double>& operator[](int i) const { return mv[i]; }
+  
+  // get the size
+  unsigned size()  const { return mv.size(); } 
+
+  // add an element
+  void add(const std::vector<double>& s) { 
+    mv.push_back(s); 
+  }
+
+private:
+  
+  std::vector<std::vector<double> > mv;
+
+  ClassDef(TFileVector, 1)
+
+}; 
+
+
+std::ostream& operator<<(std::ostream& s, const TFileVector& fv);
+
+
+#endif
+
+#endif  //  TFILEVECTOR_H 
+
+
+
+
+
+
+
+
+
+
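
A short usage sketch for the class above, assuming ROOT is installed, USEROOT is defined, and the ROOT dictionary for TFileVector has been generated as part of the build; the file and object names here are made up for illustration:

    #include <cstdio>
    #include <vector>

    #include "TFile.h"
    #include "TFileVector.h"

    int main() {
      TFileVector v( "normalisations" );        // name is stored via the TObjString base
      v.add( std::vector<double>( 3, 1.0 ) );   // first row
      v.add( std::vector<double>( 3, 2.0 ) );   // second row

      TFile f( "vectors.root", "RECREATE" );    // hypothetical output file
      v.Write( v.name().c_str() );              // TObject::Write(), needs the dictionary
      f.Close();

      std::printf( "%s: %u rows, first entry %g\n", v.name().c_str(), v.size(), v[0][0] );
      return 0;
    }
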
Index: applgrid/tags/applgrid-01-06-36/src/fappl_fitter.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/fappl_fitter.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/fappl_fitter.cxx	(revision 2010)
@@ -0,0 +1,152 @@
+/**
+ **     @file    fappl_fitter.cxx
+ **
+ **     @author  mark sutton
+ **     @date    Fri 25 Oct 2013 00:49:13 CEST 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: fappl_fitter.cxx, v0.0   Fri 25 Oct 2013 00:49:13 CEST sutt $
+ **
+ **/
+
+
+
+#include "fappl_grid.h"
+
+
+// /// externally defined alpha_s and pdf routines for fortran 
+// /// callable convolution wrapper
+// extern "C" double appl_fnalphas_(const double& Q);
+// extern "C" void   appl_fnpdf_(const double& x, const double& Q, double* xf);
+
+// extern "C" double fnalphas_(const double& Q) { return appl_fnalphas_( Q); }
+// extern "C" void   fnpdf_(const double& x, const double& Q, double* xf) { return appl_fnpdf_( x, Q, xf); } 
+
+
+/// create a grid
+// extern "C" void appl_bookgrid_(int& id, const int& Nobs, const double* binlims) { bookgrid_( id, Nobs, binlims); } 
+
+/// delete a grid
+extern "C" void appl_releasegrid_(int& id) { return releasegrid_( id ); }
+
+/// delete all grids
+extern "C" void appl_releasegrids_()       { return releasegrids_(); } 
+
+/// read a grid from a file
+extern "C" void appl_readgrid_(int& id, const char* s, int _len ) { return readgrid_( id,  s, _len ); } 
+
+/// write to a file 
+extern "C" void appl_writegrid_(int& id, const char* s, int _len) { return writegrid_( id, s, _len ); }  
+
+
+// /// add an entry 
+// extern "C" void appl_fillgrid_(int& id, 
+// 			       const int& ix1, const int& ix2, const int& iQ, 
+// 			       const int& iobs, 
+// 			       const double* w,
+// 			       const int& iorder ) { 
+//   return fillgrid_( id, ix1, ix2, iQ, iobs, w, iorder ); 
+// }
+
+// /// redefine the grid dimensions
+// extern "C" void appl_redefine_(int& id, 
+// 			       const int& iobs, const int& iorder, 
+// 			       const int& NQ2, const double& Q2min, const double& Q2max, 
+// 			       const int& Nx,  const double&  xmin, const double&  xmax) { 
+
+//   return redefine_( id, iobs, iorder, NQ2, Q2min, Q2max, Nx, xmin, xmax ); 
+// }
+
+
+/// get the ckm matrix for a grid
+extern "C" void appl_getckm_( int& id, double* ckm ) { return getckm_( id, ckm ); } 
+
+/// set the ckm matrix for a grid
+extern "C" void appl_setckm_( int& id, const double* ckm ) { return setckm_( id, ckm ); } 
+
+/// get number of observable bins for a grid 
+extern "C" int appl_getnbins_(int& id) { return getnbins_(id); }
+
+/// find the bin number corresponding to an ordinate value
+extern "C" int appl_getbinnumber_(int& id, double& x) { return getbinnumber_(id,x); } 
+
+/// get low edge for a grid/bin number
+extern "C" double appl_getbinlowedge_(int& id, int& bin) { return getbinlowedge_(id,bin); }
+
+/// get bin width for a grid/bin number
+extern "C" double appl_getbinwidth_(int& id, int& bin) { return getbinwidth_(id,bin); }
+
+
+/// do the convolution!! hooray!!
+
+extern "C" void appl_convolute_(int& id, double* data) { return convolute_( id, data ); }
+
+
+extern "C" void appl_convoluteorder_(int& id, int& nloops, double* data) { return convoluteorder_( id, nloops, data); }
+
+
+extern "C" void appl_convolutewrap_(int& id, double* data, 
+			       void (*pdf)(const double& , const double&, double* ),
+			       double (*alphas)(const double& ) ) {
+  return  convolutewrap_( id,  data, pdf, alphas ); 
+}
+
+
+
+
+
+extern "C" void appl_fullconvolutewrap_(int& id, double* data, 
+					void (*pdf)(const double& , const double&, double* ),
+					double (*alphas)(const double& ),
+					int& nloops,
+					double& rscale, double& fscale  ) {
+  return  fullconvolutewrap_( id,  data, pdf, alphas, nloops, rscale, fscale ); 
+}
+
+
+
+extern "C" void appl_escaleconvolute_(int& id, double* data, double& Escale) { 
+  return escaleconvolute_( id, data, Escale); 
+}
+
+
+
+extern "C" void appl_escaleconvolutewrap_(int& id, double* data, 
+					  void (*pdf)(const double& , const double&, double* ),
+					  double (*alphas)(const double& ), double& Escale ) { 
+  return escaleconvolutewrap_( id, data, pdf, alphas, Escale );
+}
+
+
+
+extern "C" void appl_escalefullconvolutewrap_(int& id, double* data, 
+					      void (*pdf)(const double& , const double&, double* ),
+					      double (*alphas)(const double& ),
+					      int& nloops,
+					      double& rscale, double& fscale,
+					      double& Escale ) { 
+  return escalefullconvolutewrap_( id, data, pdf, alphas, nloops, rscale, fscale, Escale );
+}
+
+
+
+/// print a grid
+extern "C" void appl_printgrid_(int& id) { return printgrid_( id ); }
+
+/// print all grids
+extern "C" void appl_printgrids_() { return printgrids_(); }
+
+/// print the grid documentation
+extern "C" void appl_printgriddoc_(int& id) { return printgriddoc_(id); } 
+
+/// create grids from fastnlo
+extern "C" void appl_readfastnlogrids_(  int* ids, const char* s, int _len ) { return readfastnlogrids_( ids, s, _len ); }
+
+
+/// grid std::map management
+
+extern "C" void appl_ngrids_(int& n) { return ngrids_( n ); }
+
+extern "C" void appl_gridids_(int* ids) { return gridids_( ids ); } 
+
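
The wrappers in this file forward to the routines declared in fappl_grid.h; the sketch below shows how a few of them might be driven directly from C++. The grid file name is a placeholder, and the convolution relies on externally supplied pdf and alpha_s routines (the commented-out fnpdf_/fnalphas_ declarations above), so this illustrates the calling convention rather than a complete program that runs on its own:

    #include <cstdio>
    #include <cstring>
    #include <vector>

    extern "C" void appl_readgrid_( int& id, const char* s, int len );
    extern "C" int  appl_getnbins_( int& id );
    extern "C" void appl_convolute_( int& id, double* data );
    extern "C" void appl_releasegrid_( int& id );

    int main() {
      int id = 1;                                        // user-chosen grid slot
      const char* name = "mygrid.root";                  // placeholder grid file
      appl_readgrid_( id, name, std::strlen(name) );     // trailing length, fortran style

      int nbins = appl_getnbins_( id );
      std::vector<double> xsec( nbins, 0 );
      appl_convolute_( id, &xsec[0] );                   // needs fnpdf_ / fnalphas_ linked in

      for ( int i=0 ; i<nbins ; i++ ) std::printf( "bin %d: %g\n", i, xsec[i] );

      appl_releasegrid_( id );
      return 0;
    }
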
Index: applgrid/tags/applgrid-01-06-36/src/equals.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/equals.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/equals.h	(revision 2010)
@@ -0,0 +1,37 @@
+/* emacs: this is -*- c++ -*- */
+/**
+ **   @file    equals.h        
+ **                   
+ **   @author  sutt
+ **   @date    Wed 30 Dec 2020 11:26:54 GMT
+ **
+ **   $Id: equals.h, v0.0   Wed 30 Dec 2020 11:26:54 GMT sutt $
+ **
+ **   Copyright (C) 2020 sutt (sutt@cern.ch)    
+ **
+ **/
+
+
+#ifndef  EQUALS_H
+#define  EQUALS_H
+
+#include <cmath>
+#include <algorithm>
+#include <limits>
+
+inline bool equals( double a, double b, double limit=1 ) { 
+  if ( a==b ) return true; 
+  if ( std::fabs(a-b)<(std::fabs(std::min(a,b))*std::numeric_limits<double>::epsilon()*limit ) ) return true; 
+  if ( ( a==0 || b==0 ) && ( std::fabs(a-b)<std::numeric_limits<double>::epsilon()*limit ) ) return true;
+  return false;
+}
+
+
+#endif  // EQUALS_H 
+
+
+
+
+
+
+
+
+
+
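
A quick illustration of the tolerant comparison above (the bare "equals.h" include path is an assumption; the header lives in src/):

    #include <cstdio>
    #include "equals.h"

    int main() {
      double a = 0.1 + 0.2;   // 0.30000000000000004...
      double b = 0.3;
      std::printf( "a==b           : %d\n", int( a==b ) );               // 0 : exact comparison fails
      std::printf( "equals(a,b)    : %d\n", int( equals( a, b ) ) );     // 1 : within the relative tolerance
      std::printf( "equals(a,b,10) : %d\n", int( equals( a, b, 10 ) ) ); // 1 : looser limit, still equal
      return 0;
    }
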
Index: applgrid/tags/applgrid-01-06-36/src/convert.cxx
===================================================================
--- applgrid/tags/applgrid-01-06-36/src/convert.cxx	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/src/convert.cxx	(revision 2010)
@@ -0,0 +1,66 @@
+
+
+#include "appl_grid/appl_grid.h"
+#include "amconfig.h"
+
+int Usage( std::ostream& s=std::cout, int i=0 ) { 
+  s << "Usage: applgrid-convert [options] <grid0> [grid1] [grid2] ...\n";
+  s << "\noption:\n";
+  s << "   -p | --pdf   value    \t record specified pdf to the grid\n"; 
+  s << "   -i | --ipdf  value    \t record specified ipdf from the set to the grid\n\n"; 
+  s << "   -h | --help           \t this help\n"; 
+  s << "\nSee " << PACKAGE_URL << " for more details\n"; 
+  s << "\nReport bugs to <" << PACKAGE_BUGREPORT << ">";
+  s << std::endl;
+  return i;
+}
+
+
+int main( int argc, char** argv ) { 
+
+  std::vector<std::string> grids;
+
+  std::string  pdf = "";
+  int         ipdf = 0;
+
+  for ( int i=1 ; i<argc ; i++ ) {
+    std::string arg = argv[i];
+    
+    if      ( arg.find("-")!=0 ) grids.push_back( arg );
+    else if ( arg=="-h" || arg=="--help" ) return Usage();
+    else if ( arg=="-p" || arg=="--pdf" ) { 
+      if ( ++i<argc ) pdf = argv[i];
+      else return Usage( std::cerr, -1);
+    }
+    else if ( arg=="-i" || arg=="--ipdf" ) { 
+      if ( ++i<argc ) ipdf = std::atoi(argv[i]);
+      else return Usage( std::cerr, -1);
+    }
+   
+  }
+  
+  for ( size_t i=0 ; i<grids.size() ; i++ ) {
+	
+    std::string applname = grids[i];
+    std::string newname  = grids[i];
+    
+    size_t pos = applname.find(".root");
+    
+    if ( pos==(applname.size()-5) ) newname.replace( pos, 5, ".appl" );
+    else { 
+      size_t pos = applname.find(".appl");
+      if ( pos==(applname.size()-5) ) newname.replace( pos, 5, ".root" );
+    }
+
+    appl::grid g( applname );
+    if ( pdf!="" ) { 
+      g.setGeneratedPDF( pdf );
+      g.setGeneratediPDF( ipdf );
+    }
+
+    std::cout << "converting: " << applname << " -> " << newname << std::endl; 
+    g.Write( newname );
+  }    
+  
+  return 0;
+} 
Index: applgrid/tags/applgrid-01-06-36/depcomp
===================================================================
--- applgrid/tags/applgrid-01-06-36/depcomp	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/depcomp	(revision 2010)
@@ -0,0 +1,791 @@
+#! /bin/sh
+# depcomp - compile a program generating dependencies as side-effects
+
+scriptversion=2013-05-30.07; # UTC
+
+# Copyright (C) 1999-2014 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Originally written by Alexandre Oliva <oliva@dcc.unicamp.br>.
+
+case $1 in
+  '')
+    echo "$0: No command.  Try '$0 --help' for more information." 1>&2
+    exit 1;
+    ;;
+  -h | --h*)
+    cat <<\EOF
+Usage: depcomp [--help] [--version] PROGRAM [ARGS]
+
+Run PROGRAMS ARGS to compile a file, generating dependencies
+as side-effects.
+
+Environment variables:
+  depmode     Dependency tracking mode.
+  source      Source file read by 'PROGRAMS ARGS'.
+  object      Object file output by 'PROGRAMS ARGS'.
+  DEPDIR      directory where to store dependencies.
+  depfile     Dependency file to output.
+  tmpdepfile  Temporary file to use when outputting dependencies.
+  libtool     Whether libtool is used (yes/no).
+
+Report bugs to <bug-automake@gnu.org>.
+EOF
+    exit $?
+    ;;
+  -v | --v*)
+    echo "depcomp $scriptversion"
+    exit $?
+    ;;
+esac
+
+# Get the directory component of the given path, and save it in the
+# global variables '$dir'.  Note that this directory component will
+# be either empty or ending with a '/' character.  This is deliberate.
+set_dir_from ()
+{
+  case $1 in
+    */*) dir=`echo "$1" | sed -e 's|/[^/]*$|/|'`;;
+      *) dir=;;
+  esac
+}
+
+# Get the suffix-stripped basename of the given path, and save it the
+# global variable '$base'.
+set_base_from ()
+{
+  base=`echo "$1" | sed -e 's|^.*/||' -e 's/\.[^.]*$//'`
+}
+
+# If no dependency file was actually created by the compiler invocation,
+# we still have to create a dummy depfile, to avoid errors with the
+# Makefile "include basename.Plo" scheme.
+make_dummy_depfile ()
+{
+  echo "#dummy" > "$depfile"
+}
+
+# Factor out some common post-processing of the generated depfile.
+# Requires the auxiliary global variable '$tmpdepfile' to be set.
+aix_post_process_depfile ()
+{
+  # If the compiler actually managed to produce a dependency file,
+  # post-process it.
+  if test -f "$tmpdepfile"; then
+    # Each line is of the form 'foo.o: dependency.h'.
+    # Do two passes, one to just change these to
+    #   $object: dependency.h
+    # and one to simply output
+    #   dependency.h:
+    # which is needed to avoid the deleted-header problem.
+    { sed -e "s,^.*\.[$lower]*:,$object:," < "$tmpdepfile"
+      sed -e "s,^.*\.[$lower]*:[$tab ]*,," -e 's,$,:,' < "$tmpdepfile"
+    } > "$depfile"
+    rm -f "$tmpdepfile"
+  else
+    make_dummy_depfile
+  fi
+}
+
+# A tabulation character.
+tab='	'
+# A newline character.
+nl='
+'
+# Character ranges might be problematic outside the C locale.
+# These definitions help.
+upper=ABCDEFGHIJKLMNOPQRSTUVWXYZ
+lower=abcdefghijklmnopqrstuvwxyz
+digits=0123456789
+alpha=${upper}${lower}
+
+if test -z "$depmode" || test -z "$source" || test -z "$object"; then
+  echo "depcomp: Variables source, object and depmode must be set" 1>&2
+  exit 1
+fi
+
+# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po.
+depfile=${depfile-`echo "$object" |
+  sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`}
+tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`}
+
+rm -f "$tmpdepfile"
+
+# Avoid interferences from the environment.
+gccflag= dashmflag=
+
+# Some modes work just like other modes, but use different flags.  We
+# parameterize here, but still list the modes in the big case below,
+# to make depend.m4 easier to write.  Note that we *cannot* use a case
+# here, because this file can only contain one case statement.
+if test "$depmode" = hp; then
+  # HP compiler uses -M and no extra arg.
+  gccflag=-M
+  depmode=gcc
+fi
+
+if test "$depmode" = dashXmstdout; then
+  # This is just like dashmstdout with a different argument.
+  dashmflag=-xM
+  depmode=dashmstdout
+fi
+
+cygpath_u="cygpath -u -f -"
+if test "$depmode" = msvcmsys; then
+  # This is just like msvisualcpp but w/o cygpath translation.
+  # Just convert the backslash-escaped backslashes to single forward
+  # slashes to satisfy depend.m4
+  cygpath_u='sed s,\\\\,/,g'
+  depmode=msvisualcpp
+fi
+
+if test "$depmode" = msvc7msys; then
+  # This is just like msvc7 but w/o cygpath translation.
+  # Just convert the backslash-escaped backslashes to single forward
+  # slashes to satisfy depend.m4
+  cygpath_u='sed s,\\\\,/,g'
+  depmode=msvc7
+fi
+
+if test "$depmode" = xlc; then
+  # IBM C/C++ Compilers xlc/xlC can output gcc-like dependency information.
+  gccflag=-qmakedep=gcc,-MF
+  depmode=gcc
+fi
+
+case "$depmode" in
+gcc3)
+## gcc 3 implements dependency tracking that does exactly what
+## we want.  Yay!  Note: for some reason libtool 1.4 doesn't like
+## it if -MD -MP comes after the -MF stuff.  Hmm.
+## Unfortunately, FreeBSD c89 acceptance of flags depends upon
+## the command line argument order; so add the flags where they
+## appear in depend2.am.  Note that the slowdown incurred here
+## affects only configure: in makefiles, %FASTDEP% shortcuts this.
+  for arg
+  do
+    case $arg in
+    -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;;
+    *)  set fnord "$@" "$arg" ;;
+    esac
+    shift # fnord
+    shift # $arg
+  done
+  "$@"
+  stat=$?
+  if test $stat -ne 0; then
+    rm -f "$tmpdepfile"
+    exit $stat
+  fi
+  mv "$tmpdepfile" "$depfile"
+  ;;
+
+gcc)
+## Note that this doesn't just cater to obsolete pre-3.x GCC compilers,
+## but also to in-use compilers like IBM xlc/xlC and the HP C compiler.
+## (see the conditional assignment to $gccflag above).
+## There are various ways to get dependency output from gcc.  Here's
+## why we pick this rather obscure method:
+## - Don't want to use -MD because we'd like the dependencies to end
+##   up in a subdir.  Having to rename by hand is ugly.
+##   (We might end up doing this anyway to support other compilers.)
+## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like
+##   -MM, not -M (despite what the docs say).  Also, it might not be
+##   supported by the other compilers which use the 'gcc' depmode.
+## - Using -M directly means running the compiler twice (even worse
+##   than renaming).
+  if test -z "$gccflag"; then
+    gccflag=-MD,
+  fi
+  "$@" -Wp,"$gccflag$tmpdepfile"
+  stat=$?
+  if test $stat -ne 0; then
+    rm -f "$tmpdepfile"
+    exit $stat
+  fi
+  rm -f "$depfile"
+  echo "$object : \\" > "$depfile"
+  # The second -e expression handles DOS-style file names with drive
+  # letters.
+  sed -e 's/^[^:]*: / /' \
+      -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile"
+## This next piece of magic avoids the "deleted header file" problem.
+## The problem is that when a header file which appears in a .P file
+## is deleted, the dependency causes make to die (because there is
+## typically no way to rebuild the header).  We avoid this by adding
+## dummy dependencies for each header file.  Too bad gcc doesn't do
+## this for us directly.
+## Some versions of gcc put a space before the ':'.  On the theory
+## that the space means something, we add a space to the output as
+## well.  hp depmode also adds that space, but also prefixes the VPATH
+## to the object.  Take care to not repeat it in the output.
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly.  Breaking it into two sed invocations is a workaround.
+  tr ' ' "$nl" < "$tmpdepfile" \
+    | sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \
+    | sed -e 's/$/ :/' >> "$depfile"
+  rm -f "$tmpdepfile"
+  ;;
+
+hp)
+  # This case exists only to let depend.m4 do its work.  It works by
+  # looking at the text of this script.  This case will never be run,
+  # since it is checked for above.
+  exit 1
+  ;;
+
+sgi)
+  if test "$libtool" = yes; then
+    "$@" "-Wp,-MDupdate,$tmpdepfile"
+  else
+    "$@" -MDupdate "$tmpdepfile"
+  fi
+  stat=$?
+  if test $stat -ne 0; then
+    rm -f "$tmpdepfile"
+    exit $stat
+  fi
+  rm -f "$depfile"
+
+  if test -f "$tmpdepfile"; then  # yes, the sourcefile depend on other files
+    echo "$object : \\" > "$depfile"
+    # Clip off the initial element (the dependent).  Don't try to be
+    # clever and replace this with sed code, as IRIX sed won't handle
+    # lines with more than a fixed number of characters (4096 in
+    # IRIX 6.2 sed, 8192 in IRIX 6.5).  We also remove comment lines;
+    # the IRIX cc adds comments like '#:fec' to the end of the
+    # dependency line.
+    tr ' ' "$nl" < "$tmpdepfile" \
+      | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' \
+      | tr "$nl" ' ' >> "$depfile"
+    echo >> "$depfile"
+    # The second pass generates a dummy entry for each header file.
+    tr ' ' "$nl" < "$tmpdepfile" \
+      | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \
+      >> "$depfile"
+  else
+    make_dummy_depfile
+  fi
+  rm -f "$tmpdepfile"
+  ;;
+
+xlc)
+  # This case exists only to let depend.m4 do its work.  It works by
+  # looking at the text of this script.  This case will never be run,
+  # since it is checked for above.
+  exit 1
+  ;;
+
+aix)
+  # The C for AIX Compiler uses -M and outputs the dependencies
+  # in a .u file.  In older versions, this file always lives in the
+  # current directory.  Also, the AIX compiler puts '$object:' at the
+  # start of each line; $object doesn't have directory information.
+  # Version 6 uses the directory in both cases.
+  set_dir_from "$object"
+  set_base_from "$object"
+  if test "$libtool" = yes; then
+    tmpdepfile1=$dir$base.u
+    tmpdepfile2=$base.u
+    tmpdepfile3=$dir.libs/$base.u
+    "$@" -Wc,-M
+  else
+    tmpdepfile1=$dir$base.u
+    tmpdepfile2=$dir$base.u
+    tmpdepfile3=$dir$base.u
+    "$@" -M
+  fi
+  stat=$?
+  if test $stat -ne 0; then
+    rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+    exit $stat
+  fi
+
+  for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+  do
+    test -f "$tmpdepfile" && break
+  done
+  aix_post_process_depfile
+  ;;
+
+tcc)
+  # tcc (Tiny C Compiler) understands '-MD -MF file' since version 0.9.26
+  # FIXME: That version still under development at the moment of writing.
+  #        Make that this statement remains true also for stable, released
+  #        versions.
+  # It will wrap lines (doesn't matter whether long or short) with a
+  # trailing '\', as in:
+  #
+  #   foo.o : \
+  #    foo.c \
+  #    foo.h \
+  #
+  # It will put a trailing '\' even on the last line, and will use leading
+  # spaces rather than leading tabs (at least since its commit 0394caf7
+  # "Emit spaces for -MD").
+  "$@" -MD -MF "$tmpdepfile"
+  stat=$?
+  if test $stat -ne 0; then
+    rm -f "$tmpdepfile"
+    exit $stat
+  fi
+  rm -f "$depfile"
+  # Each non-empty line is of the form 'foo.o : \' or ' dep.h \'.
+  # We have to change lines of the first kind to '$object: \'.
+  sed -e "s|.*:|$object :|" < "$tmpdepfile" > "$depfile"
+  # And for each line of the second kind, we have to emit a 'dep.h:'
+  # dummy dependency, to avoid the deleted-header problem.
+  sed -n -e 's|^  *\(.*\) *\\$|\1:|p' < "$tmpdepfile" >> "$depfile"
+  rm -f "$tmpdepfile"
+  ;;
+
+## The order of this option in the case statement is important, since the
+## shell code in configure will try each of these formats in the order
+## listed in this file.  A plain '-MD' option would be understood by many
+## compilers, so we must ensure this comes after the gcc and icc options.
+pgcc)
+  # Portland's C compiler understands '-MD'.
+  # Will always output deps to 'file.d' where file is the root name of the
+  # source file under compilation, even if file resides in a subdirectory.
+  # The object file name does not affect the name of the '.d' file.
+  # pgcc 10.2 will output
+  #    foo.o: sub/foo.c sub/foo.h
+  # and will wrap long lines using '\' :
+  #    foo.o: sub/foo.c ... \
+  #     sub/foo.h ... \
+  #     ...
+  set_dir_from "$object"
+  # Use the source, not the object, to determine the base name, since
+  # that's sadly what pgcc will do too.
+  set_base_from "$source"
+  tmpdepfile=$base.d
+
+  # For projects that build the same source file twice into different object
+  # files, the pgcc approach of using the *source* file root name can cause
+  # problems in parallel builds.  Use a locking strategy to avoid stomping on
+  # the same $tmpdepfile.
+  lockdir=$base.d-lock
+  trap "
+    echo '$0: caught signal, cleaning up...' >&2
+    rmdir '$lockdir'
+    exit 1
+  " 1 2 13 15
+  numtries=100
+  i=$numtries
+  while test $i -gt 0; do
+    # mkdir is a portable test-and-set.
+    if mkdir "$lockdir" 2>/dev/null; then
+      # This process acquired the lock.
+      "$@" -MD
+      stat=$?
+      # Release the lock.
+      rmdir "$lockdir"
+      break
+    else
+      # If the lock is being held by a different process, wait
+      # until the winning process is done or we timeout.
+      while test -d "$lockdir" && test $i -gt 0; do
+        sleep 1
+        i=`expr $i - 1`
+      done
+    fi
+    i=`expr $i - 1`
+  done
+  trap - 1 2 13 15
+  if test $i -le 0; then
+    echo "$0: failed to acquire lock after $numtries attempts" >&2
+    echo "$0: check lockdir '$lockdir'" >&2
+    exit 1
+  fi
+
+  if test $stat -ne 0; then
+    rm -f "$tmpdepfile"
+    exit $stat
+  fi
+  rm -f "$depfile"
+  # Each line is of the form `foo.o: dependent.h',
+  # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'.
+  # Do two passes, one to just change these to
+  # `$object: dependent.h' and one to simply `dependent.h:'.
+  sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile"
+  # Some versions of the HPUX 10.20 sed can't process this invocation
+  # correctly.  Breaking it into two sed invocations is a workaround.
+  sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" \
+    | sed -e 's/$/ :/' >> "$depfile"
+  rm -f "$tmpdepfile"
+  ;;
+
+hp2)
+  # The "hp" stanza above does not work with aCC (C++) and HP's ia64
+  # compilers, which have integrated preprocessors.  The correct option
+  # to use with these is +Maked; it writes dependencies to a file named
+  # 'foo.d', which lands next to the object file, wherever that
+  # happens to be.
+  # Much of this is similar to the tru64 case; see comments there.
+  set_dir_from  "$object"
+  set_base_from "$object"
+  if test "$libtool" = yes; then
+    tmpdepfile1=$dir$base.d
+    tmpdepfile2=$dir.libs/$base.d
+    "$@" -Wc,+Maked
+  else
+    tmpdepfile1=$dir$base.d
+    tmpdepfile2=$dir$base.d
+    "$@" +Maked
+  fi
+  stat=$?
+  if test $stat -ne 0; then
+     rm -f "$tmpdepfile1" "$tmpdepfile2"
+     exit $stat
+  fi
+
+  for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2"
+  do
+    test -f "$tmpdepfile" && break
+  done
+  if test -f "$tmpdepfile"; then
+    sed -e "s,^.*\.[$lower]*:,$object:," "$tmpdepfile" > "$depfile"
+    # Add 'dependent.h:' lines.
+    sed -ne '2,${
+               s/^ *//
+               s/ \\*$//
+               s/$/:/
+               p
+             }' "$tmpdepfile" >> "$depfile"
+  else
+    make_dummy_depfile
+  fi
+  rm -f "$tmpdepfile" "$tmpdepfile2"
+  ;;
+
+tru64)
+  # The Tru64 compiler uses -MD to generate dependencies as a side
+  # effect.  'cc -MD -o foo.o ...' puts the dependencies into 'foo.o.d'.
+  # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put
+  # dependencies in 'foo.d' instead, so we check for that too.
+  # Subdirectories are respected.
+  set_dir_from  "$object"
+  set_base_from "$object"
+
+  if test "$libtool" = yes; then
+    # Libtool generates 2 separate objects for the 2 libraries.  These
+    # two compilations output dependencies in $dir.libs/$base.o.d and
+    # in $dir$base.o.d.  We have to check for both files, because
+    # one of the two compilations can be disabled.  We should prefer
+    # $dir$base.o.d over $dir.libs/$base.o.d because the latter is
+    # automatically cleaned when .libs/ is deleted, while ignoring
+    # the former would cause a distcleancheck panic.
+    tmpdepfile1=$dir$base.o.d          # libtool 1.5
+    tmpdepfile2=$dir.libs/$base.o.d    # Likewise.
+    tmpdepfile3=$dir.libs/$base.d      # Compaq CCC V6.2-504
+    "$@" -Wc,-MD
+  else
+    tmpdepfile1=$dir$base.d
+    tmpdepfile2=$dir$base.d
+    tmpdepfile3=$dir$base.d
+    "$@" -MD
+  fi
+
+  stat=$?
+  if test $stat -ne 0; then
+    rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+    exit $stat
+  fi
+
+  for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+  do
+    test -f "$tmpdepfile" && break
+  done
+  # Same post-processing that is required for AIX mode.
+  aix_post_process_depfile
+  ;;
+
+msvc7)
+  if test "$libtool" = yes; then
+    showIncludes=-Wc,-showIncludes
+  else
+    showIncludes=-showIncludes
+  fi
+  "$@" $showIncludes > "$tmpdepfile"
+  stat=$?
+  grep -v '^Note: including file: ' "$tmpdepfile"
+  if test $stat -ne 0; then
+    rm -f "$tmpdepfile"
+    exit $stat
+  fi
+  rm -f "$depfile"
+  echo "$object : \\" > "$depfile"
+  # The first sed program below extracts the file names and escapes
+  # backslashes for cygpath.  The second sed program outputs the file
+  # name when reading, but also accumulates all include files in the
+  # hold buffer in order to output them again at the end.  This only
+  # works with sed implementations that can handle large buffers.
+  sed < "$tmpdepfile" -n '
+/^Note: including file:  *\(.*\)/ {
+  s//\1/
+  s/\\/\\\\/g
+  p
+}' | $cygpath_u | sort -u | sed -n '
+s/ /\\ /g
+s/\(.*\)/'"$tab"'\1 \\/p
+s/.\(.*\) \\/\1:/
+H
+$ {
+  s/.*/'"$tab"'/
+  G
+  p
+}' >> "$depfile"
+  echo >> "$depfile" # make sure the fragment doesn't end with a backslash
+  rm -f "$tmpdepfile"
+  ;;
+
+msvc7msys)
+  # This case exists only to let depend.m4 do its work.  It works by
+  # looking at the text of this script.  This case will never be run,
+  # since it is checked for above.
+  exit 1
+  ;;
+
+#nosideeffect)
+  # This comment above is used by automake to tell side-effect
+  # dependency tracking mechanisms from slower ones.
+
+dashmstdout)
+  # Important note: in order to support this mode, a compiler *must*
+  # always write the preprocessed file to stdout, regardless of -o.
+  "$@" || exit $?
+
+  # Remove the call to Libtool.
+  if test "$libtool" = yes; then
+    while test "X$1" != 'X--mode=compile'; do
+      shift
+    done
+    shift
+  fi
+
+  # Remove '-o $object'.
+  IFS=" "
+  for arg
+  do
+    case $arg in
+    -o)
+      shift
+      ;;
+    $object)
+      shift
+      ;;
+    *)
+      set fnord "$@" "$arg"
+      shift # fnord
+      shift # $arg
+      ;;
+    esac
+  done
+
+  test -z "$dashmflag" && dashmflag=-M
+  # Require at least two characters before searching for ':'
+  # in the target name.  This is to cope with DOS-style filenames:
+  # a dependency such as 'c:/foo/bar' could be seen as target 'c' otherwise.
+  "$@" $dashmflag |
+    sed "s|^[$tab ]*[^:$tab ][^:][^:]*:[$tab ]*|$object: |" > "$tmpdepfile"
+  rm -f "$depfile"
+  cat < "$tmpdepfile" > "$depfile"
+  # Some versions of the HPUX 10.20 sed can't process this sed invocation
+  # correctly.  Breaking it into two sed invocations is a workaround.
+  tr ' ' "$nl" < "$tmpdepfile" \
+    | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \
+    | sed -e 's/$/ :/' >> "$depfile"
+  rm -f "$tmpdepfile"
+  ;;
+
+dashXmstdout)
+  # This case only exists to satisfy depend.m4.  It is never actually
+  # run, as this mode is specially recognized in the preamble.
+  exit 1
+  ;;
+
+makedepend)
+  "$@" || exit $?
+  # Remove any Libtool call
+  if test "$libtool" = yes; then
+    while test "X$1" != 'X--mode=compile'; do
+      shift
+    done
+    shift
+  fi
+  # X makedepend
+  shift
+  cleared=no eat=no
+  for arg
+  do
+    case $cleared in
+    no)
+      set ""; shift
+      cleared=yes ;;
+    esac
+    if test $eat = yes; then
+      eat=no
+      continue
+    fi
+    case "$arg" in
+    -D*|-I*)
+      set fnord "$@" "$arg"; shift ;;
+    # Strip any option that makedepend may not understand.  Remove
+    # the object too, otherwise makedepend will parse it as a source file.
+    -arch)
+      eat=yes ;;
+    -*|$object)
+      ;;
+    *)
+      set fnord "$@" "$arg"; shift ;;
+    esac
+  done
+  obj_suffix=`echo "$object" | sed 's/^.*\././'`
+  touch "$tmpdepfile"
+  ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@"
+  rm -f "$depfile"
+  # makedepend may prepend the VPATH from the source file name to the object.
+  # No need to regex-escape $object, excess matching of '.' is harmless.
+  sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile"
+  # Some versions of the HPUX 10.20 sed can't process the last invocation
+  # correctly.  Breaking it into two sed invocations is a workaround.
+  sed '1,2d' "$tmpdepfile" \
+    | tr ' ' "$nl" \
+    | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \
+    | sed -e 's/$/ :/' >> "$depfile"
+  rm -f "$tmpdepfile" "$tmpdepfile".bak
+  ;;
+
+cpp)
+  # Important note: in order to support this mode, a compiler *must*
+  # always write the preprocessed file to stdout.
+  "$@" || exit $?
+
+  # Remove the call to Libtool.
+  if test "$libtool" = yes; then
+    while test "X$1" != 'X--mode=compile'; do
+      shift
+    done
+    shift
+  fi
+
+  # Remove '-o $object'.
+  IFS=" "
+  for arg
+  do
+    case $arg in
+    -o)
+      shift
+      ;;
+    $object)
+      shift
+      ;;
+    *)
+      set fnord "$@" "$arg"
+      shift # fnord
+      shift # $arg
+      ;;
+    esac
+  done
+
+  "$@" -E \
+    | sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \
+             -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \
+    | sed '$ s: \\$::' > "$tmpdepfile"
+  rm -f "$depfile"
+  echo "$object : \\" > "$depfile"
+  cat < "$tmpdepfile" >> "$depfile"
+  sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile"
+  rm -f "$tmpdepfile"
+  ;;
+
+msvisualcpp)
+  # Important note: in order to support this mode, a compiler *must*
+  # always write the preprocessed file to stdout.
+  "$@" || exit $?
+
+  # Remove the call to Libtool.
+  if test "$libtool" = yes; then
+    while test "X$1" != 'X--mode=compile'; do
+      shift
+    done
+    shift
+  fi
+
+  IFS=" "
+  for arg
+  do
+    case "$arg" in
+    -o)
+      shift
+      ;;
+    $object)
+      shift
+      ;;
+    "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI")
+        set fnord "$@"
+        shift
+        shift
+        ;;
+    *)
+        set fnord "$@" "$arg"
+        shift
+        shift
+        ;;
+    esac
+  done
+  "$@" -E 2>/dev/null |
+  sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile"
+  rm -f "$depfile"
+  echo "$object : \\" > "$depfile"
+  sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::'"$tab"'\1 \\:p' >> "$depfile"
+  echo "$tab" >> "$depfile"
+  sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile"
+  rm -f "$tmpdepfile"
+  ;;
+
+msvcmsys)
+  # This case exists only to let depend.m4 do its work.  It works by
+  # looking at the text of this script.  This case will never be run,
+  # since it is checked for above.
+  exit 1
+  ;;
+
+none)
+  exec "$@"
+  ;;
+
+*)
+  echo "Unknown depmode $depmode" 1>&2
+  exit 1
+  ;;
+esac
+
+exit 0
+
+# Local Variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:

Property changes on: applgrid/tags/applgrid-01-06-36/depcomp
___________________________________________________________________
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Index: applgrid/tags/applgrid-01-06-36/amconfig.in
===================================================================
--- applgrid/tags/applgrid-01-06-36/amconfig.in	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/amconfig.in	(revision 2010)
@@ -0,0 +1,81 @@
+/* amconfig.in.  Generated from configure.ac by autoheader.  */
+
+/* Define to 1 if the CERN ROOT package is installed */
+#undef HAVE_CERNROOT
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#undef HAVE_DLFCN_H
+
+/* Define to 1 if hoppet is installed */
+#undef HAVE_HOPPET
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#undef HAVE_INTTYPES_H
+
+/* Define to 1 if LHAPDF is installed */
+#undef HAVE_LHAPDF
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#undef HAVE_STDINT_H
+
+/* Define to 1 if you have the <stdio.h> header file. */
+#undef HAVE_STDIO_H
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#undef HAVE_STDLIB_H
+
+/* Define to 1 if you have the <strings.h> header file. */
+#undef HAVE_STRINGS_H
+
+/* Define to 1 if you have the <string.h> header file. */
+#undef HAVE_STRING_H
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#undef HAVE_SYS_STAT_H
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#undef HAVE_SYS_TYPES_H
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#undef HAVE_UNISTD_H
+
+/* Define to 1 if you have the <utime.h> header file. */
+#undef HAVE_UTIME_H
+
+/* Define to 1 if `utime(file, NULL)' sets file's timestamp to the present. */
+#undef HAVE_UTIME_NULL
+
+/* LHAPDF version */
+#undef LHAPDF_VERSION
+
+/* Define to the sub-directory where libtool stores uninstalled libraries. */
+#undef LT_OBJDIR
+
+/* Name of package */
+#undef PACKAGE
+
+/* Define to the address where bug reports for this package should be sent. */
+#undef PACKAGE_BUGREPORT
+
+/* Define to the full name of this package. */
+#undef PACKAGE_NAME
+
+/* Define to the full name and version of this package. */
+#undef PACKAGE_STRING
+
+/* Define to the one symbol short name of this package. */
+#undef PACKAGE_TARNAME
+
+/* Define to the home page for this package. */
+#undef PACKAGE_URL
+
+/* Define to the version of this package. */
+#undef PACKAGE_VERSION
+
+/* Define to 1 if all of the C90 standard headers exist (not just the ones
+   required in a freestanding environment). This macro is provided for
+   backward compatibility; new code need not use it. */
+#undef STDC_HEADERS
+
+/* Version number of package */
+#undef VERSION
Index: applgrid/tags/applgrid-01-06-36/compile
===================================================================
--- applgrid/tags/applgrid-01-06-36/compile	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/compile	(revision 2010)
@@ -0,0 +1,347 @@
+#! /bin/sh
+# Wrapper for compilers which do not understand '-c -o'.
+
+scriptversion=2012-10-14.11; # UTC
+
+# Copyright (C) 1999-2014 Free Software Foundation, Inc.
+# Written by Tom Tromey <tromey@cygnus.com>.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# This file is maintained in Automake, please report
+# bugs to <bug-automake@gnu.org> or send patches to
+# <automake-patches@gnu.org>.
+
+nl='
+'
+
+# We need space, tab and new line, in precisely that order.  Quoting is
+# there to prevent tools from complaining about whitespace usage.
+IFS=" ""	$nl"
+
+file_conv=
+
+# func_file_conv build_file lazy
+# Convert a $build file to $host form and store it in $file
+# Currently only supports Windows hosts. If the determined conversion
+# type is listed in (the comma separated) LAZY, no conversion will
+# take place.
+func_file_conv ()
+{
+  file=$1
+  case $file in
+    / | /[!/]*) # absolute file, and not a UNC file
+      if test -z "$file_conv"; then
+	# lazily determine how to convert abs files
+	case `uname -s` in
+	  MINGW*)
+	    file_conv=mingw
+	    ;;
+	  CYGWIN*)
+	    file_conv=cygwin
+	    ;;
+	  *)
+	    file_conv=wine
+	    ;;
+	esac
+      fi
+      case $file_conv/,$2, in
+	*,$file_conv,*)
+	  ;;
+	mingw/*)
+	  file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'`
+	  ;;
+	cygwin/*)
+	  file=`cygpath -m "$file" || echo "$file"`
+	  ;;
+	wine/*)
+	  file=`winepath -w "$file" || echo "$file"`
+	  ;;
+      esac
+      ;;
+  esac
+}
+
+# func_cl_dashL linkdir
+# Make cl look for libraries in LINKDIR
+func_cl_dashL ()
+{
+  func_file_conv "$1"
+  if test -z "$lib_path"; then
+    lib_path=$file
+  else
+    lib_path="$lib_path;$file"
+  fi
+  linker_opts="$linker_opts -LIBPATH:$file"
+}
+
+# func_cl_dashl library
+# Do a library search-path lookup for cl
+func_cl_dashl ()
+{
+  lib=$1
+  found=no
+  save_IFS=$IFS
+  IFS=';'
+  for dir in $lib_path $LIB
+  do
+    IFS=$save_IFS
+    if $shared && test -f "$dir/$lib.dll.lib"; then
+      found=yes
+      lib=$dir/$lib.dll.lib
+      break
+    fi
+    if test -f "$dir/$lib.lib"; then
+      found=yes
+      lib=$dir/$lib.lib
+      break
+    fi
+    if test -f "$dir/lib$lib.a"; then
+      found=yes
+      lib=$dir/lib$lib.a
+      break
+    fi
+  done
+  IFS=$save_IFS
+
+  if test "$found" != yes; then
+    lib=$lib.lib
+  fi
+}
+
+# func_cl_wrapper cl arg...
+# Adjust compile command to suit cl
+func_cl_wrapper ()
+{
+  # Assume a capable shell
+  lib_path=
+  shared=:
+  linker_opts=
+  for arg
+  do
+    if test -n "$eat"; then
+      eat=
+    else
+      case $1 in
+	-o)
+	  # configure might choose to run compile as 'compile cc -o foo foo.c'.
+	  eat=1
+	  case $2 in
+	    *.o | *.[oO][bB][jJ])
+	      func_file_conv "$2"
+	      set x "$@" -Fo"$file"
+	      shift
+	      ;;
+	    *)
+	      func_file_conv "$2"
+	      set x "$@" -Fe"$file"
+	      shift
+	      ;;
+	  esac
+	  ;;
+	-I)
+	  eat=1
+	  func_file_conv "$2" mingw
+	  set x "$@" -I"$file"
+	  shift
+	  ;;
+	-I*)
+	  func_file_conv "${1#-I}" mingw
+	  set x "$@" -I"$file"
+	  shift
+	  ;;
+	-l)
+	  eat=1
+	  func_cl_dashl "$2"
+	  set x "$@" "$lib"
+	  shift
+	  ;;
+	-l*)
+	  func_cl_dashl "${1#-l}"
+	  set x "$@" "$lib"
+	  shift
+	  ;;
+	-L)
+	  eat=1
+	  func_cl_dashL "$2"
+	  ;;
+	-L*)
+	  func_cl_dashL "${1#-L}"
+	  ;;
+	-static)
+	  shared=false
+	  ;;
+	-Wl,*)
+	  arg=${1#-Wl,}
+	  save_ifs="$IFS"; IFS=','
+	  for flag in $arg; do
+	    IFS="$save_ifs"
+	    linker_opts="$linker_opts $flag"
+	  done
+	  IFS="$save_ifs"
+	  ;;
+	-Xlinker)
+	  eat=1
+	  linker_opts="$linker_opts $2"
+	  ;;
+	-*)
+	  set x "$@" "$1"
+	  shift
+	  ;;
+	*.cc | *.CC | *.cxx | *.CXX | *.[cC]++)
+	  func_file_conv "$1"
+	  set x "$@" -Tp"$file"
+	  shift
+	  ;;
+	*.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO])
+	  func_file_conv "$1" mingw
+	  set x "$@" "$file"
+	  shift
+	  ;;
+	*)
+	  set x "$@" "$1"
+	  shift
+	  ;;
+      esac
+    fi
+    shift
+  done
+  if test -n "$linker_opts"; then
+    linker_opts="-link$linker_opts"
+  fi
+  exec "$@" $linker_opts
+  exit 1
+}
+
+eat=
+
+case $1 in
+  '')
+     echo "$0: No command.  Try '$0 --help' for more information." 1>&2
+     exit 1;
+     ;;
+  -h | --h*)
+    cat <<\EOF
+Usage: compile [--help] [--version] PROGRAM [ARGS]
+
+Wrapper for compilers which do not understand '-c -o'.
+Remove '-o dest.o' from ARGS, run PROGRAM with the remaining
+arguments, and rename the output as expected.
+
+If you are trying to build a whole package this is not the
+right script to run: please start by reading the file 'INSTALL'.
+
+Report bugs to <bug-automake@gnu.org>.
+EOF
+    exit $?
+    ;;
+  -v | --v*)
+    echo "compile $scriptversion"
+    exit $?
+    ;;
+  cl | *[/\\]cl | cl.exe | *[/\\]cl.exe )
+    func_cl_wrapper "$@"      # Doesn't return...
+    ;;
+esac
+
+ofile=
+cfile=
+
+for arg
+do
+  if test -n "$eat"; then
+    eat=
+  else
+    case $1 in
+      -o)
+	# configure might choose to run compile as 'compile cc -o foo foo.c'.
+	# So we strip '-o arg' only if arg is an object.
+	eat=1
+	case $2 in
+	  *.o | *.obj)
+	    ofile=$2
+	    ;;
+	  *)
+	    set x "$@" -o "$2"
+	    shift
+	    ;;
+	esac
+	;;
+      *.c)
+	cfile=$1
+	set x "$@" "$1"
+	shift
+	;;
+      *)
+	set x "$@" "$1"
+	shift
+	;;
+    esac
+  fi
+  shift
+done
+
+if test -z "$ofile" || test -z "$cfile"; then
+  # If no '-o' option was seen then we might have been invoked from a
+  # pattern rule where we don't need one.  That is ok -- this is a
+  # normal compilation that the losing compiler can handle.  If no
+  # '.c' file was seen then we are probably linking.  That is also
+  # ok.
+  exec "$@"
+fi
+
+# Name of file we expect compiler to create.
+cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'`
+
+# Create the lock directory.
+# Note: use '[/\\:.-]' here to ensure that we don't use the same name
+# that we are using for the .o file.  Also, base the name on the expected
+# object file name, since that is what matters with a parallel build.
+lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d
+while true; do
+  if mkdir "$lockdir" >/dev/null 2>&1; then
+    break
+  fi
+  sleep 1
+done
+# FIXME: race condition here if user kills between mkdir and trap.
+trap "rmdir '$lockdir'; exit 1" 1 2 15
+
+# Run the compile.
+"$@"
+ret=$?
+
+if test -f "$cofile"; then
+  test "$cofile" = "$ofile" || mv "$cofile" "$ofile"
+elif test -f "${cofile}bj"; then
+  test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile"
+fi
+
+rmdir "$lockdir"
+exit $ret
+
+# Local Variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:

Property changes on: applgrid/tags/applgrid-01-06-36/compile
___________________________________________________________________
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Index: applgrid/tags/applgrid-01-06-36/config.guess
===================================================================
--- applgrid/tags/applgrid-01-06-36/config.guess	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/config.guess	(revision 2010)
@@ -0,0 +1,1421 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+#   Copyright 1992-2014 Free Software Foundation, Inc.
+
+timestamp='2014-11-04'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that
+# program.  This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
+#
+# Originally written by Per Bothner; maintained since 2000 by Ben Elliston.
+#
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+#
+# Please send patches to <config-patches@gnu.org>.
+
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Operation modes:
+  -h, --help         print this help, then exit
+  -t, --time-stamp   print date of last modification, then exit
+  -v, --version      print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright 1992-2014 Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions.  There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+  case $1 in
+    --time-stamp | --time* | -t )
+       echo "$timestamp" ; exit ;;
+    --version | -v )
+       echo "$version" ; exit ;;
+    --help | --h* | -h )
+       echo "$usage"; exit ;;
+    -- )     # Stop option processing
+       shift; break ;;
+    - )	# Use stdin as input.
+       break ;;
+    -* )
+       echo "$me: invalid option $1$help" >&2
+       exit 1 ;;
+    * )
+       break ;;
+  esac
+done
+
+if test $# != 0; then
+  echo "$me: too many arguments$help" >&2
+  exit 1
+fi
+
+trap 'exit 1' 1 2 15
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# Portable tmp directory creation inspired by the Autoconf team.
+
+set_cc_for_build='
+trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
+trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
+: ${TMPDIR=/tmp} ;
+ { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+ { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
+ { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
+ { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
+dummy=$tmp/dummy ;
+tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
+case $CC_FOR_BUILD,$HOST_CC,$CC in
+ ,,)    echo "int x;" > $dummy.c ;
+	for c in cc gcc c89 c99 ; do
+	  if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
+	     CC_FOR_BUILD="$c"; break ;
+	  fi ;
+	done ;
+	if test x"$CC_FOR_BUILD" = x ; then
+	  CC_FOR_BUILD=no_compiler_found ;
+	fi
+	;;
+ ,,*)   CC_FOR_BUILD=$CC ;;
+ ,*,*)  CC_FOR_BUILD=$HOST_CC ;;
+esac ; set_cc_for_build= ;'
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 1994-08-24)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+	PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null`  || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+case "${UNAME_SYSTEM}" in
+Linux|GNU|GNU/*)
+	# If the system lacks a compiler, then just pick glibc.
+	# We could probably try harder.
+	LIBC=gnu
+
+	eval $set_cc_for_build
+	cat <<-EOF > $dummy.c
+	#include <features.h>
+	#if defined(__UCLIBC__)
+	LIBC=uclibc
+	#elif defined(__dietlibc__)
+	LIBC=dietlibc
+	#else
+	LIBC=gnu
+	#endif
+	EOF
+	eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`
+	;;
+esac
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+    *:NetBSD:*:*)
+	# NetBSD (nbsd) targets should (where applicable) match one or
+	# more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
+	# *-*-netbsdecoff* and *-*-netbsd*.  For targets that recently
+	# switched to ELF, *-*-netbsd* would select the old
+	# object file format.  This provides both forward
+	# compatibility and a consistent mechanism for selecting the
+	# object file format.
+	#
+	# Note: NetBSD doesn't particularly care about the vendor
+	# portion of the name.  We always set it to "unknown".
+	sysctl="sysctl -n hw.machine_arch"
+	UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
+	    /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
+	case "${UNAME_MACHINE_ARCH}" in
+	    armeb) machine=armeb-unknown ;;
+	    arm*) machine=arm-unknown ;;
+	    sh3el) machine=shl-unknown ;;
+	    sh3eb) machine=sh-unknown ;;
+	    sh5el) machine=sh5le-unknown ;;
+	    *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+	esac
+	# The Operating System including object format, if it has switched
+	# to ELF recently, or will in the future.
+	case "${UNAME_MACHINE_ARCH}" in
+	    arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+		eval $set_cc_for_build
+		if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+			| grep -q __ELF__
+		then
+		    # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+		    # Return netbsd for either.  FIX?
+		    os=netbsd
+		else
+		    os=netbsdelf
+		fi
+		;;
+	    *)
+		os=netbsd
+		;;
+	esac
+	# The OS release
+	# Debian GNU/NetBSD machines have a different userland, and
+	# thus, need a distinct triplet. However, they do not need
+	# kernel version information, so it can be replaced with a
+	# suitable tag, in the style of linux-gnu.
+	case "${UNAME_VERSION}" in
+	    Debian*)
+		release='-gnu'
+		;;
+	    *)
+		release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+		;;
+	esac
+	# Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+	# contains redundant information, the shorter form:
+	# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+	echo "${machine}-${os}${release}"
+	exit ;;
+    *:Bitrig:*:*)
+	UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
+	echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE}
+	exit ;;
+    *:OpenBSD:*:*)
+	UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
+	echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
+	exit ;;
+    *:ekkoBSD:*:*)
+	echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
+	exit ;;
+    *:SolidBSD:*:*)
+	echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
+	exit ;;
+    macppc:MirBSD:*:*)
+	echo powerpc-unknown-mirbsd${UNAME_RELEASE}
+	exit ;;
+    *:MirBSD:*:*)
+	echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
+	exit ;;
+    alpha:OSF1:*:*)
+	case $UNAME_RELEASE in
+	*4.0)
+		UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+		;;
+	*5.*)
+		UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+		;;
+	esac
+	# According to Compaq, /usr/sbin/psrinfo has been available on
+	# OSF/1 and Tru64 systems produced since 1995.  I hope that
+	# covers most systems running today.  This code pipes the CPU
+	# types through head -n 1, so we only detect the type of CPU 0.
+	ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^  The alpha \(.*\) processor.*$/\1/p' | head -n 1`
+	case "$ALPHA_CPU_TYPE" in
+	    "EV4 (21064)")
+		UNAME_MACHINE="alpha" ;;
+	    "EV4.5 (21064)")
+		UNAME_MACHINE="alpha" ;;
+	    "LCA4 (21066/21068)")
+		UNAME_MACHINE="alpha" ;;
+	    "EV5 (21164)")
+		UNAME_MACHINE="alphaev5" ;;
+	    "EV5.6 (21164A)")
+		UNAME_MACHINE="alphaev56" ;;
+	    "EV5.6 (21164PC)")
+		UNAME_MACHINE="alphapca56" ;;
+	    "EV5.7 (21164PC)")
+		UNAME_MACHINE="alphapca57" ;;
+	    "EV6 (21264)")
+		UNAME_MACHINE="alphaev6" ;;
+	    "EV6.7 (21264A)")
+		UNAME_MACHINE="alphaev67" ;;
+	    "EV6.8CB (21264C)")
+		UNAME_MACHINE="alphaev68" ;;
+	    "EV6.8AL (21264B)")
+		UNAME_MACHINE="alphaev68" ;;
+	    "EV6.8CX (21264D)")
+		UNAME_MACHINE="alphaev68" ;;
+	    "EV6.9A (21264/EV69A)")
+		UNAME_MACHINE="alphaev69" ;;
+	    "EV7 (21364)")
+		UNAME_MACHINE="alphaev7" ;;
+	    "EV7.9 (21364A)")
+		UNAME_MACHINE="alphaev79" ;;
+	esac
+	# A Pn.n version is a patched version.
+	# A Vn.n version is a released version.
+	# A Tn.n version is a released field test version.
+	# A Xn.n version is an unreleased experimental baselevel.
+	# 1.2 uses "1.2" for uname -r.
+	echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+	# Reset EXIT trap before exiting to avoid spurious non-zero exit code.
+	exitcode=$?
+	trap '' 0
+	exit $exitcode ;;
+    Alpha\ *:Windows_NT*:*)
+	# How do we know it's Interix rather than the generic POSIX subsystem?
+	# Should we change UNAME_MACHINE based on the output of uname instead
+	# of the specific Alpha model?
+	echo alpha-pc-interix
+	exit ;;
+    21064:Windows_NT:50:3)
+	echo alpha-dec-winnt3.5
+	exit ;;
+    Amiga*:UNIX_System_V:4.0:*)
+	echo m68k-unknown-sysv4
+	exit ;;
+    *:[Aa]miga[Oo][Ss]:*:*)
+	echo ${UNAME_MACHINE}-unknown-amigaos
+	exit ;;
+    *:[Mm]orph[Oo][Ss]:*:*)
+	echo ${UNAME_MACHINE}-unknown-morphos
+	exit ;;
+    *:OS/390:*:*)
+	echo i370-ibm-openedition
+	exit ;;
+    *:z/VM:*:*)
+	echo s390-ibm-zvmoe
+	exit ;;
+    *:OS400:*:*)
+	echo powerpc-ibm-os400
+	exit ;;
+    arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+	echo arm-acorn-riscix${UNAME_RELEASE}
+	exit ;;
+    arm*:riscos:*:*|arm*:RISCOS:*:*)
+	echo arm-unknown-riscos
+	exit ;;
+    SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+	echo hppa1.1-hitachi-hiuxmpp
+	exit ;;
+    Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+	# akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+	if test "`(/bin/universe) 2>/dev/null`" = att ; then
+		echo pyramid-pyramid-sysv3
+	else
+		echo pyramid-pyramid-bsd
+	fi
+	exit ;;
+    NILE*:*:*:dcosx)
+	echo pyramid-pyramid-svr4
+	exit ;;
+    DRS?6000:unix:4.0:6*)
+	echo sparc-icl-nx6
+	exit ;;
+    DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
+	case `/usr/bin/uname -p` in
+	    sparc) echo sparc-icl-nx7; exit ;;
+	esac ;;
+    s390x:SunOS:*:*)
+	echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    sun4H:SunOS:5.*:*)
+	echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+	echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
+	echo i386-pc-auroraux${UNAME_RELEASE}
+	exit ;;
+    i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
+	eval $set_cc_for_build
+	SUN_ARCH="i386"
+	# If there is a compiler, see if it is configured for 64-bit objects.
+	# Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
+	# This test works for both compilers.
+	if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+	    if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+		(CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+		grep IS_64BIT_ARCH >/dev/null
+	    then
+		SUN_ARCH="x86_64"
+	    fi
+	fi
+	echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    sun4*:SunOS:6*:*)
+	# According to config.sub, this is the proper way to canonicalize
+	# SunOS6.  Hard to guess exactly what SunOS6 will be like, but
+	# it's likely to be more like Solaris than SunOS4.
+	echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    sun4*:SunOS:*:*)
+	case "`/usr/bin/arch -k`" in
+	    Series*|S4*)
+		UNAME_RELEASE=`uname -v`
+		;;
+	esac
+	# Japanese Language versions have a version number like `4.1.3-JL'.
+	echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+	exit ;;
+    sun3*:SunOS:*:*)
+	echo m68k-sun-sunos${UNAME_RELEASE}
+	exit ;;
+    sun*:*:4.2BSD:*)
+	UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+	test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+	case "`/bin/arch`" in
+	    sun3)
+		echo m68k-sun-sunos${UNAME_RELEASE}
+		;;
+	    sun4)
+		echo sparc-sun-sunos${UNAME_RELEASE}
+		;;
+	esac
+	exit ;;
+    aushp:SunOS:*:*)
+	echo sparc-auspex-sunos${UNAME_RELEASE}
+	exit ;;
+    # The situation for MiNT is a little confusing.  The machine name
+    # can be virtually everything (everything which is not
+    # "atarist" or "atariste" at least should have a processor
+    # > m68000).  The system name ranges from "MiNT" over "FreeMiNT"
+    # to the lowercase version "mint" (or "freemint").  Finally
+    # the system name "TOS" denotes a system which is actually not
+    # MiNT.  But MiNT is downward compatible to TOS, so this should
+    # be no problem.
+    atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+	echo m68k-atari-mint${UNAME_RELEASE}
+	exit ;;
+    atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+	echo m68k-atari-mint${UNAME_RELEASE}
+	exit ;;
+    *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+	echo m68k-atari-mint${UNAME_RELEASE}
+	exit ;;
+    milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+	echo m68k-milan-mint${UNAME_RELEASE}
+	exit ;;
+    hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+	echo m68k-hades-mint${UNAME_RELEASE}
+	exit ;;
+    *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+	echo m68k-unknown-mint${UNAME_RELEASE}
+	exit ;;
+    m68k:machten:*:*)
+	echo m68k-apple-machten${UNAME_RELEASE}
+	exit ;;
+    powerpc:machten:*:*)
+	echo powerpc-apple-machten${UNAME_RELEASE}
+	exit ;;
+    RISC*:Mach:*:*)
+	echo mips-dec-mach_bsd4.3
+	exit ;;
+    RISC*:ULTRIX:*:*)
+	echo mips-dec-ultrix${UNAME_RELEASE}
+	exit ;;
+    VAX*:ULTRIX*:*:*)
+	echo vax-dec-ultrix${UNAME_RELEASE}
+	exit ;;
+    2020:CLIX:*:* | 2430:CLIX:*:*)
+	echo clipper-intergraph-clix${UNAME_RELEASE}
+	exit ;;
+    mips:*:*:UMIPS | mips:*:*:RISCos)
+	eval $set_cc_for_build
+	sed 's/^	//' << EOF >$dummy.c
+#ifdef __cplusplus
+#include <stdio.h>  /* for printf() prototype */
+	int main (int argc, char *argv[]) {
+#else
+	int main (argc, argv) int argc; char *argv[]; {
+#endif
+	#if defined (host_mips) && defined (MIPSEB)
+	#if defined (SYSTYPE_SYSV)
+	  printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+	#endif
+	#if defined (SYSTYPE_SVR4)
+	  printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+	#endif
+	#if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+	  printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+	#endif
+	#endif
+	  exit (-1);
+	}
+EOF
+	$CC_FOR_BUILD -o $dummy $dummy.c &&
+	  dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+	  SYSTEM_NAME=`$dummy $dummyarg` &&
+	    { echo "$SYSTEM_NAME"; exit; }
+	echo mips-mips-riscos${UNAME_RELEASE}
+	exit ;;
+    Motorola:PowerMAX_OS:*:*)
+	echo powerpc-motorola-powermax
+	exit ;;
+    Motorola:*:4.3:PL8-*)
+	echo powerpc-harris-powermax
+	exit ;;
+    Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
+	echo powerpc-harris-powermax
+	exit ;;
+    Night_Hawk:Power_UNIX:*:*)
+	echo powerpc-harris-powerunix
+	exit ;;
+    m88k:CX/UX:7*:*)
+	echo m88k-harris-cxux7
+	exit ;;
+    m88k:*:4*:R4*)
+	echo m88k-motorola-sysv4
+	exit ;;
+    m88k:*:3*:R3*)
+	echo m88k-motorola-sysv3
+	exit ;;
+    AViiON:dgux:*:*)
+	# DG/UX returns AViiON for all architectures
+	UNAME_PROCESSOR=`/usr/bin/uname -p`
+	if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+	then
+	    if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
+	       [ ${TARGET_BINARY_INTERFACE}x = x ]
+	    then
+		echo m88k-dg-dgux${UNAME_RELEASE}
+	    else
+		echo m88k-dg-dguxbcs${UNAME_RELEASE}
+	    fi
+	else
+	    echo i586-dg-dgux${UNAME_RELEASE}
+	fi
+	exit ;;
+    M88*:DolphinOS:*:*)	# DolphinOS (SVR3)
+	echo m88k-dolphin-sysv3
+	exit ;;
+    M88*:*:R3*:*)
+	# Delta 88k system running SVR3
+	echo m88k-motorola-sysv3
+	exit ;;
+    XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+	echo m88k-tektronix-sysv3
+	exit ;;
+    Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+	echo m68k-tektronix-bsd
+	exit ;;
+    *:IRIX*:*:*)
+	echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+	exit ;;
+    ????????:AIX?:[12].1:2)   # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+	echo romp-ibm-aix     # uname -m gives an 8 hex-code CPU id
+	exit ;;               # Note that: echo "'`uname -s`'" gives 'AIX '
+    i*86:AIX:*:*)
+	echo i386-ibm-aix
+	exit ;;
+    ia64:AIX:*:*)
+	if [ -x /usr/bin/oslevel ] ; then
+		IBM_REV=`/usr/bin/oslevel`
+	else
+		IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+	fi
+	echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+	exit ;;
+    *:AIX:2:3)
+	if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+		eval $set_cc_for_build
+		sed 's/^		//' << EOF >$dummy.c
+		#include <sys/systemcfg.h>
+
+		main()
+			{
+			if (!__power_pc())
+				exit(1);
+			puts("powerpc-ibm-aix3.2.5");
+			exit(0);
+			}
+EOF
+		if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+		then
+			echo "$SYSTEM_NAME"
+		else
+			echo rs6000-ibm-aix3.2.5
+		fi
+	elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+		echo rs6000-ibm-aix3.2.4
+	else
+		echo rs6000-ibm-aix3.2
+	fi
+	exit ;;
+    *:AIX:*:[4567])
+	IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+	if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+		IBM_ARCH=rs6000
+	else
+		IBM_ARCH=powerpc
+	fi
+	if [ -x /usr/bin/lslpp ] ; then
+		IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc |
+			   awk -F: '{ print $3 }' | sed s/[0-9]*$/0/`
+	else
+		IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+	fi
+	echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+	exit ;;
+    *:AIX:*:*)
+	echo rs6000-ibm-aix
+	exit ;;
+    ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+	echo romp-ibm-bsd4.4
+	exit ;;
+    ibmrt:*BSD:*|romp-ibm:BSD:*)            # covers RT/PC BSD and
+	echo romp-ibm-bsd${UNAME_RELEASE}   # 4.3 with uname added to
+	exit ;;                             # report: romp-ibm BSD 4.3
+    *:BOSX:*:*)
+	echo rs6000-bull-bosx
+	exit ;;
+    DPX/2?00:B.O.S.:*:*)
+	echo m68k-bull-sysv3
+	exit ;;
+    9000/[34]??:4.3bsd:1.*:*)
+	echo m68k-hp-bsd
+	exit ;;
+    hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+	echo m68k-hp-bsd4.4
+	exit ;;
+    9000/[34678]??:HP-UX:*:*)
+	HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+	case "${UNAME_MACHINE}" in
+	    9000/31? )            HP_ARCH=m68000 ;;
+	    9000/[34]?? )         HP_ARCH=m68k ;;
+	    9000/[678][0-9][0-9])
+		if [ -x /usr/bin/getconf ]; then
+		    sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+		    sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+		    case "${sc_cpu_version}" in
+		      523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+		      528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+		      532)                      # CPU_PA_RISC2_0
+			case "${sc_kernel_bits}" in
+			  32) HP_ARCH="hppa2.0n" ;;
+			  64) HP_ARCH="hppa2.0w" ;;
+			  '') HP_ARCH="hppa2.0" ;;   # HP-UX 10.20
+			esac ;;
+		    esac
+		fi
+		if [ "${HP_ARCH}" = "" ]; then
+		    eval $set_cc_for_build
+		    sed 's/^		//' << EOF >$dummy.c
+
+		#define _HPUX_SOURCE
+		#include <stdlib.h>
+		#include <unistd.h>
+
+		int main ()
+		{
+		#if defined(_SC_KERNEL_BITS)
+		    long bits = sysconf(_SC_KERNEL_BITS);
+		#endif
+		    long cpu  = sysconf (_SC_CPU_VERSION);
+
+		    switch (cpu)
+			{
+			case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+			case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+			case CPU_PA_RISC2_0:
+		#if defined(_SC_KERNEL_BITS)
+			    switch (bits)
+				{
+				case 64: puts ("hppa2.0w"); break;
+				case 32: puts ("hppa2.0n"); break;
+				default: puts ("hppa2.0"); break;
+				} break;
+		#else  /* !defined(_SC_KERNEL_BITS) */
+			    puts ("hppa2.0"); break;
+		#endif
+			default: puts ("hppa1.0"); break;
+			}
+		    exit (0);
+		}
+EOF
+		    (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+		    test -z "$HP_ARCH" && HP_ARCH=hppa
+		fi ;;
+	esac
+	if [ ${HP_ARCH} = "hppa2.0w" ]
+	then
+	    eval $set_cc_for_build
+
+	    # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+	    # 32-bit code.  hppa64-hp-hpux* has the same kernel and a compiler
+	    # generating 64-bit code.  GNU and HP use different nomenclature:
+	    #
+	    # $ CC_FOR_BUILD=cc ./config.guess
+	    # => hppa2.0w-hp-hpux11.23
+	    # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+	    # => hppa64-hp-hpux11.23
+
+	    if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
+		grep -q __LP64__
+	    then
+		HP_ARCH="hppa2.0w"
+	    else
+		HP_ARCH="hppa64"
+	    fi
+	fi
+	echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+	exit ;;
+    ia64:HP-UX:*:*)
+	HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+	echo ia64-hp-hpux${HPUX_REV}
+	exit ;;
+    3050*:HI-UX:*:*)
+	eval $set_cc_for_build
+	sed 's/^	//' << EOF >$dummy.c
+	#include <unistd.h>
+	int
+	main ()
+	{
+	  long cpu = sysconf (_SC_CPU_VERSION);
+	  /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+	     true for CPU_PA_RISC1_0.  CPU_IS_PA_RISC returns correct
+	     results, however.  */
+	  if (CPU_IS_PA_RISC (cpu))
+	    {
+	      switch (cpu)
+		{
+		  case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+		  case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+		  case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+		  default: puts ("hppa-hitachi-hiuxwe2"); break;
+		}
+	    }
+	  else if (CPU_IS_HP_MC68K (cpu))
+	    puts ("m68k-hitachi-hiuxwe2");
+	  else puts ("unknown-hitachi-hiuxwe2");
+	  exit (0);
+	}
+EOF
+	$CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+		{ echo "$SYSTEM_NAME"; exit; }
+	echo unknown-hitachi-hiuxwe2
+	exit ;;
+    9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+	echo hppa1.1-hp-bsd
+	exit ;;
+    9000/8??:4.3bsd:*:*)
+	echo hppa1.0-hp-bsd
+	exit ;;
+    *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+	echo hppa1.0-hp-mpeix
+	exit ;;
+    hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+	echo hppa1.1-hp-osf
+	exit ;;
+    hp8??:OSF1:*:*)
+	echo hppa1.0-hp-osf
+	exit ;;
+    i*86:OSF1:*:*)
+	if [ -x /usr/sbin/sysversion ] ; then
+	    echo ${UNAME_MACHINE}-unknown-osf1mk
+	else
+	    echo ${UNAME_MACHINE}-unknown-osf1
+	fi
+	exit ;;
+    parisc*:Lites*:*:*)
+	echo hppa1.1-hp-lites
+	exit ;;
+    C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+	echo c1-convex-bsd
+	exit ;;
+    C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+	if getsysinfo -f scalar_acc
+	then echo c32-convex-bsd
+	else echo c2-convex-bsd
+	fi
+	exit ;;
+    C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+	echo c34-convex-bsd
+	exit ;;
+    C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+	echo c38-convex-bsd
+	exit ;;
+    C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+	echo c4-convex-bsd
+	exit ;;
+    CRAY*Y-MP:*:*:*)
+	echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	exit ;;
+    CRAY*[A-Z]90:*:*:*)
+	echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+	| sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+	      -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+	      -e 's/\.[^.]*$/.X/'
+	exit ;;
+    CRAY*TS:*:*:*)
+	echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	exit ;;
+    CRAY*T3E:*:*:*)
+	echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	exit ;;
+    CRAY*SV1:*:*:*)
+	echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	exit ;;
+    *:UNICOS/mp:*:*)
+	echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	exit ;;
+    F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+	FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+	FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+	FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+	echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+	exit ;;
+    5000:UNIX_System_V:4.*:*)
+	FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+	FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
+	echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+	exit ;;
+    i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+	echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+	exit ;;
+    sparc*:BSD/OS:*:*)
+	echo sparc-unknown-bsdi${UNAME_RELEASE}
+	exit ;;
+    *:BSD/OS:*:*)
+	echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+	exit ;;
+    *:FreeBSD:*:*)
+	UNAME_PROCESSOR=`/usr/bin/uname -p`
+	case ${UNAME_PROCESSOR} in
+	    amd64)
+		echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+	    *)
+		echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+	esac
+	exit ;;
+    i*:CYGWIN*:*)
+	echo ${UNAME_MACHINE}-pc-cygwin
+	exit ;;
+    *:MINGW64*:*)
+	echo ${UNAME_MACHINE}-pc-mingw64
+	exit ;;
+    *:MINGW*:*)
+	echo ${UNAME_MACHINE}-pc-mingw32
+	exit ;;
+    *:MSYS*:*)
+	echo ${UNAME_MACHINE}-pc-msys
+	exit ;;
+    i*:windows32*:*)
+	# uname -m includes "-pc" on this system.
+	echo ${UNAME_MACHINE}-mingw32
+	exit ;;
+    i*:PW*:*)
+	echo ${UNAME_MACHINE}-pc-pw32
+	exit ;;
+    *:Interix*:*)
+	case ${UNAME_MACHINE} in
+	    x86)
+		echo i586-pc-interix${UNAME_RELEASE}
+		exit ;;
+	    authenticamd | genuineintel | EM64T)
+		echo x86_64-unknown-interix${UNAME_RELEASE}
+		exit ;;
+	    IA64)
+		echo ia64-unknown-interix${UNAME_RELEASE}
+		exit ;;
+	esac ;;
+    [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
+	echo i${UNAME_MACHINE}-pc-mks
+	exit ;;
+    8664:Windows_NT:*)
+	echo x86_64-pc-mks
+	exit ;;
+    i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+	# How do we know it's Interix rather than the generic POSIX subsystem?
+	# It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
+	# change UNAME_MACHINE based on the output of uname instead of i386?
+	echo i586-pc-interix
+	exit ;;
+    i*:UWIN*:*)
+	echo ${UNAME_MACHINE}-pc-uwin
+	exit ;;
+    amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+	echo x86_64-unknown-cygwin
+	exit ;;
+    p*:CYGWIN*:*)
+	echo powerpcle-unknown-cygwin
+	exit ;;
+    prep*:SunOS:5.*:*)
+	echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    *:GNU:*:*)
+	# the GNU system
+	echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+	exit ;;
+    *:GNU/*:*:*)
+	# other systems with GNU libc and userland
+	echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC}
+	exit ;;
+    i*86:Minix:*:*)
+	echo ${UNAME_MACHINE}-pc-minix
+	exit ;;
+    aarch64:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    aarch64_be:Linux:*:*)
+	UNAME_MACHINE=aarch64_be
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    alpha:Linux:*:*)
+	case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+	  EV5)   UNAME_MACHINE=alphaev5 ;;
+	  EV56)  UNAME_MACHINE=alphaev56 ;;
+	  PCA56) UNAME_MACHINE=alphapca56 ;;
+	  PCA57) UNAME_MACHINE=alphapca56 ;;
+	  EV6)   UNAME_MACHINE=alphaev6 ;;
+	  EV67)  UNAME_MACHINE=alphaev67 ;;
+	  EV68*) UNAME_MACHINE=alphaev68 ;;
+	esac
+	objdump --private-headers /bin/sh | grep -q ld.so.1
+	if test "$?" = 0 ; then LIBC="gnulibc1" ; fi
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    arc:Linux:*:* | arceb:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    arm*:Linux:*:*)
+	eval $set_cc_for_build
+	if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
+	    | grep -q __ARM_EABI__
+	then
+	    echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	else
+	    if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+		| grep -q __ARM_PCS_VFP
+	    then
+		echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi
+	    else
+		echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf
+	    fi
+	fi
+	exit ;;
+    avr32*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    cris:Linux:*:*)
+	echo ${UNAME_MACHINE}-axis-linux-${LIBC}
+	exit ;;
+    crisv32:Linux:*:*)
+	echo ${UNAME_MACHINE}-axis-linux-${LIBC}
+	exit ;;
+    frv:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    hexagon:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    i*86:Linux:*:*)
+	echo ${UNAME_MACHINE}-pc-linux-${LIBC}
+	exit ;;
+    ia64:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    m32r*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    m68*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    mips:Linux:*:* | mips64:Linux:*:*)
+	eval $set_cc_for_build
+	sed 's/^	//' << EOF >$dummy.c
+	#undef CPU
+	#undef ${UNAME_MACHINE}
+	#undef ${UNAME_MACHINE}el
+	#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+	CPU=${UNAME_MACHINE}el
+	#else
+	#if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+	CPU=${UNAME_MACHINE}
+	#else
+	CPU=
+	#endif
+	#endif
+EOF
+	eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
+	test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; }
+	;;
+    openrisc*:Linux:*:*)
+	echo or1k-unknown-linux-${LIBC}
+	exit ;;
+    or32:Linux:*:* | or1k*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    padre:Linux:*:*)
+	echo sparc-unknown-linux-${LIBC}
+	exit ;;
+    parisc64:Linux:*:* | hppa64:Linux:*:*)
+	echo hppa64-unknown-linux-${LIBC}
+	exit ;;
+    parisc:Linux:*:* | hppa:Linux:*:*)
+	# Look for CPU level
+	case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+	  PA7*) echo hppa1.1-unknown-linux-${LIBC} ;;
+	  PA8*) echo hppa2.0-unknown-linux-${LIBC} ;;
+	  *)    echo hppa-unknown-linux-${LIBC} ;;
+	esac
+	exit ;;
+    ppc64:Linux:*:*)
+	echo powerpc64-unknown-linux-${LIBC}
+	exit ;;
+    ppc:Linux:*:*)
+	echo powerpc-unknown-linux-${LIBC}
+	exit ;;
+    ppc64le:Linux:*:*)
+	echo powerpc64le-unknown-linux-${LIBC}
+	exit ;;
+    ppcle:Linux:*:*)
+	echo powerpcle-unknown-linux-${LIBC}
+	exit ;;
+    s390:Linux:*:* | s390x:Linux:*:*)
+	echo ${UNAME_MACHINE}-ibm-linux-${LIBC}
+	exit ;;
+    sh64*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    sh*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    sparc:Linux:*:* | sparc64:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    tile*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    vax:Linux:*:*)
+	echo ${UNAME_MACHINE}-dec-linux-${LIBC}
+	exit ;;
+    x86_64:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    xtensa*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
+    i*86:DYNIX/ptx:4*:*)
+	# ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+	# earlier versions are messed up and put the nodename in both
+	# sysname and nodename.
+	echo i386-sequent-sysv4
+	exit ;;
+    i*86:UNIX_SV:4.2MP:2.*)
+	# Unixware is an offshoot of SVR4, but it has its own version
+	# number series starting with 2...
+	# I am not positive that other SVR4 systems won't match this,
+	# I just have to hope.  -- rms.
+	# Use sysv4.2uw... so that sysv4* matches it.
+	echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+	exit ;;
+    i*86:OS/2:*:*)
+	# If we were able to find `uname', then EMX Unix compatibility
+	# is probably installed.
+	echo ${UNAME_MACHINE}-pc-os2-emx
+	exit ;;
+    i*86:XTS-300:*:STOP)
+	echo ${UNAME_MACHINE}-unknown-stop
+	exit ;;
+    i*86:atheos:*:*)
+	echo ${UNAME_MACHINE}-unknown-atheos
+	exit ;;
+    i*86:syllable:*:*)
+	echo ${UNAME_MACHINE}-pc-syllable
+	exit ;;
+    i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
+	echo i386-unknown-lynxos${UNAME_RELEASE}
+	exit ;;
+    i*86:*DOS:*:*)
+	echo ${UNAME_MACHINE}-pc-msdosdjgpp
+	exit ;;
+    i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+	UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+	if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+		echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+	else
+		echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+	fi
+	exit ;;
+    i*86:*:5:[678]*)
+	# UnixWare 7.x, OpenUNIX and OpenServer 6.
+	case `/bin/uname -X | grep "^Machine"` in
+	    *486*)	     UNAME_MACHINE=i486 ;;
+	    *Pentium)	     UNAME_MACHINE=i586 ;;
+	    *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+	esac
+	echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+	exit ;;
+    i*86:*:3.2:*)
+	if test -f /usr/options/cb.name; then
+		UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+		echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+	elif /bin/uname -X 2>/dev/null >/dev/null ; then
+		UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+		(/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+		(/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+			&& UNAME_MACHINE=i586
+		(/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+			&& UNAME_MACHINE=i686
+		(/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+			&& UNAME_MACHINE=i686
+		echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+	else
+		echo ${UNAME_MACHINE}-pc-sysv32
+	fi
+	exit ;;
+    pc:*:*:*)
+	# Left here for compatibility:
+	# On DJGPP, uname -m always prints 'pc' and says nothing about
+	# the processor, so we play it safe by assuming i586.
+	# Note: whatever this is, it MUST be the same as what config.sub
+	# prints for the "djgpp" host, or else GDB configury will decide that
+	# this is a cross-build.
+	echo i586-pc-msdosdjgpp
+	exit ;;
+    Intel:Mach:3*:*)
+	echo i386-pc-mach3
+	exit ;;
+    paragon:*:*:*)
+	echo i860-intel-osf1
+	exit ;;
+    i860:*:4.*:*) # i860-SVR4
+	if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+	  echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+	else # Add other i860-SVR4 vendors below as they are discovered.
+	  echo i860-unknown-sysv${UNAME_RELEASE}  # Unknown i860-SVR4
+	fi
+	exit ;;
+    mini*:CTIX:SYS*5:*)
+	# "miniframe"
+	echo m68010-convergent-sysv
+	exit ;;
+    mc68k:UNIX:SYSTEM5:3.51m)
+	echo m68k-convergent-sysv
+	exit ;;
+    M680?0:D-NIX:5.3:*)
+	echo m68k-diab-dnix
+	exit ;;
+    M68*:*:R3V[5678]*:*)
+	test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
+    3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
+	OS_REL=''
+	test -r /etc/.relid \
+	&& OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+	/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+	  && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+	/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+	  && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+    3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+	/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+	  && { echo i486-ncr-sysv4; exit; } ;;
+    NCR*:*:4.2:* | MPRAS*:*:4.2:*)
+	OS_REL='.3'
+	test -r /etc/.relid \
+	    && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+	/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+	    && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+	/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+	    && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
+	/bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
+	    && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+    m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+	echo m68k-unknown-lynxos${UNAME_RELEASE}
+	exit ;;
+    mc68030:UNIX_System_V:4.*:*)
+	echo m68k-atari-sysv4
+	exit ;;
+    TSUNAMI:LynxOS:2.*:*)
+	echo sparc-unknown-lynxos${UNAME_RELEASE}
+	exit ;;
+    rs6000:LynxOS:2.*:*)
+	echo rs6000-unknown-lynxos${UNAME_RELEASE}
+	exit ;;
+    PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
+	echo powerpc-unknown-lynxos${UNAME_RELEASE}
+	exit ;;
+    SM[BE]S:UNIX_SV:*:*)
+	echo mips-dde-sysv${UNAME_RELEASE}
+	exit ;;
+    RM*:ReliantUNIX-*:*:*)
+	echo mips-sni-sysv4
+	exit ;;
+    RM*:SINIX-*:*:*)
+	echo mips-sni-sysv4
+	exit ;;
+    *:SINIX-*:*:*)
+	if uname -p 2>/dev/null >/dev/null ; then
+		UNAME_MACHINE=`(uname -p) 2>/dev/null`
+		echo ${UNAME_MACHINE}-sni-sysv4
+	else
+		echo ns32k-sni-sysv
+	fi
+	exit ;;
+    PENTIUM:*:4.0*:*)	# Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+			# says <Richard.M.Bartel@ccMail.Census.GOV>
+	echo i586-unisys-sysv4
+	exit ;;
+    *:UNIX_System_V:4*:FTX*)
+	# From Gerald Hewes <hewes@openmarket.com>.
+	# How about differentiating between stratus architectures? -djm
+	echo hppa1.1-stratus-sysv4
+	exit ;;
+    *:*:*:FTX*)
+	# From seanf@swdc.stratus.com.
+	echo i860-stratus-sysv4
+	exit ;;
+    i*86:VOS:*:*)
+	# From Paul.Green@stratus.com.
+	echo ${UNAME_MACHINE}-stratus-vos
+	exit ;;
+    *:VOS:*:*)
+	# From Paul.Green@stratus.com.
+	echo hppa1.1-stratus-vos
+	exit ;;
+    mc68*:A/UX:*:*)
+	echo m68k-apple-aux${UNAME_RELEASE}
+	exit ;;
+    news*:NEWS-OS:6*:*)
+	echo mips-sony-newsos6
+	exit ;;
+    R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+	if [ -d /usr/nec ]; then
+		echo mips-nec-sysv${UNAME_RELEASE}
+	else
+		echo mips-unknown-sysv${UNAME_RELEASE}
+	fi
+	exit ;;
+    BeBox:BeOS:*:*)	# BeOS running on hardware made by Be, PPC only.
+	echo powerpc-be-beos
+	exit ;;
+    BeMac:BeOS:*:*)	# BeOS running on Mac or Mac clone, PPC only.
+	echo powerpc-apple-beos
+	exit ;;
+    BePC:BeOS:*:*)	# BeOS running on Intel PC compatible.
+	echo i586-pc-beos
+	exit ;;
+    BePC:Haiku:*:*)	# Haiku running on Intel PC compatible.
+	echo i586-pc-haiku
+	exit ;;
+    x86_64:Haiku:*:*)
+	echo x86_64-unknown-haiku
+	exit ;;
+    SX-4:SUPER-UX:*:*)
+	echo sx4-nec-superux${UNAME_RELEASE}
+	exit ;;
+    SX-5:SUPER-UX:*:*)
+	echo sx5-nec-superux${UNAME_RELEASE}
+	exit ;;
+    SX-6:SUPER-UX:*:*)
+	echo sx6-nec-superux${UNAME_RELEASE}
+	exit ;;
+    SX-7:SUPER-UX:*:*)
+	echo sx7-nec-superux${UNAME_RELEASE}
+	exit ;;
+    SX-8:SUPER-UX:*:*)
+	echo sx8-nec-superux${UNAME_RELEASE}
+	exit ;;
+    SX-8R:SUPER-UX:*:*)
+	echo sx8r-nec-superux${UNAME_RELEASE}
+	exit ;;
+    Power*:Rhapsody:*:*)
+	echo powerpc-apple-rhapsody${UNAME_RELEASE}
+	exit ;;
+    *:Rhapsody:*:*)
+	echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+	exit ;;
+    *:Darwin:*:*)
+	UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
+	eval $set_cc_for_build
+	if test "$UNAME_PROCESSOR" = unknown ; then
+	    UNAME_PROCESSOR=powerpc
+	fi
+	if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then
+	    if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+		if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+		    (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+		    grep IS_64BIT_ARCH >/dev/null
+		then
+		    case $UNAME_PROCESSOR in
+			i386) UNAME_PROCESSOR=x86_64 ;;
+			powerpc) UNAME_PROCESSOR=powerpc64 ;;
+		    esac
+		fi
+	    fi
+	elif test "$UNAME_PROCESSOR" = i386 ; then
+	    # Avoid executing cc on OS X 10.9, as it ships with a stub
+	    # that puts up a graphical alert prompting to install
+	    # developer tools.  Any system running Mac OS X 10.7 or
+	    # later (Darwin 11 and later) is required to have a 64-bit
+	    # processor. This is not true of the ARM version of Darwin
+	    # that Apple uses in portable devices.
+	    UNAME_PROCESSOR=x86_64
+	fi
+	echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
+	exit ;;
+    *:procnto*:*:* | *:QNX:[0123456789]*:*)
+	UNAME_PROCESSOR=`uname -p`
+	if test "$UNAME_PROCESSOR" = "x86"; then
+		UNAME_PROCESSOR=i386
+		UNAME_MACHINE=pc
+	fi
+	echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+	exit ;;
+    *:QNX:*:4*)
+	echo i386-pc-qnx
+	exit ;;
+    NEO-?:NONSTOP_KERNEL:*:*)
+	echo neo-tandem-nsk${UNAME_RELEASE}
+	exit ;;
+    NSE-*:NONSTOP_KERNEL:*:*)
+	echo nse-tandem-nsk${UNAME_RELEASE}
+	exit ;;
+    NSR-?:NONSTOP_KERNEL:*:*)
+	echo nsr-tandem-nsk${UNAME_RELEASE}
+	exit ;;
+    *:NonStop-UX:*:*)
+	echo mips-compaq-nonstopux
+	exit ;;
+    BS2000:POSIX*:*:*)
+	echo bs2000-siemens-sysv
+	exit ;;
+    DS/*:UNIX_System_V:*:*)
+	echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+	exit ;;
+    *:Plan9:*:*)
+	# "uname -m" is not consistent, so use $cputype instead. 386
+	# is converted to i386 for consistency with other x86
+	# operating systems.
+	if test "$cputype" = "386"; then
+	    UNAME_MACHINE=i386
+	else
+	    UNAME_MACHINE="$cputype"
+	fi
+	echo ${UNAME_MACHINE}-unknown-plan9
+	exit ;;
+    *:TOPS-10:*:*)
+	echo pdp10-unknown-tops10
+	exit ;;
+    *:TENEX:*:*)
+	echo pdp10-unknown-tenex
+	exit ;;
+    KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+	echo pdp10-dec-tops20
+	exit ;;
+    XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+	echo pdp10-xkl-tops20
+	exit ;;
+    *:TOPS-20:*:*)
+	echo pdp10-unknown-tops20
+	exit ;;
+    *:ITS:*:*)
+	echo pdp10-unknown-its
+	exit ;;
+    SEI:*:*:SEIUX)
+	echo mips-sei-seiux${UNAME_RELEASE}
+	exit ;;
+    *:DragonFly:*:*)
+	echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+	exit ;;
+    *:*VMS:*:*)
+	UNAME_MACHINE=`(uname -p) 2>/dev/null`
+	case "${UNAME_MACHINE}" in
+	    A*) echo alpha-dec-vms ; exit ;;
+	    I*) echo ia64-dec-vms ; exit ;;
+	    V*) echo vax-dec-vms ; exit ;;
+	esac ;;
+    *:XENIX:*:SysV)
+	echo i386-pc-xenix
+	exit ;;
+    i*86:skyos:*:*)
+	echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
+	exit ;;
+    i*86:rdos:*:*)
+	echo ${UNAME_MACHINE}-pc-rdos
+	exit ;;
+    i*86:AROS:*:*)
+	echo ${UNAME_MACHINE}-pc-aros
+	exit ;;
+    x86_64:VMkernel:*:*)
+	echo ${UNAME_MACHINE}-unknown-esx
+	exit ;;
+esac
+
+cat >&2 <<EOF
+$0: unable to guess system type
+
+This script, last modified $timestamp, has failed to recognize
+the operating system you are using. It is advised that you
+download the most up to date version of the config scripts from
+
+  http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+and
+  http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+If the version you run ($0) is already up to date, please
+send the following data and any information you think might be
+pertinent to <config-patches@gnu.org> in order to provide the needed
+information to handle your system.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X     = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo               = `(hostinfo) 2>/dev/null`
+/bin/universe          = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k       = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch              = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM  = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:

Property changes on: applgrid/tags/applgrid-01-06-36/config.guess
___________________________________________________________________
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Index: applgrid/tags/applgrid-01-06-36/ltmain.sh
===================================================================
--- applgrid/tags/applgrid-01-06-36/ltmain.sh	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/ltmain.sh	(revision 2010)
@@ -0,0 +1,11147 @@
+#! /bin/sh
+## DO NOT EDIT - This file generated from ./build-aux/ltmain.in
+##               by inline-source v2014-01-03.01
+
+# libtool (GNU libtool) 2.4.6
+# Provide generalized library-building support services.
+# Written by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+
+# Copyright (C) 1996-2015 Free Software Foundation, Inc.
+# This is free software; see the source for copying conditions.  There is NO
+# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+# GNU Libtool is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# As a special exception to the GNU General Public License,
+# if you distribute this file as part of a program or library that
+# is built using GNU Libtool, you may include this file under the
+# same distribution terms that you use for the rest of that program.
+#
+# GNU Libtool is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+
+PROGRAM=libtool
+PACKAGE=libtool
+VERSION=2.4.6
+package_revision=2.4.6
+
+
+## ------ ##
+## Usage. ##
+## ------ ##
+
+# Run './libtool --help' for help with using this script from the
+# command line.
+
+
+## ------------------------------- ##
+## User overridable command paths. ##
+## ------------------------------- ##
+
+# After configure completes, it has a better idea of some of the
+# shell tools we need than the defaults used by the functions shared
+# with bootstrap, so set those here where they can still be over-
+# ridden by the user, but otherwise take precedence.
+
+: ${AUTOCONF="autoconf"}
+: ${AUTOMAKE="automake"}
+
+
+## -------------------------- ##
+## Source external libraries. ##
+## -------------------------- ##
+
+# Much of our low-level functionality needs to be sourced from external
+# libraries, which are installed to $pkgauxdir.
+
+# Set a version string for this script.
+scriptversion=2015-01-20.17; # UTC
+
+# General shell script boiler plate, and helper functions.
+# Written by Gary V. Vaughan, 2004
+
+# Copyright (C) 2004-2015 Free Software Foundation, Inc.
+# This is free software; see the source for copying conditions.  There is NO
+# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+
+# As a special exception to the GNU General Public License, if you distribute
+# this file as part of a program or library that is built using GNU Libtool,
+# you may include this file under the same distribution terms that you use
+# for the rest of that program.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Please report bugs or propose patches to gary@gnu.org.
+
+
+## ------ ##
+## Usage. ##
+## ------ ##
+
+# Evaluate this file near the top of your script to gain access to
+# the functions and variables defined here:
+#
+#   . `echo "$0" | ${SED-sed} 's|[^/]*$||'`/build-aux/funclib.sh
+#
+# If you need to override any of the default environment variable
+# settings, do that before evaluating this file.
+
+
+## -------------------- ##
+## Shell normalisation. ##
+## -------------------- ##
+
+# Some shells need a little help to be as Bourne compatible as possible.
+# Before doing anything else, make sure all that help has been provided!
+
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+  emulate sh
+  NULLCMD=:
+  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+  # is contrary to our usage.  Disable this feature.
+  alias -g '${1+"$@"}'='"$@"'
+  setopt NO_GLOB_SUBST
+else
+  case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac
+fi
+
+# NLS nuisances: We save the old values in case they are required later.
+_G_user_locale=
+_G_safe_locale=
+for _G_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES
+do
+  eval "if test set = \"\${$_G_var+set}\"; then
+          save_$_G_var=\$$_G_var
+          $_G_var=C
+	  export $_G_var
+	  _G_user_locale=\"$_G_var=\\\$save_\$_G_var; \$_G_user_locale\"
+	  _G_safe_locale=\"$_G_var=C; \$_G_safe_locale\"
+	fi"
+done
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+# Make sure IFS has a sensible default
+sp=' '
+nl='
+'
+IFS="$sp	$nl"
+
+# There are apparently some systems that use ';' as a PATH separator!
+if test "${PATH_SEPARATOR+set}" != set; then
+  PATH_SEPARATOR=:
+  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+      PATH_SEPARATOR=';'
+  }
+fi
+
+
+
+## ------------------------- ##
+## Locate command utilities. ##
+## ------------------------- ##
+
+
+# func_executable_p FILE
+# ----------------------
+# Check that FILE is an executable regular file.
+func_executable_p ()
+{
+    test -f "$1" && test -x "$1"
+}
+
+
+# func_path_progs PROGS_LIST CHECK_FUNC [PATH]
+# --------------------------------------------
+# Try each element of PROGS_LIST in every directory of PATH, preferring
+# a program whose --version output contains "GNU", and otherwise
+# falling back to the candidate selected by CHECK_FUNC.
+#
+# CHECK_FUNC should accept the path to a candidate program, and
+# set $func_check_prog_result if it truncates its output less than
+# $_G_path_prog_max characters.
+func_path_progs ()
+{
+    _G_progs_list=$1
+    _G_check_func=$2
+    _G_PATH=${3-"$PATH"}
+
+    _G_path_prog_max=0
+    _G_path_prog_found=false
+    _G_save_IFS=$IFS; IFS=${PATH_SEPARATOR-:}
+    for _G_dir in $_G_PATH; do
+      IFS=$_G_save_IFS
+      test -z "$_G_dir" && _G_dir=.
+      for _G_prog_name in $_G_progs_list; do
+        for _exeext in '' .EXE; do
+          _G_path_prog=$_G_dir/$_G_prog_name$_exeext
+          func_executable_p "$_G_path_prog" || continue
+          case `"$_G_path_prog" --version 2>&1` in
+            *GNU*) func_path_progs_result=$_G_path_prog _G_path_prog_found=: ;;
+            *)     $_G_check_func $_G_path_prog
+		   func_path_progs_result=$func_check_prog_result
+		   ;;
+          esac
+          $_G_path_prog_found && break 3
+        done
+      done
+    done
+    IFS=$_G_save_IFS
+    test -z "$func_path_progs_result" && {
+      echo "no acceptable sed could be found in \$PATH" >&2
+      exit 1
+    }
+}
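+
+# Example call (added comment, illustrative only -- the real callers for
+# SED and GREP follow below): with a hypothetical check function that
+# simply accepts its candidate,
+#
+#   func_check_prog_any () { func_check_prog_result=$1; }
+#   func_path_progs "make gmake" func_check_prog_any
+#   MAKE=$func_path_progs_result
+#
+# the first 'make' or 'gmake' on PATH whose --version mentions GNU is
+# chosen; otherwise the last candidate examined is used.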
+
+
+# We want to be able to use the functions in this file before configure
+# has figured out where the best binaries are kept, which means we have
+# to search for them ourselves - except when the results are already set
+# where we skip the searches.
+
+# Unless the user overrides by setting SED, search the path for either GNU
+# sed, or the sed that truncates its output the least.
+test -z "$SED" && {
+  _G_sed_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/
+  for _G_i in 1 2 3 4 5 6 7; do
+    _G_sed_script=$_G_sed_script$nl$_G_sed_script
+  done
+  echo "$_G_sed_script" 2>/dev/null | sed 99q >conftest.sed
+  _G_sed_script=
+
+  func_check_prog_sed ()
+  {
+    _G_path_prog=$1
+
+    _G_count=0
+    printf 0123456789 >conftest.in
+    while :
+    do
+      cat conftest.in conftest.in >conftest.tmp
+      mv conftest.tmp conftest.in
+      cp conftest.in conftest.nl
+      echo '' >> conftest.nl
+      "$_G_path_prog" -f conftest.sed <conftest.nl >conftest.out 2>/dev/null || break
+      diff conftest.out conftest.nl >/dev/null 2>&1 || break
+      _G_count=`expr $_G_count + 1`
+      if test "$_G_count" -gt "$_G_path_prog_max"; then
+        # Best one so far, save it but keep looking for a better one
+        func_check_prog_result=$_G_path_prog
+        _G_path_prog_max=$_G_count
+      fi
+      # 10*(2^10) chars as input seems more than enough
+      test 10 -lt "$_G_count" && break
+    done
+    rm -f conftest.in conftest.tmp conftest.nl conftest.out
+  }
+
+  func_path_progs "sed gsed" func_check_prog_sed $PATH:/usr/xpg4/bin
+  rm -f conftest.sed
+  SED=$func_path_progs_result
+}
+
+
+# Unless the user overrides by setting GREP, search the path for either GNU
+# grep, or the grep that truncates its output the least.
+test -z "$GREP" && {
+  func_check_prog_grep ()
+  {
+    _G_path_prog=$1
+
+    _G_count=0
+    _G_path_prog_max=0
+    printf 0123456789 >conftest.in
+    while :
+    do
+      cat conftest.in conftest.in >conftest.tmp
+      mv conftest.tmp conftest.in
+      cp conftest.in conftest.nl
+      echo 'GREP' >> conftest.nl
+      "$_G_path_prog" -e 'GREP$' -e '-(cannot match)-' <conftest.nl >conftest.out 2>/dev/null || break
+      diff conftest.out conftest.nl >/dev/null 2>&1 || break
+      _G_count=`expr $_G_count + 1`
+      if test "$_G_count" -gt "$_G_path_prog_max"; then
+        # Best one so far, save it but keep looking for a better one
+        func_check_prog_result=$_G_path_prog
+        _G_path_prog_max=$_G_count
+      fi
+      # 10*(2^10) chars as input seems more than enough
+      test 10 -lt "$_G_count" && break
+    done
+    rm -f conftest.in conftest.tmp conftest.nl conftest.out
+  }
+
+  func_path_progs "grep ggrep" func_check_prog_grep $PATH:/usr/xpg4/bin
+  GREP=$func_path_progs_result
+}
+
+
+## ------------------------------- ##
+## User overridable command paths. ##
+## ------------------------------- ##
+
+# All uppercase variable names are used for environment variables.  These
+# variables can be overridden by the user before calling a script that
+# uses them if a suitable command of that name is not already available
+# in the command search PATH.
+
+: ${CP="cp -f"}
+: ${ECHO="printf %s\n"}
+: ${EGREP="$GREP -E"}
+: ${FGREP="$GREP -F"}
+: ${LN_S="ln -s"}
+: ${MAKE="make"}
+: ${MKDIR="mkdir"}
+: ${MV="mv -f"}
+: ${RM="rm -f"}
+: ${SHELL="${CONFIG_SHELL-/bin/sh}"}
+
+
+## -------------------- ##
+## Useful sed snippets. ##
+## -------------------- ##
+
+sed_dirname='s|/[^/]*$||'
+sed_basename='s|^.*/||'
+
+# Sed substitution that helps us do robust quoting.  It backslashifies
+# metacharacters that are still active within double-quoted strings.
+sed_quote_subst='s|\([`"$\\]\)|\\\1|g'
+
+# Same as above, but do not quote variable references.
+sed_double_quote_subst='s/\(["`\\]\)/\\\1/g'
+
+# Sed substitution that turns a string into a regex matching for the
+# string literally.
+sed_make_literal_regex='s|[].[^$\\*\/]|\\&|g'
+
+# Sed substitution that converts a w32 file name or path
+# that contains forward slashes, into one that contains
+# (escaped) backslashes.  A very naive implementation.
+sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g'
+
+# Re-'\' parameter expansions in output of sed_double_quote_subst that
+# were '\'-ed in input to the same.  If an odd number of '\' preceded a
+# '$' in input to sed_double_quote_subst, that '$' was protected from
+# expansion.  Since each input '\' is now two '\'s, look for any number
+# of runs of four '\'s followed by two '\'s and then a '$'.  '\' that '$'.
+_G_bs='\\'
+_G_bs2='\\\\'
+_G_bs4='\\\\\\\\'
+_G_dollar='\$'
+sed_double_backslash="\
+  s/$_G_bs4/&\\
+/g
+  s/^$_G_bs2$_G_dollar/$_G_bs&/
+  s/\\([^$_G_bs]\\)$_G_bs2$_G_dollar/\\1$_G_bs2$_G_bs$_G_dollar/g
+  s/\n//g"
+
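+# A minimal illustration of 'sed_quote_subst' above, using a made-up
+# sample string:
+#
+#   $ECHO 'say "$HOME"' | $SED "$sed_quote_subst"
+#   # prints: say \"\$HOME\"
+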
+
+## ----------------- ##
+## Global variables. ##
+## ----------------- ##
+
+# Except for the global variables explicitly listed below, the following
+# functions in the '^func_' namespace, and the '^require_' namespace
+# variables initialised in the 'Resource management' section, sourcing
+# this file will not pollute your global namespace with anything
+# else. There's no portable way to scope variables in Bourne shell
+# though, so actually running these functions will sometimes place
+# results into a variable named after the function, and often use
+# temporary variables in the '^_G_' namespace. If you are careful to
+# avoid using those namespaces casually in your sourcing script, things
+# should continue to work as you expect. And, of course, you can freely
+# overwrite any of the functions or variables defined here before
+# calling anything to customize them.
+
+EXIT_SUCCESS=0
+EXIT_FAILURE=1
+EXIT_MISMATCH=63  # $? = 63 is used to indicate version mismatch to missing.
+EXIT_SKIP=77	  # $? = 77 is used to indicate a skipped test to automake.
+
+# Allow overriding; e.g., assuming that you follow the convention of
+# putting '$debug_cmd' at the start of all your functions, you can get
+# bash to show function call trace with:
+#
+#    debug_cmd='eval echo "${FUNCNAME[0]} $*" >&2' bash your-script-name
+debug_cmd=${debug_cmd-":"}
+exit_cmd=:
+
+# By convention, finish your script with:
+#
+#    exit $exit_status
+#
+# so that you can set exit_status to non-zero if you want to indicate
+# something went wrong during execution without actually bailing out at
+# the point of failure.
+exit_status=$EXIT_SUCCESS
+
+# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh
+# is ksh but when the shell is invoked as "sh" and the current value of
+# the _XPG environment variable is not equal to 1 (one), the special
+# positional parameter $0, within a function call, is the name of the
+# function.
+progpath=$0
+
+# The name of this program.
+progname=`$ECHO "$progpath" |$SED "$sed_basename"`
+
+# Make sure we have an absolute progpath for reexecution:
+case $progpath in
+  [\\/]*|[A-Za-z]:\\*) ;;
+  *[\\/]*)
+     progdir=`$ECHO "$progpath" |$SED "$sed_dirname"`
+     progdir=`cd "$progdir" && pwd`
+     progpath=$progdir/$progname
+     ;;
+  *)
+     _G_IFS=$IFS
+     IFS=${PATH_SEPARATOR-:}
+     for progdir in $PATH; do
+       IFS=$_G_IFS
+       test -x "$progdir/$progname" && break
+     done
+     IFS=$_G_IFS
+     test -n "$progdir" || progdir=`pwd`
+     progpath=$progdir/$progname
+     ;;
+esac
+
+
+## ----------------- ##
+## Standard options. ##
+## ----------------- ##
+
+# The following options affect the operation of the functions defined
+# below, and should be set appropriately depending on run-time
+# parameters passed on the command line.
+
+opt_dry_run=false
+opt_quiet=false
+opt_verbose=false
+
+# Categories 'all' and 'none' are always available.  Append any others
+# you will pass as the first argument to func_warning from your own
+# code.
+warning_categories=
+
+# By default, display warnings according to 'opt_warning_types'.  Set
+# 'warning_func'  to ':' to elide all warnings, or func_fatal_error to
+# treat the next displayed warning as a fatal error.
+warning_func=func_warn_and_continue
+
+# Set to 'all' to display all warnings, 'none' to suppress all
+# warnings, or a space delimited list of some subset of
+# 'warning_categories' to display only the listed warnings.
+opt_warning_types=all
+
+
+## -------------------- ##
+## Resource management. ##
+## -------------------- ##
+
+# This section contains definitions for functions that each ensure a
+# particular resource (a file, or a non-empty configuration variable for
+# example) is available, and if appropriate to extract default values
+# from pertinent package files. Call them using their associated
+# 'require_*' variable to ensure that they are executed, at most, once.
+#
+# It's entirely deliberate that calling these functions can set
+# variables that don't obey the namespace limitations obeyed by the rest
+# of this file, in order that they be as useful as possible to
+# callers.
+
+
+# require_term_colors
+# -------------------
+# Allow display of bold text on terminals that support it.
+require_term_colors=func_require_term_colors
+func_require_term_colors ()
+{
+    $debug_cmd
+
+    test -t 1 && {
+      # COLORTERM and USE_ANSI_COLORS environment variables take
+      # precedence, because most terminfo databases neglect to describe
+      # whether color sequences are supported.
+      test -n "${COLORTERM+set}" && : ${USE_ANSI_COLORS="1"}
+
+      if test 1 = "$USE_ANSI_COLORS"; then
+        # Standard ANSI escape sequences
+        tc_reset=''
+        tc_bold='';   tc_standout=''
+        tc_red='';   tc_green=''
+        tc_blue='';  tc_cyan=''
+      else
+        # Otherwise trust the terminfo database after all.
+        test -n "`tput sgr0 2>/dev/null`" && {
+          tc_reset=`tput sgr0`
+          test -n "`tput bold 2>/dev/null`" && tc_bold=`tput bold`
+          tc_standout=$tc_bold
+          test -n "`tput smso 2>/dev/null`" && tc_standout=`tput smso`
+          test -n "`tput setaf 1 2>/dev/null`" && tc_red=`tput setaf 1`
+          test -n "`tput setaf 2 2>/dev/null`" && tc_green=`tput setaf 2`
+          test -n "`tput setaf 4 2>/dev/null`" && tc_blue=`tput setaf 4`
+          test -n "`tput setaf 5 2>/dev/null`" && tc_cyan=`tput setaf 5`
+        }
+      fi
+    }
+
+    require_term_colors=:
+}
+
+
+## ----------------- ##
+## Function library. ##
+## ----------------- ##
+
+# This section contains a variety of useful functions to call in your
+# scripts. Take note of the portable wrappers for features provided by
+# some modern shells, which will fall back to slower equivalents on
+# less featureful shells.
+
+
+# func_append VAR VALUE
+# ---------------------
+# Append VALUE onto the existing contents of VAR.
+
+  # We should try to minimise forks, especially on Windows where they are
+  # unreasonably slow, so skip the feature probes when bash or zsh are
+  # being used:
+  if test set = "${BASH_VERSION+set}${ZSH_VERSION+set}"; then
+    : ${_G_HAVE_ARITH_OP="yes"}
+    : ${_G_HAVE_XSI_OPS="yes"}
+    # The += operator was introduced in bash 3.1
+    case $BASH_VERSION in
+      [12].* | 3.0 | 3.0*) ;;
+      *)
+        : ${_G_HAVE_PLUSEQ_OP="yes"}
+        ;;
+    esac
+  fi
+
+  # _G_HAVE_PLUSEQ_OP
+  # Can be empty, in which case the shell is probed, "yes" if += is
+  # usable, or anything else if it does not work.
+  test -z "$_G_HAVE_PLUSEQ_OP" \
+    && (eval 'x=a; x+=" b"; test "a b" = "$x"') 2>/dev/null \
+    && _G_HAVE_PLUSEQ_OP=yes
+
+if test yes = "$_G_HAVE_PLUSEQ_OP"
+then
+  # This is an XSI compatible shell, allowing a faster implementation...
+  eval 'func_append ()
+  {
+    $debug_cmd
+
+    eval "$1+=\$2"
+  }'
+else
+  # ...otherwise fall back to using expr, which is often a shell builtin.
+  func_append ()
+  {
+    $debug_cmd
+
+    eval "$1=\$$1\$2"
+  }
+fi
+
+
+# func_append_quoted VAR VALUE
+# ----------------------------
+# Quote VALUE and append to the end of shell variable VAR, separated
+# by a space.
+if test yes = "$_G_HAVE_PLUSEQ_OP"; then
+  eval 'func_append_quoted ()
+  {
+    $debug_cmd
+
+    func_quote_for_eval "$2"
+    eval "$1+=\\ \$func_quote_for_eval_result"
+  }'
+else
+  func_append_quoted ()
+  {
+    $debug_cmd
+
+    func_quote_for_eval "$2"
+    eval "$1=\$$1\\ \$func_quote_for_eval_result"
+  }
+fi
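+
+# A small usage sketch for the two helpers above ('my_flags' is a
+# hypothetical variable name):
+#
+#   my_flags=
+#   func_append my_flags " -O2"          # my_flags is now ' -O2'
+#   func_append_quoted my_flags 'a b'    # my_flags is now ' -O2 "a b"'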
+
+
+# func_append_uniq VAR VALUE
+# --------------------------
+# Append unique VALUE onto the existing contents of VAR, assuming
+# entries are delimited by the first character of VALUE.  For example:
+#
+#   func_append_uniq options " --another-option option-argument"
+#
+# will only append to $options if " --another-option option-argument "
+# is not already present somewhere in $options already (note spaces at
+# each end implied by leading space in second argument).
+func_append_uniq ()
+{
+    $debug_cmd
+
+    eval _G_current_value='`$ECHO $'$1'`'
+    _G_delim=`expr "$2" : '\(.\)'`
+
+    case $_G_delim$_G_current_value$_G_delim in
+      *"$2$_G_delim"*) ;;
+      *) func_append "$@" ;;
+    esac
+}
+
+
+# func_arith TERM...
+# ------------------
+# Set func_arith_result to the result of evaluating TERMs.
+  test -z "$_G_HAVE_ARITH_OP" \
+    && (eval 'test 2 = $(( 1 + 1 ))') 2>/dev/null \
+    && _G_HAVE_ARITH_OP=yes
+
+if test yes = "$_G_HAVE_ARITH_OP"; then
+  eval 'func_arith ()
+  {
+    $debug_cmd
+
+    func_arith_result=$(( $* ))
+  }'
+else
+  func_arith ()
+  {
+    $debug_cmd
+
+    func_arith_result=`expr "$@"`
+  }
+fi
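+
+# For instance (purely illustrative; note the caller must protect '*'
+# from filename expansion):
+#
+#   func_arith 2 + 3 \* 4
+#   # func_arith_result is now 14, whichever implementation was chosen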
+
+
+# func_basename FILE
+# ------------------
+# Set func_basename_result to FILE with everything up to and including
+# the last / stripped.
+if test yes = "$_G_HAVE_XSI_OPS"; then
+  # If this shell supports suffix pattern removal, then use it to avoid
+  # forking. Hide the definitions in single quotes in case the shell chokes
+  # on unsupported syntax...
+  _b='func_basename_result=${1##*/}'
+  _d='case $1 in
+        */*) func_dirname_result=${1%/*}$2 ;;
+        *  ) func_dirname_result=$3        ;;
+      esac'
+
+else
+  # ...otherwise fall back to using sed.
+  _b='func_basename_result=`$ECHO "$1" |$SED "$sed_basename"`'
+  _d='func_dirname_result=`$ECHO "$1"  |$SED "$sed_dirname"`
+      if test "X$func_dirname_result" = "X$1"; then
+        func_dirname_result=$3
+      else
+        func_append func_dirname_result "$2"
+      fi'
+fi
+
+eval 'func_basename ()
+{
+    $debug_cmd
+
+    '"$_b"'
+}'
+
+
+# func_dirname FILE APPEND NONDIR_REPLACEMENT
+# -------------------------------------------
+# Compute the dirname of FILE.  If nonempty, add APPEND to the result,
+# otherwise set result to NONDIR_REPLACEMENT.
+eval 'func_dirname ()
+{
+    $debug_cmd
+
+    '"$_d"'
+}'
+
+
+# func_dirname_and_basename FILE APPEND NONDIR_REPLACEMENT
+# --------------------------------------------------------
+# Perform func_basename and func_dirname in a single function
+# call:
+#   dirname:  Compute the dirname of FILE.  If nonempty,
+#             add APPEND to the result, otherwise set result
+#             to NONDIR_REPLACEMENT.
+#             value returned in "$func_dirname_result"
+#   basename: Compute filename of FILE.
+#             value returned in "$func_basename_result"
+# For efficiency, we do not delegate to the functions above but instead
+# duplicate the functionality here.
+eval 'func_dirname_and_basename ()
+{
+    $debug_cmd
+
+    '"$_b"'
+    '"$_d"'
+}'
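+
+# Example with a hypothetical path:
+#
+#   func_dirname_and_basename /usr/local/lib/libfoo.la '' .
+#   # func_dirname_result  => /usr/local/lib
+#   # func_basename_result => libfoo.la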
+
+
+# func_echo ARG...
+# ----------------
+# Echo program name prefixed message.
+func_echo ()
+{
+    $debug_cmd
+
+    _G_message=$*
+
+    func_echo_IFS=$IFS
+    IFS=$nl
+    for _G_line in $_G_message; do
+      IFS=$func_echo_IFS
+      $ECHO "$progname: $_G_line"
+    done
+    IFS=$func_echo_IFS
+}
+
+
+# func_echo_all ARG...
+# --------------------
+# Invoke $ECHO with all args, space-separated.
+func_echo_all ()
+{
+    $ECHO "$*"
+}
+
+
+# func_echo_infix_1 INFIX ARG...
+# ------------------------------
+# Echo program name, followed by INFIX on the first line, with any
+# additional lines not showing INFIX.
+func_echo_infix_1 ()
+{
+    $debug_cmd
+
+    $require_term_colors
+
+    _G_infix=$1; shift
+    _G_indent=$_G_infix
+    _G_prefix="$progname: $_G_infix: "
+    _G_message=$*
+
+    # Strip color escape sequences before counting printable length
+    for _G_tc in "$tc_reset" "$tc_bold" "$tc_standout" "$tc_red" "$tc_green" "$tc_blue" "$tc_cyan"
+    do
+      test -n "$_G_tc" && {
+        _G_esc_tc=`$ECHO "$_G_tc" | $SED "$sed_make_literal_regex"`
+        _G_indent=`$ECHO "$_G_indent" | $SED "s|$_G_esc_tc||g"`
+      }
+    done
+    _G_indent="$progname: "`echo "$_G_indent" | $SED 's|.| |g'`"  " ## exclude from sc_prohibit_nested_quotes
+
+    func_echo_infix_1_IFS=$IFS
+    IFS=$nl
+    for _G_line in $_G_message; do
+      IFS=$func_echo_infix_1_IFS
+      $ECHO "$_G_prefix$tc_bold$_G_line$tc_reset" >&2
+      _G_prefix=$_G_indent
+    done
+    IFS=$func_echo_infix_1_IFS
+}
+
+
+# func_error ARG...
+# -----------------
+# Echo program name prefixed message to standard error.
+func_error ()
+{
+    $debug_cmd
+
+    $require_term_colors
+
+    func_echo_infix_1 "  $tc_standout${tc_red}error$tc_reset" "$*" >&2
+}
+
+
+# func_fatal_error ARG...
+# -----------------------
+# Echo program name prefixed message to standard error, and exit.
+func_fatal_error ()
+{
+    $debug_cmd
+
+    func_error "$*"
+    exit $EXIT_FAILURE
+}
+
+
+# func_grep EXPRESSION FILENAME
+# -----------------------------
+# Check whether EXPRESSION matches any line of FILENAME, without output.
+func_grep ()
+{
+    $debug_cmd
+
+    $GREP "$1" "$2" >/dev/null 2>&1
+}
+
+
+# func_len STRING
+# ---------------
+# Set func_len_result to the length of STRING. STRING may not
+# start with a hyphen.
+  test -z "$_G_HAVE_XSI_OPS" \
+    && (eval 'x=a/b/c;
+      test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \
+    && _G_HAVE_XSI_OPS=yes
+
+if test yes = "$_G_HAVE_XSI_OPS"; then
+  eval 'func_len ()
+  {
+    $debug_cmd
+
+    func_len_result=${#1}
+  }'
+else
+  func_len ()
+  {
+    $debug_cmd
+
+    func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len`
+  }
+fi
+
+
+# func_mkdir_p DIRECTORY-PATH
+# ---------------------------
+# Make sure the entire path to DIRECTORY-PATH is available.
+func_mkdir_p ()
+{
+    $debug_cmd
+
+    _G_directory_path=$1
+    _G_dir_list=
+
+    if test -n "$_G_directory_path" && test : != "$opt_dry_run"; then
+
+      # Protect directory names starting with '-'
+      case $_G_directory_path in
+        -*) _G_directory_path=./$_G_directory_path ;;
+      esac
+
+      # While some portion of DIR does not yet exist...
+      while test ! -d "$_G_directory_path"; do
+        # ...make a list in topmost first order.  Use a colon delimited
+        # list in case some portion of the path contains whitespace.
+        _G_dir_list=$_G_directory_path:$_G_dir_list
+
+        # If the last portion added has no slash in it, the list is done
+        case $_G_directory_path in */*) ;; *) break ;; esac
+
+        # ...otherwise throw away the child directory and loop
+        _G_directory_path=`$ECHO "$_G_directory_path" | $SED -e "$sed_dirname"`
+      done
+      _G_dir_list=`$ECHO "$_G_dir_list" | $SED 's|:*$||'`
+
+      func_mkdir_p_IFS=$IFS; IFS=:
+      for _G_dir in $_G_dir_list; do
+	IFS=$func_mkdir_p_IFS
+        # mkdir can fail with a 'File exists' error if two processes
+        # try to create one of the directories concurrently.  Don't
+        # stop in that case!
+        $MKDIR "$_G_dir" 2>/dev/null || :
+      done
+      IFS=$func_mkdir_p_IFS
+
+      # Bail out if we (or some other process) failed to create a directory.
+      test -d "$_G_directory_path" || \
+        func_fatal_error "Failed to create '$1'"
+    fi
+}
+
+
+# func_mktempdir [BASENAME]
+# -------------------------
+# Make a temporary directory that won't clash with other running
+# libtool processes, and avoids race conditions if possible.  If
+# given, BASENAME is the basename for that directory.
+func_mktempdir ()
+{
+    $debug_cmd
+
+    _G_template=${TMPDIR-/tmp}/${1-$progname}
+
+    if test : = "$opt_dry_run"; then
+      # Return a directory name, but don't create it in dry-run mode
+      _G_tmpdir=$_G_template-$$
+    else
+
+      # If mktemp works, use that first and foremost
+      _G_tmpdir=`mktemp -d "$_G_template-XXXXXXXX" 2>/dev/null`
+
+      if test ! -d "$_G_tmpdir"; then
+        # Failing that, at least try and use $RANDOM to avoid a race
+        _G_tmpdir=$_G_template-${RANDOM-0}$$
+
+        func_mktempdir_umask=`umask`
+        umask 0077
+        $MKDIR "$_G_tmpdir"
+        umask $func_mktempdir_umask
+      fi
+
+      # If we're not in dry-run mode, bomb out on failure
+      test -d "$_G_tmpdir" || \
+        func_fatal_error "cannot create temporary directory '$_G_tmpdir'"
+    fi
+
+    $ECHO "$_G_tmpdir"
+}
+
+
+# func_normal_abspath PATH
+# ------------------------
+# Remove doubled-up and trailing slashes, "." path components,
+# and cancel out any ".." path components in PATH after making
+# it an absolute path.
+func_normal_abspath ()
+{
+    $debug_cmd
+
+    # These SED scripts presuppose an absolute path with a trailing slash.
+    _G_pathcar='s|^/\([^/]*\).*$|\1|'
+    _G_pathcdr='s|^/[^/]*||'
+    _G_removedotparts=':dotsl
+		s|/\./|/|g
+		t dotsl
+		s|/\.$|/|'
+    _G_collapseslashes='s|/\{1,\}|/|g'
+    _G_finalslash='s|/*$|/|'
+
+    # Start from root dir and reassemble the path.
+    func_normal_abspath_result=
+    func_normal_abspath_tpath=$1
+    func_normal_abspath_altnamespace=
+    case $func_normal_abspath_tpath in
+      "")
+        # Empty path, that just means $cwd.
+        func_stripname '' '/' "`pwd`"
+        func_normal_abspath_result=$func_stripname_result
+        return
+        ;;
+      # The next three entries are used to spot a run of precisely
+      # two leading slashes without using negated character classes;
+      # we take advantage of case's first-match behaviour.
+      ///*)
+        # Unusual form of absolute path, do nothing.
+        ;;
+      //*)
+        # Not necessarily an ordinary path; POSIX reserves leading '//'
+        # and for example Cygwin uses it to access remote file shares
+        # over CIFS/SMB, so we conserve a leading double slash if found.
+        func_normal_abspath_altnamespace=/
+        ;;
+      /*)
+        # Absolute path, do nothing.
+        ;;
+      *)
+        # Relative path, prepend $cwd.
+        func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath
+        ;;
+    esac
+
+    # Cancel out all the simple stuff to save iterations.  We also want
+    # the path to end with a slash for ease of parsing, so make sure
+    # there is one (and only one) here.
+    func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \
+          -e "$_G_removedotparts" -e "$_G_collapseslashes" -e "$_G_finalslash"`
+    while :; do
+      # Processed it all yet?
+      if test / = "$func_normal_abspath_tpath"; then
+        # If we ascended to the root using ".." the result may be empty now.
+        if test -z "$func_normal_abspath_result"; then
+          func_normal_abspath_result=/
+        fi
+        break
+      fi
+      func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \
+          -e "$_G_pathcar"`
+      func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \
+          -e "$_G_pathcdr"`
+      # Figure out what to do with it
+      case $func_normal_abspath_tcomponent in
+        "")
+          # Trailing empty path component, ignore it.
+          ;;
+        ..)
+          # Parent dir; strip last assembled component from result.
+          func_dirname "$func_normal_abspath_result"
+          func_normal_abspath_result=$func_dirname_result
+          ;;
+        *)
+          # Actual path component, append it.
+          func_append func_normal_abspath_result "/$func_normal_abspath_tcomponent"
+          ;;
+      esac
+    done
+    # Restore leading double-slash if one was found on entry.
+    func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result
+}
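+
+# For example (an illustrative path):
+#
+#   func_normal_abspath '/usr//local/./lib/../include'
+#   # func_normal_abspath_result => /usr/local/include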
+
+
+# func_notquiet ARG...
+# --------------------
+# Echo program name prefixed message only when not in quiet mode.
+func_notquiet ()
+{
+    $debug_cmd
+
+    $opt_quiet || func_echo ${1+"$@"}
+
+    # A bug in bash halts the script if the last line of a function
+    # fails when set -e is in force, so we need another command to
+    # work around that:
+    :
+}
+
+
+# func_relative_path SRCDIR DSTDIR
+# --------------------------------
+# Set func_relative_path_result to the relative path from SRCDIR to DSTDIR.
+func_relative_path ()
+{
+    $debug_cmd
+
+    func_relative_path_result=
+    func_normal_abspath "$1"
+    func_relative_path_tlibdir=$func_normal_abspath_result
+    func_normal_abspath "$2"
+    func_relative_path_tbindir=$func_normal_abspath_result
+
+    # Ascend the tree starting from libdir
+    while :; do
+      # check if we have found a prefix of bindir
+      case $func_relative_path_tbindir in
+        $func_relative_path_tlibdir)
+          # found an exact match
+          func_relative_path_tcancelled=
+          break
+          ;;
+        $func_relative_path_tlibdir*)
+          # found a matching prefix
+          func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir"
+          func_relative_path_tcancelled=$func_stripname_result
+          if test -z "$func_relative_path_result"; then
+            func_relative_path_result=.
+          fi
+          break
+          ;;
+        *)
+          func_dirname $func_relative_path_tlibdir
+          func_relative_path_tlibdir=$func_dirname_result
+          if test -z "$func_relative_path_tlibdir"; then
+            # Have to descend all the way to the root!
+            func_relative_path_result=../$func_relative_path_result
+            func_relative_path_tcancelled=$func_relative_path_tbindir
+            break
+          fi
+          func_relative_path_result=../$func_relative_path_result
+          ;;
+      esac
+    done
+
+    # Now calculate path; take care to avoid doubling-up slashes.
+    func_stripname '' '/' "$func_relative_path_result"
+    func_relative_path_result=$func_stripname_result
+    func_stripname '/' '/' "$func_relative_path_tcancelled"
+    if test -n "$func_stripname_result"; then
+      func_append func_relative_path_result "/$func_stripname_result"
+    fi
+
+    # Normalisation. If bindir is libdir, return '.' else relative path.
+    if test -n "$func_relative_path_result"; then
+      func_stripname './' '' "$func_relative_path_result"
+      func_relative_path_result=$func_stripname_result
+    fi
+
+    test -n "$func_relative_path_result" || func_relative_path_result=.
+
+    :
+}
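+
+# For example (illustrative directories):
+#
+#   func_relative_path /usr/local/lib /usr/local/share/doc
+#   # func_relative_path_result => ../share/doc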
+
+
+# func_quote_for_eval ARG...
+# --------------------------
+# Aesthetically quote ARGs to be evaled later.
+# This function returns two values:
+#   i) func_quote_for_eval_result
+#      double-quoted, suitable for a subsequent eval
+#  ii) func_quote_for_eval_unquoted_result
+#      has all characters that are still active within double
+#      quotes backslashified.
+func_quote_for_eval ()
+{
+    $debug_cmd
+
+    func_quote_for_eval_unquoted_result=
+    func_quote_for_eval_result=
+    while test 0 -lt $#; do
+      case $1 in
+        *[\\\`\"\$]*)
+	  _G_unquoted_arg=`printf '%s\n' "$1" |$SED "$sed_quote_subst"` ;;
+        *)
+          _G_unquoted_arg=$1 ;;
+      esac
+      if test -n "$func_quote_for_eval_unquoted_result"; then
+	func_append func_quote_for_eval_unquoted_result " $_G_unquoted_arg"
+      else
+        func_append func_quote_for_eval_unquoted_result "$_G_unquoted_arg"
+      fi
+
+      case $_G_unquoted_arg in
+        # Double-quote args containing shell metacharacters to delay
+        # word splitting, command substitution and variable expansion
+        # for a subsequent eval.
+        # Many Bourne shells cannot handle close brackets correctly
+        # in scan sets, so we specify it separately.
+        *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*|"")
+          _G_quoted_arg=\"$_G_unquoted_arg\"
+          ;;
+        *)
+          _G_quoted_arg=$_G_unquoted_arg
+	  ;;
+      esac
+
+      if test -n "$func_quote_for_eval_result"; then
+	func_append func_quote_for_eval_result " $_G_quoted_arg"
+      else
+        func_append func_quote_for_eval_result "$_G_quoted_arg"
+      fi
+      shift
+    done
+}
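+
+# A short sketch of the two results, using made-up arguments:
+#
+#   func_quote_for_eval 'a b' '$HOME'
+#   # func_quote_for_eval_unquoted_result => a b \$HOME
+#   # func_quote_for_eval_result          => "a b" \$HOME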
+
+
+# func_quote_for_expand ARG
+# -------------------------
+# Aesthetically quote ARG to be evaled later; same as above,
+# but do not quote variable references.
+func_quote_for_expand ()
+{
+    $debug_cmd
+
+    case $1 in
+      *[\\\`\"]*)
+	_G_arg=`$ECHO "$1" | $SED \
+	    -e "$sed_double_quote_subst" -e "$sed_double_backslash"` ;;
+      *)
+        _G_arg=$1 ;;
+    esac
+
+    case $_G_arg in
+      # Double-quote args containing shell metacharacters to delay
+      # word splitting and command substitution for a subsequent eval.
+      # Many Bourne shells cannot handle close brackets correctly
+      # in scan sets, so we specify it separately.
+      *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*|"")
+        _G_arg=\"$_G_arg\"
+        ;;
+    esac
+
+    func_quote_for_expand_result=$_G_arg
+}
+
+
+# func_stripname PREFIX SUFFIX NAME
+# ---------------------------------
+# strip PREFIX and SUFFIX from NAME, and store in func_stripname_result.
+# PREFIX and SUFFIX must not contain globbing or regex special
+# characters, hashes, percent signs, but SUFFIX may contain a leading
+# dot (in which case that matches only a dot).
+if test yes = "$_G_HAVE_XSI_OPS"; then
+  eval 'func_stripname ()
+  {
+    $debug_cmd
+
+    # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are
+    # positional parameters, so assign one to ordinary variable first.
+    func_stripname_result=$3
+    func_stripname_result=${func_stripname_result#"$1"}
+    func_stripname_result=${func_stripname_result%"$2"}
+  }'
+else
+  func_stripname ()
+  {
+    $debug_cmd
+
+    case $2 in
+      .*) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%\\\\$2\$%%"`;;
+      *)  func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%$2\$%%"`;;
+    esac
+  }
+fi
+
+
+# func_show_eval CMD [FAIL_EXP]
+# -----------------------------
+# Unless opt_quiet is true, output CMD.  Then, if opt_dry_run is
+# not true, evaluate CMD.  If the evaluation of CMD fails, and FAIL_EXP
+# is given, then evaluate it.
+func_show_eval ()
+{
+    $debug_cmd
+
+    _G_cmd=$1
+    _G_fail_exp=${2-':'}
+
+    func_quote_for_expand "$_G_cmd"
+    eval "func_notquiet $func_quote_for_expand_result"
+
+    $opt_dry_run || {
+      eval "$_G_cmd"
+      _G_status=$?
+      if test 0 -ne "$_G_status"; then
+	eval "(exit $_G_status); $_G_fail_exp"
+      fi
+    }
+}
+
+
+# func_show_eval_locale CMD [FAIL_EXP]
+# ------------------------------------
+# Unless opt_quiet is true, output CMD.  Then, if opt_dry_run is
+# not true, evaluate CMD.  If the evaluation of CMD fails, and FAIL_EXP
+# is given, then evaluate it.  Use the saved locale for evaluation.
+func_show_eval_locale ()
+{
+    $debug_cmd
+
+    _G_cmd=$1
+    _G_fail_exp=${2-':'}
+
+    $opt_quiet || {
+      func_quote_for_expand "$_G_cmd"
+      eval "func_echo $func_quote_for_expand_result"
+    }
+
+    $opt_dry_run || {
+      eval "$_G_user_locale
+	    $_G_cmd"
+      _G_status=$?
+      eval "$_G_safe_locale"
+      if test 0 -ne "$_G_status"; then
+	eval "(exit $_G_status); $_G_fail_exp"
+      fi
+    }
+}
+
+
+# func_tr_sh
+# ----------
+# Turn $1 into a string suitable for a shell variable name.
+# Result is stored in $func_tr_sh_result.  All characters
+# not in the set a-zA-Z0-9_ are replaced with '_'. Further,
+# if $1 begins with a digit, a '_' is prepended as well.
+func_tr_sh ()
+{
+    $debug_cmd
+
+    case $1 in
+    [0-9]* | *[!a-zA-Z0-9_]*)
+      func_tr_sh_result=`$ECHO "$1" | $SED -e 's/^\([0-9]\)/_\1/' -e 's/[^a-zA-Z0-9_]/_/g'`
+      ;;
+    * )
+      func_tr_sh_result=$1
+      ;;
+    esac
+}
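+
+# For example (a made-up input):
+#
+#   func_tr_sh 3com-lib.so
+#   # func_tr_sh_result => _3com_lib_so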
+
+
+# func_verbose ARG...
+# -------------------
+# Echo program name prefixed message in verbose mode only.
+func_verbose ()
+{
+    $debug_cmd
+
+    $opt_verbose && func_echo "$*"
+
+    :
+}
+
+
+# func_warn_and_continue ARG...
+# -----------------------------
+# Echo program name prefixed warning message to standard error.
+func_warn_and_continue ()
+{
+    $debug_cmd
+
+    $require_term_colors
+
+    func_echo_infix_1 "${tc_red}warning$tc_reset" "$*" >&2
+}
+
+
+# func_warning CATEGORY ARG...
+# ----------------------------
+# Echo program name prefixed warning message to standard error. Warning
+# messages can be filtered according to CATEGORY, where this function
+# elides messages where CATEGORY is not listed in the global variable
+# 'opt_warning_types'.
+func_warning ()
+{
+    $debug_cmd
+
+    # CATEGORY must be in the warning_categories list!
+    case " $warning_categories " in
+      *" $1 "*) ;;
+      *) func_internal_error "invalid warning category '$1'" ;;
+    esac
+
+    _G_category=$1
+    shift
+
+    case " $opt_warning_types " in
+      *" $_G_category "*) $warning_func ${1+"$@"} ;;
+    esac
+}
+
+
+# func_sort_ver VER1 VER2
+# -----------------------
+# 'sort -V' is not generally available.
+# Note this deviates from the version comparison in automake
+# in that it treats 1.5 < 1.5.0, and treats 1.4.4a < 1.4-p3a
+# but this should suffice as we won't be specifying old
+# version formats or redundant trailing .0 in bootstrap.conf.
+# If we did want full compatibility then we should probably
+# use m4_version_compare from autoconf.
+func_sort_ver ()
+{
+    $debug_cmd
+
+    printf '%s\n%s\n' "$1" "$2" \
+      | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n -k 5,5n -k 6,6n -k 7,7n -k 8,8n -k 9,9n
+}
+
+# func_lt_ver PREV CURR
+# ---------------------
+# Return true if PREV and CURR are in the correct order according to
+# func_sort_ver, otherwise false.  Use it like this:
+#
+#  func_lt_ver "$prev_ver" "$proposed_ver" || func_fatal_error "..."
+func_lt_ver ()
+{
+    $debug_cmd
+
+    test "x$1" = x`func_sort_ver "$1" "$2" | $SED 1q`
+}
+
+
+# Local variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'before-save-hook 'time-stamp)
+# time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC"
+# time-stamp-time-zone: "UTC"
+# End:
+#! /bin/sh
+
+# Set a version string for this script.
+scriptversion=2014-01-07.03; # UTC
+
+# A portable, pluggable option parser for Bourne shell.
+# Written by Gary V. Vaughan, 2010
+
+# Copyright (C) 2010-2015 Free Software Foundation, Inc.
+# This is free software; see the source for copying conditions.  There is NO
+# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# Please report bugs or propose patches to gary@gnu.org.
+
+
+## ------ ##
+## Usage. ##
+## ------ ##
+
+# This file is a library for parsing options in your shell scripts along
+# with assorted other useful supporting features that you can make use
+# of too.
+#
+# For the simplest scripts you might need only:
+#
+#   #!/bin/sh
+#   . relative/path/to/funclib.sh
+#   . relative/path/to/options-parser
+#   scriptversion=1.0
+#   func_options ${1+"$@"}
+#   eval set dummy "$func_options_result"; shift
+#   ...rest of your script...
+#
+# In order for the '--version' option to work, you will need to have a
+# suitably formatted comment like the one at the top of this file
+# starting with '# Written by ' and ending with '# warranty; '.
+#
+# For '-h' and '--help' to work, you will also need a one line
+# description of your script's purpose in a comment directly above the
+# '# Written by ' line, like the one at the top of this file.
+#
+# The default options also support '--debug', which will turn on shell
+# execution tracing (see the comment above debug_cmd below for another
+# use), and '--verbose' and the func_verbose function to allow your script
+# to display verbose messages only when your user has specified
+# '--verbose'.
+#
+# After sourcing this file, you can plug processing for additional
+# options by amending the variables from the 'Configuration' section
+# below, and following the instructions in the 'Option parsing'
+# section further down.
+
+## -------------- ##
+## Configuration. ##
+## -------------- ##
+
+# You should override these variables in your script after sourcing this
+# file so that they reflect the customisations you have added to the
+# option parser.
+
+# The usage line for option parsing errors and the start of '-h' and
+# '--help' output messages. You can embed shell variables for delayed
+# expansion at the time the message is displayed, but you will need to
+# quote other shell meta-characters carefully to prevent them being
+# expanded when the contents are evaled.
+usage='$progpath [OPTION]...'
+
+# Short help message in response to '-h' and '--help'.  Add to this or
+# override it after sourcing this library to reflect the full set of
+# options your script accepts.
+usage_message="\
+       --debug        enable verbose shell tracing
+   -W, --warnings=CATEGORY
+                      report the warnings falling in CATEGORY [all]
+   -v, --verbose      verbosely report processing
+       --version      print version information and exit
+   -h, --help         print short or long help message and exit
+"
+
+# Additional text appended to 'usage_message' in response to '--help'.
+long_help_message="
+Warning categories include:
+       'all'          show all warnings
+       'none'         turn off all the warnings
+       'error'        warnings are treated as fatal errors"
+
+# Help message printed before fatal option parsing errors.
+fatal_help="Try '\$progname --help' for more information."
+
+
+
+## ------------------------- ##
+## Hook function management. ##
+## ------------------------- ##
+
+# This section contains functions for adding, removing, and running hooks
+# to the main code.  A hook is just a named list of functions that can
+# be run in order later on.
+
+# func_hookable FUNC_NAME
+# -----------------------
+# Declare that FUNC_NAME will run hooks added with
+# 'func_add_hook FUNC_NAME ...'.
+func_hookable ()
+{
+    $debug_cmd
+
+    func_append hookable_fns " $1"
+}
+
+
+# func_add_hook FUNC_NAME HOOK_FUNC
+# ---------------------------------
+# Request that FUNC_NAME call HOOK_FUNC before it returns.  FUNC_NAME must
+# first have been declared "hookable" by a call to 'func_hookable'.
+func_add_hook ()
+{
+    $debug_cmd
+
+    case " $hookable_fns " in
+      *" $1 "*) ;;
+      *) func_fatal_error "'$1' does not accept hook functions." ;;
+    esac
+
+    eval func_append ${1}_hooks '" $2"'
+}
+
+
+# func_remove_hook FUNC_NAME HOOK_FUNC
+# ------------------------------------
+# Remove HOOK_FUNC from the list of functions called by FUNC_NAME.
+func_remove_hook ()
+{
+    $debug_cmd
+
+    eval ${1}_hooks='`$ECHO "\$'$1'_hooks" |$SED "s| '$2'||"`'
+}
+
+
+# func_run_hooks FUNC_NAME [ARG]...
+# ---------------------------------
+# Run all hook functions registered to FUNC_NAME.
+# It is assumed that the list of hook functions contains nothing more
+# than a whitespace-delimited list of legal shell function names, and
+# no effort is wasted trying to catch shell meta-characters or preserve
+# whitespace.
+func_run_hooks ()
+{
+    $debug_cmd
+
+    case " $hookable_fns " in
+      *" $1 "*) ;;
+      *) func_fatal_error "'$1' does not accept hook functions." ;;
+    esac
+
+    eval _G_hook_fns=\$$1_hooks; shift
+
+    for _G_hook in $_G_hook_fns; do
+      eval $_G_hook '"$@"'
+
+      # store returned options list back into positional
+      # parameters for next 'cmd' execution.
+      eval _G_hook_result=\$${_G_hook}_result
+      eval set dummy "$_G_hook_result"; shift
+    done
+
+    func_quote_for_eval ${1+"$@"}
+    func_run_hooks_result=$func_quote_for_eval_result
+}
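+
+# A minimal registration sketch ('my_func' and 'my_hook' are hypothetical
+# names; a hook must pass the positional parameters it did not consume
+# back in '<hook_name>_result'):
+#
+#   func_hookable my_func
+#   my_hook ()
+#   {
+#     func_quote_for_eval ${1+"$@"}
+#     my_hook_result=$func_quote_for_eval_result
+#   }
+#   func_add_hook my_func my_hook
+#   func_run_hooks my_func "$@"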
+
+
+
+## --------------- ##
+## Option parsing. ##
+## --------------- ##
+
+# In order to add your own option parsing hooks, you must accept the
+# full positional parameter list in your hook function, remove any
+# options that you action, and then pass back the remaining unprocessed
+# options in '<hooked_function_name>_result', escaped suitably for
+# 'eval'.  Like this:
+#
+#    my_options_prep ()
+#    {
+#        $debug_cmd
+#
+#        # Extend the existing usage message.
+#        usage_message=$usage_message'
+#      -s, --silent       don'\''t print informational messages
+#    '
+#
+#        func_quote_for_eval ${1+"$@"}
+#        my_options_prep_result=$func_quote_for_eval_result
+#    }
+#    func_add_hook func_options_prep my_options_prep
+#
+#
+#    my_silent_option ()
+#    {
+#        $debug_cmd
+#
+#        # Note that for efficiency, we parse as many options as we can
+#        # recognise in a loop before passing the remainder back to the
+#        # caller on the first unrecognised argument we encounter.
+#        while test $# -gt 0; do
+#          _G_opt=$1; shift
+#          case $_G_opt in
+#            --silent|-s) opt_silent=: ;;
+#            # Separate non-argument short options:
+#            -s*)         func_split_short_opt "$_G_opt"
+#                         set dummy "$func_split_short_opt_name" \
+#                             "-$func_split_short_opt_arg" ${1+"$@"}
+#                         shift
+#                         ;;
+#            *)            set dummy "$_G_opt" "$*"; shift; break ;;
+#          esac
+#        done
+#
+#        func_quote_for_eval ${1+"$@"}
+#        my_silent_option_result=$func_quote_for_eval_result
+#    }
+#    func_add_hook func_parse_options my_silent_option
+#
+#
+#    my_option_validation ()
+#    {
+#        $debug_cmd
+#
+#        $opt_silent && $opt_verbose && func_fatal_help "\
+#    '--silent' and '--verbose' options are mutually exclusive."
+#
+#        func_quote_for_eval ${1+"$@"}
+#        my_option_validation_result=$func_quote_for_eval_result
+#    }
+#    func_add_hook func_validate_options my_option_validation
+#
+# You'll also need to manually amend $usage_message to reflect the extra
+# options you parse.  It's preferable to append if you can, so that
+# multiple option parsing hooks can be added safely.
+
+
+# func_options [ARG]...
+# ---------------------
+# All the functions called inside func_options are hookable. See the
+# individual implementations for details.
+func_hookable func_options
+func_options ()
+{
+    $debug_cmd
+
+    func_options_prep ${1+"$@"}
+    eval func_parse_options \
+        ${func_options_prep_result+"$func_options_prep_result"}
+    eval func_validate_options \
+        ${func_parse_options_result+"$func_parse_options_result"}
+
+    eval func_run_hooks func_options \
+        ${func_validate_options_result+"$func_validate_options_result"}
+
+    # save modified positional parameters for caller
+    func_options_result=$func_run_hooks_result
+}
+
+
+# func_options_prep [ARG]...
+# --------------------------
+# All initialisations required before starting the option parse loop.
+# Note that when calling hook functions, we pass through the list of
+# positional parameters.  If a hook function modifies that list, and
+# needs to propagate that back to the rest of this script, then the complete
+# modified list must be put in 'func_run_hooks_result' before
+# returning.
+func_hookable func_options_prep
+func_options_prep ()
+{
+    $debug_cmd
+
+    # Option defaults:
+    opt_verbose=false
+    opt_warning_types=
+
+    func_run_hooks func_options_prep ${1+"$@"}
+
+    # save modified positional parameters for caller
+    func_options_prep_result=$func_run_hooks_result
+}
+
+
+# func_parse_options [ARG]...
+# ---------------------------
+# The main option parsing loop.
+func_hookable func_parse_options
+func_parse_options ()
+{
+    $debug_cmd
+
+    func_parse_options_result=
+
+    # this just eases exit handling
+    while test $# -gt 0; do
+      # Defer to hook functions for initial option parsing, so they
+      # get priority in the event of reusing an option name.
+      func_run_hooks func_parse_options ${1+"$@"}
+
+      # Adjust func_parse_options positional parameters to match
+      eval set dummy "$func_run_hooks_result"; shift
+
+      # Break out of the loop if we already parsed every option.
+      test $# -gt 0 || break
+
+      _G_opt=$1
+      shift
+      case $_G_opt in
+        --debug|-x)   debug_cmd='set -x'
+                      func_echo "enabling shell trace mode"
+                      $debug_cmd
+                      ;;
+
+        --no-warnings|--no-warning|--no-warn)
+                      set dummy --warnings none ${1+"$@"}
+                      shift
+		      ;;
+
+        --warnings|--warning|-W)
+                      test $# = 0 && func_missing_arg $_G_opt && break
+                      case " $warning_categories $1" in
+                        *" $1 "*)
+                          # trailing space prevents matching last $1 above
+                          func_append_uniq opt_warning_types " $1"
+                          ;;
+                        *all)
+                          opt_warning_types=$warning_categories
+                          ;;
+                        *none)
+                          opt_warning_types=none
+                          warning_func=:
+                          ;;
+                        *error)
+                          opt_warning_types=$warning_categories
+                          warning_func=func_fatal_error
+                          ;;
+                        *)
+                          func_fatal_error \
+                             "unsupported warning category: '$1'"
+                          ;;
+                      esac
+                      shift
+                      ;;
+
+        --verbose|-v) opt_verbose=: ;;
+        --version)    func_version ;;
+        -\?|-h)       func_usage ;;
+        --help)       func_help ;;
+
+	# Separate optargs to long options (plugins may need this):
+	--*=*)        func_split_equals "$_G_opt"
+	              set dummy "$func_split_equals_lhs" \
+                          "$func_split_equals_rhs" ${1+"$@"}
+                      shift
+                      ;;
+
+       # Separate optargs to short options:
+        -W*)
+                      func_split_short_opt "$_G_opt"
+                      set dummy "$func_split_short_opt_name" \
+                          "$func_split_short_opt_arg" ${1+"$@"}
+                      shift
+                      ;;
+
+        # Separate non-argument short options:
+        -\?*|-h*|-v*|-x*)
+                      func_split_short_opt "$_G_opt"
+                      set dummy "$func_split_short_opt_name" \
+                          "-$func_split_short_opt_arg" ${1+"$@"}
+                      shift
+                      ;;
+
+        --)           break ;;
+        -*)           func_fatal_help "unrecognised option: '$_G_opt'" ;;
+        *)            set dummy "$_G_opt" ${1+"$@"}; shift; break ;;
+      esac
+    done
+
+    # save modified positional parameters for caller
+    func_quote_for_eval ${1+"$@"}
+    func_parse_options_result=$func_quote_for_eval_result
+}
+
+
+# func_validate_options [ARG]...
+# ------------------------------
+# Perform any sanity checks on option settings and/or unconsumed
+# arguments.
+func_hookable func_validate_options
+func_validate_options ()
+{
+    $debug_cmd
+
+    # Display all warnings if -W was not given.
+    test -n "$opt_warning_types" || opt_warning_types=" $warning_categories"
+
+    func_run_hooks func_validate_options ${1+"$@"}
+
+    # Bail if the options were screwed!
+    $exit_cmd $EXIT_FAILURE
+
+    # save modified positional parameters for caller
+    func_validate_options_result=$func_run_hooks_result
+}
+
+
+
+## ----------------- ##
+## Helper functions. ##
+## ----------------- ##
+
+# This section contains the helper functions used by the rest of the
+# hookable option parser framework in ascii-betical order.
+
+
+# func_fatal_help ARG...
+# ----------------------
+# Echo program name prefixed message to standard error, followed by
+# a help hint, and exit.
+func_fatal_help ()
+{
+    $debug_cmd
+
+    eval \$ECHO \""Usage: $usage"\"
+    eval \$ECHO \""$fatal_help"\"
+    func_error ${1+"$@"}
+    exit $EXIT_FAILURE
+}
+
+
+# func_help
+# ---------
+# Echo long help message to standard output and exit.
+func_help ()
+{
+    $debug_cmd
+
+    func_usage_message
+    $ECHO "$long_help_message"
+    exit 0
+}
+
+
+# func_missing_arg ARGNAME
+# ------------------------
+# Echo program name prefixed message to standard error and set global
+# exit_cmd.
+func_missing_arg ()
+{
+    $debug_cmd
+
+    func_error "Missing argument for '$1'."
+    exit_cmd=exit
+}
+
+
+# func_split_equals STRING
+# ------------------------
+# Set func_split_equals_lhs and func_split_equals_rhs shell variables after
+# splitting STRING at the '=' sign.
+test -z "$_G_HAVE_XSI_OPS" \
+    && (eval 'x=a/b/c;
+      test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \
+    && _G_HAVE_XSI_OPS=yes
+
+if test yes = "$_G_HAVE_XSI_OPS"
+then
+  # This is an XSI compatible shell, allowing a faster implementation...
+  eval 'func_split_equals ()
+  {
+      $debug_cmd
+
+      func_split_equals_lhs=${1%%=*}
+      func_split_equals_rhs=${1#*=}
+      test "x$func_split_equals_lhs" = "x$1" \
+        && func_split_equals_rhs=
+  }'
+else
+  # ...otherwise fall back to using expr, which is often a shell builtin.
+  func_split_equals ()
+  {
+      $debug_cmd
+
+      func_split_equals_lhs=`expr "x$1" : 'x\([^=]*\)'`
+      func_split_equals_rhs=
+      test "x$func_split_equals_lhs" = "x$1" \
+        || func_split_equals_rhs=`expr "x$1" : 'x[^=]*=\(.*\)$'`
+  }
+fi #func_split_equals
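+
+# For example (a sample option string):
+#
+#   func_split_equals --mode=link
+#   # func_split_equals_lhs => --mode
+#   # func_split_equals_rhs => link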
+
+
+# func_split_short_opt SHORTOPT
+# -----------------------------
+# Set func_split_short_opt_name and func_split_short_opt_arg shell
+# variables after splitting SHORTOPT after the 2nd character.
+if test yes = "$_G_HAVE_XSI_OPS"
+then
+  # This is an XSI compatible shell, allowing a faster implementation...
+  eval 'func_split_short_opt ()
+  {
+      $debug_cmd
+
+      func_split_short_opt_arg=${1#??}
+      func_split_short_opt_name=${1%"$func_split_short_opt_arg"}
+  }'
+else
+  # ...otherwise fall back to using expr, which is often a shell builtin.
+  func_split_short_opt ()
+  {
+      $debug_cmd
+
+      func_split_short_opt_name=`expr "x$1" : 'x-\(.\)'`
+      func_split_short_opt_arg=`expr "x$1" : 'x-.\(.*\)$'`
+  }
+fi #func_split_short_opt
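+
+# For example, with the XSI implementation above (a sample option string):
+#
+#   func_split_short_opt -Wall
+#   # func_split_short_opt_name => -W
+#   # func_split_short_opt_arg  => all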
+
+
+# func_usage
+# ----------
+# Echo short help message to standard output and exit.
+func_usage ()
+{
+    $debug_cmd
+
+    func_usage_message
+    $ECHO "Run '$progname --help |${PAGER-more}' for full usage"
+    exit 0
+}
+
+
+# func_usage_message
+# ------------------
+# Echo short help message to standard output.
+func_usage_message ()
+{
+    $debug_cmd
+
+    eval \$ECHO \""Usage: $usage"\"
+    echo
+    $SED -n 's|^# ||
+        /^Written by/{
+          x;p;x
+        }
+	h
+	/^Written by/q' < "$progpath"
+    echo
+    eval \$ECHO \""$usage_message"\"
+}
+
+
+# func_version
+# ------------
+# Echo version message to standard output and exit.
+func_version ()
+{
+    $debug_cmd
+
+    printf '%s\n' "$progname $scriptversion"
+    $SED -n '
+        /(C)/!b go
+        :more
+        /\./!{
+          N
+          s|\n# | |
+          b more
+        }
+        :go
+        /^# Written by /,/# warranty; / {
+          s|^# ||
+          s|^# *$||
+          s|\((C)\)[ 0-9,-]*[ ,-]\([1-9][0-9]* \)|\1 \2|
+          p
+        }
+        /^# Written by / {
+          s|^# ||
+          p
+        }
+        /^warranty; /q' < "$progpath"
+
+    exit $?
+}
+
+
+# Local variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'before-save-hook 'time-stamp)
+# time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC"
+# time-stamp-time-zone: "UTC"
+# End:
+
+# Set a version string.
+scriptversion='(GNU libtool) 2.4.6'
+
+
+# func_echo ARG...
+# ----------------
+# Libtool also displays the current mode in messages, so override
+# funclib.sh func_echo with this custom definition.
+func_echo ()
+{
+    $debug_cmd
+
+    _G_message=$*
+
+    func_echo_IFS=$IFS
+    IFS=$nl
+    for _G_line in $_G_message; do
+      IFS=$func_echo_IFS
+      $ECHO "$progname${opt_mode+: $opt_mode}: $_G_line"
+    done
+    IFS=$func_echo_IFS
+}
+
+
+# func_warning ARG...
+# -------------------
+# Libtool warnings are not categorized, so override funclib.sh
+# func_warning with this simpler definition.
+func_warning ()
+{
+    $debug_cmd
+
+    $warning_func ${1+"$@"}
+}
+
+
+## ---------------- ##
+## Options parsing. ##
+## ---------------- ##
+
+# Hook in the functions to make sure our own options are parsed during
+# the option parsing loop.
+
+usage='$progpath [OPTION]... [MODE-ARG]...'
+
+# Short help message in response to '-h'.
+usage_message="Options:
+       --config             show all configuration variables
+       --debug              enable verbose shell tracing
+   -n, --dry-run            display commands without modifying any files
+       --features           display basic configuration information and exit
+       --mode=MODE          use operation mode MODE
+       --no-warnings        equivalent to '-Wnone'
+       --preserve-dup-deps  don't remove duplicate dependency libraries
+       --quiet, --silent    don't print informational messages
+       --tag=TAG            use configuration variables from tag TAG
+   -v, --verbose            print more informational messages than default
+       --version            print version information
+   -W, --warnings=CATEGORY  report the warnings falling in CATEGORY [all]
+   -h, --help, --help-all   print short, long, or detailed help message
+"
+
+# Additional text appended to 'usage_message' in response to '--help'.
+func_help ()
+{
+    $debug_cmd
+
+    func_usage_message
+    $ECHO "$long_help_message
+
+MODE must be one of the following:
+
+       clean           remove files from the build directory
+       compile         compile a source file into a libtool object
+       execute         automatically set library path, then run a program
+       finish          complete the installation of libtool libraries
+       install         install libraries or executables
+       link            create a library or an executable
+       uninstall       remove libraries from an installed directory
+
+MODE-ARGS vary depending on the MODE.  When passed as first option,
+'--mode=MODE' may be abbreviated as 'MODE' or a unique abbreviation of that.
+Try '$progname --help --mode=MODE' for a more detailed description of MODE.
+
+When reporting a bug, please describe a test case to reproduce it and
+include the following information:
+
+       host-triplet:   $host
+       shell:          $SHELL
+       compiler:       $LTCC
+       compiler flags: $LTCFLAGS
+       linker:         $LD (gnu? $with_gnu_ld)
+       version:        $progname (GNU libtool) 2.4.6
+       automake:       `($AUTOMAKE --version) 2>/dev/null |$SED 1q`
+       autoconf:       `($AUTOCONF --version) 2>/dev/null |$SED 1q`
+
+Report bugs to <bug-libtool@gnu.org>.
+GNU libtool home page: <http://www.gnu.org/software/libtool/>.
+General help using GNU software: <http://www.gnu.org/gethelp/>."
+    exit 0
+}
+
+
+# func_lo2o OBJECT-NAME
+# ---------------------
+# Transform OBJECT-NAME from a '.lo' suffix to the platform specific
+# object suffix.
+
+lo2o=s/\\.lo\$/.$objext/
+o2lo=s/\\.$objext\$/.lo/
+
+if test yes = "$_G_HAVE_XSI_OPS"; then
+  eval 'func_lo2o ()
+  {
+    case $1 in
+      *.lo) func_lo2o_result=${1%.lo}.$objext ;;
+      *   ) func_lo2o_result=$1               ;;
+    esac
+  }'
+
+  # func_xform LIBOBJ-OR-SOURCE
+  # ---------------------------
+  # Transform LIBOBJ-OR-SOURCE from a '.o' or '.c' (or otherwise)
+  # suffix to a '.lo' libtool-object suffix.
+  eval 'func_xform ()
+  {
+    func_xform_result=${1%.*}.lo
+  }'
+else
+  # ...otherwise fall back to using sed.
+  func_lo2o ()
+  {
+    func_lo2o_result=`$ECHO "$1" | $SED "$lo2o"`
+  }
+
+  func_xform ()
+  {
+    func_xform_result=`$ECHO "$1" | $SED 's|\.[^.]*$|.lo|'`
+  }
+fi
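+
+# For example, assuming the configured object extension is 'o':
+#
+#   func_lo2o foo.lo     # func_lo2o_result  => foo.o
+#   func_xform bar.c     # func_xform_result => bar.lo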
+
+
+# func_fatal_configuration ARG...
+# -------------------------------
+# Echo program name prefixed message to standard error, followed by
+# a configuration failure hint, and exit.
+func_fatal_configuration ()
+{
+    func_fatal_error ${1+"$@"} \
+      "See the $PACKAGE documentation for more information." \
+      "Fatal configuration error."
+}
+
+
+# func_config
+# -----------
+# Display the configuration for all the tags in this script.
+func_config ()
+{
+    re_begincf='^# ### BEGIN LIBTOOL'
+    re_endcf='^# ### END LIBTOOL'
+
+    # Default configuration.
+    $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath"
+
+    # Now print the configurations for the tags.
+    for tagname in $taglist; do
+      $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath"
+    done
+
+    exit $?
+}
+
+
+# func_features
+# -------------
+# Display the features supported by this script.
+func_features ()
+{
+    echo "host: $host"
+    if test yes = "$build_libtool_libs"; then
+      echo "enable shared libraries"
+    else
+      echo "disable shared libraries"
+    fi
+    if test yes = "$build_old_libs"; then
+      echo "enable static libraries"
+    else
+      echo "disable static libraries"
+    fi
+
+    exit $?
+}
+
+
+# func_enable_tag TAGNAME
+# -----------------------
+# Verify that TAGNAME is valid, and either flag an error and exit, or
+# enable the TAGNAME tag.  We also add TAGNAME to the global $taglist
+# variable here.
+func_enable_tag ()
+{
+    # Global variable:
+    tagname=$1
+
+    re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$"
+    re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$"
+    sed_extractcf=/$re_begincf/,/$re_endcf/p
+
+    # Validate tagname.
+    case $tagname in
+      *[!-_A-Za-z0-9,/]*)
+        func_fatal_error "invalid tag name: $tagname"
+        ;;
+    esac
+
+    # Don't test for the "default" C tag, as we know it's
+    # there but not specially marked.
+    case $tagname in
+        CC) ;;
+    *)
+        if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then
+	  taglist="$taglist $tagname"
+
+	  # Evaluate the configuration.  Be careful to quote the path
+	  # and the sed script, to avoid splitting on whitespace, but
+	  # also don't use non-portable quotes within backquotes within
+	  # quotes, so we have to do it in 2 steps:
+	  extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"`
+	  eval "$extractedcf"
+        else
+	  func_error "ignoring unknown tag $tagname"
+        fi
+        ;;
+    esac
+}
+
+
+# func_check_version_match
+# ------------------------
+# Ensure that we are using m4 macros, and libtool script from the same
+# release of libtool.
+func_check_version_match ()
+{
+    if test "$package_revision" != "$macro_revision"; then
+      if test "$VERSION" != "$macro_version"; then
+        if test -z "$macro_version"; then
+          cat >&2 <<_LT_EOF
+$progname: Version mismatch error.  This is $PACKAGE $VERSION, but the
+$progname: definition of this LT_INIT comes from an older release.
+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
+$progname: and run autoconf again.
+_LT_EOF
+        else
+          cat >&2 <<_LT_EOF
+$progname: Version mismatch error.  This is $PACKAGE $VERSION, but the
+$progname: definition of this LT_INIT comes from $PACKAGE $macro_version.
+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
+$progname: and run autoconf again.
+_LT_EOF
+        fi
+      else
+        cat >&2 <<_LT_EOF
+$progname: Version mismatch error.  This is $PACKAGE $VERSION, revision $package_revision,
+$progname: but the definition of this LT_INIT comes from revision $macro_revision.
+$progname: You should recreate aclocal.m4 with macros from revision $package_revision
+$progname: of $PACKAGE $VERSION and run autoconf again.
+_LT_EOF
+      fi
+
+      exit $EXIT_MISMATCH
+    fi
+}
+
+
+# libtool_options_prep [ARG]...
+# -----------------------------
+# Preparation for options parsed by libtool.
+libtool_options_prep ()
+{
+    $debug_cmd
+
+    # Option defaults:
+    opt_config=false
+    opt_dlopen=
+    opt_dry_run=false
+    opt_help=false
+    opt_mode=
+    opt_preserve_dup_deps=false
+    opt_quiet=false
+
+    nonopt=
+    preserve_args=
+
+    # Shorthand for --mode=foo, only valid as the first argument
+    case $1 in
+    clean|clea|cle|cl)
+      shift; set dummy --mode clean ${1+"$@"}; shift
+      ;;
+    compile|compil|compi|comp|com|co|c)
+      shift; set dummy --mode compile ${1+"$@"}; shift
+      ;;
+    execute|execut|execu|exec|exe|ex|e)
+      shift; set dummy --mode execute ${1+"$@"}; shift
+      ;;
+    finish|finis|fini|fin|fi|f)
+      shift; set dummy --mode finish ${1+"$@"}; shift
+      ;;
+    install|instal|insta|inst|ins|in|i)
+      shift; set dummy --mode install ${1+"$@"}; shift
+      ;;
+    link|lin|li|l)
+      shift; set dummy --mode link ${1+"$@"}; shift
+      ;;
+    uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u)
+      shift; set dummy --mode uninstall ${1+"$@"}; shift
+      ;;
+    esac
+
+    # Pass back the list of options.
+    func_quote_for_eval ${1+"$@"}
+    libtool_options_prep_result=$func_quote_for_eval_result
+}
+func_add_hook func_options_prep libtool_options_prep
+
+
+# libtool_parse_options [ARG]...
+# ---------------------------------
+# Provide handling for libtool specific options.
+libtool_parse_options ()
+{
+    $debug_cmd
+
+    # Perform our own loop to consume as many options as possible in
+    # each iteration.
+    while test $# -gt 0; do
+      _G_opt=$1
+      shift
+      case $_G_opt in
+        --dry-run|--dryrun|-n)
+                        opt_dry_run=:
+                        ;;
+
+        --config)       func_config ;;
+
+        --dlopen|-dlopen)
+                        opt_dlopen="${opt_dlopen+$opt_dlopen
+}$1"
+                        shift
+                        ;;
+
+        --preserve-dup-deps)
+                        opt_preserve_dup_deps=: ;;
+
+        --features)     func_features ;;
+
+        --finish)       set dummy --mode finish ${1+"$@"}; shift ;;
+
+        --help)         opt_help=: ;;
+
+        --help-all)     opt_help=': help-all' ;;
+
+        --mode)         test $# = 0 && func_missing_arg $_G_opt && break
+                        opt_mode=$1
+                        case $1 in
+                          # Valid mode arguments:
+                          clean|compile|execute|finish|install|link|relink|uninstall) ;;
+
+                          # Catch anything else as an error
+                          *) func_error "invalid argument for $_G_opt"
+                             exit_cmd=exit
+                             break
+                             ;;
+                        esac
+                        shift
+                        ;;
+
+        --no-silent|--no-quiet)
+                        opt_quiet=false
+                        func_append preserve_args " $_G_opt"
+                        ;;
+
+        --no-warnings|--no-warning|--no-warn)
+                        opt_warning=false
+                        func_append preserve_args " $_G_opt"
+                        ;;
+
+        --no-verbose)
+                        opt_verbose=false
+                        func_append preserve_args " $_G_opt"
+                        ;;
+
+        --silent|--quiet)
+                        opt_quiet=:
+                        opt_verbose=false
+                        func_append preserve_args " $_G_opt"
+                        ;;
+
+        --tag)          test $# = 0 && func_missing_arg $_G_opt && break
+                        opt_tag=$1
+                        func_append preserve_args " $_G_opt $1"
+                        func_enable_tag "$1"
+                        shift
+                        ;;
+
+        --verbose|-v)   opt_quiet=false
+                        opt_verbose=:
+                        func_append preserve_args " $_G_opt"
+                        ;;
+
+	# An option not handled by this hook function:
+        *)		set dummy "$_G_opt" ${1+"$@"};	shift; break  ;;
+      esac
+    done
+
+
+    # save modified positional parameters for caller
+    func_quote_for_eval ${1+"$@"}
+    libtool_parse_options_result=$func_quote_for_eval_result
+}
+func_add_hook func_parse_options libtool_parse_options
+
+
+
+# libtool_validate_options [ARG]...
+# ---------------------------------
+# Perform any sanity checks on option settings and/or unconsumed
+# arguments.
+libtool_validate_options ()
+{
+    # save first non-option argument
+    if test 0 -lt $#; then
+      nonopt=$1
+      shift
+    fi
+
+    # preserve --debug
+    test : = "$debug_cmd" || func_append preserve_args " --debug"
+
+    case $host in
+      # Solaris2 added to fix http://debbugs.gnu.org/cgi/bugreport.cgi?bug=16452
+      # see also: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59788
+      *cygwin* | *mingw* | *pw32* | *cegcc* | *solaris2* | *os2*)
+        # don't eliminate duplications in $postdeps and $predeps
+        opt_duplicate_compiler_generated_deps=:
+        ;;
+      *)
+        opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps
+        ;;
+    esac
+
+    $opt_help || {
+      # Sanity checks first:
+      func_check_version_match
+
+      test yes != "$build_libtool_libs" \
+        && test yes != "$build_old_libs" \
+        && func_fatal_configuration "not configured to build any kind of library"
+
+      # Darwin sucks
+      eval std_shrext=\"$shrext_cmds\"
+
+      # Only execute mode is allowed to have -dlopen flags.
+      if test -n "$opt_dlopen" && test execute != "$opt_mode"; then
+        func_error "unrecognized option '-dlopen'"
+        $ECHO "$help" 1>&2
+        exit $EXIT_FAILURE
+      fi
+
+      # Change the help message to a mode-specific one.
+      generic_help=$help
+      help="Try '$progname --help --mode=$opt_mode' for more information."
+    }
+
+    # Pass back the unparsed argument list
+    func_quote_for_eval ${1+"$@"}
+    libtool_validate_options_result=$func_quote_for_eval_result
+}
+func_add_hook func_validate_options libtool_validate_options
+
+
+# Process options as early as possible so that --help and --version
+# can return quickly.
+func_options ${1+"$@"}
+eval set dummy "$func_options_result"; shift
+
+
+
+## ----------- ##
+##    Main.    ##
+## ----------- ##
+
+magic='%%%MAGIC variable%%%'
+magic_exe='%%%MAGIC EXE variable%%%'
+
+# Global variables.
+extracted_archives=
+extracted_serial=0
+
+# If this variable is set in any of the actions, the command in it
+# will be execed at the end.  This prevents here-documents from being
+# left over by shells.
+exec_cmd=
+
+
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+  eval 'cat <<_LTECHO_EOF
+$1
+_LTECHO_EOF'
+}
+
+# func_generated_by_libtool_p
+# True iff stdin has been generated by Libtool. This function is only
+# a basic sanity check; it will hardly flush out determined imposters.
+func_generated_by_libtool_p ()
+{
+  $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1
+}
+
+# func_lalib_p file
+# True iff FILE is a libtool '.la' library or '.lo' object file.
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_lalib_p ()
+{
+    test -f "$1" &&
+      $SED -e 4q "$1" 2>/dev/null | func_generated_by_libtool_p
+}
+
+# func_lalib_unsafe_p file
+# True iff FILE is a libtool '.la' library or '.lo' object file.
+# This function implements the same check as func_lalib_p without
+# resorting to external programs.  To this end, it redirects stdin and
+# closes it afterwards, without saving the original file descriptor.
+# As a safety measure, use it only where a negative result would be
+# fatal anyway.  Works if 'file' does not exist.
+func_lalib_unsafe_p ()
+{
+    lalib_p=no
+    if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then
+	for lalib_p_l in 1 2 3 4
+	do
+	    read lalib_p_line
+	    case $lalib_p_line in
+		\#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;;
+	    esac
+	done
+	exec 0<&5 5<&-
+    fi
+    test yes = "$lalib_p"
+}
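+
+# Illustrative sketch (assumption, not upstream text): the check above relies
+# on the header that libtool writes at the top of every '.la' and '.lo' file,
+# which looks roughly like
+#
+#   # libfoo.la - a libtool library file
+#   # Generated by libtool (GNU libtool) 2.x.y
+#
+# Only the first four lines are read, so the test stays cheap even for large
+# files.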
+
+# func_ltwrapper_script_p file
+# True iff FILE is a libtool wrapper script
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_ltwrapper_script_p ()
+{
+    test -f "$1" &&
+      $lt_truncate_bin < "$1" 2>/dev/null | func_generated_by_libtool_p
+}
+
+# func_ltwrapper_executable_p file
+# True iff FILE is a libtool wrapper executable
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_ltwrapper_executable_p ()
+{
+    func_ltwrapper_exec_suffix=
+    case $1 in
+    *.exe) ;;
+    *) func_ltwrapper_exec_suffix=.exe ;;
+    esac
+    $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1
+}
+
+# func_ltwrapper_scriptname file
+# Assumes FILE is an ltwrapper executable; uses FILE to determine the
+# appropriate filename for a temporary ltwrapper script.
+func_ltwrapper_scriptname ()
+{
+    func_dirname_and_basename "$1" "" "."
+    func_stripname '' '.exe' "$func_basename_result"
+    func_ltwrapper_scriptname_result=$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper
+}
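+
+# Illustrative example (hypothetical names): for an ltwrapper executable
+# 'tests/foo.exe' and the usual objdir '.libs', the computed script name is
+#   tests/.libs/foo_ltshwrapper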
+
+# func_ltwrapper_p file
+# True iff FILE is a libtool wrapper script or wrapper executable
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_ltwrapper_p ()
+{
+    func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1"
+}
+
+
+# func_execute_cmds commands fail_cmd
+# Execute tilde-delimited COMMANDS.
+# If FAIL_CMD is given, eval that upon failure.
+# FAIL_CMD may read-access the current command in variable CMD!
+func_execute_cmds ()
+{
+    $debug_cmd
+
+    save_ifs=$IFS; IFS='~'
+    for cmd in $1; do
+      IFS=$sp$nl
+      eval cmd=\"$cmd\"
+      IFS=$save_ifs
+      func_show_eval "$cmd" "${2-:}"
+    done
+    IFS=$save_ifs
+}
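+
+# Illustrative example (assumed values): COMMANDS is a single string with '~'
+# separating the individual commands, as in
+#   func_execute_cmds 'chmod 644 $oldlib~$RANLIB $oldlib' 'exit $?'
+# Each piece is expanded with eval and run through func_show_eval; the second
+# argument is evaluated only if a command fails.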
+
+
+# func_source file
+# Source FILE, adding directory component if necessary.
+# Note that it is not necessary on cygwin/mingw to append a dot to
+# FILE even if both FILE and FILE.exe exist: automatic-append-.exe
+# behavior happens only for exec(3), not for open(2)!  Also, sourcing
+# 'FILE.' does not work on cygwin managed mounts.
+func_source ()
+{
+    $debug_cmd
+
+    case $1 in
+    */* | *\\*)	. "$1" ;;
+    *)		. "./$1" ;;
+    esac
+}
+
+
+# func_resolve_sysroot PATH
+# Replace a leading = in PATH with a sysroot.  Store the result into
+# func_resolve_sysroot_result
+func_resolve_sysroot ()
+{
+  func_resolve_sysroot_result=$1
+  case $func_resolve_sysroot_result in
+  =*)
+    func_stripname '=' '' "$func_resolve_sysroot_result"
+    func_resolve_sysroot_result=$lt_sysroot$func_stripname_result
+    ;;
+  esac
+}
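+
+# Illustrative example (assuming lt_sysroot=/opt/cross): an argument of
+# '=/usr/lib/libfoo.la' resolves to '/opt/cross/usr/lib/libfoo.la', while an
+# argument without the leading '=' passes through unchanged.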
+
+# func_replace_sysroot PATH
+# If PATH begins with the sysroot, replace it with = and
+# store the result into func_replace_sysroot_result.
+func_replace_sysroot ()
+{
+  case $lt_sysroot:$1 in
+  ?*:"$lt_sysroot"*)
+    func_stripname "$lt_sysroot" '' "$1"
+    func_replace_sysroot_result='='$func_stripname_result
+    ;;
+  *)
+    # Including no sysroot.
+    func_replace_sysroot_result=$1
+    ;;
+  esac
+}
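+
+# Illustrative example (assuming lt_sysroot=/opt/cross): the inverse of
+# func_resolve_sysroot, so '/opt/cross/usr/lib' becomes '=/usr/lib', and a
+# path outside the sysroot is returned untouched.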
+
+# func_infer_tag arg
+# Infer tagged configuration to use if any are available and
+# if one wasn't chosen via the "--tag" command line option.
+# Only attempt this if the compiler in the base compile
+# command doesn't match the default compiler.
+# arg is usually of the form 'gcc ...'
+func_infer_tag ()
+{
+    $debug_cmd
+
+    if test -n "$available_tags" && test -z "$tagname"; then
+      CC_quoted=
+      for arg in $CC; do
+	func_append_quoted CC_quoted "$arg"
+      done
+      CC_expanded=`func_echo_all $CC`
+      CC_quoted_expanded=`func_echo_all $CC_quoted`
+      case $@ in
+      # Blanks in the command may have been stripped by the calling shell,
+      # but not from the CC environment variable when configure was run.
+      " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \
+      " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;;
+      # Blanks at the start of $base_compile will cause this to fail
+      # if we don't check for them as well.
+      *)
+	for z in $available_tags; do
+	  if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then
+	    # Evaluate the configuration.
+	    eval "`$SED -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`"
+	    CC_quoted=
+	    for arg in $CC; do
+	      # Double-quote args containing other shell metacharacters.
+	      func_append_quoted CC_quoted "$arg"
+	    done
+	    CC_expanded=`func_echo_all $CC`
+	    CC_quoted_expanded=`func_echo_all $CC_quoted`
+	    case "$@ " in
+	    " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \
+	    " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*)
+	      # The compiler in the base compile command matches
+	      # the one in the tagged configuration.
+	      # Assume this is the tagged configuration we want.
+	      tagname=$z
+	      break
+	      ;;
+	    esac
+	  fi
+	done
+	# If $tagname still isn't set, then no tagged configuration
+	# was found; let the user know that the "--tag" command-line
+	# option must be used.
+	if test -z "$tagname"; then
+	  func_echo "unable to infer tagged configuration"
+	  func_fatal_error "specify a tag with '--tag'"
+#	else
+#	  func_verbose "using $tagname tagged configuration"
+	fi
+	;;
+      esac
+    fi
+}
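+
+# Illustrative example (hypothetical configuration): if the default tag was
+# configured with CC='gcc' and a 'CXX' tag with CC='g++', then
+#   func_infer_tag g++ -c foo.cpp
+# does not match the default compiler, finds 'g++' in the CXX tagged
+# configuration, and sets tagname=CXX; with no matching tag it aborts and
+# asks for an explicit '--tag'.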
+
+
+
+# func_write_libtool_object output_name pic_name nonpic_name
+# Create a libtool object file (analogous to a ".la" file),
+# but don't create it if we're doing a dry run.
+func_write_libtool_object ()
+{
+    write_libobj=$1
+    if test yes = "$build_libtool_libs"; then
+      write_lobj=\'$2\'
+    else
+      write_lobj=none
+    fi
+
+    if test yes = "$build_old_libs"; then
+      write_oldobj=\'$3\'
+    else
+      write_oldobj=none
+    fi
+
+    $opt_dry_run || {
+      cat >${write_libobj}T <<EOF
+# $write_libobj - a libtool object file
+# Generated by $PROGRAM (GNU $PACKAGE) $VERSION
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object=$write_lobj
+
+# Name of the non-PIC object
+non_pic_object=$write_oldobj
+
+EOF
+      $MV "${write_libobj}T" "$write_libobj"
+    }
+}
+
+
+##################################################
+# FILE NAME AND PATH CONVERSION HELPER FUNCTIONS #
+##################################################
+
+# func_convert_core_file_wine_to_w32 ARG
+# Helper function used by file name conversion functions when $build is *nix,
+# and $host is mingw, cygwin, or some other w32 environment. Relies on a
+# correctly configured wine environment being available, with the winepath
+# program in $build's $PATH.
+#
+# ARG is the $build file name to be converted to w32 format.
+# Result is available in $func_convert_core_file_wine_to_w32_result, and will
+# be empty on error (or when ARG is empty)
+func_convert_core_file_wine_to_w32 ()
+{
+  $debug_cmd
+
+  func_convert_core_file_wine_to_w32_result=$1
+  if test -n "$1"; then
+    # Unfortunately, winepath does not exit with a non-zero error code, so we
+    # are forced to check the contents of stdout. On the other hand, if the
+    # command is not found, the shell will set an exit code of 127 and print
+    # *an error message* to stdout. So we must check for both error code of
+    # zero AND non-empty stdout, which explains the odd construction:
+    func_convert_core_file_wine_to_w32_tmp=`winepath -w "$1" 2>/dev/null`
+    if test "$?" -eq 0 && test -n "$func_convert_core_file_wine_to_w32_tmp"; then
+      func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" |
+        $SED -e "$sed_naive_backslashify"`
+    else
+      func_convert_core_file_wine_to_w32_result=
+    fi
+  fi
+}
+# end: func_convert_core_file_wine_to_w32
+
+
+# func_convert_core_path_wine_to_w32 ARG
+# Helper function used by path conversion functions when $build is *nix, and
+# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly
+# configured wine environment being available, with the winepath program in
+# $build's $PATH. Assumes ARG has no leading or trailing path separator
+# characters.
+#
+# ARG is path to be converted from $build format to win32.
+# Result is available in $func_convert_core_path_wine_to_w32_result.
+# Unconvertible file (directory) names in ARG are skipped; if no directory names
+# are convertible, then the result may be empty.
+func_convert_core_path_wine_to_w32 ()
+{
+  $debug_cmd
+
+  # unfortunately, winepath doesn't convert paths, only file names
+  func_convert_core_path_wine_to_w32_result=
+  if test -n "$1"; then
+    oldIFS=$IFS
+    IFS=:
+    for func_convert_core_path_wine_to_w32_f in $1; do
+      IFS=$oldIFS
+      func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f"
+      if test -n "$func_convert_core_file_wine_to_w32_result"; then
+        if test -z "$func_convert_core_path_wine_to_w32_result"; then
+          func_convert_core_path_wine_to_w32_result=$func_convert_core_file_wine_to_w32_result
+        else
+          func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result"
+        fi
+      fi
+    done
+    IFS=$oldIFS
+  fi
+}
+# end: func_convert_core_path_wine_to_w32
+
+
+# func_cygpath ARGS...
+# Wrapper around calling the cygpath program via LT_CYGPATH. This is used
+# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2)
+# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or
+# (2), returns the Cygwin file name or path in func_cygpath_result (input
+# file name or path is assumed to be in w32 format, as previously converted
+# from $build's *nix or MSYS format). In case (3), returns the w32 file name
+# or path in func_cygpath_result (input file name or path is assumed to be in
+# Cygwin format). Returns an empty string on error.
+#
+# ARGS are passed to cygpath, with the last one being the file name or path to
+# be converted.
+#
+# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH
+# environment variable; do not put it in $PATH.
+func_cygpath ()
+{
+  $debug_cmd
+
+  if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then
+    func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null`
+    if test "$?" -ne 0; then
+      # on failure, ensure result is empty
+      func_cygpath_result=
+    fi
+  else
+    func_cygpath_result=
+    func_error "LT_CYGPATH is empty or specifies non-existent file: '$LT_CYGPATH'"
+  fi
+}
+#end: func_cygpath
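+
+# Illustrative example (assumed setup): with LT_CYGPATH=/usr/bin/cygpath on a
+# Cygwin $build,
+#   func_cygpath -u 'c:/foo'
+# would typically leave '/cygdrive/c/foo' in func_cygpath_result; any failure
+# resets the result to the empty string.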
+
+
+# func_convert_core_msys_to_w32 ARG
+# Convert file name or path ARG from MSYS format to w32 format.  Return
+# result in func_convert_core_msys_to_w32_result.
+func_convert_core_msys_to_w32 ()
+{
+  $debug_cmd
+
+  # awkward: cmd appends spaces to result
+  func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null |
+    $SED -e 's/[ ]*$//' -e "$sed_naive_backslashify"`
+}
+#end: func_convert_core_msys_to_w32
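+
+# Illustrative example (assumed MSYS mount layout): '/c/Users/build' is echoed
+# through cmd as 'c:/Users/build' and then backslashified to 'c:\Users\build';
+# the trailing spaces that cmd appends are stripped by the first $SED
+# expression.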
+
+
+# func_convert_file_check ARG1 ARG2
+# Verify that ARG1 (a file name in $build format) was converted to $host
+# format in ARG2. Otherwise, emit an error message, but continue (resetting
+# func_to_host_file_result to ARG1).
+func_convert_file_check ()
+{
+  $debug_cmd
+
+  if test -z "$2" && test -n "$1"; then
+    func_error "Could not determine host file name corresponding to"
+    func_error "  '$1'"
+    func_error "Continuing, but uninstalled executables may not work."
+    # Fallback:
+    func_to_host_file_result=$1
+  fi
+}
+# end func_convert_file_check
+
+
+# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH
+# Verify that FROM_PATH (a path in $build format) was converted to $host
+# format in TO_PATH. Otherwise, emit an error message, but continue, resetting
+# func_to_host_path_result to a simplistic fallback value (see below).
+func_convert_path_check ()
+{
+  $debug_cmd
+
+  if test -z "$4" && test -n "$3"; then
+    func_error "Could not determine the host path corresponding to"
+    func_error "  '$3'"
+    func_error "Continuing, but uninstalled executables may not work."
+    # Fallback.  This is a deliberately simplistic "conversion" and
+    # should not be "improved".  See libtool.info.
+    if test "x$1" != "x$2"; then
+      lt_replace_pathsep_chars="s|$1|$2|g"
+      func_to_host_path_result=`echo "$3" |
+        $SED -e "$lt_replace_pathsep_chars"`
+    else
+      func_to_host_path_result=$3
+    fi
+  fi
+}
+# end func_convert_path_check
+
+
+# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG
+# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT
+# and appending REPL if ORIG matches BACKPAT.
+func_convert_path_front_back_pathsep ()
+{
+  $debug_cmd
+
+  case $4 in
+  $1 ) func_to_host_path_result=$3$func_to_host_path_result
+    ;;
+  esac
+  case $4 in
+  $2 ) func_append func_to_host_path_result "$3"
+    ;;
+  esac
+}
+# end func_convert_path_front_back_pathsep
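+
+# Illustrative example (hypothetical arguments): with FRONTPAT=':*',
+# BACKPAT='*:' and REPL=';', an ORIG of ':/foo:/bar:' causes ';' to be both
+# prepended and appended to func_to_host_path_result, preserving the empty
+# leading and trailing path components in $host (w32) form.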
+
+
+##################################################
+# $build to $host FILE NAME CONVERSION FUNCTIONS #
+##################################################
+# invoked via '$to_host_file_cmd ARG'
+#
+# In each case, ARG is the path to be converted from $build to $host format.
+# Result will be available in $func_to_host_file_result.
+
+
+# func_to_host_file ARG
+# Converts the file name ARG from $build format to $host format. Return result
+# in func_to_host_file_result.
+func_to_host_file ()
+{
+  $debug_cmd
+
+  $to_host_file_cmd "$1"
+}
+# end func_to_host_file
+
+
+# func_to_tool_file ARG LAZY
+# Converts the file name ARG from $build format to toolchain format. Return
+# result in func_to_tool_file_result.  If the conversion in use is listed
+# in (the comma-separated) LAZY, no conversion takes place.
+func_to_tool_file ()
+{
+  $debug_cmd
+
+  case ,$2, in
+    *,"$to_tool_file_cmd",*)
+      func_to_tool_file_result=$1
+      ;;
+    *)
+      $to_tool_file_cmd "$1"
+      func_to_tool_file_result=$func_to_host_file_result
+      ;;
+  esac
+}
+# end func_to_tool_file
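+
+# Illustrative example (taken from the compile mode code below): a call such as
+#   func_to_tool_file "$srcfile" func_convert_file_msys_to_w32
+# skips the conversion entirely when $to_tool_file_cmd is already
+# func_convert_file_msys_to_w32, and otherwise runs $to_tool_file_cmd on the
+# file name.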
+
+
+# func_convert_file_noop ARG
+# Copy ARG to func_to_host_file_result.
+func_convert_file_noop ()
+{
+  func_to_host_file_result=$1
+}
+# end func_convert_file_noop
+
+
+# func_convert_file_msys_to_w32 ARG
+# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic
+# conversion to w32 is not available inside the cwrapper.  Returns result in
+# func_to_host_file_result.
+func_convert_file_msys_to_w32 ()
+{
+  $debug_cmd
+
+  func_to_host_file_result=$1
+  if test -n "$1"; then
+    func_convert_core_msys_to_w32 "$1"
+    func_to_host_file_result=$func_convert_core_msys_to_w32_result
+  fi
+  func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_msys_to_w32
+
+
+# func_convert_file_cygwin_to_w32 ARG
+# Convert file name ARG from Cygwin to w32 format.  Returns result in
+# func_to_host_file_result.
+func_convert_file_cygwin_to_w32 ()
+{
+  $debug_cmd
+
+  func_to_host_file_result=$1
+  if test -n "$1"; then
+    # because $build is cygwin, we call "the" cygpath in $PATH; no need to use
+    # LT_CYGPATH in this case.
+    func_to_host_file_result=`cygpath -m "$1"`
+  fi
+  func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_cygwin_to_w32
+
+
+# func_convert_file_nix_to_w32 ARG
+# Convert file name ARG from *nix to w32 format.  Requires a wine environment
+# and a working winepath. Returns result in func_to_host_file_result.
+func_convert_file_nix_to_w32 ()
+{
+  $debug_cmd
+
+  func_to_host_file_result=$1
+  if test -n "$1"; then
+    func_convert_core_file_wine_to_w32 "$1"
+    func_to_host_file_result=$func_convert_core_file_wine_to_w32_result
+  fi
+  func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_nix_to_w32
+
+
+# func_convert_file_msys_to_cygwin ARG
+# Convert file name ARG from MSYS to Cygwin format.  Requires LT_CYGPATH set.
+# Returns result in func_to_host_file_result.
+func_convert_file_msys_to_cygwin ()
+{
+  $debug_cmd
+
+  func_to_host_file_result=$1
+  if test -n "$1"; then
+    func_convert_core_msys_to_w32 "$1"
+    func_cygpath -u "$func_convert_core_msys_to_w32_result"
+    func_to_host_file_result=$func_cygpath_result
+  fi
+  func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_msys_to_cygwin
+
+
+# func_convert_file_nix_to_cygwin ARG
+# Convert file name ARG from *nix to Cygwin format.  Requires Cygwin installed
+# in a wine environment, working winepath, and LT_CYGPATH set.  Returns result
+# in func_to_host_file_result.
+func_convert_file_nix_to_cygwin ()
+{
+  $debug_cmd
+
+  func_to_host_file_result=$1
+  if test -n "$1"; then
+    # convert from *nix to w32, then use cygpath to convert from w32 to cygwin.
+    func_convert_core_file_wine_to_w32 "$1"
+    func_cygpath -u "$func_convert_core_file_wine_to_w32_result"
+    func_to_host_file_result=$func_cygpath_result
+  fi
+  func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_nix_to_cygwin
+
+
+#############################################
+# $build to $host PATH CONVERSION FUNCTIONS #
+#############################################
+# invoked via '$to_host_path_cmd ARG'
+#
+# In each case, ARG is the path to be converted from $build to $host format.
+# The result will be available in $func_to_host_path_result.
+#
+# Path separators are also converted from $build format to $host format.  If
+# ARG begins or ends with a path separator character, it is preserved (but
+# converted to $host format) on output.
+#
+# All path conversion functions are named using the following convention:
+#   file name conversion function    : func_convert_file_X_to_Y ()
+#   path conversion function         : func_convert_path_X_to_Y ()
+# where, for any given $build/$host combination the 'X_to_Y' value is the
+# same.  If conversion functions are added for new $build/$host combinations,
+# the two new functions must follow this pattern, or func_init_to_host_path_cmd
+# will break.
+
+
+# func_init_to_host_path_cmd
+# Ensures that function "pointer" variable $to_host_path_cmd is set to the
+# appropriate value, based on the value of $to_host_file_cmd.
+to_host_path_cmd=
+func_init_to_host_path_cmd ()
+{
+  $debug_cmd
+
+  if test -z "$to_host_path_cmd"; then
+    func_stripname 'func_convert_file_' '' "$to_host_file_cmd"
+    to_host_path_cmd=func_convert_path_$func_stripname_result
+  fi
+}
+
+
+# func_to_host_path ARG
+# Converts the path ARG from $build format to $host format. Return result
+# in func_to_host_path_result.
+func_to_host_path ()
+{
+  $debug_cmd
+
+  func_init_to_host_path_cmd
+  $to_host_path_cmd "$1"
+}
+# end func_to_host_path
+
+
+# func_convert_path_noop ARG
+# Copy ARG to func_to_host_path_result.
+func_convert_path_noop ()
+{
+  func_to_host_path_result=$1
+}
+# end func_convert_path_noop
+
+
+# func_convert_path_msys_to_w32 ARG
+# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic
+# conversion to w32 is not available inside the cwrapper.  Returns result in
+# func_to_host_path_result.
+func_convert_path_msys_to_w32 ()
+{
+  $debug_cmd
+
+  func_to_host_path_result=$1
+  if test -n "$1"; then
+    # Remove leading and trailing path separator characters from ARG.  MSYS
+    # behavior is inconsistent here; cygpath turns them into '.;' and ';.';
+    # and winepath ignores them completely.
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_convert_core_msys_to_w32 "$func_to_host_path_tmp1"
+    func_to_host_path_result=$func_convert_core_msys_to_w32_result
+    func_convert_path_check : ";" \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+  fi
+}
+# end func_convert_path_msys_to_w32
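+
+# Illustrative example (assumed MSYS mounts): a search path such as
+# '/c/foo:/c/bar' converts to 'c:\foo;c:\bar'; the ':' separators become ';'
+# and any leading or trailing separator is restored by
+# func_convert_path_front_back_pathsep.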
+
+
+# func_convert_path_cygwin_to_w32 ARG
+# Convert path ARG from Cygwin to w32 format.  Returns result in
+# func_to_host_path_result.
+func_convert_path_cygwin_to_w32 ()
+{
+  $debug_cmd
+
+  func_to_host_path_result=$1
+  if test -n "$1"; then
+    # See func_convert_path_msys_to_w32:
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"`
+    func_convert_path_check : ";" \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+  fi
+}
+# end func_convert_path_cygwin_to_w32
+
+
+# func_convert_path_nix_to_w32 ARG
+# Convert path ARG from *nix to w32 format.  Requires a wine environment and
+# a working winepath.  Returns result in func_to_host_path_result.
+func_convert_path_nix_to_w32 ()
+{
+  $debug_cmd
+
+  func_to_host_path_result=$1
+  if test -n "$1"; then
+    # See func_convert_path_msys_to_w32:
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1"
+    func_to_host_path_result=$func_convert_core_path_wine_to_w32_result
+    func_convert_path_check : ";" \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+  fi
+}
+# end func_convert_path_nix_to_w32
+
+
+# func_convert_path_msys_to_cygwin ARG
+# Convert path ARG from MSYS to Cygwin format.  Requires LT_CYGPATH set.
+# Returns result in func_to_host_path_result.
+func_convert_path_msys_to_cygwin ()
+{
+  $debug_cmd
+
+  func_to_host_path_result=$1
+  if test -n "$1"; then
+    # See func_convert_path_msys_to_w32:
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_convert_core_msys_to_w32 "$func_to_host_path_tmp1"
+    func_cygpath -u -p "$func_convert_core_msys_to_w32_result"
+    func_to_host_path_result=$func_cygpath_result
+    func_convert_path_check : : \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" : "$1"
+  fi
+}
+# end func_convert_path_msys_to_cygwin
+
+
+# func_convert_path_nix_to_cygwin ARG
+# Convert path ARG from *nix to Cygwin format.  Requires Cygwin installed in
+# a wine environment, working winepath, and LT_CYGPATH set.  Returns result in
+# func_to_host_path_result.
+func_convert_path_nix_to_cygwin ()
+{
+  $debug_cmd
+
+  func_to_host_path_result=$1
+  if test -n "$1"; then
+    # Remove leading and trailing path separator characters from
+    # ARG.  MSYS behavior is inconsistent here; cygpath turns them
+    # into '.;' and ';.'; and winepath ignores them completely.
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1"
+    func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result"
+    func_to_host_path_result=$func_cygpath_result
+    func_convert_path_check : : \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" : "$1"
+  fi
+}
+# end func_convert_path_nix_to_cygwin
+
+
+# func_dll_def_p FILE
+# True iff FILE is a Windows DLL '.def' file.
+# Keep in sync with _LT_DLL_DEF_P in libtool.m4
+func_dll_def_p ()
+{
+  $debug_cmd
+
+  func_dll_def_p_tmp=`$SED -n \
+    -e 's/^[	 ]*//' \
+    -e '/^\(;.*\)*$/d' \
+    -e 's/^\(EXPORTS\|LIBRARY\)\([	 ].*\)*$/DEF/p' \
+    -e q \
+    "$1"`
+  test DEF = "$func_dll_def_p_tmp"
+}
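+
+# Illustrative example (hypothetical file): the $SED script above reports DEF
+# when the first non-blank, non-comment line of FILE starts with LIBRARY or
+# EXPORTS, e.g.
+#
+#   ; comments beginning with ';' are ignored
+#   LIBRARY "foo"
+#   EXPORTS
+#     foo_init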
+
+
+# func_mode_compile arg...
+func_mode_compile ()
+{
+    $debug_cmd
+
+    # Get the compilation command and the source file.
+    base_compile=
+    srcfile=$nonopt  #  always keep a non-empty value in "srcfile"
+    suppress_opt=yes
+    suppress_output=
+    arg_mode=normal
+    libobj=
+    later=
+    pie_flag=
+
+    for arg
+    do
+      case $arg_mode in
+      arg  )
+	# do not "continue".  Instead, add this to base_compile
+	lastarg=$arg
+	arg_mode=normal
+	;;
+
+      target )
+	libobj=$arg
+	arg_mode=normal
+	continue
+	;;
+
+      normal )
+	# Accept any command-line options.
+	case $arg in
+	-o)
+	  test -n "$libobj" && \
+	    func_fatal_error "you cannot specify '-o' more than once"
+	  arg_mode=target
+	  continue
+	  ;;
+
+	-pie | -fpie | -fPIE)
+          func_append pie_flag " $arg"
+	  continue
+	  ;;
+
+	-shared | -static | -prefer-pic | -prefer-non-pic)
+	  func_append later " $arg"
+	  continue
+	  ;;
+
+	-no-suppress)
+	  suppress_opt=no
+	  continue
+	  ;;
+
+	-Xcompiler)
+	  arg_mode=arg  #  the next one goes into the "base_compile" arg list
+	  continue      #  The current "srcfile" will either be retained or
+	  ;;            #  replaced later.  I would guess that would be a bug.
+
+	-Wc,*)
+	  func_stripname '-Wc,' '' "$arg"
+	  args=$func_stripname_result
+	  lastarg=
+	  save_ifs=$IFS; IFS=,
+	  for arg in $args; do
+	    IFS=$save_ifs
+	    func_append_quoted lastarg "$arg"
+	  done
+	  IFS=$save_ifs
+	  func_stripname ' ' '' "$lastarg"
+	  lastarg=$func_stripname_result
+
+	  # Add the arguments to base_compile.
+	  func_append base_compile " $lastarg"
+	  continue
+	  ;;
+
+	*)
+	  # Accept the current argument as the source file.
+	  # The previous "srcfile" becomes the current argument.
+	  #
+	  lastarg=$srcfile
+	  srcfile=$arg
+	  ;;
+	esac  #  case $arg
+	;;
+      esac    #  case $arg_mode
+
+      # Aesthetically quote the previous argument.
+      func_append_quoted base_compile "$lastarg"
+    done # for arg
+
+    case $arg_mode in
+    arg)
+      func_fatal_error "you must specify an argument for -Xcompiler"
+      ;;
+    target)
+      func_fatal_error "you must specify a target with '-o'"
+      ;;
+    *)
+      # Get the name of the library object.
+      test -z "$libobj" && {
+	func_basename "$srcfile"
+	libobj=$func_basename_result
+      }
+      ;;
+    esac
+
+    # Recognize several different file suffixes.
+    # If the user specifies -o file.o, it is replaced with file.lo
+    case $libobj in
+    *.[cCFSifmso] | \
+    *.ada | *.adb | *.ads | *.asm | \
+    *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \
+    *.[fF][09]? | *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup)
+      func_xform "$libobj"
+      libobj=$func_xform_result
+      ;;
+    esac
+
+    case $libobj in
+    *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;;
+    *)
+      func_fatal_error "cannot determine name of library object from '$libobj'"
+      ;;
+    esac
+
+    func_infer_tag $base_compile
+
+    for arg in $later; do
+      case $arg in
+      -shared)
+	test yes = "$build_libtool_libs" \
+	  || func_fatal_configuration "cannot build a shared library"
+	build_old_libs=no
+	continue
+	;;
+
+      -static)
+	build_libtool_libs=no
+	build_old_libs=yes
+	continue
+	;;
+
+      -prefer-pic)
+	pic_mode=yes
+	continue
+	;;
+
+      -prefer-non-pic)
+	pic_mode=no
+	continue
+	;;
+      esac
+    done
+
+    func_quote_for_eval "$libobj"
+    test "X$libobj" != "X$func_quote_for_eval_result" \
+      && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"'	 &()|`$[]' \
+      && func_warning "libobj name '$libobj' may not contain shell special characters."
+    func_dirname_and_basename "$obj" "/" ""
+    objname=$func_basename_result
+    xdir=$func_dirname_result
+    lobj=$xdir$objdir/$objname
+
+    test -z "$base_compile" && \
+      func_fatal_help "you must specify a compilation command"
+
+    # Delete any leftover library objects.
+    if test yes = "$build_old_libs"; then
+      removelist="$obj $lobj $libobj ${libobj}T"
+    else
+      removelist="$lobj $libobj ${libobj}T"
+    fi
+
+    # On Cygwin there's no "real" PIC flag so we must build both object types
+    case $host_os in
+    cygwin* | mingw* | pw32* | os2* | cegcc*)
+      pic_mode=default
+      ;;
+    esac
+    if test no = "$pic_mode" && test pass_all != "$deplibs_check_method"; then
+      # non-PIC code in shared libraries is not supported
+      pic_mode=default
+    fi
+
+    # Calculate the filename of the output object if compiler does
+    # not support -o with -c
+    if test no = "$compiler_c_o"; then
+      output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.$objext
+      lockfile=$output_obj.lock
+    else
+      output_obj=
+      need_locks=no
+      lockfile=
+    fi
+
+    # Lock this critical section if it is needed
+    # We use this script file to make the link, it avoids creating a new file
+    if test yes = "$need_locks"; then
+      until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do
+	func_echo "Waiting for $lockfile to be removed"
+	sleep 2
+      done
+    elif test warn = "$need_locks"; then
+      if test -f "$lockfile"; then
+	$ECHO "\
+*** ERROR, $lockfile exists and contains:
+`cat $lockfile 2>/dev/null`
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support '-c' and '-o' together.  If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+	$opt_dry_run || $RM $removelist
+	exit $EXIT_FAILURE
+      fi
+      func_append removelist " $output_obj"
+      $ECHO "$srcfile" > "$lockfile"
+    fi
+
+    $opt_dry_run || $RM $removelist
+    func_append removelist " $lockfile"
+    trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15
+
+    func_to_tool_file "$srcfile" func_convert_file_msys_to_w32
+    srcfile=$func_to_tool_file_result
+    func_quote_for_eval "$srcfile"
+    qsrcfile=$func_quote_for_eval_result
+
+    # Only build a PIC object if we are building libtool libraries.
+    if test yes = "$build_libtool_libs"; then
+      # Without this assignment, base_compile gets emptied.
+      fbsd_hideous_sh_bug=$base_compile
+
+      if test no != "$pic_mode"; then
+	command="$base_compile $qsrcfile $pic_flag"
+      else
+	# Don't build PIC code
+	command="$base_compile $qsrcfile"
+      fi
+
+      func_mkdir_p "$xdir$objdir"
+
+      if test -z "$output_obj"; then
+	# Place PIC objects in $objdir
+	func_append command " -o $lobj"
+      fi
+
+      func_show_eval_locale "$command"	\
+          'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE'
+
+      if test warn = "$need_locks" &&
+	 test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
+	$ECHO "\
+*** ERROR, $lockfile contains:
+`cat $lockfile 2>/dev/null`
+
+but it should contain:
+$srcfile
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support '-c' and '-o' together.  If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+	$opt_dry_run || $RM $removelist
+	exit $EXIT_FAILURE
+      fi
+
+      # Just move the object if needed, then go on to compile the next one
+      if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then
+	func_show_eval '$MV "$output_obj" "$lobj"' \
+	  'error=$?; $opt_dry_run || $RM $removelist; exit $error'
+      fi
+
+      # Allow error messages only from the first compilation.
+      if test yes = "$suppress_opt"; then
+	suppress_output=' >/dev/null 2>&1'
+      fi
+    fi
+
+    # Only build a position-dependent object if we build old libraries.
+    if test yes = "$build_old_libs"; then
+      if test yes != "$pic_mode"; then
+	# Don't build PIC code
+	command="$base_compile $qsrcfile$pie_flag"
+      else
+	command="$base_compile $qsrcfile $pic_flag"
+      fi
+      if test yes = "$compiler_c_o"; then
+	func_append command " -o $obj"
+      fi
+
+      # Suppress compiler output if we already did a PIC compilation.
+      func_append command "$suppress_output"
+      func_show_eval_locale "$command" \
+        '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE'
+
+      if test warn = "$need_locks" &&
+	 test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
+	$ECHO "\
+*** ERROR, $lockfile contains:
+`cat $lockfile 2>/dev/null`
+
+but it should contain:
+$srcfile
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support '-c' and '-o' together.  If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+	$opt_dry_run || $RM $removelist
+	exit $EXIT_FAILURE
+      fi
+
+      # Just move the object if needed
+      if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then
+	func_show_eval '$MV "$output_obj" "$obj"' \
+	  'error=$?; $opt_dry_run || $RM $removelist; exit $error'
+      fi
+    fi
+
+    $opt_dry_run || {
+      func_write_libtool_object "$libobj" "$objdir/$objname" "$objname"
+
+      # Unlock the critical section if it was locked
+      if test no != "$need_locks"; then
+	removelist=$lockfile
+        $RM "$lockfile"
+      fi
+    }
+
+    exit $EXIT_SUCCESS
+}
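+
+# Illustrative usage (hypothetical file names): a typical compile-mode
+# invocation looks like
+#   libtool --tag=CC --mode=compile gcc -c foo.c -o foo.lo
+# which compiles a PIC object into $objdir (usually '.libs/foo.o'), optionally
+# a non-PIC object 'foo.o', and writes the 'foo.lo' libtool object describing
+# both.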
+
+$opt_help || {
+  test compile = "$opt_mode" && func_mode_compile ${1+"$@"}
+}
+
+func_mode_help ()
+{
+    # We need to display help for each of the modes.
+    case $opt_mode in
+      "")
+        # Generic help is extracted from the usage comments
+        # at the start of this file.
+        func_help
+        ;;
+
+      clean)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE...
+
+Remove files from the build directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically '/bin/rm').  RM-OPTIONS are options (such as '-f') to be passed
+to RM.
+
+If FILE is a libtool library, object or program, all the files associated
+with it are deleted. Otherwise, only FILE itself is deleted using RM."
+        ;;
+
+      compile)
+      $ECHO \
+"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE
+
+Compile a source file into a libtool library object.
+
+This mode accepts the following additional options:
+
+  -o OUTPUT-FILE    set the output file name to OUTPUT-FILE
+  -no-suppress      do not suppress compiler output for multiple passes
+  -prefer-pic       try to build PIC objects only
+  -prefer-non-pic   try to build non-PIC objects only
+  -shared           do not build a '.o' file suitable for static linking
+  -static           only build a '.o' file suitable for static linking
+  -Wc,FLAG          pass FLAG directly to the compiler
+
+COMPILE-COMMAND is a command to be used in creating a 'standard' object file
+from the given SOURCEFILE.
+
+The output file name is determined by removing the directory component from
+SOURCEFILE, then substituting the C source code suffix '.c' with the
+library object suffix, '.lo'."
+        ;;
+
+      execute)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]...
+
+Automatically set library path, then run a program.
+
+This mode accepts the following additional options:
+
+  -dlopen FILE      add the directory containing FILE to the library path
+
+This mode sets the library path environment variable according to '-dlopen'
+flags.
+
+If any of the ARGS are libtool executable wrappers, then they are translated
+into their corresponding uninstalled binary, and any of their required library
+directories are added to the library path.
+
+Then, COMMAND is executed, with ARGS as arguments."
+        ;;
+
+      finish)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=finish [LIBDIR]...
+
+Complete the installation of libtool libraries.
+
+Each LIBDIR is a directory that contains libtool libraries.
+
+The commands that this mode executes may require superuser privileges.  Use
+the '--dry-run' option if you just want to see what would be executed."
+        ;;
+
+      install)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND...
+
+Install executables or libraries.
+
+INSTALL-COMMAND is the installation command.  The first component should be
+either the 'install' or 'cp' program.
+
+The following components of INSTALL-COMMAND are treated specially:
+
+  -inst-prefix-dir PREFIX-DIR  Use PREFIX-DIR as a staging area for installation
+
+The rest of the components are interpreted as arguments to that command (only
+BSD-compatible install options are recognized)."
+        ;;
+
+      link)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=link LINK-COMMAND...
+
+Link object files or libraries together to form another library, or to
+create an executable program.
+
+LINK-COMMAND is a command using the C compiler that you would use to create
+a program from several object files.
+
+The following components of LINK-COMMAND are treated specially:
+
+  -all-static       do not do any dynamic linking at all
+  -avoid-version    do not add a version suffix if possible
+  -bindir BINDIR    specify path to binaries directory (for systems where
+                    libraries must be found in the PATH setting at runtime)
+  -dlopen FILE      '-dlpreopen' FILE if it cannot be dlopened at runtime
+  -dlpreopen FILE   link in FILE and add its symbols to lt_preloaded_symbols
+  -export-dynamic   allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
+  -export-symbols SYMFILE
+                    try to export only the symbols listed in SYMFILE
+  -export-symbols-regex REGEX
+                    try to export only the symbols matching REGEX
+  -LLIBDIR          search LIBDIR for required installed libraries
+  -lNAME            OUTPUT-FILE requires the installed library libNAME
+  -module           build a library that can be dlopened
+  -no-fast-install  disable the fast-install mode
+  -no-install       link a not-installable executable
+  -no-undefined     declare that a library does not refer to external symbols
+  -o OUTPUT-FILE    create OUTPUT-FILE from the specified objects
+  -objectlist FILE  use a list of object files found in FILE to specify objects
+  -os2dllname NAME  force a short DLL name on OS/2 (no effect on other OSes)
+  -precious-files-regex REGEX
+                    don't remove output files matching REGEX
+  -release RELEASE  specify package release information
+  -rpath LIBDIR     the created library will eventually be installed in LIBDIR
+  -R[ ]LIBDIR       add LIBDIR to the runtime path of programs and libraries
+  -shared           only do dynamic linking of libtool libraries
+  -shrext SUFFIX    override the standard shared library file extension
+  -static           do not do any dynamic linking of uninstalled libtool libraries
+  -static-libtool-libs
+                    do not do any dynamic linking of libtool libraries
+  -version-info CURRENT[:REVISION[:AGE]]
+                    specify library version info [each variable defaults to 0]
+  -weak LIBNAME     declare that the target provides the LIBNAME interface
+  -Wc,FLAG
+  -Xcompiler FLAG   pass linker-specific FLAG directly to the compiler
+  -Wl,FLAG
+  -Xlinker FLAG     pass linker-specific FLAG directly to the linker
+  -XCClinker FLAG   pass link-specific FLAG to the compiler driver (CC)
+
+All other options (arguments beginning with '-') are ignored.
+
+Every other argument is treated as a filename.  Files ending in '.la' are
+treated as uninstalled libtool libraries, other files are standard or library
+object files.
+
+If the OUTPUT-FILE ends in '.la', then a libtool library is created,
+only library objects ('.lo' files) may be specified, and '-rpath' is
+required, except when creating a convenience library.
+
+If OUTPUT-FILE ends in '.a' or '.lib', then a standard library is created
+using 'ar' and 'ranlib', or on Windows using 'lib'.
+
+If OUTPUT-FILE ends in '.lo' or '.$objext', then a reloadable object file
+is created, otherwise an executable program is created."
+        ;;
+
+      uninstall)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...
+
+Remove libraries from an installation directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically '/bin/rm').  RM-OPTIONS are options (such as '-f') to be passed
+to RM.
+
+If FILE is a libtool library, all the files associated with it are deleted.
+Otherwise, only FILE itself is deleted using RM."
+        ;;
+
+      *)
+        func_fatal_help "invalid operation mode '$opt_mode'"
+        ;;
+    esac
+
+    echo
+    $ECHO "Try '$progname --help' for more information about other modes."
+}
+
+# Now that we've collected a possible --mode arg, show help if necessary
+if $opt_help; then
+  if test : = "$opt_help"; then
+    func_mode_help
+  else
+    {
+      func_help noexit
+      for opt_mode in compile link execute install finish uninstall clean; do
+	func_mode_help
+      done
+    } | $SED -n '1p; 2,$s/^Usage:/  or: /p'
+    {
+      func_help noexit
+      for opt_mode in compile link execute install finish uninstall clean; do
+	echo
+	func_mode_help
+      done
+    } |
+    $SED '1d
+      /^When reporting/,/^Report/{
+	H
+	d
+      }
+      $x
+      /information about other modes/d
+      /more detailed .*MODE/d
+      s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/'
+  fi
+  exit $?
+fi
+
+
+# func_mode_execute arg...
+func_mode_execute ()
+{
+    $debug_cmd
+
+    # The first argument is the command name.
+    cmd=$nonopt
+    test -z "$cmd" && \
+      func_fatal_help "you must specify a COMMAND"
+
+    # Handle -dlopen flags immediately.
+    for file in $opt_dlopen; do
+      test -f "$file" \
+	|| func_fatal_help "'$file' is not a file"
+
+      dir=
+      case $file in
+      *.la)
+	func_resolve_sysroot "$file"
+	file=$func_resolve_sysroot_result
+
+	# Check to see that this really is a libtool archive.
+	func_lalib_unsafe_p "$file" \
+	  || func_fatal_help "'$file' is not a valid libtool archive"
+
+	# Read the libtool library.
+	dlname=
+	library_names=
+	func_source "$file"
+
+	# Skip this library if it cannot be dlopened.
+	if test -z "$dlname"; then
+	  # Warn if it was a shared library.
+	  test -n "$library_names" && \
+	    func_warning "'$file' was not linked with '-export-dynamic'"
+	  continue
+	fi
+
+	func_dirname "$file" "" "."
+	dir=$func_dirname_result
+
+	if test -f "$dir/$objdir/$dlname"; then
+	  func_append dir "/$objdir"
+	else
+	  if test ! -f "$dir/$dlname"; then
+	    func_fatal_error "cannot find '$dlname' in '$dir' or '$dir/$objdir'"
+	  fi
+	fi
+	;;
+
+      *.lo)
+	# Just add the directory containing the .lo file.
+	func_dirname "$file" "" "."
+	dir=$func_dirname_result
+	;;
+
+      *)
+	func_warning "'-dlopen' is ignored for non-libtool libraries and objects"
+	continue
+	;;
+      esac
+
+      # Get the absolute pathname.
+      absdir=`cd "$dir" && pwd`
+      test -n "$absdir" && dir=$absdir
+
+      # Now add the directory to shlibpath_var.
+      if eval "test -z \"\$$shlibpath_var\""; then
+	eval "$shlibpath_var=\"\$dir\""
+      else
+	eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\""
+      fi
+    done
+
+    # This variable tells wrapper scripts just to set shlibpath_var
+    # rather than running their programs.
+    libtool_execute_magic=$magic
+
+    # Check if any of the arguments is a wrapper script.
+    args=
+    for file
+    do
+      case $file in
+      -* | *.la | *.lo ) ;;
+      *)
+	# Do a test to see if this is really a libtool program.
+	if func_ltwrapper_script_p "$file"; then
+	  func_source "$file"
+	  # Transform arg to wrapped name.
+	  file=$progdir/$program
+	elif func_ltwrapper_executable_p "$file"; then
+	  func_ltwrapper_scriptname "$file"
+	  func_source "$func_ltwrapper_scriptname_result"
+	  # Transform arg to wrapped name.
+	  file=$progdir/$program
+	fi
+	;;
+      esac
+      # Quote arguments (to preserve shell metacharacters).
+      func_append_quoted args "$file"
+    done
+
+    if $opt_dry_run; then
+      # Display what would be done.
+      if test -n "$shlibpath_var"; then
+	eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\""
+	echo "export $shlibpath_var"
+      fi
+      $ECHO "$cmd$args"
+      exit $EXIT_SUCCESS
+    else
+      if test -n "$shlibpath_var"; then
+	# Export the shlibpath_var.
+	eval "export $shlibpath_var"
+      fi
+
+      # Restore saved environment variables
+      for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES
+      do
+	eval "if test \"\${save_$lt_var+set}\" = set; then
+                $lt_var=\$save_$lt_var; export $lt_var
+	      else
+		$lt_unset $lt_var
+	      fi"
+      done
+
+      # Now prepare to actually exec the command.
+      exec_cmd=\$cmd$args
+    fi
+}
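+
+# Illustrative usage (hypothetical names): execute mode is typically used to
+# run an uninstalled program under another tool, e.g.
+#   libtool --mode=execute -dlopen libfoo.la gdb ./prog
+# which adds libfoo's build directory to $shlibpath_var and replaces any
+# wrapper-script arguments with the real binaries before exec'ing the command.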
+
+test execute = "$opt_mode" && func_mode_execute ${1+"$@"}
+
+
+# func_mode_finish arg...
+func_mode_finish ()
+{
+    $debug_cmd
+
+    libs=
+    libdirs=
+    admincmds=
+
+    for opt in "$nonopt" ${1+"$@"}
+    do
+      if test -d "$opt"; then
+	func_append libdirs " $opt"
+
+      elif test -f "$opt"; then
+	if func_lalib_unsafe_p "$opt"; then
+	  func_append libs " $opt"
+	else
+	  func_warning "'$opt' is not a valid libtool archive"
+	fi
+
+      else
+	func_fatal_error "invalid argument '$opt'"
+      fi
+    done
+
+    if test -n "$libs"; then
+      if test -n "$lt_sysroot"; then
+        sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"`
+        sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;"
+      else
+        sysroot_cmd=
+      fi
+
+      # Remove sysroot references
+      if $opt_dry_run; then
+        for lib in $libs; do
+          echo "removing references to $lt_sysroot and '=' prefixes from $lib"
+        done
+      else
+        tmpdir=`func_mktempdir`
+        for lib in $libs; do
+	  $SED -e "$sysroot_cmd s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \
+	    > $tmpdir/tmp-la
+	  mv -f $tmpdir/tmp-la $lib
+	done
+        ${RM}r "$tmpdir"
+      fi
+    fi
+
+    if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
+      for libdir in $libdirs; do
+	if test -n "$finish_cmds"; then
+	  # Do each command in the finish commands.
+	  func_execute_cmds "$finish_cmds" 'admincmds="$admincmds
+'"$cmd"'"'
+	fi
+	if test -n "$finish_eval"; then
+	  # Do the single finish_eval.
+	  eval cmds=\"$finish_eval\"
+	  $opt_dry_run || eval "$cmds" || func_append admincmds "
+       $cmds"
+	fi
+      done
+    fi
+
+    # Exit here if they wanted silent mode.
+    $opt_quiet && exit $EXIT_SUCCESS
+
+    if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
+      echo "----------------------------------------------------------------------"
+      echo "Libraries have been installed in:"
+      for libdir in $libdirs; do
+	$ECHO "   $libdir"
+      done
+      echo
+      echo "If you ever happen to want to link against installed libraries"
+      echo "in a given directory, LIBDIR, you must either use libtool, and"
+      echo "specify the full pathname of the library, or use the '-LLIBDIR'"
+      echo "flag during linking and do at least one of the following:"
+      if test -n "$shlibpath_var"; then
+	echo "   - add LIBDIR to the '$shlibpath_var' environment variable"
+	echo "     during execution"
+      fi
+      if test -n "$runpath_var"; then
+	echo "   - add LIBDIR to the '$runpath_var' environment variable"
+	echo "     during linking"
+      fi
+      if test -n "$hardcode_libdir_flag_spec"; then
+	libdir=LIBDIR
+	eval flag=\"$hardcode_libdir_flag_spec\"
+
+	$ECHO "   - use the '$flag' linker flag"
+      fi
+      if test -n "$admincmds"; then
+	$ECHO "   - have your system administrator run these commands:$admincmds"
+      fi
+      if test -f /etc/ld.so.conf; then
+	echo "   - have your system administrator add LIBDIR to '/etc/ld.so.conf'"
+      fi
+      echo
+
+      echo "See any operating system documentation about shared libraries for"
+      case $host in
+	solaris2.[6789]|solaris2.1[0-9])
+	  echo "more information, such as the ld(1), crle(1) and ld.so(8) manual"
+	  echo "pages."
+	  ;;
+	*)
+	  echo "more information, such as the ld(1) and ld.so(8) manual pages."
+	  ;;
+      esac
+      echo "----------------------------------------------------------------------"
+    fi
+    exit $EXIT_SUCCESS
+}
+
+test finish = "$opt_mode" && func_mode_finish ${1+"$@"}
+
+
+# func_mode_install arg...
+func_mode_install ()
+{
+    $debug_cmd
+
+    # There may be an optional sh(1) argument at the beginning of
+    # install_prog (especially on Windows NT).
+    if test "$SHELL" = "$nonopt" || test /bin/sh = "$nonopt" ||
+       # Allow the use of GNU shtool's install command.
+       case $nonopt in *shtool*) :;; *) false;; esac
+    then
+      # Aesthetically quote it.
+      func_quote_for_eval "$nonopt"
+      install_prog="$func_quote_for_eval_result "
+      arg=$1
+      shift
+    else
+      install_prog=
+      arg=$nonopt
+    fi
+
+    # The real first argument should be the name of the installation program.
+    # Aesthetically quote it.
+    func_quote_for_eval "$arg"
+    func_append install_prog "$func_quote_for_eval_result"
+    install_shared_prog=$install_prog
+    case " $install_prog " in
+      *[\\\ /]cp\ *) install_cp=: ;;
+      *) install_cp=false ;;
+    esac
+
+    # We need to accept at least all the BSD install flags.
+    dest=
+    files=
+    opts=
+    prev=
+    install_type=
+    isdir=false
+    stripme=
+    no_mode=:
+    for arg
+    do
+      arg2=
+      if test -n "$dest"; then
+	func_append files " $dest"
+	dest=$arg
+	continue
+      fi
+
+      case $arg in
+      -d) isdir=: ;;
+      -f)
+	if $install_cp; then :; else
+	  prev=$arg
+	fi
+	;;
+      -g | -m | -o)
+	prev=$arg
+	;;
+      -s)
+	stripme=" -s"
+	continue
+	;;
+      -*)
+	;;
+      *)
+	# If the previous option needed an argument, then skip it.
+	if test -n "$prev"; then
+	  if test X-m = "X$prev" && test -n "$install_override_mode"; then
+	    arg2=$install_override_mode
+	    no_mode=false
+	  fi
+	  prev=
+	else
+	  dest=$arg
+	  continue
+	fi
+	;;
+      esac
+
+      # Aesthetically quote the argument.
+      func_quote_for_eval "$arg"
+      func_append install_prog " $func_quote_for_eval_result"
+      if test -n "$arg2"; then
+	func_quote_for_eval "$arg2"
+      fi
+      func_append install_shared_prog " $func_quote_for_eval_result"
+    done
+
+    test -z "$install_prog" && \
+      func_fatal_help "you must specify an install program"
+
+    test -n "$prev" && \
+      func_fatal_help "the '$prev' option requires an argument"
+
+    if test -n "$install_override_mode" && $no_mode; then
+      if $install_cp; then :; else
+	func_quote_for_eval "$install_override_mode"
+	func_append install_shared_prog " -m $func_quote_for_eval_result"
+      fi
+    fi
+
+    if test -z "$files"; then
+      if test -z "$dest"; then
+	func_fatal_help "no file or destination specified"
+      else
+	func_fatal_help "you must specify a destination"
+      fi
+    fi
+
+    # Strip any trailing slash from the destination.
+    func_stripname '' '/' "$dest"
+    dest=$func_stripname_result
+
+    # Check to see that the destination is a directory.
+    test -d "$dest" && isdir=:
+    if $isdir; then
+      destdir=$dest
+      destname=
+    else
+      func_dirname_and_basename "$dest" "" "."
+      destdir=$func_dirname_result
+      destname=$func_basename_result
+
+      # Not a directory, so check to see that there is only one file specified.
+      set dummy $files; shift
+      test "$#" -gt 1 && \
+	func_fatal_help "'$dest' is not a directory"
+    fi
+    case $destdir in
+    [\\/]* | [A-Za-z]:[\\/]*) ;;
+    *)
+      for file in $files; do
+	case $file in
+	*.lo) ;;
+	*)
+	  func_fatal_help "'$destdir' must be an absolute directory name"
+	  ;;
+	esac
+      done
+      ;;
+    esac
+
+    # This variable tells wrapper scripts just to set variables rather
+    # than running their programs.
+    libtool_install_magic=$magic
+
+    staticlibs=
+    future_libdirs=
+    current_libdirs=
+    for file in $files; do
+
+      # Do each installation.
+      case $file in
+      *.$libext)
+	# Do the static libraries later.
+	func_append staticlibs " $file"
+	;;
+
+      *.la)
+	func_resolve_sysroot "$file"
+	file=$func_resolve_sysroot_result
+
+	# Check to see that this really is a libtool archive.
+	func_lalib_unsafe_p "$file" \
+	  || func_fatal_help "'$file' is not a valid libtool archive"
+
+	library_names=
+	old_library=
+	relink_command=
+	func_source "$file"
+
+	# Add the libdir to current_libdirs if it is the destination.
+	if test "X$destdir" = "X$libdir"; then
+	  case "$current_libdirs " in
+	  *" $libdir "*) ;;
+	  *) func_append current_libdirs " $libdir" ;;
+	  esac
+	else
+	  # Note the libdir as a future libdir.
+	  case "$future_libdirs " in
+	  *" $libdir "*) ;;
+	  *) func_append future_libdirs " $libdir" ;;
+	  esac
+	fi
+
+	func_dirname "$file" "/" ""
+	dir=$func_dirname_result
+	func_append dir "$objdir"
+
+	if test -n "$relink_command"; then
+	  # Determine the prefix the user has applied to our future dir.
+	  inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"`
+
+	  # Don't allow the user to place us outside of our expected
+	  # location b/c this prevents finding dependent libraries that
+	  # are installed to the same prefix.
+	  # At present, this check doesn't affect windows .dll's that
+	  # are installed into $libdir/../bin (currently, that works fine)
+	  # but it's something to keep an eye on.
+	  test "$inst_prefix_dir" = "$destdir" && \
+	    func_fatal_error "error: cannot install '$file' to a directory not ending in $libdir"
+
+	  if test -n "$inst_prefix_dir"; then
+	    # Stick the inst_prefix_dir data into the link command.
+	    relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"`
+	  else
+	    relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"`
+	  fi
+
+	  func_warning "relinking '$file'"
+	  func_show_eval "$relink_command" \
+	    'func_fatal_error "error: relink '\''$file'\'' with the above command before installing it"'
+	fi
+
+	# See the names of the shared library.
+	set dummy $library_names; shift
+	if test -n "$1"; then
+	  realname=$1
+	  shift
+
+	  srcname=$realname
+	  test -n "$relink_command" && srcname=${realname}T
+
+	  # Install the shared library and build the symlinks.
+	  func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \
+	      'exit $?'
+	  tstripme=$stripme
+	  case $host_os in
+	  cygwin* | mingw* | pw32* | cegcc*)
+	    case $realname in
+	    *.dll.a)
+	      tstripme=
+	      ;;
+	    esac
+	    ;;
+	  os2*)
+	    case $realname in
+	    *_dll.a)
+	      tstripme=
+	      ;;
+	    esac
+	    ;;
+	  esac
+	  if test -n "$tstripme" && test -n "$striplib"; then
+	    func_show_eval "$striplib $destdir/$realname" 'exit $?'
+	  fi
+
+	  if test "$#" -gt 0; then
+	    # Delete the old symlinks, and create new ones.
+	    # Try 'ln -sf' first, because the 'ln' binary might depend on
+	    # the symlink we replace!  Solaris /bin/ln does not understand -f,
+	    # so we also need to try rm && ln -s.
+	    for linkname
+	    do
+	      test "$linkname" != "$realname" \
+		&& func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })"
+	    done
+	  fi
+
+	  # Do each command in the postinstall commands.
+	  lib=$destdir/$realname
+	  func_execute_cmds "$postinstall_cmds" 'exit $?'
+	fi
+
+	# Install the pseudo-library for information purposes.
+	func_basename "$file"
+	name=$func_basename_result
+	instname=$dir/${name}i
+	func_show_eval "$install_prog $instname $destdir/$name" 'exit $?'
+
+	# Maybe install the static library, too.
+	test -n "$old_library" && func_append staticlibs " $dir/$old_library"
+	;;
+
+      *.lo)
+	# Install (i.e. copy) a libtool object.
+
+	# Figure out destination file name, if it wasn't already specified.
+	if test -n "$destname"; then
+	  destfile=$destdir/$destname
+	else
+	  func_basename "$file"
+	  destfile=$func_basename_result
+	  destfile=$destdir/$destfile
+	fi
+
+	# Deduce the name of the destination old-style object file.
+	case $destfile in
+	*.lo)
+	  func_lo2o "$destfile"
+	  staticdest=$func_lo2o_result
+	  ;;
+	*.$objext)
+	  staticdest=$destfile
+	  destfile=
+	  ;;
+	*)
+	  func_fatal_help "cannot copy a libtool object to '$destfile'"
+	  ;;
+	esac
+
+	# Install the libtool object if requested.
+	test -n "$destfile" && \
+	  func_show_eval "$install_prog $file $destfile" 'exit $?'
+
+	# Install the old object if enabled.
+	if test yes = "$build_old_libs"; then
+	  # Deduce the name of the old-style object file.
+	  func_lo2o "$file"
+	  staticobj=$func_lo2o_result
+	  func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?'
+	fi
+	exit $EXIT_SUCCESS
+	;;
+
+      *)
+	# Figure out destination file name, if it wasn't already specified.
+	if test -n "$destname"; then
+	  destfile=$destdir/$destname
+	else
+	  func_basename "$file"
+	  destfile=$func_basename_result
+	  destfile=$destdir/$destfile
+	fi
+
+	# If the file is missing, and there is a .exe on the end, strip it
+	# because it is most likely a libtool script we actually want to
+	# install
+	stripped_ext=
+	case $file in
+	  *.exe)
+	    if test ! -f "$file"; then
+	      func_stripname '' '.exe' "$file"
+	      file=$func_stripname_result
+	      stripped_ext=.exe
+	    fi
+	    ;;
+	esac
+
+	# Do a test to see if this is really a libtool program.
+	case $host in
+	*cygwin* | *mingw*)
+	    if func_ltwrapper_executable_p "$file"; then
+	      func_ltwrapper_scriptname "$file"
+	      wrapper=$func_ltwrapper_scriptname_result
+	    else
+	      func_stripname '' '.exe' "$file"
+	      wrapper=$func_stripname_result
+	    fi
+	    ;;
+	*)
+	    wrapper=$file
+	    ;;
+	esac
+	if func_ltwrapper_script_p "$wrapper"; then
+	  notinst_deplibs=
+	  relink_command=
+
+	  func_source "$wrapper"
+
+	  # Check the variables that should have been set.
+	  test -z "$generated_by_libtool_version" && \
+	    func_fatal_error "invalid libtool wrapper script '$wrapper'"
+
+	  finalize=:
+	  for lib in $notinst_deplibs; do
+	    # Check to see that each library is installed.
+	    libdir=
+	    if test -f "$lib"; then
+	      func_source "$lib"
+	    fi
+	    libfile=$libdir/`$ECHO "$lib" | $SED 's%^.*/%%g'`
+	    if test -n "$libdir" && test ! -f "$libfile"; then
+	      func_warning "'$lib' has not been installed in '$libdir'"
+	      finalize=false
+	    fi
+	  done
+
+	  relink_command=
+	  func_source "$wrapper"
+
+	  outputname=
+	  if test no = "$fast_install" && test -n "$relink_command"; then
+	    $opt_dry_run || {
+	      if $finalize; then
+	        tmpdir=`func_mktempdir`
+		func_basename "$file$stripped_ext"
+		file=$func_basename_result
+	        outputname=$tmpdir/$file
+	        # Replace the output file specification.
+	        relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'`
+
+	        $opt_quiet || {
+	          func_quote_for_expand "$relink_command"
+		  eval "func_echo $func_quote_for_expand_result"
+	        }
+	        if eval "$relink_command"; then :
+	          else
+		  func_error "error: relink '$file' with the above command before installing it"
+		  $opt_dry_run || ${RM}r "$tmpdir"
+		  continue
+	        fi
+	        file=$outputname
+	      else
+	        func_warning "cannot relink '$file'"
+	      fi
+	    }
+	  else
+	    # Install the binary that we compiled earlier.
+	    file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"`
+	  fi
+	fi
+
+	# remove .exe since cygwin /usr/bin/install will append another
+	# one anyway
+	case $install_prog,$host in
+	*/usr/bin/install*,*cygwin*)
+	  case $file:$destfile in
+	  *.exe:*.exe)
+	    # this is ok
+	    ;;
+	  *.exe:*)
+	    destfile=$destfile.exe
+	    ;;
+	  *:*.exe)
+	    func_stripname '' '.exe' "$destfile"
+	    destfile=$func_stripname_result
+	    ;;
+	  esac
+	  ;;
+	esac
+	func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?'
+	$opt_dry_run || if test -n "$outputname"; then
+	  ${RM}r "$tmpdir"
+	fi
+	;;
+      esac
+    done
+
+    for file in $staticlibs; do
+      func_basename "$file"
+      name=$func_basename_result
+
+      # Set up the ranlib parameters.
+      oldlib=$destdir/$name
+      func_to_tool_file "$oldlib" func_convert_file_msys_to_w32
+      tool_oldlib=$func_to_tool_file_result
+
+      func_show_eval "$install_prog \$file \$oldlib" 'exit $?'
+
+      if test -n "$stripme" && test -n "$old_striplib"; then
+	func_show_eval "$old_striplib $tool_oldlib" 'exit $?'
+      fi
+
+      # Do each command in the postinstall commands.
+      func_execute_cmds "$old_postinstall_cmds" 'exit $?'
+    done
+
+    test -n "$future_libdirs" && \
+      func_warning "remember to run '$progname --finish$future_libdirs'"
+
+    if test -n "$current_libdirs"; then
+      # Maybe just do a dry run.
+      $opt_dry_run && current_libdirs=" -n$current_libdirs"
+      exec_cmd='$SHELL "$progpath" $preserve_args --finish$current_libdirs'
+    else
+      exit $EXIT_SUCCESS
+    fi
+}
+
+test install = "$opt_mode" && func_mode_install ${1+"$@"}
+
+
+# func_generate_dlsyms outputname originator pic_p
+# Extract symbols from dlprefiles and create ${outputname}S.o with
+# a dlpreopen symbol table.
+func_generate_dlsyms ()
+{
+    $debug_cmd
+
+    my_outputname=$1
+    my_originator=$2
+    my_pic_p=${3-false}
+    my_prefix=`$ECHO "$my_originator" | $SED 's%[^a-zA-Z0-9]%_%g'`
+    my_dlsyms=
+
+    if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then
+      if test -n "$NM" && test -n "$global_symbol_pipe"; then
+	my_dlsyms=${my_outputname}S.c
+      else
+	func_error "not configured to extract global symbols from dlpreopened files"
+      fi
+    fi
+
+    if test -n "$my_dlsyms"; then
+      case $my_dlsyms in
+      "") ;;
+      *.c)
+	# Discover the nlist of each of the dlfiles.
+	nlist=$output_objdir/$my_outputname.nm
+
+	func_show_eval "$RM $nlist ${nlist}S ${nlist}T"
+
+	# Parse the name list into a source file.
+	func_verbose "creating $output_objdir/$my_dlsyms"
+
+	$opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\
+/* $my_dlsyms - symbol resolution table for '$my_outputname' dlsym emulation. */
+/* Generated by $PROGRAM (GNU $PACKAGE) $VERSION */
+
+#ifdef __cplusplus
+extern \"C\" {
+#endif
+
+#if defined __GNUC__ && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4))
+#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
+#endif
+
+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests.  */
+#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE
+/* DATA imports from DLLs on WIN32 can't be const, because runtime
+   relocations are performed -- see ld's documentation on pseudo-relocs.  */
+# define LT_DLSYM_CONST
+#elif defined __osf__
+/* This system does not cope well with relocations in const data.  */
+# define LT_DLSYM_CONST
+#else
+# define LT_DLSYM_CONST const
+#endif
+
+#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0)
+
+/* External symbol declarations for the compiler. */\
+"
+
+	if test yes = "$dlself"; then
+	  func_verbose "generating symbol list for '$output'"
+
+	  $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist"
+
+	  # Add our own program objects to the symbol list.
+	  progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP`
+	  for progfile in $progfiles; do
+	    func_to_tool_file "$progfile" func_convert_file_msys_to_w32
+	    func_verbose "extracting global C symbols from '$func_to_tool_file_result'"
+	    $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'"
+	  done
+
+	  if test -n "$exclude_expsyms"; then
+	    $opt_dry_run || {
+	      eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
+	      eval '$MV "$nlist"T "$nlist"'
+	    }
+	  fi
+
+	  if test -n "$export_symbols_regex"; then
+	    $opt_dry_run || {
+	      eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T'
+	      eval '$MV "$nlist"T "$nlist"'
+	    }
+	  fi
+
+	  # Prepare the list of exported symbols
+	  if test -z "$export_symbols"; then
+	    export_symbols=$output_objdir/$outputname.exp
+	    $opt_dry_run || {
+	      $RM $export_symbols
+	      eval "$SED -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
+	      case $host in
+	      *cygwin* | *mingw* | *cegcc* )
+                eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
+                eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"'
+	        ;;
+	      esac
+	    }
+	  else
+	    $opt_dry_run || {
+	      eval "$SED -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"'
+	      eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T'
+	      eval '$MV "$nlist"T "$nlist"'
+	      case $host in
+	        *cygwin* | *mingw* | *cegcc* )
+	          eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
+	          eval 'cat "$nlist" >> "$output_objdir/$outputname.def"'
+	          ;;
+	      esac
+	    }
+	  fi
+	fi
+
+	for dlprefile in $dlprefiles; do
+	  func_verbose "extracting global C symbols from '$dlprefile'"
+	  func_basename "$dlprefile"
+	  name=$func_basename_result
+          case $host in
+	    *cygwin* | *mingw* | *cegcc* )
+	      # if an import library, we need to obtain dlname
+	      if func_win32_import_lib_p "$dlprefile"; then
+	        func_tr_sh "$dlprefile"
+	        eval "curr_lafile=\$libfile_$func_tr_sh_result"
+	        dlprefile_dlbasename=
+	        if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then
+	          # Use subshell, to avoid clobbering current variable values
+	          dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"`
+	          if test -n "$dlprefile_dlname"; then
+	            func_basename "$dlprefile_dlname"
+	            dlprefile_dlbasename=$func_basename_result
+	          else
+	            # no lafile. user explicitly requested -dlpreopen <import library>.
+	            $sharedlib_from_linklib_cmd "$dlprefile"
+	            dlprefile_dlbasename=$sharedlib_from_linklib_result
+	          fi
+	        fi
+	        $opt_dry_run || {
+	          if test -n "$dlprefile_dlbasename"; then
+	            eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"'
+	          else
+	            func_warning "Could not compute DLL name from $name"
+	            eval '$ECHO ": $name " >> "$nlist"'
+	          fi
+	          func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+	          eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe |
+	            $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'"
+	        }
+	      else # not an import lib
+	        $opt_dry_run || {
+	          eval '$ECHO ": $name " >> "$nlist"'
+	          func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+	          eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'"
+	        }
+	      fi
+	    ;;
+	    *)
+	      $opt_dry_run || {
+	        eval '$ECHO ": $name " >> "$nlist"'
+	        func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+	        eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'"
+	      }
+	    ;;
+          esac
+	done
+
+	$opt_dry_run || {
+	  # Make sure we have at least an empty file.
+	  test -f "$nlist" || : > "$nlist"
+
+	  if test -n "$exclude_expsyms"; then
+	    $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T
+	    $MV "$nlist"T "$nlist"
+	  fi
+
+	  # Try sorting and uniquifying the output.
+	  if $GREP -v "^: " < "$nlist" |
+	      if sort -k 3 </dev/null >/dev/null 2>&1; then
+		sort -k 3
+	      else
+		sort +2
+	      fi |
+	      uniq > "$nlist"S; then
+	    :
+	  else
+	    $GREP -v "^: " < "$nlist" > "$nlist"S
+	  fi
+
+	  if test -f "$nlist"S; then
+	    eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"'
+	  else
+	    echo '/* NONE */' >> "$output_objdir/$my_dlsyms"
+	  fi
+
+	  func_show_eval '$RM "${nlist}I"'
+	  if test -n "$global_symbol_to_import"; then
+	    eval "$global_symbol_to_import"' < "$nlist"S > "$nlist"I'
+	  fi
+
+	  echo >> "$output_objdir/$my_dlsyms" "\
+
+/* The mapping between symbol names and symbols.  */
+typedef struct {
+  const char *name;
+  void *address;
+} lt_dlsymlist;
+extern LT_DLSYM_CONST lt_dlsymlist
+lt_${my_prefix}_LTX_preloaded_symbols[];\
+"
+
+	  if test -s "$nlist"I; then
+	    echo >> "$output_objdir/$my_dlsyms" "\
+static void lt_syminit(void)
+{
+  LT_DLSYM_CONST lt_dlsymlist *symbol = lt_${my_prefix}_LTX_preloaded_symbols;
+  for (; symbol->name; ++symbol)
+    {"
+	    $SED 's/.*/      if (STREQ (symbol->name, \"&\")) symbol->address = (void *) \&&;/' < "$nlist"I >> "$output_objdir/$my_dlsyms"
+	    echo >> "$output_objdir/$my_dlsyms" "\
+    }
+}"
+	  fi
+	  echo >> "$output_objdir/$my_dlsyms" "\
+LT_DLSYM_CONST lt_dlsymlist
+lt_${my_prefix}_LTX_preloaded_symbols[] =
+{ {\"$my_originator\", (void *) 0},"
+
+	  if test -s "$nlist"I; then
+	    echo >> "$output_objdir/$my_dlsyms" "\
+  {\"@INIT@\", (void *) &lt_syminit},"
+	  fi
+
+	  case $need_lib_prefix in
+	  no)
+	    eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms"
+	    ;;
+	  *)
+	    eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms"
+	    ;;
+	  esac
+	  echo >> "$output_objdir/$my_dlsyms" "\
+  {0, (void *) 0}
+};
+
+/* This works around a problem in the FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+  return lt_${my_prefix}_LTX_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif\
+"
+	} # !$opt_dry_run
+
+	pic_flag_for_symtable=
+	case "$compile_command " in
+	*" -static "*) ;;
+	*)
+	  case $host in
+	  # compiling the symbol table file with pic_flag works around
+	  # a FreeBSD bug that causes programs to crash when -lm is
+	  # linked before any other PIC object.  But we must not use
+	  # pic_flag when linking with -static.  The problem exists in
+	  # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
+	  *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
+	    pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;;
+	  *-*-hpux*)
+	    pic_flag_for_symtable=" $pic_flag"  ;;
+	  *)
+	    $my_pic_p && pic_flag_for_symtable=" $pic_flag"
+	    ;;
+	  esac
+	  ;;
+	esac
+	symtab_cflags=
+	for arg in $LTCFLAGS; do
+	  case $arg in
+	  -pie | -fpie | -fPIE) ;;
+	  *) func_append symtab_cflags " $arg" ;;
+	  esac
+	done
+
+	# Now compile the dynamic symbol file.
+	func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?'
+
+	# Clean up the generated files.
+	func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T" "${nlist}I"'
+
+	# Transform the symbol file into the correct name.
+	symfileobj=$output_objdir/${my_outputname}S.$objext
+	case $host in
+	*cygwin* | *mingw* | *cegcc* )
+	  if test -f "$output_objdir/$my_outputname.def"; then
+	    compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
+	    finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
+	  else
+	    compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+	    finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+	  fi
+	  ;;
+	*)
+	  compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+	  finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+	  ;;
+	esac
+	;;
+      *)
+	func_fatal_error "unknown suffix for '$my_dlsyms'"
+	;;
+      esac
+    else
+      # We keep going just in case the user didn't refer to
+      # lt_preloaded_symbols.  The linker will fail if global_symbol_pipe
+      # really was required.
+
+      # Nullify the symbol file.
+      compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"`
+      finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"`
+    fi
+}
+
+# func_cygming_gnu_implib_p ARG
+# This predicate returns with zero status (TRUE) if
+# ARG is a GNU/binutils-style import library. Returns
+# with nonzero status (FALSE) otherwise.
+func_cygming_gnu_implib_p ()
+{
+  $debug_cmd
+
+  func_to_tool_file "$1" func_convert_file_msys_to_w32
+  func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'`
+  test -n "$func_cygming_gnu_implib_tmp"
+}
+
+# func_cygming_ms_implib_p ARG
+# This predicate returns with zero status (TRUE) if
+# ARG is an MS-style import library. Returns
+# with nonzero status (FALSE) otherwise.
+func_cygming_ms_implib_p ()
+{
+  $debug_cmd
+
+  func_to_tool_file "$1" func_convert_file_msys_to_w32
+  func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'`
+  test -n "$func_cygming_ms_implib_tmp"
+}
+
+# func_win32_libid arg
+# return the library type of file 'arg'
+#
+# Need a lot of goo to handle *both* DLLs and import libs
+# Has to be a shell function in order to 'eat' the argument
+# that is supplied when $file_magic_command is called.
+# Despite the name, it also deals with 64-bit binaries.
+func_win32_libid ()
+{
+  $debug_cmd
+
+  win32_libid_type=unknown
+  win32_fileres=`file -L $1 2>/dev/null`
+  case $win32_fileres in
+  *ar\ archive\ import\ library*) # definitely import
+    win32_libid_type="x86 archive import"
+    ;;
+  *ar\ archive*) # could be an import, or static
+    # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD.
+    if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null |
+       $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then
+      case $nm_interface in
+      "MS dumpbin")
+	if func_cygming_ms_implib_p "$1" ||
+	   func_cygming_gnu_implib_p "$1"
+	then
+	  win32_nmres=import
+	else
+	  win32_nmres=
+	fi
+	;;
+      *)
+	func_to_tool_file "$1" func_convert_file_msys_to_w32
+	win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" |
+	  $SED -n -e '
+	    1,100{
+		/ I /{
+		    s|.*|import|
+		    p
+		    q
+		}
+	    }'`
+	;;
+      esac
+      case $win32_nmres in
+      import*)  win32_libid_type="x86 archive import";;
+      *)        win32_libid_type="x86 archive static";;
+      esac
+    fi
+    ;;
+  *DLL*)
+    win32_libid_type="x86 DLL"
+    ;;
+  *executable*) # but shell scripts are "executable" too...
+    case $win32_fileres in
+    *MS\ Windows\ PE\ Intel*)
+      win32_libid_type="x86 DLL"
+      ;;
+    esac
+    ;;
+  esac
+  $ECHO "$win32_libid_type"
+}
+
+# func_cygming_dll_for_implib ARG
+#
+# Platform-specific function to extract the
+# name of the DLL associated with the specified
+# import library ARG.
+# Invoked by eval'ing the libtool variable
+#    $sharedlib_from_linklib_cmd
+# Result is available in the variable
+#    $sharedlib_from_linklib_result
+func_cygming_dll_for_implib ()
+{
+  $debug_cmd
+
+  sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"`
+}
+
+# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs
+#
+# This is the core of a fallback implementation of a
+# platform-specific function to extract the name of the
+# DLL associated with the specified import library LIBNAME.
+#
+# SECTION_NAME is either .idata$6 or .idata$7, depending
+# on the platform and compiler that created the implib.
+#
+# Echoes the name of the DLL associated with the
+# specified import library.
+func_cygming_dll_for_implib_fallback_core ()
+{
+  $debug_cmd
+
+  match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"`
+  $OBJDUMP -s --section "$1" "$2" 2>/dev/null |
+    $SED '/^Contents of section '"$match_literal"':/{
+      # Place marker at beginning of archive member dllname section
+      s/.*/====MARK====/
+      p
+      d
+    }
+    # These lines can sometimes be longer than 43 characters, but
+    # are always uninteresting
+    /:[	 ]*file format pe[i]\{,1\}-/d
+    /^In archive [^:]*:/d
+    # Ensure marker is printed
+    /^====MARK====/p
+    # Remove all lines with less than 43 characters
+    /^.\{43\}/!d
+    # From remaining lines, remove first 43 characters
+    s/^.\{43\}//' |
+    $SED -n '
+      # Join marker and all lines until next marker into a single line
+      /^====MARK====/ b para
+      H
+      $ b para
+      b
+      :para
+      x
+      s/\n//g
+      # Remove the marker
+      s/^====MARK====//
+      # Remove trailing dots and whitespace
+      s/[\. \t]*$//
+      # Print
+      /./p' |
+    # we now have a list, one entry per line, of the stringified
+    # contents of the appropriate section of all members of the
+    # archive that possess that section. Heuristic: eliminate
+    # all those that have a first or second character that is
+    # a '.' (that is, objdump's representation of an unprintable
+    # character.) This should work for all archives with less than
+    # 0x302f exports -- but will fail for DLLs whose name actually
+    # begins with a literal '.' or a single character followed by
+    # a '.'.
+    #
+    # Of those that remain, print the first one.
+    $SED -e '/^\./d;/^.\./d;q'
+}
+
+# func_cygming_dll_for_implib_fallback ARG
+# Platform-specific function to extract the
+# name of the DLL associated with the specified
+# import library ARG.
+#
+# This fallback implementation is for use when $DLLTOOL
+# does not support the --identify-strict option.
+# Invoked by eval'ing the libtool variable
+#    $sharedlib_from_linklib_cmd
+# Result is available in the variable
+#    $sharedlib_from_linklib_result
+func_cygming_dll_for_implib_fallback ()
+{
+  $debug_cmd
+
+  if func_cygming_gnu_implib_p "$1"; then
+    # binutils import library
+    sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"`
+  elif func_cygming_ms_implib_p "$1"; then
+    # ms-generated import library
+    sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"`
+  else
+    # unknown
+    sharedlib_from_linklib_result=
+  fi
+}
+
+
+# func_extract_an_archive dir oldlib
+func_extract_an_archive ()
+{
+    $debug_cmd
+
+    f_ex_an_ar_dir=$1; shift
+    f_ex_an_ar_oldlib=$1
+    if test yes = "$lock_old_archive_extraction"; then
+      lockfile=$f_ex_an_ar_oldlib.lock
+      until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do
+	func_echo "Waiting for $lockfile to be removed"
+	sleep 2
+      done
+    fi
+    func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \
+		   'stat=$?; rm -f "$lockfile"; exit $stat'
+    if test yes = "$lock_old_archive_extraction"; then
+      $opt_dry_run || rm -f "$lockfile"
+    fi
+    if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then
+     :
+    else
+      func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib"
+    fi
+}
+
+
+# func_extract_archives gentop oldlib ...
+func_extract_archives ()
+{
+    $debug_cmd
+
+    my_gentop=$1; shift
+    my_oldlibs=${1+"$@"}
+    my_oldobjs=
+    my_xlib=
+    my_xabs=
+    my_xdir=
+
+    for my_xlib in $my_oldlibs; do
+      # Extract the objects.
+      case $my_xlib in
+	[\\/]* | [A-Za-z]:[\\/]*) my_xabs=$my_xlib ;;
+	*) my_xabs=`pwd`"/$my_xlib" ;;
+      esac
+      func_basename "$my_xlib"
+      my_xlib=$func_basename_result
+      my_xlib_u=$my_xlib
+      while :; do
+        case " $extracted_archives " in
+	*" $my_xlib_u "*)
+	  func_arith $extracted_serial + 1
+	  extracted_serial=$func_arith_result
+	  my_xlib_u=lt$extracted_serial-$my_xlib ;;
+	*) break ;;
+	esac
+      done
+      extracted_archives="$extracted_archives $my_xlib_u"
+      my_xdir=$my_gentop/$my_xlib_u
+
+      func_mkdir_p "$my_xdir"
+
+      case $host in
+      *-darwin*)
+	func_verbose "Extracting $my_xabs"
+	# Do not bother doing anything if just a dry run
+	$opt_dry_run || {
+	  darwin_orig_dir=`pwd`
+	  cd $my_xdir || exit $?
+	  darwin_archive=$my_xabs
+	  darwin_curdir=`pwd`
+	  func_basename "$darwin_archive"
+	  darwin_base_archive=$func_basename_result
+	  darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true`
+	  if test -n "$darwin_arches"; then
+	    darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'`
+	    darwin_arch=
+	    func_verbose "$darwin_base_archive has multiple architectures $darwin_arches"
+	    for darwin_arch in  $darwin_arches; do
+	      func_mkdir_p "unfat-$$/$darwin_base_archive-$darwin_arch"
+	      $LIPO -thin $darwin_arch -output "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" "$darwin_archive"
+	      cd "unfat-$$/$darwin_base_archive-$darwin_arch"
+	      func_extract_an_archive "`pwd`" "$darwin_base_archive"
+	      cd "$darwin_curdir"
+	      $RM "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive"
+	    done # $darwin_arches
+            ## Okay now we've a bunch of thin objects, gotta fatten them up :)
+	    darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$sed_basename" | sort -u`
+	    darwin_file=
+	    darwin_files=
+	    for darwin_file in $darwin_filelist; do
+	      darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP`
+	      $LIPO -create -output "$darwin_file" $darwin_files
+	    done # $darwin_filelist
+	    $RM -rf unfat-$$
+	    cd "$darwin_orig_dir"
+	  else
+	    cd $darwin_orig_dir
+	    func_extract_an_archive "$my_xdir" "$my_xabs"
+	  fi # $darwin_arches
+	} # !$opt_dry_run
+	;;
+      *)
+        func_extract_an_archive "$my_xdir" "$my_xabs"
+	;;
+      esac
+      my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP`
+    done
+
+    func_extract_archives_result=$my_oldobjs
+}
+
+
+# func_emit_wrapper [arg=no]
+#
+# Emit a libtool wrapper script on stdout.
+# Don't directly open a file because we may want to
+# incorporate the script contents within a cygwin/mingw
+# wrapper executable.  Must ONLY be called from within
+# func_mode_link because it depends on a number of variables
+# set therein.
+#
+# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR
+# variable will take.  If 'yes', then the emitted script
+# will assume that the directory where it is stored is
+# the $objdir directory.  This is a cygwin/mingw-specific
+# behavior.
+func_emit_wrapper ()
+{
+	func_emit_wrapper_arg1=${1-no}
+
+	$ECHO "\
+#! $SHELL
+
+# $output - temporary wrapper script for $objdir/$outputname
+# Generated by $PROGRAM (GNU $PACKAGE) $VERSION
+#
+# The $output program cannot be directly executed until all the libtool
+# libraries that it depends on are installed.
+#
+# This wrapper script should never be moved out of the build directory.
+# If it is, it will not operate correctly.
+
+# Sed substitution that helps us do robust quoting.  It backslashifies
+# metacharacters that are still active within double-quoted strings.
+sed_quote_subst='$sed_quote_subst'
+
+# Be Bourne compatible
+if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then
+  emulate sh
+  NULLCMD=:
+  # Zsh 3.x and 4.x perform word splitting on \${1+\"\$@\"}, which
+  # is contrary to our usage.  Disable this feature.
+  alias -g '\${1+\"\$@\"}'='\"\$@\"'
+  setopt NO_GLOB_SUBST
+else
+  case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac
+fi
+BIN_SH=xpg4; export BIN_SH # for Tru64
+DUALCASE=1; export DUALCASE # for MKS sh
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+relink_command=\"$relink_command\"
+
+# This environment variable determines our operation mode.
+if test \"\$libtool_install_magic\" = \"$magic\"; then
+  # install mode needs the following variables:
+  generated_by_libtool_version='$macro_version'
+  notinst_deplibs='$notinst_deplibs'
+else
+  # When we are sourced in execute mode, \$file and \$ECHO are already set.
+  if test \"\$libtool_execute_magic\" != \"$magic\"; then
+    file=\"\$0\""
+
+    qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"`
+    $ECHO "\
+
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+  eval 'cat <<_LTECHO_EOF
+\$1
+_LTECHO_EOF'
+}
+    ECHO=\"$qECHO\"
+  fi
+
+# Very basic option parsing. These options are (a) specific to
+# the libtool wrapper, (b) are identical between the wrapper
+# /script/ and the wrapper /executable/ that is used only on
+# windows platforms, and (c) all begin with the string "--lt-"
+# (application programs are unlikely to have options that match
+# this pattern).
+#
+# There are only two supported options: --lt-debug and
+# --lt-dump-script. There is, deliberately, no --lt-help.
+#
+# The first argument to this parsing function should be the
+# script's $0 value, followed by "$@".
+lt_option_debug=
+func_parse_lt_options ()
+{
+  lt_script_arg0=\$0
+  shift
+  for lt_opt
+  do
+    case \"\$lt_opt\" in
+    --lt-debug) lt_option_debug=1 ;;
+    --lt-dump-script)
+        lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\`
+        test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=.
+        lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\`
+        cat \"\$lt_dump_D/\$lt_dump_F\"
+        exit 0
+      ;;
+    --lt-*)
+        \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2
+        exit 1
+      ;;
+    esac
+  done
+
+  # Print the debug banner immediately:
+  if test -n \"\$lt_option_debug\"; then
+    echo \"$outputname:$output:\$LINENO: libtool wrapper (GNU $PACKAGE) $VERSION\" 1>&2
+  fi
+}
+
+# Used when --lt-debug. Prints its arguments to stdout
+# (redirection is the responsibility of the caller)
+func_lt_dump_args ()
+{
+  lt_dump_args_N=1;
+  for lt_arg
+  do
+    \$ECHO \"$outputname:$output:\$LINENO: newargv[\$lt_dump_args_N]: \$lt_arg\"
+    lt_dump_args_N=\`expr \$lt_dump_args_N + 1\`
+  done
+}
+
+# Core function for launching the target application
+func_exec_program_core ()
+{
+"
+  case $host in
+  # Backslashes separate directories on plain windows
+  *-*-mingw | *-*-os2* | *-cegcc*)
+    $ECHO "\
+      if test -n \"\$lt_option_debug\"; then
+        \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir\\\\\$program\" 1>&2
+        func_lt_dump_args \${1+\"\$@\"} 1>&2
+      fi
+      exec \"\$progdir\\\\\$program\" \${1+\"\$@\"}
+"
+    ;;
+
+  *)
+    $ECHO "\
+      if test -n \"\$lt_option_debug\"; then
+        \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir/\$program\" 1>&2
+        func_lt_dump_args \${1+\"\$@\"} 1>&2
+      fi
+      exec \"\$progdir/\$program\" \${1+\"\$@\"}
+"
+    ;;
+  esac
+  $ECHO "\
+      \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2
+      exit 1
+}
+
+# A function to encapsulate launching the target application
+# Strips options in the --lt-* namespace from \$@ and
+# launches target application with the remaining arguments.
+func_exec_program ()
+{
+  case \" \$* \" in
+  *\\ --lt-*)
+    for lt_wr_arg
+    do
+      case \$lt_wr_arg in
+      --lt-*) ;;
+      *) set x \"\$@\" \"\$lt_wr_arg\"; shift;;
+      esac
+      shift
+    done ;;
+  esac
+  func_exec_program_core \${1+\"\$@\"}
+}
+
+  # Parse options
+  func_parse_lt_options \"\$0\" \${1+\"\$@\"}
+
+  # Find the directory that this script lives in.
+  thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\`
+  test \"x\$thisdir\" = \"x\$file\" && thisdir=.
+
+  # Follow symbolic links until we get to the real thisdir.
+  file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\`
+  while test -n \"\$file\"; do
+    destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\`
+
+    # If there was a directory component, then change thisdir.
+    if test \"x\$destdir\" != \"x\$file\"; then
+      case \"\$destdir\" in
+      [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;;
+      *) thisdir=\"\$thisdir/\$destdir\" ;;
+      esac
+    fi
+
+    file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\`
+    file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\`
+  done
+
+  # Usually 'no', except on cygwin/mingw when embedded into
+  # the cwrapper.
+  WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1
+  if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then
+    # special case for '.'
+    if test \"\$thisdir\" = \".\"; then
+      thisdir=\`pwd\`
+    fi
+    # remove .libs from thisdir
+    case \"\$thisdir\" in
+    *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;;
+    $objdir )   thisdir=. ;;
+    esac
+  fi
+
+  # Try to get the absolute directory name.
+  absdir=\`cd \"\$thisdir\" && pwd\`
+  test -n \"\$absdir\" && thisdir=\"\$absdir\"
+"
+
+	if test yes = "$fast_install"; then
+	  $ECHO "\
+  program=lt-'$outputname'$exeext
+  progdir=\"\$thisdir/$objdir\"
+
+  if test ! -f \"\$progdir/\$program\" ||
+     { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | $SED 1q\`; \\
+       test \"X\$file\" != \"X\$progdir/\$program\"; }; then
+
+    file=\"\$\$-\$program\"
+
+    if test ! -d \"\$progdir\"; then
+      $MKDIR \"\$progdir\"
+    else
+      $RM \"\$progdir/\$file\"
+    fi"
+
+	  $ECHO "\
+
+    # relink executable if necessary
+    if test -n \"\$relink_command\"; then
+      if relink_command_output=\`eval \$relink_command 2>&1\`; then :
+      else
+	\$ECHO \"\$relink_command_output\" >&2
+	$RM \"\$progdir/\$file\"
+	exit 1
+      fi
+    fi
+
+    $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null ||
+    { $RM \"\$progdir/\$program\";
+      $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; }
+    $RM \"\$progdir/\$file\"
+  fi"
+	else
+	  $ECHO "\
+  program='$outputname'
+  progdir=\"\$thisdir/$objdir\"
+"
+	fi
+
+	$ECHO "\
+
+  if test -f \"\$progdir/\$program\"; then"
+
+	# Fix the DLL searchpath if we need to.  Do this before prepending
+	# to shlibpath, because on Windows, both are PATH and uninstalled
+	# libraries must come first.
+	if test -n "$dllsearchpath"; then
+	  $ECHO "\
+    # Add the dll search path components to the executable PATH
+    PATH=$dllsearchpath:\$PATH
+"
+	fi
+
+	# Export our shlibpath_var if we have one.
+	if test yes = "$shlibpath_overrides_runpath" && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
+	  $ECHO "\
+    # Add our own library path to $shlibpath_var
+    $shlibpath_var=\"$temp_rpath\$$shlibpath_var\"
+
+    # Some systems cannot cope with colon-terminated $shlibpath_var
+    # The second colon is a workaround for a bug in BeOS R4 sed
+    $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\`
+
+    export $shlibpath_var
+"
+	fi
+
+	$ECHO "\
+    if test \"\$libtool_execute_magic\" != \"$magic\"; then
+      # Run the actual program with our arguments.
+      func_exec_program \${1+\"\$@\"}
+    fi
+  else
+    # The program doesn't exist.
+    \$ECHO \"\$0: error: '\$progdir/\$program' does not exist\" 1>&2
+    \$ECHO \"This script is just a wrapper for \$program.\" 1>&2
+    \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2
+    exit 1
+  fi
+fi\
+"
+}
+
+
+# func_emit_cwrapperexe_src
+# emit the source code for a wrapper executable on stdout
+# Must ONLY be called from within func_mode_link because
+# it depends on a number of variables set therein.
+func_emit_cwrapperexe_src ()
+{
+	cat <<EOF
+
+/* $cwrappersource - temporary wrapper executable for $objdir/$outputname
+   Generated by $PROGRAM (GNU $PACKAGE) $VERSION
+
+   The $output program cannot be directly executed until all the libtool
+   libraries that it depends on are installed.
+
+   This wrapper executable should never be moved out of the build directory.
+   If it is, it will not operate correctly.
+*/
+EOF
+	    cat <<"EOF"
+#ifdef _MSC_VER
+# define _CRT_SECURE_NO_DEPRECATE 1
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef _MSC_VER
+# include <direct.h>
+# include <process.h>
+# include <io.h>
+#else
+# include <unistd.h>
+# include <stdint.h>
+# ifdef __CYGWIN__
+#  include <io.h>
+# endif
+#endif
+#include <malloc.h>
+#include <stdarg.h>
+#include <assert.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+
+#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0)
+
+/* declarations of non-ANSI functions */
+#if defined __MINGW32__
+# ifdef __STRICT_ANSI__
+int _putenv (const char *);
+# endif
+#elif defined __CYGWIN__
+# ifdef __STRICT_ANSI__
+char *realpath (const char *, char *);
+int putenv (char *);
+int setenv (const char *, const char *, int);
+# endif
+/* #elif defined other_platform || defined ... */
+#endif
+
+/* portability defines, excluding path handling macros */
+#if defined _MSC_VER
+# define setmode _setmode
+# define stat    _stat
+# define chmod   _chmod
+# define getcwd  _getcwd
+# define putenv  _putenv
+# define S_IXUSR _S_IEXEC
+#elif defined __MINGW32__
+# define setmode _setmode
+# define stat    _stat
+# define chmod   _chmod
+# define getcwd  _getcwd
+# define putenv  _putenv
+#elif defined __CYGWIN__
+# define HAVE_SETENV
+# define FOPEN_WB "wb"
+/* #elif defined other platforms ... */
+#endif
+
+#if defined PATH_MAX
+# define LT_PATHMAX PATH_MAX
+#elif defined MAXPATHLEN
+# define LT_PATHMAX MAXPATHLEN
+#else
+# define LT_PATHMAX 1024
+#endif
+
+#ifndef S_IXOTH
+# define S_IXOTH 0
+#endif
+#ifndef S_IXGRP
+# define S_IXGRP 0
+#endif
+
+/* path handling portability macros */
+#ifndef DIR_SEPARATOR
+# define DIR_SEPARATOR '/'
+# define PATH_SEPARATOR ':'
+#endif
+
+#if defined _WIN32 || defined __MSDOS__ || defined __DJGPP__ || \
+  defined __OS2__
+# define HAVE_DOS_BASED_FILE_SYSTEM
+# define FOPEN_WB "wb"
+# ifndef DIR_SEPARATOR_2
+#  define DIR_SEPARATOR_2 '\\'
+# endif
+# ifndef PATH_SEPARATOR_2
+#  define PATH_SEPARATOR_2 ';'
+# endif
+#endif
+
+#ifndef DIR_SEPARATOR_2
+# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR)
+#else /* DIR_SEPARATOR_2 */
+# define IS_DIR_SEPARATOR(ch) \
+	(((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2))
+#endif /* DIR_SEPARATOR_2 */
+
+#ifndef PATH_SEPARATOR_2
+# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR)
+#else /* PATH_SEPARATOR_2 */
+# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2)
+#endif /* PATH_SEPARATOR_2 */
+
+#ifndef FOPEN_WB
+# define FOPEN_WB "w"
+#endif
+#ifndef _O_BINARY
+# define _O_BINARY 0
+#endif
+
+#define XMALLOC(type, num)      ((type *) xmalloc ((num) * sizeof(type)))
+#define XFREE(stale) do { \
+  if (stale) { free (stale); stale = 0; } \
+} while (0)
+
+#if defined LT_DEBUGWRAPPER
+static int lt_debug = 1;
+#else
+static int lt_debug = 0;
+#endif
+
+const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */
+
+void *xmalloc (size_t num);
+char *xstrdup (const char *string);
+const char *base_name (const char *name);
+char *find_executable (const char *wrapper);
+char *chase_symlinks (const char *pathspec);
+int make_executable (const char *path);
+int check_executable (const char *path);
+char *strendzap (char *str, const char *pat);
+void lt_debugprintf (const char *file, int line, const char *fmt, ...);
+void lt_fatal (const char *file, int line, const char *message, ...);
+static const char *nonnull (const char *s);
+static const char *nonempty (const char *s);
+void lt_setenv (const char *name, const char *value);
+char *lt_extend_str (const char *orig_value, const char *add, int to_end);
+void lt_update_exe_path (const char *name, const char *value);
+void lt_update_lib_path (const char *name, const char *value);
+char **prepare_spawn (char **argv);
+void lt_dump_script (FILE *f);
+EOF
+
+	    cat <<EOF
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5)
+# define externally_visible volatile
+#else
+# define externally_visible __attribute__((externally_visible)) volatile
+#endif
+externally_visible const char * MAGIC_EXE = "$magic_exe";
+const char * LIB_PATH_VARNAME = "$shlibpath_var";
+EOF
+
+	    if test yes = "$shlibpath_overrides_runpath" && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
+              func_to_host_path "$temp_rpath"
+	      cat <<EOF
+const char * LIB_PATH_VALUE   = "$func_to_host_path_result";
+EOF
+	    else
+	      cat <<"EOF"
+const char * LIB_PATH_VALUE   = "";
+EOF
+	    fi
+
+	    if test -n "$dllsearchpath"; then
+              func_to_host_path "$dllsearchpath:"
+	      cat <<EOF
+const char * EXE_PATH_VARNAME = "PATH";
+const char * EXE_PATH_VALUE   = "$func_to_host_path_result";
+EOF
+	    else
+	      cat <<"EOF"
+const char * EXE_PATH_VARNAME = "";
+const char * EXE_PATH_VALUE   = "";
+EOF
+	    fi
+
+	    if test yes = "$fast_install"; then
+	      cat <<EOF
+const char * TARGET_PROGRAM_NAME = "lt-$outputname"; /* hopefully, no .exe */
+EOF
+	    else
+	      cat <<EOF
+const char * TARGET_PROGRAM_NAME = "$outputname"; /* hopefully, no .exe */
+EOF
+	    fi
+
+
+	    cat <<"EOF"
+
+#define LTWRAPPER_OPTION_PREFIX         "--lt-"
+
+static const char *ltwrapper_option_prefix = LTWRAPPER_OPTION_PREFIX;
+static const char *dumpscript_opt       = LTWRAPPER_OPTION_PREFIX "dump-script";
+static const char *debug_opt            = LTWRAPPER_OPTION_PREFIX "debug";
+
+int
+main (int argc, char *argv[])
+{
+  char **newargz;
+  int  newargc;
+  char *tmp_pathspec;
+  char *actual_cwrapper_path;
+  char *actual_cwrapper_name;
+  char *target_name;
+  char *lt_argv_zero;
+  int rval = 127;
+
+  int i;
+
+  program_name = (char *) xstrdup (base_name (argv[0]));
+  newargz = XMALLOC (char *, (size_t) argc + 1);
+
+  /* very simple arg parsing; don't want to rely on getopt
+   * also, copy all non cwrapper options to newargz, except
+   * argz[0], which is handled differently
+   */
+  newargc=0;
+  for (i = 1; i < argc; i++)
+    {
+      if (STREQ (argv[i], dumpscript_opt))
+	{
+EOF
+	    case $host in
+	      *mingw* | *cygwin* )
+		# make stdout use "unix" line endings
+		echo "          setmode(1,_O_BINARY);"
+		;;
+	      esac
+
+	    cat <<"EOF"
+	  lt_dump_script (stdout);
+	  return 0;
+	}
+      if (STREQ (argv[i], debug_opt))
+	{
+          lt_debug = 1;
+          continue;
+	}
+      if (STREQ (argv[i], ltwrapper_option_prefix))
+        {
+          /* however, if there is an option in the LTWRAPPER_OPTION_PREFIX
+             namespace, but it is not one of the ones we know about and
+             have already dealt with, above (including dump-script), then
+             report an error. Otherwise, targets might begin to believe
+             they are allowed to use options in the LTWRAPPER_OPTION_PREFIX
+             namespace. The first time any user complains about this, we'll
+             need to make LTWRAPPER_OPTION_PREFIX a configure-time option
+             or a configure.ac-settable value.
+           */
+          lt_fatal (__FILE__, __LINE__,
+		    "unrecognized %s option: '%s'",
+                    ltwrapper_option_prefix, argv[i]);
+        }
+      /* otherwise ... */
+      newargz[++newargc] = xstrdup (argv[i]);
+    }
+  newargz[++newargc] = NULL;
+
+EOF
+	    cat <<EOF
+  /* The GNU banner must be the first non-error debug message */
+  lt_debugprintf (__FILE__, __LINE__, "libtool wrapper (GNU $PACKAGE) $VERSION\n");
+EOF
+	    cat <<"EOF"
+  lt_debugprintf (__FILE__, __LINE__, "(main) argv[0]: %s\n", argv[0]);
+  lt_debugprintf (__FILE__, __LINE__, "(main) program_name: %s\n", program_name);
+
+  tmp_pathspec = find_executable (argv[0]);
+  if (tmp_pathspec == NULL)
+    lt_fatal (__FILE__, __LINE__, "couldn't find %s", argv[0]);
+  lt_debugprintf (__FILE__, __LINE__,
+                  "(main) found exe (before symlink chase) at: %s\n",
+		  tmp_pathspec);
+
+  actual_cwrapper_path = chase_symlinks (tmp_pathspec);
+  lt_debugprintf (__FILE__, __LINE__,
+                  "(main) found exe (after symlink chase) at: %s\n",
+		  actual_cwrapper_path);
+  XFREE (tmp_pathspec);
+
+  actual_cwrapper_name = xstrdup (base_name (actual_cwrapper_path));
+  strendzap (actual_cwrapper_path, actual_cwrapper_name);
+
+  /* wrapper name transforms */
+  strendzap (actual_cwrapper_name, ".exe");
+  tmp_pathspec = lt_extend_str (actual_cwrapper_name, ".exe", 1);
+  XFREE (actual_cwrapper_name);
+  actual_cwrapper_name = tmp_pathspec;
+  tmp_pathspec = 0;
+
+  /* target_name transforms -- use actual target program name; might have lt- prefix */
+  target_name = xstrdup (base_name (TARGET_PROGRAM_NAME));
+  strendzap (target_name, ".exe");
+  tmp_pathspec = lt_extend_str (target_name, ".exe", 1);
+  XFREE (target_name);
+  target_name = tmp_pathspec;
+  tmp_pathspec = 0;
+
+  lt_debugprintf (__FILE__, __LINE__,
+		  "(main) libtool target name: %s\n",
+		  target_name);
+EOF
+
+	    cat <<EOF
+  newargz[0] =
+    XMALLOC (char, (strlen (actual_cwrapper_path) +
+		    strlen ("$objdir") + 1 + strlen (actual_cwrapper_name) + 1));
+  strcpy (newargz[0], actual_cwrapper_path);
+  strcat (newargz[0], "$objdir");
+  strcat (newargz[0], "/");
+EOF
+
+	    cat <<"EOF"
+  /* stop here, and copy so we don't have to do this twice */
+  tmp_pathspec = xstrdup (newargz[0]);
+
+  /* do NOT want the lt- prefix here, so use actual_cwrapper_name */
+  strcat (newargz[0], actual_cwrapper_name);
+
+  /* DO want the lt- prefix here if it exists, so use target_name */
+  lt_argv_zero = lt_extend_str (tmp_pathspec, target_name, 1);
+  XFREE (tmp_pathspec);
+  tmp_pathspec = NULL;
+EOF
+
+	    case $host_os in
+	      mingw*)
+	    cat <<"EOF"
+  {
+    char* p;
+    while ((p = strchr (newargz[0], '\\')) != NULL)
+      {
+	*p = '/';
+      }
+    while ((p = strchr (lt_argv_zero, '\\')) != NULL)
+      {
+	*p = '/';
+      }
+  }
+EOF
+	    ;;
+	    esac
+
+	    cat <<"EOF"
+  XFREE (target_name);
+  XFREE (actual_cwrapper_path);
+  XFREE (actual_cwrapper_name);
+
+  lt_setenv ("BIN_SH", "xpg4"); /* for Tru64 */
+  lt_setenv ("DUALCASE", "1");  /* for MKS sh */
+  /* Update the DLL searchpath.  EXE_PATH_VALUE ($dllsearchpath) must
+     be prepended before (that is, appear after) LIB_PATH_VALUE ($temp_rpath)
+     because on Windows, both *_VARNAMEs are PATH but uninstalled
+     libraries must come first. */
+  lt_update_exe_path (EXE_PATH_VARNAME, EXE_PATH_VALUE);
+  lt_update_lib_path (LIB_PATH_VARNAME, LIB_PATH_VALUE);
+
+  lt_debugprintf (__FILE__, __LINE__, "(main) lt_argv_zero: %s\n",
+		  nonnull (lt_argv_zero));
+  for (i = 0; i < newargc; i++)
+    {
+      lt_debugprintf (__FILE__, __LINE__, "(main) newargz[%d]: %s\n",
+		      i, nonnull (newargz[i]));
+    }
+
+EOF
+
+	    case $host_os in
+	      mingw*)
+		cat <<"EOF"
+  /* execv doesn't actually work on mingw as expected on unix */
+  newargz = prepare_spawn (newargz);
+  rval = (int) _spawnv (_P_WAIT, lt_argv_zero, (const char * const *) newargz);
+  if (rval == -1)
+    {
+      /* failed to start process */
+      lt_debugprintf (__FILE__, __LINE__,
+		      "(main) failed to launch target \"%s\": %s\n",
+		      lt_argv_zero, nonnull (strerror (errno)));
+      return 127;
+    }
+  return rval;
+EOF
+		;;
+	      *)
+		cat <<"EOF"
+  execv (lt_argv_zero, newargz);
+  return rval; /* =127, but avoids unused variable warning */
+EOF
+		;;
+	    esac
+
+	    cat <<"EOF"
+}
+
+void *
+xmalloc (size_t num)
+{
+  void *p = (void *) malloc (num);
+  if (!p)
+    lt_fatal (__FILE__, __LINE__, "memory exhausted");
+
+  return p;
+}
+
+char *
+xstrdup (const char *string)
+{
+  return string ? strcpy ((char *) xmalloc (strlen (string) + 1),
+			  string) : NULL;
+}
+
+const char *
+base_name (const char *name)
+{
+  const char *base;
+
+#if defined HAVE_DOS_BASED_FILE_SYSTEM
+  /* Skip over the disk name in MSDOS pathnames. */
+  if (isalpha ((unsigned char) name[0]) && name[1] == ':')
+    name += 2;
+#endif
+
+  for (base = name; *name; name++)
+    if (IS_DIR_SEPARATOR (*name))
+      base = name + 1;
+  return base;
+}
+
+int
+check_executable (const char *path)
+{
+  struct stat st;
+
+  lt_debugprintf (__FILE__, __LINE__, "(check_executable): %s\n",
+                  nonempty (path));
+  if ((!path) || (!*path))
+    return 0;
+
+  if ((stat (path, &st) >= 0)
+      && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH)))
+    return 1;
+  else
+    return 0;
+}
+
+int
+make_executable (const char *path)
+{
+  int rval = 0;
+  struct stat st;
+
+  lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n",
+                  nonempty (path));
+  if ((!path) || (!*path))
+    return 0;
+
+  if (stat (path, &st) >= 0)
+    {
+      rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR);
+    }
+  return rval;
+}
+
+/* Searches for the full path of the wrapper.  Returns
+   newly allocated full path name if found, NULL otherwise
+   Does not chase symlinks, even on platforms that support them.
+*/
+char *
+find_executable (const char *wrapper)
+{
+  int has_slash = 0;
+  const char *p;
+  const char *p_next;
+  /* static buffer for getcwd */
+  char tmp[LT_PATHMAX + 1];
+  size_t tmp_len;
+  char *concat_name;
+
+  lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n",
+                  nonempty (wrapper));
+
+  if ((wrapper == NULL) || (*wrapper == '\0'))
+    return NULL;
+
+  /* Absolute path? */
+#if defined HAVE_DOS_BASED_FILE_SYSTEM
+  if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':')
+    {
+      concat_name = xstrdup (wrapper);
+      if (check_executable (concat_name))
+	return concat_name;
+      XFREE (concat_name);
+    }
+  else
+    {
+#endif
+      if (IS_DIR_SEPARATOR (wrapper[0]))
+	{
+	  concat_name = xstrdup (wrapper);
+	  if (check_executable (concat_name))
+	    return concat_name;
+	  XFREE (concat_name);
+	}
+#if defined HAVE_DOS_BASED_FILE_SYSTEM
+    }
+#endif
+
+  for (p = wrapper; *p; p++)
+    if (*p == '/')
+      {
+	has_slash = 1;
+	break;
+      }
+  if (!has_slash)
+    {
+      /* no slashes; search PATH */
+      const char *path = getenv ("PATH");
+      if (path != NULL)
+	{
+	  for (p = path; *p; p = p_next)
+	    {
+	      const char *q;
+	      size_t p_len;
+	      for (q = p; *q; q++)
+		if (IS_PATH_SEPARATOR (*q))
+		  break;
+	      p_len = (size_t) (q - p);
+	      p_next = (*q == '\0' ? q : q + 1);
+	      if (p_len == 0)
+		{
+		  /* empty path: current directory */
+		  if (getcwd (tmp, LT_PATHMAX) == NULL)
+		    lt_fatal (__FILE__, __LINE__, "getcwd failed: %s",
+                              nonnull (strerror (errno)));
+		  tmp_len = strlen (tmp);
+		  concat_name =
+		    XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1);
+		  memcpy (concat_name, tmp, tmp_len);
+		  concat_name[tmp_len] = '/';
+		  strcpy (concat_name + tmp_len + 1, wrapper);
+		}
+	      else
+		{
+		  concat_name =
+		    XMALLOC (char, p_len + 1 + strlen (wrapper) + 1);
+		  memcpy (concat_name, p, p_len);
+		  concat_name[p_len] = '/';
+		  strcpy (concat_name + p_len + 1, wrapper);
+		}
+	      if (check_executable (concat_name))
+		return concat_name;
+	      XFREE (concat_name);
+	    }
+	}
+      /* not found in PATH; assume curdir */
+    }
+  /* Relative path | not found in path: prepend cwd */
+  if (getcwd (tmp, LT_PATHMAX) == NULL)
+    lt_fatal (__FILE__, __LINE__, "getcwd failed: %s",
+              nonnull (strerror (errno)));
+  tmp_len = strlen (tmp);
+  concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1);
+  memcpy (concat_name, tmp, tmp_len);
+  concat_name[tmp_len] = '/';
+  strcpy (concat_name + tmp_len + 1, wrapper);
+
+  if (check_executable (concat_name))
+    return concat_name;
+  XFREE (concat_name);
+  return NULL;
+}
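+/* Illustrative note (not part of the upstream sources): find_executable ("foo")
+   tries each PATH component in turn and finally "<cwd>/foo"; a relative name
+   that already contains a '/' skips the PATH search and is resolved against
+   the current directory only, while an absolute name is checked directly.  */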
+
+char *
+chase_symlinks (const char *pathspec)
+{
+#ifndef S_ISLNK
+  return xstrdup (pathspec);
+#else
+  char buf[LT_PATHMAX];
+  struct stat s;
+  char *tmp_pathspec = xstrdup (pathspec);
+  char *p;
+  int has_symlinks = 0;
+  while (strlen (tmp_pathspec) && !has_symlinks)
+    {
+      lt_debugprintf (__FILE__, __LINE__,
+		      "checking path component for symlinks: %s\n",
+		      tmp_pathspec);
+      if (lstat (tmp_pathspec, &s) == 0)
+	{
+	  if (S_ISLNK (s.st_mode) != 0)
+	    {
+	      has_symlinks = 1;
+	      break;
+	    }
+
+	  /* search backwards for last DIR_SEPARATOR */
+	  p = tmp_pathspec + strlen (tmp_pathspec) - 1;
+	  while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p)))
+	    p--;
+	  if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p)))
+	    {
+	      /* no more DIR_SEPARATORS left */
+	      break;
+	    }
+	  *p = '\0';
+	}
+      else
+	{
+	  lt_fatal (__FILE__, __LINE__,
+		    "error accessing file \"%s\": %s",
+		    tmp_pathspec, nonnull (strerror (errno)));
+	}
+    }
+  XFREE (tmp_pathspec);
+
+  if (!has_symlinks)
+    {
+      return xstrdup (pathspec);
+    }
+
+  tmp_pathspec = realpath (pathspec, buf);
+  if (tmp_pathspec == 0)
+    {
+      lt_fatal (__FILE__, __LINE__,
+		"could not follow symlinks for %s", pathspec);
+    }
+  return xstrdup (tmp_pathspec);
+#endif
+}
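+/* Illustrative note (not part of the upstream sources): the loop above only
+   detects whether some leading component of the path is a symlink; the actual
+   resolution is then delegated to realpath(), and a path with no symlinks is
+   returned unchanged as a fresh copy of the input.  */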
+
+char *
+strendzap (char *str, const char *pat)
+{
+  size_t len, patlen;
+
+  assert (str != NULL);
+  assert (pat != NULL);
+
+  len = strlen (str);
+  patlen = strlen (pat);
+
+  if (patlen <= len)
+    {
+      str += len - patlen;
+      if (STREQ (str, pat))
+	*str = '\0';
+    }
+  return str;
+}
+
+void
+lt_debugprintf (const char *file, int line, const char *fmt, ...)
+{
+  va_list args;
+  if (lt_debug)
+    {
+      (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line);
+      va_start (args, fmt);
+      (void) vfprintf (stderr, fmt, args);
+      va_end (args);
+    }
+}
+
+static void
+lt_error_core (int exit_status, const char *file,
+	       int line, const char *mode,
+	       const char *message, va_list ap)
+{
+  fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode);
+  vfprintf (stderr, message, ap);
+  fprintf (stderr, ".\n");
+
+  if (exit_status >= 0)
+    exit (exit_status);
+}
+
+void
+lt_fatal (const char *file, int line, const char *message, ...)
+{
+  va_list ap;
+  va_start (ap, message);
+  lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap);
+  va_end (ap);
+}
+
+static const char *
+nonnull (const char *s)
+{
+  return s ? s : "(null)";
+}
+
+static const char *
+nonempty (const char *s)
+{
+  return (s && !*s) ? "(empty)" : nonnull (s);
+}
+
+void
+lt_setenv (const char *name, const char *value)
+{
+  lt_debugprintf (__FILE__, __LINE__,
+		  "(lt_setenv) setting '%s' to '%s'\n",
+                  nonnull (name), nonnull (value));
+  {
+#ifdef HAVE_SETENV
+    /* always make a copy, for consistency with !HAVE_SETENV */
+    char *str = xstrdup (value);
+    setenv (name, str, 1);
+#else
+    size_t len = strlen (name) + 1 + strlen (value) + 1;
+    char *str = XMALLOC (char, len);
+    sprintf (str, "%s=%s", name, value);
+    if (putenv (str) != EXIT_SUCCESS)
+      {
+        XFREE (str);
+      }
+#endif
+  }
+}
+
+char *
+lt_extend_str (const char *orig_value, const char *add, int to_end)
+{
+  char *new_value;
+  if (orig_value && *orig_value)
+    {
+      size_t orig_value_len = strlen (orig_value);
+      size_t add_len = strlen (add);
+      new_value = XMALLOC (char, add_len + orig_value_len + 1);
+      if (to_end)
+        {
+          strcpy (new_value, orig_value);
+          strcpy (new_value + orig_value_len, add);
+        }
+      else
+        {
+          strcpy (new_value, add);
+          strcpy (new_value + add_len, orig_value);
+        }
+    }
+  else
+    {
+      new_value = xstrdup (add);
+    }
+  return new_value;
+}
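+/* Illustrative note (not part of the upstream sources):
+   lt_extend_str ("bar", "foo:", 0) returns "foo:bar", while
+   lt_extend_str (NULL, "foo:", 0) simply returns a copy of "foo:".
+   Callers own the result and release it with XFREE.  */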
+
+void
+lt_update_exe_path (const char *name, const char *value)
+{
+  lt_debugprintf (__FILE__, __LINE__,
+		  "(lt_update_exe_path) modifying '%s' by prepending '%s'\n",
+                  nonnull (name), nonnull (value));
+
+  if (name && *name && value && *value)
+    {
+      char *new_value = lt_extend_str (getenv (name), value, 0);
+      /* some systems can't cope with a ':'-terminated path #' */
+      size_t len = strlen (new_value);
+      while ((len > 0) && IS_PATH_SEPARATOR (new_value[len-1]))
+        {
+          new_value[--len] = '\0';
+        }
+      lt_setenv (name, new_value);
+      XFREE (new_value);
+    }
+}
+
+void
+lt_update_lib_path (const char *name, const char *value)
+{
+  lt_debugprintf (__FILE__, __LINE__,
+		  "(lt_update_lib_path) modifying '%s' by prepending '%s'\n",
+                  nonnull (name), nonnull (value));
+
+  if (name && *name && value && *value)
+    {
+      char *new_value = lt_extend_str (getenv (name), value, 0);
+      lt_setenv (name, new_value);
+      XFREE (new_value);
+    }
+}
+
+EOF
+	    case $host_os in
+	      mingw*)
+		cat <<"EOF"
+
+/* Prepares an argument vector before calling spawn().
+   Note that spawn() does not by itself call the command interpreter
+     (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") :
+      ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+         GetVersionEx(&v);
+         v.dwPlatformId == VER_PLATFORM_WIN32_NT;
+      }) ? "cmd.exe" : "command.com").
+   Instead it simply concatenates the arguments, separated by ' ', and calls
+   CreateProcess().  We must quote the arguments since Win32 CreateProcess()
+   interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a
+   special way:
+   - Space and tab are interpreted as delimiters. They are not treated as
+     delimiters if they are surrounded by double quotes: "...".
+   - Unescaped double quotes are removed from the input. Their only effect is
+     that within double quotes, space and tab are treated like normal
+     characters.
+   - Backslashes not followed by double quotes are not special.
+   - But 2*n+1 backslashes followed by a double quote become
+     n backslashes followed by a double quote (n >= 0):
+       \" -> "
+       \\\" -> \"
+       \\\\\" -> \\"
+ */
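+/* Illustration (not part of the upstream comment): under these rules the
+   quoting performed by prepare_spawn below turns the argument
+       a"b c   into   "a\"b c"
+   and a\"b    into   a\\\"b
+   so that the CreateProcess()/CRT parsing described above restores the
+   original text.  */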
+#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037"
+#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037"
+char **
+prepare_spawn (char **argv)
+{
+  size_t argc;
+  char **new_argv;
+  size_t i;
+
+  /* Count number of arguments.  */
+  for (argc = 0; argv[argc] != NULL; argc++)
+    ;
+
+  /* Allocate new argument vector.  */
+  new_argv = XMALLOC (char *, argc + 1);
+
+  /* Put quoted arguments into the new argument vector.  */
+  for (i = 0; i < argc; i++)
+    {
+      const char *string = argv[i];
+
+      if (string[0] == '\0')
+	new_argv[i] = xstrdup ("\"\"");
+      else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL)
+	{
+	  int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL);
+	  size_t length;
+	  unsigned int backslashes;
+	  const char *s;
+	  char *quoted_string;
+	  char *p;
+
+	  length = 0;
+	  backslashes = 0;
+	  if (quote_around)
+	    length++;
+	  for (s = string; *s != '\0'; s++)
+	    {
+	      char c = *s;
+	      if (c == '"')
+		length += backslashes + 1;
+	      length++;
+	      if (c == '\\')
+		backslashes++;
+	      else
+		backslashes = 0;
+	    }
+	  if (quote_around)
+	    length += backslashes + 1;
+
+	  quoted_string = XMALLOC (char, length + 1);
+
+	  p = quoted_string;
+	  backslashes = 0;
+	  if (quote_around)
+	    *p++ = '"';
+	  for (s = string; *s != '\0'; s++)
+	    {
+	      char c = *s;
+	      if (c == '"')
+		{
+		  unsigned int j;
+		  for (j = backslashes + 1; j > 0; j--)
+		    *p++ = '\\';
+		}
+	      *p++ = c;
+	      if (c == '\\')
+		backslashes++;
+	      else
+		backslashes = 0;
+	    }
+	  if (quote_around)
+	    {
+	      unsigned int j;
+	      for (j = backslashes; j > 0; j--)
+		*p++ = '\\';
+	      *p++ = '"';
+	    }
+	  *p = '\0';
+
+	  new_argv[i] = quoted_string;
+	}
+      else
+	new_argv[i] = (char *) string;
+    }
+  new_argv[argc] = NULL;
+
+  return new_argv;
+}
+EOF
+		;;
+	    esac
+
+            cat <<"EOF"
+void lt_dump_script (FILE* f)
+{
+EOF
+	    func_emit_wrapper yes |
+	      $SED -n -e '
+s/^\(.\{79\}\)\(..*\)/\1\
+\2/
+h
+s/\([\\"]\)/\\\1/g
+s/$/\\n/
+s/\([^\n]*\).*/  fputs ("\1", f);/p
+g
+D'
+            cat <<"EOF"
+}
+EOF
+}
+# end: func_emit_cwrapperexe_src
+
+# func_win32_import_lib_p ARG
+# True if ARG is an import lib, as indicated by $file_magic_cmd
+func_win32_import_lib_p ()
+{
+    $debug_cmd
+
+    case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in
+    *import*) : ;;
+    *) false ;;
+    esac
+}
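+# (illustrative note, not upstream documentation: intended as a predicate,
+#  e.g.  if func_win32_import_lib_p "$potlib"; then ...; fi  where $potlib
+#  is a candidate library file such as libfoo.dll.a)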
+
+# func_suncc_cstd_abi
+# !!ONLY CALL THIS FOR SUN CC AFTER $compile_command IS FULLY EXPANDED!!
+# Several compiler flags select an ABI that is incompatible with the
+# Cstd library. Avoid specifying it if any are in CXXFLAGS.
+func_suncc_cstd_abi ()
+{
+    $debug_cmd
+
+    case " $compile_command " in
+    *" -compat=g "*|*\ -std=c++[0-9][0-9]\ *|*" -library=stdcxx4 "*|*" -library=stlport4 "*)
+      suncc_use_cstd_abi=no
+      ;;
+    *)
+      suncc_use_cstd_abi=yes
+      ;;
+    esac
+}
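+# (illustrative note, not upstream documentation: this only records the
+#  decision in $suncc_use_cstd_abi; code later in link mode is expected to
+#  consult that variable before adding the Sun Cstd runtime to the link)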
+
+# func_mode_link arg...
+func_mode_link ()
+{
+    $debug_cmd
+
+    case $host in
+    *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*)
+      # It is impossible to link a dll without this setting, and
+      # we shouldn't force the makefile maintainer to figure out
+      # what system we are compiling for in order to pass an extra
+      # flag for every libtool invocation.
+      # allow_undefined=no
+
+      # FIXME: Unfortunately, there are problems with the above when trying
+      # to make a dll that has undefined symbols, in which case not
+      # even a static library is built.  For now, we need to specify
+      # -no-undefined on the libtool link line when we can be certain
+      # that all symbols are satisfied, otherwise we get a static library.
+      allow_undefined=yes
+      ;;
+    *)
+      allow_undefined=yes
+      ;;
+    esac
+    libtool_args=$nonopt
+    base_compile="$nonopt $@"
+    compile_command=$nonopt
+    finalize_command=$nonopt
+
+    compile_rpath=
+    finalize_rpath=
+    compile_shlibpath=
+    finalize_shlibpath=
+    convenience=
+    old_convenience=
+    deplibs=
+    old_deplibs=
+    compiler_flags=
+    linker_flags=
+    dllsearchpath=
+    lib_search_path=`pwd`
+    inst_prefix_dir=
+    new_inherited_linker_flags=
+
+    avoid_version=no
+    bindir=
+    dlfiles=
+    dlprefiles=
+    dlself=no
+    export_dynamic=no
+    export_symbols=
+    export_symbols_regex=
+    generated=
+    libobjs=
+    ltlibs=
+    module=no
+    no_install=no
+    objs=
+    os2dllname=
+    non_pic_objects=
+    precious_files_regex=
+    prefer_static_libs=no
+    preload=false
+    prev=
+    prevarg=
+    release=
+    rpath=
+    xrpath=
+    perm_rpath=
+    temp_rpath=
+    thread_safe=no
+    vinfo=
+    vinfo_number=no
+    weak_libs=
+    single_module=$wl-single_module
+    func_infer_tag $base_compile
+
+    # We need to know -static, to get the right output filenames.
+    for arg
+    do
+      case $arg in
+      -shared)
+	test yes != "$build_libtool_libs" \
+	  && func_fatal_configuration "cannot build a shared library"
+	build_old_libs=no
+	break
+	;;
+      -all-static | -static | -static-libtool-libs)
+	case $arg in
+	-all-static)
+	  if test yes = "$build_libtool_libs" && test -z "$link_static_flag"; then
+	    func_warning "complete static linking is impossible in this configuration"
+	  fi
+	  if test -n "$link_static_flag"; then
+	    dlopen_self=$dlopen_self_static
+	  fi
+	  prefer_static_libs=yes
+	  ;;
+	-static)
+	  if test -z "$pic_flag" && test -n "$link_static_flag"; then
+	    dlopen_self=$dlopen_self_static
+	  fi
+	  prefer_static_libs=built
+	  ;;
+	-static-libtool-libs)
+	  if test -z "$pic_flag" && test -n "$link_static_flag"; then
+	    dlopen_self=$dlopen_self_static
+	  fi
+	  prefer_static_libs=yes
+	  ;;
+	esac
+	build_libtool_libs=no
+	build_old_libs=yes
+	break
+	;;
+      esac
+    done
+
+    # See if our shared archives depend on static archives.
+    test -n "$old_archive_from_new_cmds" && build_old_libs=yes
+
+    # Go through the arguments, transforming them on the way.
+    while test "$#" -gt 0; do
+      arg=$1
+      shift
+      func_quote_for_eval "$arg"
+      qarg=$func_quote_for_eval_unquoted_result
+      func_append libtool_args " $func_quote_for_eval_result"
+
+      # If the previous option needs an argument, assign it.
+      if test -n "$prev"; then
+	case $prev in
+	output)
+	  func_append compile_command " @OUTPUT@"
+	  func_append finalize_command " @OUTPUT@"
+	  ;;
+	esac
+
+	case $prev in
+	bindir)
+	  bindir=$arg
+	  prev=
+	  continue
+	  ;;
+	dlfiles|dlprefiles)
+	  $preload || {
+	    # Add the symbol object into the linking commands.
+	    func_append compile_command " @SYMFILE@"
+	    func_append finalize_command " @SYMFILE@"
+	    preload=:
+	  }
+	  case $arg in
+	  *.la | *.lo) ;;  # We handle these cases below.
+	  force)
+	    if test no = "$dlself"; then
+	      dlself=needless
+	      export_dynamic=yes
+	    fi
+	    prev=
+	    continue
+	    ;;
+	  self)
+	    if test dlprefiles = "$prev"; then
+	      dlself=yes
+	    elif test dlfiles = "$prev" && test yes != "$dlopen_self"; then
+	      dlself=yes
+	    else
+	      dlself=needless
+	      export_dynamic=yes
+	    fi
+	    prev=
+	    continue
+	    ;;
+	  *)
+	    if test dlfiles = "$prev"; then
+	      func_append dlfiles " $arg"
+	    else
+	      func_append dlprefiles " $arg"
+	    fi
+	    prev=
+	    continue
+	    ;;
+	  esac
+	  ;;
+	expsyms)
+	  export_symbols=$arg
+	  test -f "$arg" \
+	    || func_fatal_error "symbol file '$arg' does not exist"
+	  prev=
+	  continue
+	  ;;
+	expsyms_regex)
+	  export_symbols_regex=$arg
+	  prev=
+	  continue
+	  ;;
+	framework)
+	  case $host in
+	    *-*-darwin*)
+	      case "$deplibs " in
+		*" $qarg.ltframework "*) ;;
+		*) func_append deplibs " $qarg.ltframework" # this is fixed later
+		   ;;
+	      esac
+	      ;;
+	  esac
+	  prev=
+	  continue
+	  ;;
+	inst_prefix)
+	  inst_prefix_dir=$arg
+	  prev=
+	  continue
+	  ;;
+	mllvm)
+	  # Clang does not use LLVM to link, so we can simply discard any
+	  # '-mllvm $arg' options when doing the link step.
+	  prev=
+	  continue
+	  ;;
+	objectlist)
+	  if test -f "$arg"; then
+	    save_arg=$arg
+	    moreargs=
+	    for fil in `cat "$save_arg"`
+	    do
+#	      func_append moreargs " $fil"
+	      arg=$fil
+	      # A libtool-controlled object.
+
+	      # Check to see that this really is a libtool object.
+	      if func_lalib_unsafe_p "$arg"; then
+		pic_object=
+		non_pic_object=
+
+		# Read the .lo file
+		func_source "$arg"
+
+		if test -z "$pic_object" ||
+		   test -z "$non_pic_object" ||
+		   test none = "$pic_object" &&
+		   test none = "$non_pic_object"; then
+		  func_fatal_error "cannot find name of object for '$arg'"
+		fi
+
+		# Extract subdirectory from the argument.
+		func_dirname "$arg" "/" ""
+		xdir=$func_dirname_result
+
+		if test none != "$pic_object"; then
+		  # Prepend the subdirectory the object is found in.
+		  pic_object=$xdir$pic_object
+
+		  if test dlfiles = "$prev"; then
+		    if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then
+		      func_append dlfiles " $pic_object"
+		      prev=
+		      continue
+		    else
+		      # If libtool objects are unsupported, then we need to preload.
+		      prev=dlprefiles
+		    fi
+		  fi
+
+		  # CHECK ME:  I think I busted this.  -Ossama
+		  if test dlprefiles = "$prev"; then
+		    # Preload the old-style object.
+		    func_append dlprefiles " $pic_object"
+		    prev=
+		  fi
+
+		  # A PIC object.
+		  func_append libobjs " $pic_object"
+		  arg=$pic_object
+		fi
+
+		# Non-PIC object.
+		if test none != "$non_pic_object"; then
+		  # Prepend the subdirectory the object is found in.
+		  non_pic_object=$xdir$non_pic_object
+
+		  # A standard non-PIC object
+		  func_append non_pic_objects " $non_pic_object"
+		  if test -z "$pic_object" || test none = "$pic_object"; then
+		    arg=$non_pic_object
+		  fi
+		else
+		  # If the PIC object exists, use it instead.
+		  # $xdir was prepended to $pic_object above.
+		  non_pic_object=$pic_object
+		  func_append non_pic_objects " $non_pic_object"
+		fi
+	      else
+		# Only an error if not doing a dry-run.
+		if $opt_dry_run; then
+		  # Extract subdirectory from the argument.
+		  func_dirname "$arg" "/" ""
+		  xdir=$func_dirname_result
+
+		  func_lo2o "$arg"
+		  pic_object=$xdir$objdir/$func_lo2o_result
+		  non_pic_object=$xdir$func_lo2o_result
+		  func_append libobjs " $pic_object"
+		  func_append non_pic_objects " $non_pic_object"
+	        else
+		  func_fatal_error "'$arg' is not a valid libtool object"
+		fi
+	      fi
+	    done
+	  else
+	    func_fatal_error "link input file '$arg' does not exist"
+	  fi
+	  arg=$save_arg
+	  prev=
+	  continue
+	  ;;
+	os2dllname)
+	  os2dllname=$arg
+	  prev=
+	  continue
+	  ;;
+	precious_regex)
+	  precious_files_regex=$arg
+	  prev=
+	  continue
+	  ;;
+	release)
+	  release=-$arg
+	  prev=
+	  continue
+	  ;;
+	rpath | xrpath)
+	  # We need an absolute path.
+	  case $arg in
+	  [\\/]* | [A-Za-z]:[\\/]*) ;;
+	  *)
+	    func_fatal_error "only absolute run-paths are allowed"
+	    ;;
+	  esac
+	  if test rpath = "$prev"; then
+	    case "$rpath " in
+	    *" $arg "*) ;;
+	    *) func_append rpath " $arg" ;;
+	    esac
+	  else
+	    case "$xrpath " in
+	    *" $arg "*) ;;
+	    *) func_append xrpath " $arg" ;;
+	    esac
+	  fi
+	  prev=
+	  continue
+	  ;;
+	shrext)
+	  shrext_cmds=$arg
+	  prev=
+	  continue
+	  ;;
+	weak)
+	  func_append weak_libs " $arg"
+	  prev=
+	  continue
+	  ;;
+	xcclinker)
+	  func_append linker_flags " $qarg"
+	  func_append compiler_flags " $qarg"
+	  prev=
+	  func_append compile_command " $qarg"
+	  func_append finalize_command " $qarg"
+	  continue
+	  ;;
+	xcompiler)
+	  func_append compiler_flags " $qarg"
+	  prev=
+	  func_append compile_command " $qarg"
+	  func_append finalize_command " $qarg"
+	  continue
+	  ;;
+	xlinker)
+	  func_append linker_flags " $qarg"
+	  func_append compiler_flags " $wl$qarg"
+	  prev=
+	  func_append compile_command " $wl$qarg"
+	  func_append finalize_command " $wl$qarg"
+	  continue
+	  ;;
+	*)
+	  eval "$prev=\"\$arg\""
+	  prev=
+	  continue
+	  ;;
+	esac
+      fi # test -n "$prev"
+
+      prevarg=$arg
+
+      case $arg in
+      -all-static)
+	if test -n "$link_static_flag"; then
+	  # See comment for -static flag below, for more details.
+	  func_append compile_command " $link_static_flag"
+	  func_append finalize_command " $link_static_flag"
+	fi
+	continue
+	;;
+
+      -allow-undefined)
+	# FIXME: remove this flag sometime in the future.
+	func_fatal_error "'-allow-undefined' must not be used because it is the default"
+	;;
+
+      -avoid-version)
+	avoid_version=yes
+	continue
+	;;
+
+      -bindir)
+	prev=bindir
+	continue
+	;;
+
+      -dlopen)
+	prev=dlfiles
+	continue
+	;;
+
+      -dlpreopen)
+	prev=dlprefiles
+	continue
+	;;
+
+      -export-dynamic)
+	export_dynamic=yes
+	continue
+	;;
+
+      -export-symbols | -export-symbols-regex)
+	if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
+	  func_fatal_error "more than one -export-symbols argument is not allowed"
+	fi
+	if test X-export-symbols = "X$arg"; then
+	  prev=expsyms
+	else
+	  prev=expsyms_regex
+	fi
+	continue
+	;;
+
+      -framework)
+	prev=framework
+	continue
+	;;
+
+      -inst-prefix-dir)
+	prev=inst_prefix
+	continue
+	;;
+
+      # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:*
+      # so, if we see these flags, be careful not to treat them like -L
+      -L[A-Z][A-Z]*:*)
+	case $with_gcc/$host in
+	no/*-*-irix* | /*-*-irix*)
+	  func_append compile_command " $arg"
+	  func_append finalize_command " $arg"
+	  ;;
+	esac
+	continue
+	;;
+
+      -L*)
+	func_stripname "-L" '' "$arg"
+	if test -z "$func_stripname_result"; then
+	  if test "$#" -gt 0; then
+	    func_fatal_error "require no space between '-L' and '$1'"
+	  else
+	    func_fatal_error "need path for '-L' option"
+	  fi
+	fi
+	func_resolve_sysroot "$func_stripname_result"
+	dir=$func_resolve_sysroot_result
+	# We need an absolute path.
+	case $dir in
+	[\\/]* | [A-Za-z]:[\\/]*) ;;
+	*)
+	  absdir=`cd "$dir" && pwd`
+	  test -z "$absdir" && \
+	    func_fatal_error "cannot determine absolute directory name of '$dir'"
+	  dir=$absdir
+	  ;;
+	esac
+	case "$deplibs " in
+	*" -L$dir "* | *" $arg "*)
+	  # Will only happen for absolute or sysroot arguments
+	  ;;
+	*)
+	  # Preserve sysroot, but never include relative directories
+	  case $dir in
+	    [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;;
+	    *) func_append deplibs " -L$dir" ;;
+	  esac
+	  func_append lib_search_path " $dir"
+	  ;;
+	esac
+	case $host in
+	*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*)
+	  testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'`
+	  case :$dllsearchpath: in
+	  *":$dir:"*) ;;
+	  ::) dllsearchpath=$dir;;
+	  *) func_append dllsearchpath ":$dir";;
+	  esac
+	  case :$dllsearchpath: in
+	  *":$testbindir:"*) ;;
+	  ::) dllsearchpath=$testbindir;;
+	  *) func_append dllsearchpath ":$testbindir";;
+	  esac
+	  ;;
+	esac
+	continue
+	;;
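+      # (illustrative note, not upstream: the $SED 's*/lib$*/bin*' above maps
+      #  e.g. -L/opt/foo/lib to /opt/foo/bin, so that DLLs installed beside
+      #  their import libraries are also added to $dllsearchpath)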
+
+      -l*)
+	if test X-lc = "X$arg" || test X-lm = "X$arg"; then
+	  case $host in
+	  *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*)
+	    # These systems don't actually have a C or math library (as such)
+	    continue
+	    ;;
+	  *-*-os2*)
+	    # These systems don't actually have a C library (as such)
+	    test X-lc = "X$arg" && continue
+	    ;;
+	  *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*)
+	    # Do not include libc due to us having libc/libc_r.
+	    test X-lc = "X$arg" && continue
+	    ;;
+	  *-*-rhapsody* | *-*-darwin1.[012])
+	    # Rhapsody C and math libraries are in the System framework
+	    func_append deplibs " System.ltframework"
+	    continue
+	    ;;
+	  *-*-sco3.2v5* | *-*-sco5v6*)
+	    # Causes problems with __ctype
+	    test X-lc = "X$arg" && continue
+	    ;;
+	  *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*)
+	    # Compiler inserts libc in the correct place for threads to work
+	    test X-lc = "X$arg" && continue
+	    ;;
+	  esac
+	elif test X-lc_r = "X$arg"; then
+	 case $host in
+	 *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*)
+	   # Do not include libc_r directly, use -pthread flag.
+	   continue
+	   ;;
+	 esac
+	fi
+	func_append deplibs " $arg"
+	continue
+	;;
+
+      -mllvm)
+	prev=mllvm
+	continue
+	;;
+
+      -module)
+	module=yes
+	continue
+	;;
+
+      # Tru64 UNIX uses -model [arg] to determine the layout of C++
+      # classes, name mangling, and exception handling.
+      # Darwin uses the -arch flag to determine output architecture.
+      -model|-arch|-isysroot|--sysroot)
+	func_append compiler_flags " $arg"
+	func_append compile_command " $arg"
+	func_append finalize_command " $arg"
+	prev=xcompiler
+	continue
+	;;
+
+      -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \
+      |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*)
+	func_append compiler_flags " $arg"
+	func_append compile_command " $arg"
+	func_append finalize_command " $arg"
+	case "$new_inherited_linker_flags " in
+	    *" $arg "*) ;;
+	    * ) func_append new_inherited_linker_flags " $arg" ;;
+	esac
+	continue
+	;;
+
+      -multi_module)
+	single_module=$wl-multi_module
+	continue
+	;;
+
+      -no-fast-install)
+	fast_install=no
+	continue
+	;;
+
+      -no-install)
+	case $host in
+	*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*)
+	  # The PATH hackery in wrapper scripts is required on Windows
+	  # and Darwin in order for the loader to find any dlls it needs.
+	  func_warning "'-no-install' is ignored for $host"
+	  func_warning "assuming '-no-fast-install' instead"
+	  fast_install=no
+	  ;;
+	*) no_install=yes ;;
+	esac
+	continue
+	;;
+
+      -no-undefined)
+	allow_undefined=no
+	continue
+	;;
+
+      -objectlist)
+	prev=objectlist
+	continue
+	;;
+
+      -os2dllname)
+	prev=os2dllname
+	continue
+	;;
+
+      -o) prev=output ;;
+
+      -precious-files-regex)
+	prev=precious_regex
+	continue
+	;;
+
+      -release)
+	prev=release
+	continue
+	;;
+
+      -rpath)
+	prev=rpath
+	continue
+	;;
+
+      -R)
+	prev=xrpath
+	continue
+	;;
+
+      -R*)
+	func_stripname '-R' '' "$arg"
+	dir=$func_stripname_result
+	# We need an absolute path.
+	case $dir in
+	[\\/]* | [A-Za-z]:[\\/]*) ;;
+	=*)
+	  func_stripname '=' '' "$dir"
+	  dir=$lt_sysroot$func_stripname_result
+	  ;;
+	*)
+	  func_fatal_error "only absolute run-paths are allowed"
+	  ;;
+	esac
+	case "$xrpath " in
+	*" $dir "*) ;;
+	*) func_append xrpath " $dir" ;;
+	esac
+	continue
+	;;
+
+      -shared)
+	# The effects of -shared are defined in a previous loop.
+	continue
+	;;
+
+      -shrext)
+	prev=shrext
+	continue
+	;;
+
+      -static | -static-libtool-libs)
+	# The effects of -static are defined in a previous loop.
+	# We used to do the same as -all-static on platforms that
+	# didn't have a PIC flag, but the assumption that the effects
+	# would be equivalent was wrong.  It would break on at least
+	# Digital Unix and AIX.
+	continue
+	;;
+
+      -thread-safe)
+	thread_safe=yes
+	continue
+	;;
+
+      -version-info)
+	prev=vinfo
+	continue
+	;;
+
+      -version-number)
+	prev=vinfo
+	vinfo_number=yes
+	continue
+	;;
+
+      -weak)
+        prev=weak
+	continue
+	;;
+
+      -Wc,*)
+	func_stripname '-Wc,' '' "$arg"
+	args=$func_stripname_result
+	arg=
+	save_ifs=$IFS; IFS=,
+	for flag in $args; do
+	  IFS=$save_ifs
+          func_quote_for_eval "$flag"
+	  func_append arg " $func_quote_for_eval_result"
+	  func_append compiler_flags " $func_quote_for_eval_result"
+	done
+	IFS=$save_ifs
+	func_stripname ' ' '' "$arg"
+	arg=$func_stripname_result
+	;;
+
+      -Wl,*)
+	func_stripname '-Wl,' '' "$arg"
+	args=$func_stripname_result
+	arg=
+	save_ifs=$IFS; IFS=,
+	for flag in $args; do
+	  IFS=$save_ifs
+          func_quote_for_eval "$flag"
+	  func_append arg " $wl$func_quote_for_eval_result"
+	  func_append compiler_flags " $wl$func_quote_for_eval_result"
+	  func_append linker_flags " $func_quote_for_eval_result"
+	done
+	IFS=$save_ifs
+	func_stripname ' ' '' "$arg"
+	arg=$func_stripname_result
+	;;
+
+      -Xcompiler)
+	prev=xcompiler
+	continue
+	;;
+
+      -Xlinker)
+	prev=xlinker
+	continue
+	;;
+
+      -XCClinker)
+	prev=xcclinker
+	continue
+	;;
+
+      # -msg_* for osf cc
+      -msg_*)
+	func_quote_for_eval "$arg"
+	arg=$func_quote_for_eval_result
+	;;
+
+      # Flags to be passed through unchanged, with rationale:
+      # -64, -mips[0-9]      enable 64-bit mode for the SGI compiler
+      # -r[0-9][0-9]*        specify processor for the SGI compiler
+      # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler
+      # +DA*, +DD*           enable 64-bit mode for the HP compiler
+      # -q*                  compiler args for the IBM compiler
+      # -m*, -t[45]*, -txscale* architecture-specific flags for GCC
+      # -F/path              path to uninstalled frameworks, gcc on darwin
+      # -p, -pg, --coverage, -fprofile-*  profiling flags for GCC
+      # -fstack-protector*   stack protector flags for GCC
+      # @file                GCC response files
+      # -tp=*                Portland pgcc target processor selection
+      # --sysroot=*          for sysroot support
+      # -O*, -g*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization
+      # -stdlib=*            select c++ std lib with clang
+      -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \
+      -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \
+      -O*|-g*|-flto*|-fwhopr*|-fuse-linker-plugin|-fstack-protector*|-stdlib=*)
+        func_quote_for_eval "$arg"
+	arg=$func_quote_for_eval_result
+        func_append compile_command " $arg"
+        func_append finalize_command " $arg"
+        func_append compiler_flags " $arg"
+        continue
+        ;;
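+      # (illustrative note, not upstream: e.g. in
+      #    libtool --mode=link $CC -O2 -g -o libfoo.la foo.lo -rpath /usr/lib
+      #  the -O2 and -g are quoted and appended verbatim to both the compile
+      #  and finalize link commands as well as to $compiler_flags)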
+
+      -Z*)
+        if test os2 = "`expr $host : '.*\(os2\)'`"; then
+          # OS/2 uses -Zxxx to specify OS/2-specific options
+	  compiler_flags="$compiler_flags $arg"
+	  func_append compile_command " $arg"
+	  func_append finalize_command " $arg"
+	  case $arg in
+	  -Zlinker | -Zstack)
+	    prev=xcompiler
+	    ;;
+	  esac
+	  continue
+        else
+	  # Otherwise treat like 'Some other compiler flag' below
+	  func_quote_for_eval "$arg"
+	  arg=$func_quote_for_eval_result
+        fi
+	;;
+
+      # Some other compiler flag.
+      -* | +*)
+        func_quote_for_eval "$arg"
+	arg=$func_quote_for_eval_result
+	;;
+
+      *.$objext)
+	# A standard object.
+	func_append objs " $arg"
+	;;
+
+      *.lo)
+	# A libtool-controlled object.
+
+	# Check to see that this really is a libtool object.
+	if func_lalib_unsafe_p "$arg"; then
+	  pic_object=
+	  non_pic_object=
+
+	  # Read the .lo file
+	  func_source "$arg"
+
+	  if test -z "$pic_object" ||
+	     test -z "$non_pic_object" ||
+	     test none = "$pic_object" &&
+	     test none = "$non_pic_object"; then
+	    func_fatal_error "cannot find name of object for '$arg'"
+	  fi
+
+	  # Extract subdirectory from the argument.
+	  func_dirname "$arg" "/" ""
+	  xdir=$func_dirname_result
+
+	  test none = "$pic_object" || {
+	    # Prepend the subdirectory the object is found in.
+	    pic_object=$xdir$pic_object
+
+	    if test dlfiles = "$prev"; then
+	      if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then
+		func_append dlfiles " $pic_object"
+		prev=
+		continue
+	      else
+		# If libtool objects are unsupported, then we need to preload.
+		prev=dlprefiles
+	      fi
+	    fi
+
+	    # CHECK ME:  I think I busted this.  -Ossama
+	    if test dlprefiles = "$prev"; then
+	      # Preload the old-style object.
+	      func_append dlprefiles " $pic_object"
+	      prev=
+	    fi
+
+	    # A PIC object.
+	    func_append libobjs " $pic_object"
+	    arg=$pic_object
+	  }
+
+	  # Non-PIC object.
+	  if test none != "$non_pic_object"; then
+	    # Prepend the subdirectory the object is found in.
+	    non_pic_object=$xdir$non_pic_object
+
+	    # A standard non-PIC object
+	    func_append non_pic_objects " $non_pic_object"
+	    if test -z "$pic_object" || test none = "$pic_object"; then
+	      arg=$non_pic_object
+	    fi
+	  else
+	    # If the PIC object exists, use it instead.
+	    # $xdir was prepended to $pic_object above.
+	    non_pic_object=$pic_object
+	    func_append non_pic_objects " $non_pic_object"
+	  fi
+	else
+	  # Only an error if not doing a dry-run.
+	  if $opt_dry_run; then
+	    # Extract subdirectory from the argument.
+	    func_dirname "$arg" "/" ""
+	    xdir=$func_dirname_result
+
+	    func_lo2o "$arg"
+	    pic_object=$xdir$objdir/$func_lo2o_result
+	    non_pic_object=$xdir$func_lo2o_result
+	    func_append libobjs " $pic_object"
+	    func_append non_pic_objects " $non_pic_object"
+	  else
+	    func_fatal_error "'$arg' is not a valid libtool object"
+	  fi
+	fi
+	;;
+
+      *.$libext)
+	# An archive.
+	func_append deplibs " $arg"
+	func_append old_deplibs " $arg"
+	continue
+	;;
+
+      *.la)
+	# A libtool-controlled library.
+
+	func_resolve_sysroot "$arg"
+	if test dlfiles = "$prev"; then
+	  # This library was specified with -dlopen.
+	  func_append dlfiles " $func_resolve_sysroot_result"
+	  prev=
+	elif test dlprefiles = "$prev"; then
+	  # The library was specified with -dlpreopen.
+	  func_append dlprefiles " $func_resolve_sysroot_result"
+	  prev=
+	else
+	  func_append deplibs " $func_resolve_sysroot_result"
+	fi
+	continue
+	;;
+
+      # Some other compiler argument.
+      *)
+	# Unknown arguments in both finalize_command and compile_command need
+	# to be aesthetically quoted because they are evaled later.
+	func_quote_for_eval "$arg"
+	arg=$func_quote_for_eval_result
+	;;
+      esac # arg
+
+      # Now actually substitute the argument into the commands.
+      if test -n "$arg"; then
+	func_append compile_command " $arg"
+	func_append finalize_command " $arg"
+      fi
+    done # argument parsing loop
+
+    test -n "$prev" && \
+      func_fatal_help "the '$prevarg' option requires an argument"
+
+    if test yes = "$export_dynamic" && test -n "$export_dynamic_flag_spec"; then
+      eval arg=\"$export_dynamic_flag_spec\"
+      func_append compile_command " $arg"
+      func_append finalize_command " $arg"
+    fi
+
+    oldlibs=
+    # calculate the name of the file, without its directory
+    func_basename "$output"
+    outputname=$func_basename_result
+    libobjs_save=$libobjs
+
+    if test -n "$shlibpath_var"; then
+      # get the directories listed in $shlibpath_var
+      eval shlib_search_path=\`\$ECHO \"\$$shlibpath_var\" \| \$SED \'s/:/ /g\'\`
+    else
+      shlib_search_path=
+    fi
+    eval sys_lib_search_path=\"$sys_lib_search_path_spec\"
+    eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"
+
+    # Definition is injected by LT_CONFIG during libtool generation.
+    func_munge_path_list sys_lib_dlsearch_path "$LT_SYS_LIBRARY_PATH"
+
+    func_dirname "$output" "/" ""
+    output_objdir=$func_dirname_result$objdir
+    func_to_tool_file "$output_objdir/"
+    tool_output_objdir=$func_to_tool_file_result
+    # Create the object directory.
+    func_mkdir_p "$output_objdir"
+
+    # Determine the type of output
+    case $output in
+    "")
+      func_fatal_help "you must specify an output file"
+      ;;
+    *.$libext) linkmode=oldlib ;;
+    *.lo | *.$objext) linkmode=obj ;;
+    *.la) linkmode=lib ;;
+    *) linkmode=prog ;; # Anything else should be a program.
+    esac
+
+    specialdeplibs=
+
+    libs=
+    # Find all interdependent deplibs by searching for libraries
+    # that are linked more than once (e.g. -la -lb -la)
+    for deplib in $deplibs; do
+      if $opt_preserve_dup_deps; then
+	case "$libs " in
+	*" $deplib "*) func_append specialdeplibs " $deplib" ;;
+	esac
+      fi
+      func_append libs " $deplib"
+    done
+
+    if test lib = "$linkmode"; then
+      libs="$predeps $libs $compiler_lib_search_path $postdeps"
+
+      # Compute libraries that are listed more than once in $predeps
+      # $postdeps and mark them as special (i.e., whose duplicates are
+      # not to be eliminated).
+      pre_post_deps=
+      if $opt_duplicate_compiler_generated_deps; then
+	for pre_post_dep in $predeps $postdeps; do
+	  case "$pre_post_deps " in
+	  *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_dep" ;;
+	  esac
+	  func_append pre_post_deps " $pre_post_dep"
+	done
+      fi
+      pre_post_deps=
+    fi
+
+    deplibs=
+    newdependency_libs=
+    newlib_search_path=
+    need_relink=no # whether we're linking any uninstalled libtool libraries
+    notinst_deplibs= # not-installed libtool libraries
+    notinst_path= # paths that contain not-installed libtool libraries
+
+    case $linkmode in
+    lib)
+	passes="conv dlpreopen link"
+	for file in $dlfiles $dlprefiles; do
+	  case $file in
+	  *.la) ;;
+	  *)
+	    func_fatal_help "libraries can '-dlopen' only libtool libraries: $file"
+	    ;;
+	  esac
+	done
+	;;
+    prog)
+	compile_deplibs=
+	finalize_deplibs=
+	alldeplibs=false
+	newdlfiles=
+	newdlprefiles=
+	passes="conv scan dlopen dlpreopen link"
+	;;
+    *)  passes="conv"
+	;;
+    esac
+
+    for pass in $passes; do
+      # The preopen pass in lib mode reverses $deplibs; put it back here
+      # so that -L comes before libs that need it for instance...
+      if test lib,link = "$linkmode,$pass"; then
+	## FIXME: Find the place where the list is rebuilt in the wrong
+	##        order, and fix it there properly
+        tmp_deplibs=
+	for deplib in $deplibs; do
+	  tmp_deplibs="$deplib $tmp_deplibs"
+	done
+	deplibs=$tmp_deplibs
+      fi
+
+      if test lib,link = "$linkmode,$pass" ||
+	 test prog,scan = "$linkmode,$pass"; then
+	libs=$deplibs
+	deplibs=
+      fi
+      if test prog = "$linkmode"; then
+	case $pass in
+	dlopen) libs=$dlfiles ;;
+	dlpreopen) libs=$dlprefiles ;;
+	link) libs="$deplibs %DEPLIBS% $dependency_libs" ;;
+	esac
+      fi
+      if test lib,dlpreopen = "$linkmode,$pass"; then
+	# Collect and forward deplibs of preopened libtool libs
+	for lib in $dlprefiles; do
+	  # Ignore non-libtool-libs
+	  dependency_libs=
+	  func_resolve_sysroot "$lib"
+	  case $lib in
+	  *.la)	func_source "$func_resolve_sysroot_result" ;;
+	  esac
+
+	  # Collect preopened libtool deplibs, except any this library
+	  # has declared as weak libs
+	  for deplib in $dependency_libs; do
+	    func_basename "$deplib"
+            deplib_base=$func_basename_result
+	    case " $weak_libs " in
+	    *" $deplib_base "*) ;;
+	    *) func_append deplibs " $deplib" ;;
+	    esac
+	  done
+	done
+	libs=$dlprefiles
+      fi
+      if test dlopen = "$pass"; then
+	# Collect dlpreopened libraries
+	save_deplibs=$deplibs
+	deplibs=
+      fi
+
+      for deplib in $libs; do
+	lib=
+	found=false
+	case $deplib in
+	-mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \
+        |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*)
+	  if test prog,link = "$linkmode,$pass"; then
+	    compile_deplibs="$deplib $compile_deplibs"
+	    finalize_deplibs="$deplib $finalize_deplibs"
+	  else
+	    func_append compiler_flags " $deplib"
+	    if test lib = "$linkmode"; then
+		case "$new_inherited_linker_flags " in
+		    *" $deplib "*) ;;
+		    * ) func_append new_inherited_linker_flags " $deplib" ;;
+		esac
+	    fi
+	  fi
+	  continue
+	  ;;
+	-l*)
+	  if test lib != "$linkmode" && test prog != "$linkmode"; then
+	    func_warning "'-l' is ignored for archives/objects"
+	    continue
+	  fi
+	  func_stripname '-l' '' "$deplib"
+	  name=$func_stripname_result
+	  if test lib = "$linkmode"; then
+	    searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path"
+	  else
+	    searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path"
+	  fi
+	  for searchdir in $searchdirs; do
+	    for search_ext in .la $std_shrext .so .a; do
+	      # Search the libtool library
+	      lib=$searchdir/lib$name$search_ext
+	      if test -f "$lib"; then
+		if test .la = "$search_ext"; then
+		  found=:
+		else
+		  found=false
+		fi
+		break 2
+	      fi
+	    done
+	  done
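+	  # (illustrative note, not upstream: the first matching file wins, and
+	  #  only a .la hit sets found=:; a bare shared or static library found
+	  #  here is handled as a non-libtool deplib in the else branch below)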
+	  if $found; then
+	    # deplib is a libtool library
+	    # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib,
+	    # We need to do some special things here, and not later.
+	    if test yes = "$allow_libtool_libs_with_static_runtimes"; then
+	      case " $predeps $postdeps " in
+	      *" $deplib "*)
+		if func_lalib_p "$lib"; then
+		  library_names=
+		  old_library=
+		  func_source "$lib"
+		  for l in $old_library $library_names; do
+		    ll=$l
+		  done
+		  if test "X$ll" = "X$old_library"; then # only static version available
+		    found=false
+		    func_dirname "$lib" "" "."
+		    ladir=$func_dirname_result
+		    lib=$ladir/$old_library
+		    if test prog,link = "$linkmode,$pass"; then
+		      compile_deplibs="$deplib $compile_deplibs"
+		      finalize_deplibs="$deplib $finalize_deplibs"
+		    else
+		      deplibs="$deplib $deplibs"
+		      test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs"
+		    fi
+		    continue
+		  fi
+		fi
+		;;
+	      *) ;;
+	      esac
+	    fi
+	  else
+	    # deplib doesn't seem to be a libtool library
+	    if test prog,link = "$linkmode,$pass"; then
+	      compile_deplibs="$deplib $compile_deplibs"
+	      finalize_deplibs="$deplib $finalize_deplibs"
+	    else
+	      deplibs="$deplib $deplibs"
+	      test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs"
+	    fi
+	    continue
+	  fi
+	  ;; # -l
+	*.ltframework)
+	  if test prog,link = "$linkmode,$pass"; then
+	    compile_deplibs="$deplib $compile_deplibs"
+	    finalize_deplibs="$deplib $finalize_deplibs"
+	  else
+	    deplibs="$deplib $deplibs"
+	    if test lib = "$linkmode"; then
+		case "$new_inherited_linker_flags " in
+		    *" $deplib "*) ;;
+		    * ) func_append new_inherited_linker_flags " $deplib" ;;
+		esac
+	    fi
+	  fi
+	  continue
+	  ;;
+	-L*)
+	  case $linkmode in
+	  lib)
+	    deplibs="$deplib $deplibs"
+	    test conv = "$pass" && continue
+	    newdependency_libs="$deplib $newdependency_libs"
+	    func_stripname '-L' '' "$deplib"
+	    func_resolve_sysroot "$func_stripname_result"
+	    func_append newlib_search_path " $func_resolve_sysroot_result"
+	    ;;
+	  prog)
+	    if test conv = "$pass"; then
+	      deplibs="$deplib $deplibs"
+	      continue
+	    fi
+	    if test scan = "$pass"; then
+	      deplibs="$deplib $deplibs"
+	    else
+	      compile_deplibs="$deplib $compile_deplibs"
+	      finalize_deplibs="$deplib $finalize_deplibs"
+	    fi
+	    func_stripname '-L' '' "$deplib"
+	    func_resolve_sysroot "$func_stripname_result"
+	    func_append newlib_search_path " $func_resolve_sysroot_result"
+	    ;;
+	  *)
+	    func_warning "'-L' is ignored for archives/objects"
+	    ;;
+	  esac # linkmode
+	  continue
+	  ;; # -L
+	-R*)
+	  if test link = "$pass"; then
+	    func_stripname '-R' '' "$deplib"
+	    func_resolve_sysroot "$func_stripname_result"
+	    dir=$func_resolve_sysroot_result
+	    # Make sure the xrpath contains only unique directories.
+	    case "$xrpath " in
+	    *" $dir "*) ;;
+	    *) func_append xrpath " $dir" ;;
+	    esac
+	  fi
+	  deplibs="$deplib $deplibs"
+	  continue
+	  ;;
+	*.la)
+	  func_resolve_sysroot "$deplib"
+	  lib=$func_resolve_sysroot_result
+	  ;;
+	*.$libext)
+	  if test conv = "$pass"; then
+	    deplibs="$deplib $deplibs"
+	    continue
+	  fi
+	  case $linkmode in
+	  lib)
+	    # Linking convenience modules into shared libraries is allowed,
+	    # but linking other static libraries is non-portable.
+	    case " $dlpreconveniencelibs " in
+	    *" $deplib "*) ;;
+	    *)
+	      valid_a_lib=false
+	      case $deplibs_check_method in
+		match_pattern*)
+		  set dummy $deplibs_check_method; shift
+		  match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
+		  if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \
+		    | $EGREP "$match_pattern_regex" > /dev/null; then
+		    valid_a_lib=:
+		  fi
+		;;
+		pass_all)
+		  valid_a_lib=:
+		;;
+	      esac
+	      if $valid_a_lib; then
+		echo
+		$ECHO "*** Warning: Linking the shared library $output against the"
+		$ECHO "*** static library $deplib is not portable!"
+		deplibs="$deplib $deplibs"
+	      else
+		echo
+		$ECHO "*** Warning: Trying to link with static lib archive $deplib."
+		echo "*** I have the capability to make that library automatically link in when"
+		echo "*** you link to this library.  But I can only do this if you have a"
+		echo "*** shared version of the library, which you do not appear to have"
+		echo "*** because the file extension .$libext of this argument makes me believe"
+		echo "*** that it is just a static archive that I should not use here."
+	      fi
+	      ;;
+	    esac
+	    continue
+	    ;;
+	  prog)
+	    if test link != "$pass"; then
+	      deplibs="$deplib $deplibs"
+	    else
+	      compile_deplibs="$deplib $compile_deplibs"
+	      finalize_deplibs="$deplib $finalize_deplibs"
+	    fi
+	    continue
+	    ;;
+	  esac # linkmode
+	  ;; # *.$libext
+	*.lo | *.$objext)
+	  if test conv = "$pass"; then
+	    deplibs="$deplib $deplibs"
+	  elif test prog = "$linkmode"; then
+	    if test dlpreopen = "$pass" || test yes != "$dlopen_support" || test no = "$build_libtool_libs"; then
+	      # If there is no dlopen support or we're linking statically,
+	      # we need to preload.
+	      func_append newdlprefiles " $deplib"
+	      compile_deplibs="$deplib $compile_deplibs"
+	      finalize_deplibs="$deplib $finalize_deplibs"
+	    else
+	      func_append newdlfiles " $deplib"
+	    fi
+	  fi
+	  continue
+	  ;;
+	%DEPLIBS%)
+	  alldeplibs=:
+	  continue
+	  ;;
+	esac # case $deplib
+
+	$found || test -f "$lib" \
+	  || func_fatal_error "cannot find the library '$lib' or unhandled argument '$deplib'"
+
+	# Check to see that this really is a libtool archive.
+	func_lalib_unsafe_p "$lib" \
+	  || func_fatal_error "'$lib' is not a valid libtool archive"
+
+	func_dirname "$lib" "" "."
+	ladir=$func_dirname_result
+
+	dlname=
+	dlopen=
+	dlpreopen=
+	libdir=
+	library_names=
+	old_library=
+	inherited_linker_flags=
+	# If the library was installed with an old release of libtool,
+	# If the library was installed with an old release of libtool,
+	# it will not redefine the variables installed or shouldnotlink
+	shouldnotlink=no
+	avoidtemprpath=
+
+
+	# Read the .la file
+	func_source "$lib"
+
+	# Convert "-framework foo" to "foo.ltframework"
+	if test -n "$inherited_linker_flags"; then
+	  tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'`
+	  for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do
+	    case " $new_inherited_linker_flags " in
+	      *" $tmp_inherited_linker_flag "*) ;;
+	      *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";;
+	    esac
+	  done
+	fi
+	dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	if test lib,link = "$linkmode,$pass" ||
+	   test prog,scan = "$linkmode,$pass" ||
+	   { test prog != "$linkmode" && test lib != "$linkmode"; }; then
+	  test -n "$dlopen" && func_append dlfiles " $dlopen"
+	  test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen"
+	fi
+
+	if test conv = "$pass"; then
+	  # Only check for convenience libraries
+	  deplibs="$lib $deplibs"
+	  if test -z "$libdir"; then
+	    if test -z "$old_library"; then
+	      func_fatal_error "cannot find name of link library for '$lib'"
+	    fi
+	    # It is a libtool convenience library, so add in its objects.
+	    func_append convenience " $ladir/$objdir/$old_library"
+	    func_append old_convenience " $ladir/$objdir/$old_library"
+	  elif test prog != "$linkmode" && test lib != "$linkmode"; then
+	    func_fatal_error "'$lib' is not a convenience library"
+	  fi
+	  tmp_libs=
+	  for deplib in $dependency_libs; do
+	    deplibs="$deplib $deplibs"
+	    if $opt_preserve_dup_deps; then
+	      case "$tmp_libs " in
+	      *" $deplib "*) func_append specialdeplibs " $deplib" ;;
+	      esac
+	    fi
+	    func_append tmp_libs " $deplib"
+	  done
+	  continue
+	fi # $pass = conv
+
+
+	# Get the name of the library we link against.
+	linklib=
+	if test -n "$old_library" &&
+	   { test yes = "$prefer_static_libs" ||
+	     test built,no = "$prefer_static_libs,$installed"; }; then
+	  linklib=$old_library
+	else
+	  for l in $old_library $library_names; do
+	    linklib=$l
+	  done
+	fi
+	if test -z "$linklib"; then
+	  func_fatal_error "cannot find name of link library for '$lib'"
+	fi
+
+	# This library was specified with -dlopen.
+	if test dlopen = "$pass"; then
+	  test -z "$libdir" \
+	    && func_fatal_error "cannot -dlopen a convenience library: '$lib'"
+	  if test -z "$dlname" ||
+	     test yes != "$dlopen_support" ||
+	     test no = "$build_libtool_libs"
+	  then
+	    # If there is no dlname, no dlopen support or we're linking
+	    # statically, we need to preload.  We also need to preload any
+	    # dependent libraries so libltdl's deplib preloader doesn't
+	    # bomb out in the load deplibs phase.
+	    func_append dlprefiles " $lib $dependency_libs"
+	  else
+	    func_append newdlfiles " $lib"
+	  fi
+	  continue
+	fi # $pass = dlopen
+
+	# We need an absolute path.
+	case $ladir in
+	[\\/]* | [A-Za-z]:[\\/]*) abs_ladir=$ladir ;;
+	*)
+	  abs_ladir=`cd "$ladir" && pwd`
+	  if test -z "$abs_ladir"; then
+	    func_warning "cannot determine absolute directory name of '$ladir'"
+	    func_warning "passing it literally to the linker, although it might fail"
+	    abs_ladir=$ladir
+	  fi
+	  ;;
+	esac
+	func_basename "$lib"
+	laname=$func_basename_result
+
+	# Find the relevant object directory and library name.
+	if test yes = "$installed"; then
+	  if test ! -f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then
+	    func_warning "library '$lib' was moved."
+	    dir=$ladir
+	    absdir=$abs_ladir
+	    libdir=$abs_ladir
+	  else
+	    dir=$lt_sysroot$libdir
+	    absdir=$lt_sysroot$libdir
+	  fi
+	  test yes = "$hardcode_automatic" && avoidtemprpath=yes
+	else
+	  if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then
+	    dir=$ladir
+	    absdir=$abs_ladir
+	    # Remove this search path later
+	    func_append notinst_path " $abs_ladir"
+	  else
+	    dir=$ladir/$objdir
+	    absdir=$abs_ladir/$objdir
+	    # Remove this search path later
+	    func_append notinst_path " $abs_ladir"
+	  fi
+	fi # $installed = yes
+	func_stripname 'lib' '.la' "$laname"
+	name=$func_stripname_result
+
+	# This library was specified with -dlpreopen.
+	if test dlpreopen = "$pass"; then
+	  if test -z "$libdir" && test prog = "$linkmode"; then
+	    func_fatal_error "only libraries may -dlpreopen a convenience library: '$lib'"
+	  fi
+	  case $host in
+	    # special handling for platforms with PE-DLLs.
+	    *cygwin* | *mingw* | *cegcc* )
+	      # Linker will automatically link against shared library if both
+	      # static and shared are present.  Therefore, ensure we extract
+	      # symbols from the import library if a shared library is present
+	      # (otherwise, the dlopen module name will be incorrect).  We do
+	      # this by putting the import library name into $newdlprefiles.
+	      # We recover the dlopen module name by 'saving' the la file
+	      # name in a special purpose variable, and (later) extracting the
+	      # dlname from the la file.
+	      if test -n "$dlname"; then
+	        func_tr_sh "$dir/$linklib"
+	        eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname"
+	        func_append newdlprefiles " $dir/$linklib"
+	      else
+	        func_append newdlprefiles " $dir/$old_library"
+	        # Keep a list of preopened convenience libraries to check
+	        # that they are being used correctly in the link pass.
+	        test -z "$libdir" && \
+	          func_append dlpreconveniencelibs " $dir/$old_library"
+	      fi
+	    ;;
+	    * )
+	      # Prefer using a static library (so that no silly _DYNAMIC symbols
+	      # are required to link).
+	      if test -n "$old_library"; then
+	        func_append newdlprefiles " $dir/$old_library"
+	        # Keep a list of preopened convenience libraries to check
+	        # that they are being used correctly in the link pass.
+	        test -z "$libdir" && \
+	          func_append dlpreconveniencelibs " $dir/$old_library"
+	      # Otherwise, use the dlname, so that lt_dlopen finds it.
+	      elif test -n "$dlname"; then
+	        func_append newdlprefiles " $dir/$dlname"
+	      else
+	        func_append newdlprefiles " $dir/$linklib"
+	      fi
+	    ;;
+	  esac
+	fi # $pass = dlpreopen
+
+	if test -z "$libdir"; then
+	  # Link the convenience library
+	  if test lib = "$linkmode"; then
+	    deplibs="$dir/$old_library $deplibs"
+	  elif test prog,link = "$linkmode,$pass"; then
+	    compile_deplibs="$dir/$old_library $compile_deplibs"
+	    finalize_deplibs="$dir/$old_library $finalize_deplibs"
+	  else
+	    deplibs="$lib $deplibs" # used for prog,scan pass
+	  fi
+	  continue
+	fi
+
+
+	if test prog = "$linkmode" && test link != "$pass"; then
+	  func_append newlib_search_path " $ladir"
+	  deplibs="$lib $deplibs"
+
+	  linkalldeplibs=false
+	  if test no != "$link_all_deplibs" || test -z "$library_names" ||
+	     test no = "$build_libtool_libs"; then
+	    linkalldeplibs=:
+	  fi
+
+	  tmp_libs=
+	  for deplib in $dependency_libs; do
+	    case $deplib in
+	    -L*) func_stripname '-L' '' "$deplib"
+	         func_resolve_sysroot "$func_stripname_result"
+	         func_append newlib_search_path " $func_resolve_sysroot_result"
+		 ;;
+	    esac
+	    # Need to link against all dependency_libs?
+	    if $linkalldeplibs; then
+	      deplibs="$deplib $deplibs"
+	    else
+	      # Need to hardcode shared library paths
+	      # or/and link against static libraries
+	      newdependency_libs="$deplib $newdependency_libs"
+	    fi
+	    if $opt_preserve_dup_deps; then
+	      case "$tmp_libs " in
+	      *" $deplib "*) func_append specialdeplibs " $deplib" ;;
+	      esac
+	    fi
+	    func_append tmp_libs " $deplib"
+	  done # for deplib
+	  continue
+	fi # $linkmode = prog...
+
+	if test prog,link = "$linkmode,$pass"; then
+	  if test -n "$library_names" &&
+	     { { test no = "$prefer_static_libs" ||
+	         test built,yes = "$prefer_static_libs,$installed"; } ||
+	       test -z "$old_library"; }; then
+	    # We need to hardcode the library path
+	    if test -n "$shlibpath_var" && test -z "$avoidtemprpath"; then
+	      # Make sure the rpath contains only unique directories.
+	      case $temp_rpath: in
+	      *"$absdir:"*) ;;
+	      *) func_append temp_rpath "$absdir:" ;;
+	      esac
+	    fi
+
+	    # Hardcode the library path.
+	    # Skip directories that are in the system default run-time
+	    # search path.
+	    case " $sys_lib_dlsearch_path " in
+	    *" $absdir "*) ;;
+	    *)
+	      case "$compile_rpath " in
+	      *" $absdir "*) ;;
+	      *) func_append compile_rpath " $absdir" ;;
+	      esac
+	      ;;
+	    esac
+	    case " $sys_lib_dlsearch_path " in
+	    *" $libdir "*) ;;
+	    *)
+	      case "$finalize_rpath " in
+	      *" $libdir "*) ;;
+	      *) func_append finalize_rpath " $libdir" ;;
+	      esac
+	      ;;
+	    esac
+	  fi # $linkmode,$pass = prog,link...
+
+	  if $alldeplibs &&
+	     { test pass_all = "$deplibs_check_method" ||
+	       { test yes = "$build_libtool_libs" &&
+		 test -n "$library_names"; }; }; then
+	    # We only need to search for static libraries
+	    continue
+	  fi
+	fi
+
+	link_static=no # Whether the deplib will be linked statically
+	use_static_libs=$prefer_static_libs
+	if test built = "$use_static_libs" && test yes = "$installed"; then
+	  use_static_libs=no
+	fi
+	if test -n "$library_names" &&
+	   { test no = "$use_static_libs" || test -z "$old_library"; }; then
+	  case $host in
+	  *cygwin* | *mingw* | *cegcc* | *os2*)
+	      # No point in relinking DLLs because paths are not encoded
+	      func_append notinst_deplibs " $lib"
+	      need_relink=no
+	    ;;
+	  *)
+	    if test no = "$installed"; then
+	      func_append notinst_deplibs " $lib"
+	      need_relink=yes
+	    fi
+	    ;;
+	  esac
+	  # This is a shared library
+
+	  # Warn about portability, can't link against -module's on some
+	  # systems (darwin).  Don't bleat about dlopened modules though!
+	  dlopenmodule=
+	  for dlpremoduletest in $dlprefiles; do
+	    if test "X$dlpremoduletest" = "X$lib"; then
+	      dlopenmodule=$dlpremoduletest
+	      break
+	    fi
+	  done
+	  if test -z "$dlopenmodule" && test yes = "$shouldnotlink" && test link = "$pass"; then
+	    echo
+	    if test prog = "$linkmode"; then
+	      $ECHO "*** Warning: Linking the executable $output against the loadable module"
+	    else
+	      $ECHO "*** Warning: Linking the shared library $output against the loadable module"
+	    fi
+	    $ECHO "*** $linklib is not portable!"
+	  fi
+	  if test lib = "$linkmode" &&
+	     test yes = "$hardcode_into_libs"; then
+	    # Hardcode the library path.
+	    # Skip directories that are in the system default run-time
+	    # search path.
+	    case " $sys_lib_dlsearch_path " in
+	    *" $absdir "*) ;;
+	    *)
+	      case "$compile_rpath " in
+	      *" $absdir "*) ;;
+	      *) func_append compile_rpath " $absdir" ;;
+	      esac
+	      ;;
+	    esac
+	    case " $sys_lib_dlsearch_path " in
+	    *" $libdir "*) ;;
+	    *)
+	      case "$finalize_rpath " in
+	      *" $libdir "*) ;;
+	      *) func_append finalize_rpath " $libdir" ;;
+	      esac
+	      ;;
+	    esac
+	  fi
+
+	  if test -n "$old_archive_from_expsyms_cmds"; then
+	    # figure out the soname
+	    set dummy $library_names
+	    shift
+	    realname=$1
+	    shift
+	    libname=`eval "\\$ECHO \"$libname_spec\""`
+	    # use dlname if we got it. it's perfectly good, no?
+	    if test -n "$dlname"; then
+	      soname=$dlname
+	    elif test -n "$soname_spec"; then
+	      # bleh windows
+	      case $host in
+	      *cygwin* | mingw* | *cegcc* | *os2*)
+	        func_arith $current - $age
+		major=$func_arith_result
+		versuffix=-$major
+		;;
+	      esac
+	      eval soname=\"$soname_spec\"
+	    else
+	      soname=$realname
+	    fi
+
+	    # Make a new name for the extract_expsyms_cmds to use
+	    soroot=$soname
+	    func_basename "$soroot"
+	    soname=$func_basename_result
+	    func_stripname 'lib' '.dll' "$soname"
+	    newlib=libimp-$func_stripname_result.a
+
+	    # If the library has no export list, then create one now
+	    if test -f "$output_objdir/$soname-def"; then :
+	    else
+	      func_verbose "extracting exported symbol list from '$soname'"
+	      func_execute_cmds "$extract_expsyms_cmds" 'exit $?'
+	    fi
+
+	    # Create $newlib
+	    if test -f "$output_objdir/$newlib"; then :; else
+	      func_verbose "generating import library for '$soname'"
+	      func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?'
+	    fi
+	    # make sure the library variables are pointing to the new library
+	    dir=$output_objdir
+	    linklib=$newlib
+	  fi # test -n "$old_archive_from_expsyms_cmds"
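+	  # Illustrative note (added comment, not upstream text): for a DLL
+	  # soname such as libfoo-1.dll the stripping above yields
+	  # newlib=libimp-foo-1.a in $output_objdir, and the remaining link
+	  # steps use that import library in place of the original $linklib.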
+
+	  if test prog = "$linkmode" || test relink != "$opt_mode"; then
+	    add_shlibpath=
+	    add_dir=
+	    add=
+	    lib_linked=yes
+	    case $hardcode_action in
+	    immediate | unsupported)
+	      if test no = "$hardcode_direct"; then
+		add=$dir/$linklib
+		case $host in
+		  *-*-sco3.2v5.0.[024]*) add_dir=-L$dir ;;
+		  *-*-sysv4*uw2*) add_dir=-L$dir ;;
+		  *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \
+		    *-*-unixware7*) add_dir=-L$dir ;;
+		  *-*-darwin* )
+		    # if the lib is a (non-dlopened) module then we cannot
+		    # link against it, someone is ignoring the earlier warnings
+		    if /usr/bin/file -L $add 2> /dev/null |
+			 $GREP ": [^:]* bundle" >/dev/null; then
+		      if test "X$dlopenmodule" != "X$lib"; then
+			$ECHO "*** Warning: lib $linklib is a module, not a shared library"
+			if test -z "$old_library"; then
+			  echo
+			  echo "*** And there doesn't seem to be a static archive available"
+			  echo "*** The link will probably fail, sorry"
+			else
+			  add=$dir/$old_library
+			fi
+		      elif test -n "$old_library"; then
+			add=$dir/$old_library
+		      fi
+		    fi
+		esac
+	      elif test no = "$hardcode_minus_L"; then
+		case $host in
+		*-*-sunos*) add_shlibpath=$dir ;;
+		esac
+		add_dir=-L$dir
+		add=-l$name
+	      elif test no = "$hardcode_shlibpath_var"; then
+		add_shlibpath=$dir
+		add=-l$name
+	      else
+		lib_linked=no
+	      fi
+	      ;;
+	    relink)
+	      if test yes = "$hardcode_direct" &&
+	         test no = "$hardcode_direct_absolute"; then
+		add=$dir/$linklib
+	      elif test yes = "$hardcode_minus_L"; then
+		add_dir=-L$absdir
+		# Try looking first in the location we're being installed to.
+		if test -n "$inst_prefix_dir"; then
+		  case $libdir in
+		    [\\/]*)
+		      func_append add_dir " -L$inst_prefix_dir$libdir"
+		      ;;
+		  esac
+		fi
+		add=-l$name
+	      elif test yes = "$hardcode_shlibpath_var"; then
+		add_shlibpath=$dir
+		add=-l$name
+	      else
+		lib_linked=no
+	      fi
+	      ;;
+	    *) lib_linked=no ;;
+	    esac
+
+	    if test yes != "$lib_linked"; then
+	      func_fatal_configuration "unsupported hardcode properties"
+	    fi
+
+	    if test -n "$add_shlibpath"; then
+	      case :$compile_shlibpath: in
+	      *":$add_shlibpath:"*) ;;
+	      *) func_append compile_shlibpath "$add_shlibpath:" ;;
+	      esac
+	    fi
+	    if test prog = "$linkmode"; then
+	      test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs"
+	      test -n "$add" && compile_deplibs="$add $compile_deplibs"
+	    else
+	      test -n "$add_dir" && deplibs="$add_dir $deplibs"
+	      test -n "$add" && deplibs="$add $deplibs"
+	      if test yes != "$hardcode_direct" &&
+		 test yes != "$hardcode_minus_L" &&
+		 test yes = "$hardcode_shlibpath_var"; then
+		case :$finalize_shlibpath: in
+		*":$libdir:"*) ;;
+		*) func_append finalize_shlibpath "$libdir:" ;;
+		esac
+	      fi
+	    fi
+	  fi
+
+	  if test prog = "$linkmode" || test relink = "$opt_mode"; then
+	    add_shlibpath=
+	    add_dir=
+	    add=
+	    # Finalize command for both is simple: just hardcode it.
+	    if test yes = "$hardcode_direct" &&
+	       test no = "$hardcode_direct_absolute"; then
+	      add=$libdir/$linklib
+	    elif test yes = "$hardcode_minus_L"; then
+	      add_dir=-L$libdir
+	      add=-l$name
+	    elif test yes = "$hardcode_shlibpath_var"; then
+	      case :$finalize_shlibpath: in
+	      *":$libdir:"*) ;;
+	      *) func_append finalize_shlibpath "$libdir:" ;;
+	      esac
+	      add=-l$name
+	    elif test yes = "$hardcode_automatic"; then
+	      if test -n "$inst_prefix_dir" &&
+		 test -f "$inst_prefix_dir$libdir/$linklib"; then
+		add=$inst_prefix_dir$libdir/$linklib
+	      else
+		add=$libdir/$linklib
+	      fi
+	    else
+	      # We cannot seem to hardcode it, guess we'll fake it.
+	      add_dir=-L$libdir
+	      # Try looking first in the location we're being installed to.
+	      if test -n "$inst_prefix_dir"; then
+		case $libdir in
+		  [\\/]*)
+		    func_append add_dir " -L$inst_prefix_dir$libdir"
+		    ;;
+		esac
+	      fi
+	      add=-l$name
+	    fi
+
+	    if test prog = "$linkmode"; then
+	      test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs"
+	      test -n "$add" && finalize_deplibs="$add $finalize_deplibs"
+	    else
+	      test -n "$add_dir" && deplibs="$add_dir $deplibs"
+	      test -n "$add" && deplibs="$add $deplibs"
+	    fi
+	  fi
+	elif test prog = "$linkmode"; then
+	  # Here we assume that one of hardcode_direct or hardcode_minus_L
+	  # is not unsupported.  This is valid on all known static and
+	  # shared platforms.
+	  if test unsupported != "$hardcode_direct"; then
+	    test -n "$old_library" && linklib=$old_library
+	    compile_deplibs="$dir/$linklib $compile_deplibs"
+	    finalize_deplibs="$dir/$linklib $finalize_deplibs"
+	  else
+	    compile_deplibs="-l$name -L$dir $compile_deplibs"
+	    finalize_deplibs="-l$name -L$dir $finalize_deplibs"
+	  fi
+	elif test yes = "$build_libtool_libs"; then
+	  # Not a shared library
+	  if test pass_all != "$deplibs_check_method"; then
+	    # We're trying to link a shared library against a static one
+	    # but the system doesn't support it.
+
+	    # Just print a warning and add the library to dependency_libs so
+	    # that the program can be linked against the static library.
+	    echo
+	    $ECHO "*** Warning: This system cannot link to static lib archive $lib."
+	    echo "*** I have the capability to make that library automatically link in when"
+	    echo "*** you link to this library.  But I can only do this if you have a"
+	    echo "*** shared version of the library, which you do not appear to have."
+	    if test yes = "$module"; then
+	      echo "*** But as you try to build a module library, libtool will still create "
+	      echo "*** a static module, that should work as long as the dlopening application"
+	      echo "*** is linked with the -dlopen flag to resolve symbols at runtime."
+	      if test -z "$global_symbol_pipe"; then
+		echo
+		echo "*** However, this would only work if libtool was able to extract symbol"
+		echo "*** lists from a program, using 'nm' or equivalent, but libtool could"
+		echo "*** not find such a program.  So, this module is probably useless."
+		echo "*** 'nm' from GNU binutils and a full rebuild may help."
+	      fi
+	      if test no = "$build_old_libs"; then
+		build_libtool_libs=module
+		build_old_libs=yes
+	      else
+		build_libtool_libs=no
+	      fi
+	    fi
+	  else
+	    deplibs="$dir/$old_library $deplibs"
+	    link_static=yes
+	  fi
+	fi # link shared/static library?
+
+	if test lib = "$linkmode"; then
+	  if test -n "$dependency_libs" &&
+	     { test yes != "$hardcode_into_libs" ||
+	       test yes = "$build_old_libs" ||
+	       test yes = "$link_static"; }; then
+	    # Extract -R from dependency_libs
+	    temp_deplibs=
+	    for libdir in $dependency_libs; do
+	      case $libdir in
+	      -R*) func_stripname '-R' '' "$libdir"
+	           temp_xrpath=$func_stripname_result
+		   case " $xrpath " in
+		   *" $temp_xrpath "*) ;;
+		   *) func_append xrpath " $temp_xrpath";;
+		   esac;;
+	      *) func_append temp_deplibs " $libdir";;
+	      esac
+	    done
+	    dependency_libs=$temp_deplibs
+	  fi
+
+	  func_append newlib_search_path " $absdir"
+	  # Link against this library
+	  test no = "$link_static" && newdependency_libs="$abs_ladir/$laname $newdependency_libs"
+	  # ... and its dependency_libs
+	  tmp_libs=
+	  for deplib in $dependency_libs; do
+	    newdependency_libs="$deplib $newdependency_libs"
+	    case $deplib in
+              -L*) func_stripname '-L' '' "$deplib"
+                   func_resolve_sysroot "$func_stripname_result";;
+              *) func_resolve_sysroot "$deplib" ;;
+            esac
+	    if $opt_preserve_dup_deps; then
+	      case "$tmp_libs " in
+	      *" $func_resolve_sysroot_result "*)
+                func_append specialdeplibs " $func_resolve_sysroot_result" ;;
+	      esac
+	    fi
+	    func_append tmp_libs " $func_resolve_sysroot_result"
+	  done
+
+	  if test no != "$link_all_deplibs"; then
+	    # Add the search paths of all dependency libraries
+	    for deplib in $dependency_libs; do
+	      path=
+	      case $deplib in
+	      -L*) path=$deplib ;;
+	      *.la)
+	        func_resolve_sysroot "$deplib"
+	        deplib=$func_resolve_sysroot_result
+	        func_dirname "$deplib" "" "."
+		dir=$func_dirname_result
+		# We need an absolute path.
+		case $dir in
+		[\\/]* | [A-Za-z]:[\\/]*) absdir=$dir ;;
+		*)
+		  absdir=`cd "$dir" && pwd`
+		  if test -z "$absdir"; then
+		    func_warning "cannot determine absolute directory name of '$dir'"
+		    absdir=$dir
+		  fi
+		  ;;
+		esac
+		if $GREP "^installed=no" $deplib > /dev/null; then
+		case $host in
+		*-*-darwin*)
+		  depdepl=
+		  eval deplibrary_names=`$SED -n -e 's/^library_names=\(.*\)$/\1/p' $deplib`
+		  if test -n "$deplibrary_names"; then
+		    for tmp in $deplibrary_names; do
+		      depdepl=$tmp
+		    done
+		    if test -f "$absdir/$objdir/$depdepl"; then
+		      depdepl=$absdir/$objdir/$depdepl
+		      darwin_install_name=`$OTOOL -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'`
+                      if test -z "$darwin_install_name"; then
+                          darwin_install_name=`$OTOOL64 -L $depdepl  | awk '{if (NR == 2) {print $1;exit}}'`
+                      fi
+		      func_append compiler_flags " $wl-dylib_file $wl$darwin_install_name:$depdepl"
+		      func_append linker_flags " -dylib_file $darwin_install_name:$depdepl"
+		      path=
+		    fi
+		  fi
+		  ;;
+		*)
+		  path=-L$absdir/$objdir
+		  ;;
+		esac
+		else
+		  eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
+		  test -z "$libdir" && \
+		    func_fatal_error "'$deplib' is not a valid libtool archive"
+		  test "$absdir" != "$libdir" && \
+		    func_warning "'$deplib' seems to be moved"
+
+		  path=-L$absdir
+		fi
+		;;
+	      esac
+	      case " $deplibs " in
+	      *" $path "*) ;;
+	      *) deplibs="$path $deplibs" ;;
+	      esac
+	    done
+	  fi # link_all_deplibs != no
+	fi # linkmode = lib
+      done # for deplib in $libs
+      if test link = "$pass"; then
+	if test prog = "$linkmode"; then
+	  compile_deplibs="$new_inherited_linker_flags $compile_deplibs"
+	  finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs"
+	else
+	  compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	fi
+      fi
+      dependency_libs=$newdependency_libs
+      if test dlpreopen = "$pass"; then
+	# Link the dlpreopened libraries before other libraries
+	for deplib in $save_deplibs; do
+	  deplibs="$deplib $deplibs"
+	done
+      fi
+      if test dlopen != "$pass"; then
+	test conv = "$pass" || {
+	  # Make sure lib_search_path contains only unique directories.
+	  lib_search_path=
+	  for dir in $newlib_search_path; do
+	    case "$lib_search_path " in
+	    *" $dir "*) ;;
+	    *) func_append lib_search_path " $dir" ;;
+	    esac
+	  done
+	  newlib_search_path=
+	}
+
+	if test prog,link = "$linkmode,$pass"; then
+	  vars="compile_deplibs finalize_deplibs"
+	else
+	  vars=deplibs
+	fi
+	for var in $vars dependency_libs; do
+	  # Add libraries to $var in reverse order
+	  eval tmp_libs=\"\$$var\"
+	  new_libs=
+	  for deplib in $tmp_libs; do
+	    # FIXME: Pedantically, this is the right thing to do, so
+	    #        that some nasty dependency loop isn't accidentally
+	    #        broken:
+	    #new_libs="$deplib $new_libs"
+	    # Pragmatically, this seems to cause very few problems in
+	    # practice:
+	    case $deplib in
+	    -L*) new_libs="$deplib $new_libs" ;;
+	    -R*) ;;
+	    *)
+	      # And here is the reason: when a library appears more
+	      # than once as an explicit dependence of a library, or
+	      # is implicitly linked in more than once by the
+	      # compiler, it is considered special, and multiple
+	      # occurrences thereof are not removed.  Compare this
+	      # with having the same library being listed as a
+	      # dependency of multiple other libraries: in this case,
+	      # we know (pedantically, we assume) the library does not
+	      # need to be listed more than once, so we keep only the
+	      # last copy.  This is not always right, but it is rare
+	      # enough that we require users that really mean to play
+	      # such unportable linking tricks to link the library
+	      # using -Wl,-lname, so that libtool does not consider it
+	      # for duplicate removal.
+	      case " $specialdeplibs " in
+	      *" $deplib "*) new_libs="$deplib $new_libs" ;;
+	      *)
+		case " $new_libs " in
+		*" $deplib "*) ;;
+		*) new_libs="$deplib $new_libs" ;;
+		esac
+		;;
+	      esac
+	      ;;
+	    esac
+	  done
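+	  # Illustrative note (added comment): if $tmp_libs were
+	  # '-lbar -lfoo -lbar' and -lbar is not listed in $specialdeplibs,
+	  # the loop above keeps only one copy, giving new_libs='-lfoo -lbar';
+	  # entries in $specialdeplibs keep every occurrence.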
+	  tmp_libs=
+	  for deplib in $new_libs; do
+	    case $deplib in
+	    -L*)
+	      case " $tmp_libs " in
+	      *" $deplib "*) ;;
+	      *) func_append tmp_libs " $deplib" ;;
+	      esac
+	      ;;
+	    *) func_append tmp_libs " $deplib" ;;
+	    esac
+	  done
+	  eval $var=\"$tmp_libs\"
+	done # for var
+      fi
+
+      # Add Sun CC postdeps if required:
+      test CXX = "$tagname" && {
+        case $host_os in
+        linux*)
+          case `$CC -V 2>&1 | sed 5q` in
+          *Sun\ C*) # Sun C++ 5.9
+            func_suncc_cstd_abi
+
+            if test no != "$suncc_use_cstd_abi"; then
+              func_append postdeps ' -library=Cstd -library=Crun'
+            fi
+            ;;
+          esac
+          ;;
+
+        solaris*)
+          func_cc_basename "$CC"
+          case $func_cc_basename_result in
+          CC* | sunCC*)
+            func_suncc_cstd_abi
+
+            if test no != "$suncc_use_cstd_abi"; then
+              func_append postdeps ' -library=Cstd -library=Crun'
+            fi
+            ;;
+          esac
+          ;;
+        esac
+      }
+
+      # Last step: remove runtime libs from dependency_libs
+      # (they stay in deplibs)
+      tmp_libs=
+      for i in $dependency_libs; do
+	case " $predeps $postdeps $compiler_lib_search_path " in
+	*" $i "*)
+	  i=
+	  ;;
+	esac
+	if test -n "$i"; then
+	  func_append tmp_libs " $i"
+	fi
+      done
+      dependency_libs=$tmp_libs
+    done # for pass
+    if test prog = "$linkmode"; then
+      dlfiles=$newdlfiles
+    fi
+    if test prog = "$linkmode" || test lib = "$linkmode"; then
+      dlprefiles=$newdlprefiles
+    fi
+
+    case $linkmode in
+    oldlib)
+      if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then
+	func_warning "'-dlopen' is ignored for archives"
+      fi
+
+      case " $deplibs" in
+      *\ -l* | *\ -L*)
+	func_warning "'-l' and '-L' are ignored for archives" ;;
+      esac
+
+      test -n "$rpath" && \
+	func_warning "'-rpath' is ignored for archives"
+
+      test -n "$xrpath" && \
+	func_warning "'-R' is ignored for archives"
+
+      test -n "$vinfo" && \
+	func_warning "'-version-info/-version-number' is ignored for archives"
+
+      test -n "$release" && \
+	func_warning "'-release' is ignored for archives"
+
+      test -n "$export_symbols$export_symbols_regex" && \
+	func_warning "'-export-symbols' is ignored for archives"
+
+      # Now set the variables for building old libraries.
+      build_libtool_libs=no
+      oldlibs=$output
+      func_append objs "$old_deplibs"
+      ;;
+
+    lib)
+      # Make sure we only generate libraries of the form 'libNAME.la'.
+      case $outputname in
+      lib*)
+	func_stripname 'lib' '.la' "$outputname"
+	name=$func_stripname_result
+	eval shared_ext=\"$shrext_cmds\"
+	eval libname=\"$libname_spec\"
+	;;
+      *)
+	test no = "$module" \
+	  && func_fatal_help "libtool library '$output' must begin with 'lib'"
+
+	if test no != "$need_lib_prefix"; then
+	  # Add the "lib" prefix for modules if required
+	  func_stripname '' '.la' "$outputname"
+	  name=$func_stripname_result
+	  eval shared_ext=\"$shrext_cmds\"
+	  eval libname=\"$libname_spec\"
+	else
+	  func_stripname '' '.la' "$outputname"
+	  libname=$func_stripname_result
+	fi
+	;;
+      esac
+
+      if test -n "$objs"; then
+	if test pass_all != "$deplibs_check_method"; then
+	  func_fatal_error "cannot build libtool library '$output' from non-libtool objects on this host:$objs"
+	else
+	  echo
+	  $ECHO "*** Warning: Linking the shared library $output against the non-libtool"
+	  $ECHO "*** objects $objs is not portable!"
+	  func_append libobjs " $objs"
+	fi
+      fi
+
+      test no = "$dlself" \
+	|| func_warning "'-dlopen self' is ignored for libtool libraries"
+
+      set dummy $rpath
+      shift
+      test 1 -lt "$#" \
+	&& func_warning "ignoring multiple '-rpath's for a libtool library"
+
+      install_libdir=$1
+
+      oldlibs=
+      if test -z "$rpath"; then
+	if test yes = "$build_libtool_libs"; then
+	  # Building a libtool convenience library.
+	  # Some compilers have problems with a '.al' extension so
+	  # convenience libraries should have the same extension that an
+	  # archive normally would have.
+	  oldlibs="$output_objdir/$libname.$libext $oldlibs"
+	  build_libtool_libs=convenience
+	  build_old_libs=yes
+	fi
+
+	test -n "$vinfo" && \
+	  func_warning "'-version-info/-version-number' is ignored for convenience libraries"
+
+	test -n "$release" && \
+	  func_warning "'-release' is ignored for convenience libraries"
+      else
+
+	# Parse the version information argument.
+	save_ifs=$IFS; IFS=:
+	set dummy $vinfo 0 0 0
+	shift
+	IFS=$save_ifs
+
+	test -n "$7" && \
+	  func_fatal_help "too many parameters to '-version-info'"
+
+	# convert absolute version numbers to libtool ages
+	# this retains compatibility with .la files and attempts
+	# to make the code below a bit more comprehensible
+
+	case $vinfo_number in
+	yes)
+	  number_major=$1
+	  number_minor=$2
+	  number_revision=$3
+	  #
+	  # There are really only two kinds -- those that
+	  # use the current revision as the major version
+	  # and those that subtract age and use age as
+	  # a minor version.  But, then there is irix
+	  # that has an extra 1 added just for fun
+	  #
+	  case $version_type in
+	  # correct linux to gnu/linux during the next big refactor
+	  darwin|freebsd-elf|linux|osf|windows|none)
+	    func_arith $number_major + $number_minor
+	    current=$func_arith_result
+	    age=$number_minor
+	    revision=$number_revision
+	    ;;
+	  freebsd-aout|qnx|sunos)
+	    current=$number_major
+	    revision=$number_minor
+	    age=0
+	    ;;
+	  irix|nonstopux)
+	    func_arith $number_major + $number_minor
+	    current=$func_arith_result
+	    age=$number_minor
+	    revision=$number_minor
+	    lt_irix_increment=no
+	    ;;
+	  esac
+	  ;;
+	no)
+	  current=$1
+	  revision=$2
+	  age=$3
+	  ;;
+	esac
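+	# Worked example (added comment): '-version-number 3:2:1' with a
+	# linux-style $version_type gives current=3+2=5, age=2, revision=1;
+	# '-version-info 5:1:2' is taken verbatim as current=5, revision=1,
+	# age=2.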
+
+	# Check that each of the things are valid numbers.
+	case $current in
+	0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
+	*)
+	  func_error "CURRENT '$current' must be a nonnegative integer"
+	  func_fatal_error "'$vinfo' is not valid version information"
+	  ;;
+	esac
+
+	case $revision in
+	0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
+	*)
+	  func_error "REVISION '$revision' must be a nonnegative integer"
+	  func_fatal_error "'$vinfo' is not valid version information"
+	  ;;
+	esac
+
+	case $age in
+	0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
+	*)
+	  func_error "AGE '$age' must be a nonnegative integer"
+	  func_fatal_error "'$vinfo' is not valid version information"
+	  ;;
+	esac
+
+	if test "$age" -gt "$current"; then
+	  func_error "AGE '$age' is greater than the current interface number '$current'"
+	  func_fatal_error "'$vinfo' is not valid version information"
+	fi
+
+	# Calculate the version variables.
+	major=
+	versuffix=
+	verstring=
+	case $version_type in
+	none) ;;
+
+	darwin)
+	  # Like Linux, but with the current version available in
+	  # verstring for coding it into the library header
+	  func_arith $current - $age
+	  major=.$func_arith_result
+	  versuffix=$major.$age.$revision
+	  # Darwin ld doesn't like 0 for these options...
+	  func_arith $current + 1
+	  minor_current=$func_arith_result
+	  xlcverstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision"
+	  verstring="-compatibility_version $minor_current -current_version $minor_current.$revision"
+          # On Darwin, some other compilers (e.g. nagfor) need the $wl-prefixed version flags.
+          case $CC in
+              nagfor*)
+                  verstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision"
+                  ;;
+              *)
+                  verstring="-compatibility_version $minor_current -current_version $minor_current.$revision"
+                  ;;
+          esac
+	  ;;
+
+	freebsd-aout)
+	  major=.$current
+	  versuffix=.$current.$revision
+	  ;;
+
+	freebsd-elf)
+	  func_arith $current - $age
+	  major=.$func_arith_result
+	  versuffix=$major.$age.$revision
+	  ;;
+
+	irix | nonstopux)
+	  if test no = "$lt_irix_increment"; then
+	    func_arith $current - $age
+	  else
+	    func_arith $current - $age + 1
+	  fi
+	  major=$func_arith_result
+
+	  case $version_type in
+	    nonstopux) verstring_prefix=nonstopux ;;
+	    *)         verstring_prefix=sgi ;;
+	  esac
+	  verstring=$verstring_prefix$major.$revision
+
+	  # Add in all the interfaces that we are compatible with.
+	  loop=$revision
+	  while test 0 -ne "$loop"; do
+	    func_arith $revision - $loop
+	    iface=$func_arith_result
+	    func_arith $loop - 1
+	    loop=$func_arith_result
+	    verstring=$verstring_prefix$major.$iface:$verstring
+	  done
+
+	  # Before this point, $major must not contain '.'.
+	  major=.$major
+	  versuffix=$major.$revision
+	  ;;
+
+	linux) # correct to gnu/linux during the next big refactor
+	  func_arith $current - $age
+	  major=.$func_arith_result
+	  versuffix=$major.$age.$revision
+	  ;;
+
+	osf)
+	  func_arith $current - $age
+	  major=.$func_arith_result
+	  versuffix=.$current.$age.$revision
+	  verstring=$current.$age.$revision
+
+	  # Add in all the interfaces that we are compatible with.
+	  loop=$age
+	  while test 0 -ne "$loop"; do
+	    func_arith $current - $loop
+	    iface=$func_arith_result
+	    func_arith $loop - 1
+	    loop=$func_arith_result
+	    verstring=$verstring:$iface.0
+	  done
+
+	  # Make executables depend on our current version.
+	  func_append verstring ":$current.0"
+	  ;;
+
+	qnx)
+	  major=.$current
+	  versuffix=.$current
+	  ;;
+
+	sco)
+	  major=.$current
+	  versuffix=.$current
+	  ;;
+
+	sunos)
+	  major=.$current
+	  versuffix=.$current.$revision
+	  ;;
+
+	windows)
+	  # Use '-' rather than '.', since we only want one
+	  # extension on DOS 8.3 file systems.
+	  func_arith $current - $age
+	  major=$func_arith_result
+	  versuffix=-$major
+	  ;;
+
+	*)
+	  func_fatal_configuration "unknown library version type '$version_type'"
+	  ;;
+	esac
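+	# Worked example (added comment): with current=5, age=2, revision=1 the
+	# 'linux' branch above gives major=.3 and versuffix=.3.2.1
+	# (i.e. libNAME.so.3.2.1), while 'windows' gives major=3 and
+	# versuffix=-3.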
+
+	# Clear the version info if we defaulted, and they specified a release.
+	if test -z "$vinfo" && test -n "$release"; then
+	  major=
+	  case $version_type in
+	  darwin)
+	    # we can't check for "0.0" in archive_cmds due to quoting
+	    # problems, so we reset it completely
+	    verstring=
+	    ;;
+	  *)
+	    verstring=0.0
+	    ;;
+	  esac
+	  if test no = "$need_version"; then
+	    versuffix=
+	  else
+	    versuffix=.0.0
+	  fi
+	fi
+
+	# Remove version info from name if versioning should be avoided
+	if test yes,no = "$avoid_version,$need_version"; then
+	  major=
+	  versuffix=
+	  verstring=
+	fi
+
+	# Check to see if the archive will have undefined symbols.
+	if test yes = "$allow_undefined"; then
+	  if test unsupported = "$allow_undefined_flag"; then
+	    if test yes = "$build_old_libs"; then
+	      func_warning "undefined symbols not allowed in $host shared libraries; building static only"
+	      build_libtool_libs=no
+	    else
+	      func_fatal_error "can't build $host shared library unless -no-undefined is specified"
+	    fi
+	  fi
+	else
+	  # Don't allow undefined symbols.
+	  allow_undefined_flag=$no_undefined_flag
+	fi
+
+      fi
+
+      func_generate_dlsyms "$libname" "$libname" :
+      func_append libobjs " $symfileobj"
+      test " " = "$libobjs" && libobjs=
+
+      if test relink != "$opt_mode"; then
+	# Remove our outputs, but don't remove object files since they
+	# may have been created when compiling PIC objects.
+	removelist=
+	tempremovelist=`$ECHO "$output_objdir/*"`
+	for p in $tempremovelist; do
+	  case $p in
+	    *.$objext | *.gcno)
+	       ;;
+	    $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/$libname$release.*)
+	       if test -n "$precious_files_regex"; then
+		 if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1
+		 then
+		   continue
+		 fi
+	       fi
+	       func_append removelist " $p"
+	       ;;
+	    *) ;;
+	  esac
+	done
+	test -n "$removelist" && \
+	  func_show_eval "${RM}r \$removelist"
+      fi
+
+      # Now set the variables for building old libraries.
+      if test yes = "$build_old_libs" && test convenience != "$build_libtool_libs"; then
+	func_append oldlibs " $output_objdir/$libname.$libext"
+
+	# Transform .lo files to .o files.
+	oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; $lo2o" | $NL2SP`
+      fi
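+      # Illustrative note (added comment): $lo2o rewrites each foo.lo entry to
+      # foo.o, and entries ending in .$libext are dropped, so $oldobjs lists
+      # the plain objects that go into the static archive.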
+
+      # Eliminate all temporary directories.
+      #for path in $notinst_path; do
+      #	lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"`
+      #	deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"`
+      #	dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"`
+      #done
+
+      if test -n "$xrpath"; then
+	# If the user specified any rpath flags, then add them.
+	temp_xrpath=
+	for libdir in $xrpath; do
+	  func_replace_sysroot "$libdir"
+	  func_append temp_xrpath " -R$func_replace_sysroot_result"
+	  case "$finalize_rpath " in
+	  *" $libdir "*) ;;
+	  *) func_append finalize_rpath " $libdir" ;;
+	  esac
+	done
+	if test yes != "$hardcode_into_libs" || test yes = "$build_old_libs"; then
+	  dependency_libs="$temp_xrpath $dependency_libs"
+	fi
+      fi
+
+      # Make sure dlfiles contains only unique files that won't be dlpreopened
+      old_dlfiles=$dlfiles
+      dlfiles=
+      for lib in $old_dlfiles; do
+	case " $dlprefiles $dlfiles " in
+	*" $lib "*) ;;
+	*) func_append dlfiles " $lib" ;;
+	esac
+      done
+
+      # Make sure dlprefiles contains only unique files
+      old_dlprefiles=$dlprefiles
+      dlprefiles=
+      for lib in $old_dlprefiles; do
+	case "$dlprefiles " in
+	*" $lib "*) ;;
+	*) func_append dlprefiles " $lib" ;;
+	esac
+      done
+
+      if test yes = "$build_libtool_libs"; then
+	if test -n "$rpath"; then
+	  case $host in
+	  *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*)
+	    # these systems don't actually have a c library (as such)!
+	    ;;
+	  *-*-rhapsody* | *-*-darwin1.[012])
+	    # Rhapsody C library is in the System framework
+	    func_append deplibs " System.ltframework"
+	    ;;
+	  *-*-netbsd*)
+	    # Don't link with libc until the a.out ld.so is fixed.
+	    ;;
+	  *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
+	    # Do not include libc due to us having libc/libc_r.
+	    ;;
+	  *-*-sco3.2v5* | *-*-sco5v6*)
+	    # Causes problems with __ctype
+	    ;;
+	  *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*)
+	    # Compiler inserts libc in the correct place for threads to work
+	    ;;
+	  *)
+	    # Add libc to deplibs on all other systems if necessary.
+	    if test yes = "$build_libtool_need_lc"; then
+	      func_append deplibs " -lc"
+	    fi
+	    ;;
+	  esac
+	fi
+
+	# Transform deplibs into only deplibs that can be linked in shared.
+	name_save=$name
+	libname_save=$libname
+	release_save=$release
+	versuffix_save=$versuffix
+	major_save=$major
+	# I'm not sure if I'm treating the release correctly.  I think
+	# release should show up in the -l (i.e. -lgmp5) so we don't want to
+	# add it in twice.  Is that correct?
+	release=
+	versuffix=
+	major=
+	newdeplibs=
+	droppeddeps=no
+	case $deplibs_check_method in
+	pass_all)
+	  # Don't check for shared/static.  Everything works.
+	  # This might be a little naive.  We might want to check
+	  # whether the library exists or not.  But this is on
+	  # osf3 & osf4 and I'm not really sure... Just
+	  # implementing what was already the behavior.
+	  newdeplibs=$deplibs
+	  ;;
+	test_compile)
+	  # This code stresses the "libraries are programs" paradigm to its
+	  # limits. Maybe even breaks it.  We compile a program, linking it
+	  # against the deplibs as a proxy for the library.  Then we can check
+	  # whether they linked in statically or dynamically with ldd.
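+	  # Illustrative note (added comment): if $deplibs contains -lfoo and
+	  # the ldd output of the conftest binary mentions a matching shared
+	  # name (e.g. an entry containing libfoo.so), -lfoo is kept in
+	  # $newdeplibs; otherwise it is dropped with a warning.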
+	  $opt_dry_run || $RM conftest.c
+	  cat > conftest.c <<EOF
+	  int main() { return 0; }
+EOF
+	  $opt_dry_run || $RM conftest
+	  if $LTCC $LTCFLAGS -o conftest conftest.c $deplibs; then
+	    ldd_output=`ldd conftest`
+	    for i in $deplibs; do
+	      case $i in
+	      -l*)
+		func_stripname -l '' "$i"
+		name=$func_stripname_result
+		if test yes = "$allow_libtool_libs_with_static_runtimes"; then
+		  case " $predeps $postdeps " in
+		  *" $i "*)
+		    func_append newdeplibs " $i"
+		    i=
+		    ;;
+		  esac
+		fi
+		if test -n "$i"; then
+		  libname=`eval "\\$ECHO \"$libname_spec\""`
+		  deplib_matches=`eval "\\$ECHO \"$library_names_spec\""`
+		  set dummy $deplib_matches; shift
+		  deplib_match=$1
+		  if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0; then
+		    func_append newdeplibs " $i"
+		  else
+		    droppeddeps=yes
+		    echo
+		    $ECHO "*** Warning: dynamic linker does not accept needed library $i."
+		    echo "*** I have the capability to make that library automatically link in when"
+		    echo "*** you link to this library.  But I can only do this if you have a"
+		    echo "*** shared version of the library, which I believe you do not have"
+		    echo "*** because a test_compile did reveal that the linker did not use it for"
+		    echo "*** its dynamic dependency list that programs get resolved with at runtime."
+		  fi
+		fi
+		;;
+	      *)
+		func_append newdeplibs " $i"
+		;;
+	      esac
+	    done
+	  else
+	    # Error occurred in the first compile.  Let's try to salvage
+	    # the situation: Compile a separate program for each library.
+	    for i in $deplibs; do
+	      case $i in
+	      -l*)
+		func_stripname -l '' "$i"
+		name=$func_stripname_result
+		$opt_dry_run || $RM conftest
+		if $LTCC $LTCFLAGS -o conftest conftest.c $i; then
+		  ldd_output=`ldd conftest`
+		  if test yes = "$allow_libtool_libs_with_static_runtimes"; then
+		    case " $predeps $postdeps " in
+		    *" $i "*)
+		      func_append newdeplibs " $i"
+		      i=
+		      ;;
+		    esac
+		  fi
+		  if test -n "$i"; then
+		    libname=`eval "\\$ECHO \"$libname_spec\""`
+		    deplib_matches=`eval "\\$ECHO \"$library_names_spec\""`
+		    set dummy $deplib_matches; shift
+		    deplib_match=$1
+		    if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0; then
+		      func_append newdeplibs " $i"
+		    else
+		      droppeddeps=yes
+		      echo
+		      $ECHO "*** Warning: dynamic linker does not accept needed library $i."
+		      echo "*** I have the capability to make that library automatically link in when"
+		      echo "*** you link to this library.  But I can only do this if you have a"
+		      echo "*** shared version of the library, which you do not appear to have"
+		      echo "*** because a test_compile did reveal that the linker did not use this one"
+		      echo "*** as a dynamic dependency that programs can get resolved with at runtime."
+		    fi
+		  fi
+		else
+		  droppeddeps=yes
+		  echo
+		  $ECHO "*** Warning!  Library $i is needed by this library but I was not able to"
+		  echo "*** make it link in!  You will probably need to install it or some"
+		  echo "*** library that it depends on before this library will be fully"
+		  echo "*** functional.  Installing it before continuing would be even better."
+		fi
+		;;
+	      *)
+		func_append newdeplibs " $i"
+		;;
+	      esac
+	    done
+	  fi
+	  ;;
+	file_magic*)
+	  set dummy $deplibs_check_method; shift
+	  file_magic_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
+	  for a_deplib in $deplibs; do
+	    case $a_deplib in
+	    -l*)
+	      func_stripname -l '' "$a_deplib"
+	      name=$func_stripname_result
+	      if test yes = "$allow_libtool_libs_with_static_runtimes"; then
+		case " $predeps $postdeps " in
+		*" $a_deplib "*)
+		  func_append newdeplibs " $a_deplib"
+		  a_deplib=
+		  ;;
+		esac
+	      fi
+	      if test -n "$a_deplib"; then
+		libname=`eval "\\$ECHO \"$libname_spec\""`
+		if test -n "$file_magic_glob"; then
+		  libnameglob=`func_echo_all "$libname" | $SED -e $file_magic_glob`
+		else
+		  libnameglob=$libname
+		fi
+		test yes = "$want_nocaseglob" && nocaseglob=`shopt -p nocaseglob`
+		for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
+		  if test yes = "$want_nocaseglob"; then
+		    shopt -s nocaseglob
+		    potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null`
+		    $nocaseglob
+		  else
+		    potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null`
+		  fi
+		  for potent_lib in $potential_libs; do
+		      # Follow soft links.
+		      if ls -lLd "$potent_lib" 2>/dev/null |
+			 $GREP " -> " >/dev/null; then
+			continue
+		      fi
+		      # The statement above tries to avoid entering an
+		      # endless loop below, in case of cyclic links.
+		      # We might still enter an endless loop, since a link
+		      # loop can be closed while we follow links,
+		      # but so what?
+		      potlib=$potent_lib
+		      while test -h "$potlib" 2>/dev/null; do
+			potliblink=`ls -ld $potlib | $SED 's/.* -> //'`
+			case $potliblink in
+			[\\/]* | [A-Za-z]:[\\/]*) potlib=$potliblink;;
+			*) potlib=`$ECHO "$potlib" | $SED 's|[^/]*$||'`"$potliblink";;
+			esac
+		      done
+		      if eval $file_magic_cmd \"\$potlib\" 2>/dev/null |
+			 $SED -e 10q |
+			 $EGREP "$file_magic_regex" > /dev/null; then
+			func_append newdeplibs " $a_deplib"
+			a_deplib=
+			break 2
+		      fi
+		  done
+		done
+	      fi
+	      if test -n "$a_deplib"; then
+		droppeddeps=yes
+		echo
+		$ECHO "*** Warning: linker path does not have real file for library $a_deplib."
+		echo "*** I have the capability to make that library automatically link in when"
+		echo "*** you link to this library.  But I can only do this if you have a"
+		echo "*** shared version of the library, which you do not appear to have"
+		echo "*** because I did check the linker path looking for a file starting"
+		if test -z "$potlib"; then
+		  $ECHO "*** with $libname but no candidates were found. (...for file magic test)"
+		else
+		  $ECHO "*** with $libname and none of the candidates passed a file format test"
+		  $ECHO "*** using a file magic. Last file checked: $potlib"
+		fi
+	      fi
+	      ;;
+	    *)
+	      # Add a -L argument.
+	      func_append newdeplibs " $a_deplib"
+	      ;;
+	    esac
+	  done # Gone through all deplibs.
+	  ;;
+	match_pattern*)
+	  set dummy $deplibs_check_method; shift
+	  match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
+	  for a_deplib in $deplibs; do
+	    case $a_deplib in
+	    -l*)
+	      func_stripname -l '' "$a_deplib"
+	      name=$func_stripname_result
+	      if test yes = "$allow_libtool_libs_with_static_runtimes"; then
+		case " $predeps $postdeps " in
+		*" $a_deplib "*)
+		  func_append newdeplibs " $a_deplib"
+		  a_deplib=
+		  ;;
+		esac
+	      fi
+	      if test -n "$a_deplib"; then
+		libname=`eval "\\$ECHO \"$libname_spec\""`
+		for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
+		  potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
+		  for potent_lib in $potential_libs; do
+		    potlib=$potent_lib # see symlink-check above in file_magic test
+		    if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \
+		       $EGREP "$match_pattern_regex" > /dev/null; then
+		      func_append newdeplibs " $a_deplib"
+		      a_deplib=
+		      break 2
+		    fi
+		  done
+		done
+	      fi
+	      if test -n "$a_deplib"; then
+		droppeddeps=yes
+		echo
+		$ECHO "*** Warning: linker path does not have real file for library $a_deplib."
+		echo "*** I have the capability to make that library automatically link in when"
+		echo "*** you link to this library.  But I can only do this if you have a"
+		echo "*** shared version of the library, which you do not appear to have"
+		echo "*** because I did check the linker path looking for a file starting"
+		if test -z "$potlib"; then
+		  $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)"
+		else
+		  $ECHO "*** with $libname and none of the candidates passed a file format test"
+		  $ECHO "*** using a regex pattern. Last file checked: $potlib"
+		fi
+	      fi
+	      ;;
+	    *)
+	      # Add a -L argument.
+	      func_append newdeplibs " $a_deplib"
+	      ;;
+	    esac
+	  done # Gone through all deplibs.
+	  ;;
+	none | unknown | *)
+	  newdeplibs=
+	  tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'`
+	  if test yes = "$allow_libtool_libs_with_static_runtimes"; then
+	    for i in $predeps $postdeps; do
+	      # can't use Xsed below, because $i might contain '/'
+	      tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s|$i||"`
+	    done
+	  fi
+	  case $tmp_deplibs in
+	  *[!\	\ ]*)
+	    echo
+	    if test none = "$deplibs_check_method"; then
+	      echo "*** Warning: inter-library dependencies are not supported in this platform."
+	    else
+	      echo "*** Warning: inter-library dependencies are not known to be supported."
+	    fi
+	    echo "*** All declared inter-library dependencies are being dropped."
+	    droppeddeps=yes
+	    ;;
+	  esac
+	  ;;
+	esac
+	versuffix=$versuffix_save
+	major=$major_save
+	release=$release_save
+	libname=$libname_save
+	name=$name_save
+
+	case $host in
+	*-*-rhapsody* | *-*-darwin1.[012])
+	  # On Rhapsody replace the C library with the System framework
+	  newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'`
+	  ;;
+	esac
+
+	if test yes = "$droppeddeps"; then
+	  if test yes = "$module"; then
+	    echo
+	    echo "*** Warning: libtool could not satisfy all declared inter-library"
+	    $ECHO "*** dependencies of module $libname.  Therefore, libtool will create"
+	    echo "*** a static module, that should work as long as the dlopening"
+	    echo "*** application is linked with the -dlopen flag."
+	    if test -z "$global_symbol_pipe"; then
+	      echo
+	      echo "*** However, this would only work if libtool was able to extract symbol"
+	      echo "*** lists from a program, using 'nm' or equivalent, but libtool could"
+	      echo "*** not find such a program.  So, this module is probably useless."
+	      echo "*** 'nm' from GNU binutils and a full rebuild may help."
+	    fi
+	    if test no = "$build_old_libs"; then
+	      oldlibs=$output_objdir/$libname.$libext
+	      build_libtool_libs=module
+	      build_old_libs=yes
+	    else
+	      build_libtool_libs=no
+	    fi
+	  else
+	    echo "*** The inter-library dependencies that have been dropped here will be"
+	    echo "*** automatically added whenever a program is linked with this library"
+	    echo "*** or is declared to -dlopen it."
+
+	    if test no = "$allow_undefined"; then
+	      echo
+	      echo "*** Since this library must not contain undefined symbols,"
+	      echo "*** because either the platform does not support them or"
+	      echo "*** it was explicitly requested with -no-undefined,"
+	      echo "*** libtool will only create a static version of it."
+	      if test no = "$build_old_libs"; then
+		oldlibs=$output_objdir/$libname.$libext
+		build_libtool_libs=module
+		build_old_libs=yes
+	      else
+		build_libtool_libs=no
+	      fi
+	    fi
+	  fi
+	fi
+	# Done checking deplibs!
+	deplibs=$newdeplibs
+      fi
+      # Time to change all our "foo.ltframework" stuff back to "-framework foo"
+      case $host in
+	*-*-darwin*)
+	  newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	  new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	  deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	  ;;
+      esac
+
+      # move library search paths that coincide with paths to not yet
+      # installed libraries to the beginning of the library search list
+      new_libs=
+      for path in $notinst_path; do
+	case " $new_libs " in
+	*" -L$path/$objdir "*) ;;
+	*)
+	  case " $deplibs " in
+	  *" -L$path/$objdir "*)
+	    func_append new_libs " -L$path/$objdir" ;;
+	  esac
+	  ;;
+	esac
+      done
+      for deplib in $deplibs; do
+	case $deplib in
+	-L*)
+	  case " $new_libs " in
+	  *" $deplib "*) ;;
+	  *) func_append new_libs " $deplib" ;;
+	  esac
+	  ;;
+	*) func_append new_libs " $deplib" ;;
+	esac
+      done
+      deplibs=$new_libs
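+      # Illustrative note (added comment, hypothetical paths): this moves
+      # entries such as '-L/build/proj/.libs' ahead of '-L/usr/lib' in
+      # $deplibs, so not-yet-installed copies of libraries are found first.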
+
+      # All the library-specific variables (install_libdir is set above).
+      library_names=
+      old_library=
+      dlname=
+
+      # Test again, we may have decided not to build it any more
+      if test yes = "$build_libtool_libs"; then
+	# Remove $wl instances when linking with ld.
+	# FIXME: should test the right _cmds variable.
+	case $archive_cmds in
+	  *\$LD\ *) wl= ;;
+        esac
+	if test yes = "$hardcode_into_libs"; then
+	  # Hardcode the library paths
+	  hardcode_libdirs=
+	  dep_rpath=
+	  rpath=$finalize_rpath
+	  test relink = "$opt_mode" || rpath=$compile_rpath$rpath
+	  for libdir in $rpath; do
+	    if test -n "$hardcode_libdir_flag_spec"; then
+	      if test -n "$hardcode_libdir_separator"; then
+		func_replace_sysroot "$libdir"
+		libdir=$func_replace_sysroot_result
+		if test -z "$hardcode_libdirs"; then
+		  hardcode_libdirs=$libdir
+		else
+		  # Just accumulate the unique libdirs.
+		  case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+		  *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+		    ;;
+		  *)
+		    func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
+		    ;;
+		  esac
+		fi
+	      else
+		eval flag=\"$hardcode_libdir_flag_spec\"
+		func_append dep_rpath " $flag"
+	      fi
+	    elif test -n "$runpath_var"; then
+	      case "$perm_rpath " in
+	      *" $libdir "*) ;;
+	      *) func_append perm_rpath " $libdir" ;;
+	      esac
+	    fi
+	  done
+	  # Substitute the hardcoded libdirs into the rpath.
+	  if test -n "$hardcode_libdir_separator" &&
+	     test -n "$hardcode_libdirs"; then
+	    libdir=$hardcode_libdirs
+	    eval "dep_rpath=\"$hardcode_libdir_flag_spec\""
+	  fi
+	  if test -n "$runpath_var" && test -n "$perm_rpath"; then
+	    # We should set the runpath_var.
+	    rpath=
+	    for dir in $perm_rpath; do
+	      func_append rpath "$dir:"
+	    done
+	    eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var"
+	  fi
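+	  # Illustrative note (added comment): when $runpath_var is e.g.
+	  # LD_RUN_PATH, the eval above prepends the collected directories
+	  # (colon-separated) to any value already in the environment and
+	  # exports the result.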
+	  test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs"
+	fi
+
+	shlibpath=$finalize_shlibpath
+	test relink = "$opt_mode" || shlibpath=$compile_shlibpath$shlibpath
+	if test -n "$shlibpath"; then
+	  eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var"
+	fi
+
+	# Get the real and link names of the library.
+	eval shared_ext=\"$shrext_cmds\"
+	eval library_names=\"$library_names_spec\"
+	set dummy $library_names
+	shift
+	realname=$1
+	shift
+
+	if test -n "$soname_spec"; then
+	  eval soname=\"$soname_spec\"
+	else
+	  soname=$realname
+	fi
+	if test -z "$dlname"; then
+	  dlname=$soname
+	fi
+
+	lib=$output_objdir/$realname
+	linknames=
+	for link
+	do
+	  func_append linknames " $link"
+	done
+
+	# Use standard objects if they are pic
+	test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP`
+	test "X$libobjs" = "X " && libobjs=
+
+	delfiles=
+	if test -n "$export_symbols" && test -n "$include_expsyms"; then
+	  $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp"
+	  export_symbols=$output_objdir/$libname.uexp
+	  func_append delfiles " $export_symbols"
+	fi
+
+	orig_export_symbols=
+	case $host_os in
+	cygwin* | mingw* | cegcc*)
+	  if test -n "$export_symbols" && test -z "$export_symbols_regex"; then
+	    # exporting using user supplied symfile
+	    func_dll_def_p "$export_symbols" || {
+	      # and it's NOT already a .def file. Must figure out
+	      # which of the given symbols are data symbols and tag
+	      # them as such. So, trigger use of export_symbols_cmds.
+	      # export_symbols gets reassigned inside the "prepare
+	      # the list of exported symbols" if statement, so the
+	      # include_expsyms logic still works.
+	      orig_export_symbols=$export_symbols
+	      export_symbols=
+	      always_export_symbols=yes
+	    }
+	  fi
+	  ;;
+	esac
+
+	# Prepare the list of exported symbols
+	if test -z "$export_symbols"; then
+	  if test yes = "$always_export_symbols" || test -n "$export_symbols_regex"; then
+	    func_verbose "generating symbol list for '$libname.la'"
+	    export_symbols=$output_objdir/$libname.exp
+	    $opt_dry_run || $RM $export_symbols
+	    cmds=$export_symbols_cmds
+	    save_ifs=$IFS; IFS='~'
+	    for cmd1 in $cmds; do
+	      IFS=$save_ifs
+	      # Take the normal branch if the nm_file_list_spec branch
+	      # doesn't work or if tool conversion is not needed.
+	      case $nm_file_list_spec~$to_tool_file_cmd in
+		*~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*)
+		  try_normal_branch=yes
+		  eval cmd=\"$cmd1\"
+		  func_len " $cmd"
+		  len=$func_len_result
+		  ;;
+		*)
+		  try_normal_branch=no
+		  ;;
+	      esac
+	      if test yes = "$try_normal_branch" \
+		 && { test "$len" -lt "$max_cmd_len" \
+		      || test "$max_cmd_len" -le -1; }
+	      then
+		func_show_eval "$cmd" 'exit $?'
+		skipped_export=false
+	      elif test -n "$nm_file_list_spec"; then
+		func_basename "$output"
+		output_la=$func_basename_result
+		save_libobjs=$libobjs
+		save_output=$output
+		output=$output_objdir/$output_la.nm
+		func_to_tool_file "$output"
+		libobjs=$nm_file_list_spec$func_to_tool_file_result
+		func_append delfiles " $output"
+		func_verbose "creating $NM input file list: $output"
+		for obj in $save_libobjs; do
+		  func_to_tool_file "$obj"
+		  $ECHO "$func_to_tool_file_result"
+		done > "$output"
+		eval cmd=\"$cmd1\"
+		func_show_eval "$cmd" 'exit $?'
+		output=$save_output
+		libobjs=$save_libobjs
+		skipped_export=false
+	      else
+		# The command line is too long to execute in one step.
+		func_verbose "using reloadable object file for export list..."
+		skipped_export=:
+		# Break out early, otherwise skipped_export may be
+		# set to false by a later but shorter cmd.
+		break
+	      fi
+	    done
+	    IFS=$save_ifs
+	    if test -n "$export_symbols_regex" && test : != "$skipped_export"; then
+	      func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
+	      func_show_eval '$MV "${export_symbols}T" "$export_symbols"'
+	    fi
+	  fi
+	fi
+
+	if test -n "$export_symbols" && test -n "$include_expsyms"; then
+	  tmp_export_symbols=$export_symbols
+	  test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols
+	  $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"'
+	fi
+
+	if test : != "$skipped_export" && test -n "$orig_export_symbols"; then
+	  # The given exports_symbols file has to be filtered, so filter it.
+	  func_verbose "filter symbol list for '$libname.la' to tag DATA exports"
+	  # FIXME: $output_objdir/$libname.filter potentially contains lots of
+	  # 's' commands, which not all seds can handle. GNU sed should be fine
+	  # though. Also, the filter scales superlinearly with the number of
+	  # global variables. join(1) would be nice here, but unfortunately
+	  # isn't a blessed tool.
+	  $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter
+	  func_append delfiles " $export_symbols $output_objdir/$libname.filter"
+	  export_symbols=$output_objdir/$libname.def
+	  $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols
+	fi
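+	# Illustrative note (added comment): for a symbol listed as 'foo DATA'
+	# in $export_symbols, the generated $libname.filter contains
+	# 's|^foo$|foo DATA|', so the plain name from $orig_export_symbols is
+	# written to the final .def file with its DATA tag.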
+
+	tmp_deplibs=
+	for test_deplib in $deplibs; do
+	  case " $convenience " in
+	  *" $test_deplib "*) ;;
+	  *)
+	    func_append tmp_deplibs " $test_deplib"
+	    ;;
+	  esac
+	done
+	deplibs=$tmp_deplibs
+
+	if test -n "$convenience"; then
+	  if test -n "$whole_archive_flag_spec" &&
+	    test yes = "$compiler_needs_object" &&
+	    test -z "$libobjs"; then
+	    # extract the archives, so we have objects to list.
+	    # TODO: could optimize this to just extract one archive.
+	    whole_archive_flag_spec=
+	  fi
+	  if test -n "$whole_archive_flag_spec"; then
+	    save_libobjs=$libobjs
+	    eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
+	    test "X$libobjs" = "X " && libobjs=
+	  else
+	    gentop=$output_objdir/${outputname}x
+	    func_append generated " $gentop"
+
+	    func_extract_archives $gentop $convenience
+	    func_append libobjs " $func_extract_archives_result"
+	    test "X$libobjs" = "X " && libobjs=
+	  fi
+	fi
+
+	if test yes = "$thread_safe" && test -n "$thread_safe_flag_spec"; then
+	  eval flag=\"$thread_safe_flag_spec\"
+	  func_append linker_flags " $flag"
+	fi
+
+	# Make a backup of the uninstalled library when relinking
+	if test relink = "$opt_mode"; then
+	  $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $?
+	fi
+
+	# Do each of the archive commands.
+	if test yes = "$module" && test -n "$module_cmds"; then
+	  if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
+	    eval test_cmds=\"$module_expsym_cmds\"
+	    cmds=$module_expsym_cmds
+	  else
+	    eval test_cmds=\"$module_cmds\"
+	    cmds=$module_cmds
+	  fi
+	else
+	  if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
+	    eval test_cmds=\"$archive_expsym_cmds\"
+	    cmds=$archive_expsym_cmds
+	  else
+	    eval test_cmds=\"$archive_cmds\"
+	    cmds=$archive_cmds
+	  fi
+	fi
+
+	if test : != "$skipped_export" &&
+	   func_len " $test_cmds" &&
+	   len=$func_len_result &&
+	   test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then
+	  :
+	else
+	  # The command line is too long to link in one step, link piecewise
+	  # or, if using GNU ld and skipped_export is not :, use a linker
+	  # script.
+
+	  # Save the value of $output and $libobjs because we want to
+	  # use them later.  If we have whole_archive_flag_spec, we
+	  # want to use save_libobjs as it was before
+	  # whole_archive_flag_spec was expanded, because we can't
+	  # assume the linker understands whole_archive_flag_spec.
+	  # This may have to be revisited, in case too many
+	  # convenience libraries get linked in and end up exceeding
+	  # the spec.
+	  if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then
+	    save_libobjs=$libobjs
+	  fi
+	  save_output=$output
+	  func_basename "$output"
+	  output_la=$func_basename_result
+
+	  # Clear the reloadable object creation command queue and
+	  # initialize k to one.
+	  test_cmds=
+	  concat_cmds=
+	  objlist=
+	  last_robj=
+	  k=1
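+	  # Illustrative note (added comment): the branches below pick one of
+	  # three strategies: a GNU ld 'INPUT ( ... )' linker script listing
+	  # $save_libobjs, an object list file passed via $file_list_spec, or a
+	  # chain of reloadable objects $output_la-1.$objext, -2.$objext, ...
+	  # each of which links in the previous one.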
+
+	  if test -n "$save_libobjs" && test : != "$skipped_export" && test yes = "$with_gnu_ld"; then
+	    output=$output_objdir/$output_la.lnkscript
+	    func_verbose "creating GNU ld script: $output"
+	    echo 'INPUT (' > $output
+	    for obj in $save_libobjs
+	    do
+	      func_to_tool_file "$obj"
+	      $ECHO "$func_to_tool_file_result" >> $output
+	    done
+	    echo ')' >> $output
+	    func_append delfiles " $output"
+	    func_to_tool_file "$output"
+	    output=$func_to_tool_file_result
+	  elif test -n "$save_libobjs" && test : != "$skipped_export" && test -n "$file_list_spec"; then
+	    output=$output_objdir/$output_la.lnk
+	    func_verbose "creating linker input file list: $output"
+	    : > $output
+	    set x $save_libobjs
+	    shift
+	    firstobj=
+	    if test yes = "$compiler_needs_object"; then
+	      firstobj="$1 "
+	      shift
+	    fi
+	    for obj
+	    do
+	      func_to_tool_file "$obj"
+	      $ECHO "$func_to_tool_file_result" >> $output
+	    done
+	    func_append delfiles " $output"
+	    func_to_tool_file "$output"
+	    output=$firstobj\"$file_list_spec$func_to_tool_file_result\"
+	  else
+	    if test -n "$save_libobjs"; then
+	      func_verbose "creating reloadable object files..."
+	      output=$output_objdir/$output_la-$k.$objext
+	      eval test_cmds=\"$reload_cmds\"
+	      func_len " $test_cmds"
+	      len0=$func_len_result
+	      len=$len0
+
+	      # Loop over the list of objects to be linked.
+	      for obj in $save_libobjs
+	      do
+		func_len " $obj"
+		func_arith $len + $func_len_result
+		len=$func_arith_result
+		if test -z "$objlist" ||
+		   test "$len" -lt "$max_cmd_len"; then
+		  func_append objlist " $obj"
+		else
+		  # The command $test_cmds is almost too long, add a
+		  # command to the queue.
+		  if test 1 -eq "$k"; then
+		    # The first file doesn't have a previous command to add.
+		    reload_objs=$objlist
+		    eval concat_cmds=\"$reload_cmds\"
+		  else
+		    # All subsequent reloadable object files will link in
+		    # the last one created.
+		    reload_objs="$objlist $last_robj"
+		    eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\"
+		  fi
+		  last_robj=$output_objdir/$output_la-$k.$objext
+		  func_arith $k + 1
+		  k=$func_arith_result
+		  output=$output_objdir/$output_la-$k.$objext
+		  objlist=" $obj"
+		  func_len " $last_robj"
+		  func_arith $len0 + $func_len_result
+		  len=$func_arith_result
+		fi
+	      done
+	      # Handle the remaining objects by creating one last
+	      # reloadable object file.  All subsequent reloadable object
+	      # files will link in the last one created.
+	      test -z "$concat_cmds" || concat_cmds=$concat_cmds~
+	      reload_objs="$objlist $last_robj"
+	      eval concat_cmds=\"\$concat_cmds$reload_cmds\"
+	      if test -n "$last_robj"; then
+	        eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\"
+	      fi
+	      func_append delfiles " $output"
+
+	    else
+	      output=
+	    fi
+
+	    ${skipped_export-false} && {
+	      func_verbose "generating symbol list for '$libname.la'"
+	      export_symbols=$output_objdir/$libname.exp
+	      $opt_dry_run || $RM $export_symbols
+	      libobjs=$output
+	      # Append the command to create the export file.
+	      test -z "$concat_cmds" || concat_cmds=$concat_cmds~
+	      eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\"
+	      if test -n "$last_robj"; then
+		eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\"
+	      fi
+	    }
+
+	    test -n "$save_libobjs" &&
+	      func_verbose "creating a temporary reloadable object file: $output"
+
+	    # Loop through the commands generated above and execute them.
+	    save_ifs=$IFS; IFS='~'
+	    for cmd in $concat_cmds; do
+	      IFS=$save_ifs
+	      $opt_quiet || {
+		  func_quote_for_expand "$cmd"
+		  eval "func_echo $func_quote_for_expand_result"
+	      }
+	      $opt_dry_run || eval "$cmd" || {
+		lt_exit=$?
+
+		# Restore the uninstalled library and exit
+		if test relink = "$opt_mode"; then
+		  ( cd "$output_objdir" && \
+		    $RM "${realname}T" && \
+		    $MV "${realname}U" "$realname" )
+		fi
+
+		exit $lt_exit
+	      }
+	    done
+	    IFS=$save_ifs
+
+	    if test -n "$export_symbols_regex" && ${skipped_export-false}; then
+	      func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
+	      func_show_eval '$MV "${export_symbols}T" "$export_symbols"'
+	    fi
+	  fi
+
+          ${skipped_export-false} && {
+	    if test -n "$export_symbols" && test -n "$include_expsyms"; then
+	      tmp_export_symbols=$export_symbols
+	      test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols
+	      $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"'
+	    fi
+
+	    if test -n "$orig_export_symbols"; then
+	      # The given exports_symbols file has to be filtered, so filter it.
+	      func_verbose "filter symbol list for '$libname.la' to tag DATA exports"
+	      # FIXME: $output_objdir/$libname.filter potentially contains lots of
+	      # 's' commands, which not all seds can handle. GNU sed should be fine
+	      # though. Also, the filter scales superlinearly with the number of
+	      # global variables. join(1) would be nice here, but unfortunately
+	      # isn't a blessed tool.
+	      $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter
+	      func_append delfiles " $export_symbols $output_objdir/$libname.filter"
+	      export_symbols=$output_objdir/$libname.def
+	      $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols
+	    fi
+	  }
+
+	  libobjs=$output
+	  # Restore the value of output.
+	  output=$save_output
+
+	  if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then
+	    eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
+	    test "X$libobjs" = "X " && libobjs=
+	  fi
+	  # Expand the library linking commands again to reset the
+	  # value of $libobjs for piecewise linking.
+
+	  # Do each of the archive commands.
+	  if test yes = "$module" && test -n "$module_cmds"; then
+	    if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
+	      cmds=$module_expsym_cmds
+	    else
+	      cmds=$module_cmds
+	    fi
+	  else
+	    if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
+	      cmds=$archive_expsym_cmds
+	    else
+	      cmds=$archive_cmds
+	    fi
+	  fi
+	fi
+
+	if test -n "$delfiles"; then
+	  # Append the command to remove temporary files to $cmds.
+	  eval cmds=\"\$cmds~\$RM $delfiles\"
+	fi
+
+	# Add any objects from preloaded convenience libraries
+	if test -n "$dlprefiles"; then
+	  gentop=$output_objdir/${outputname}x
+	  func_append generated " $gentop"
+
+	  func_extract_archives $gentop $dlprefiles
+	  func_append libobjs " $func_extract_archives_result"
+	  test "X$libobjs" = "X " && libobjs=
+	fi
+
+	save_ifs=$IFS; IFS='~'
+	for cmd in $cmds; do
+	  IFS=$sp$nl
+	  eval cmd=\"$cmd\"
+	  IFS=$save_ifs
+	  $opt_quiet || {
+	    func_quote_for_expand "$cmd"
+	    eval "func_echo $func_quote_for_expand_result"
+	  }
+	  $opt_dry_run || eval "$cmd" || {
+	    lt_exit=$?
+
+	    # Restore the uninstalled library and exit
+	    if test relink = "$opt_mode"; then
+	      ( cd "$output_objdir" && \
+	        $RM "${realname}T" && \
+		$MV "${realname}U" "$realname" )
+	    fi
+
+	    exit $lt_exit
+	  }
+	done
+	IFS=$save_ifs
+
+	# Restore the uninstalled library and exit
+	if test relink = "$opt_mode"; then
+	  $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $?
+
+	  if test -n "$convenience"; then
+	    if test -z "$whole_archive_flag_spec"; then
+	      func_show_eval '${RM}r "$gentop"'
+	    fi
+	  fi
+
+	  exit $EXIT_SUCCESS
+	fi
+
+	# Create links to the real library.
+	for linkname in $linknames; do
+	  if test "$realname" != "$linkname"; then
+	    func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?'
+	  fi
+	done
+
+	# If -module or -export-dynamic was specified, set the dlname.
+	if test yes = "$module" || test yes = "$export_dynamic"; then
+	  # On all known operating systems, these are identical.
+	  dlname=$soname
+	fi
+      fi
+      ;;
+
+    obj)
+      if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then
+	func_warning "'-dlopen' is ignored for objects"
+      fi
+
+      case " $deplibs" in
+      *\ -l* | *\ -L*)
+	func_warning "'-l' and '-L' are ignored for objects" ;;
+      esac
+
+      test -n "$rpath" && \
+	func_warning "'-rpath' is ignored for objects"
+
+      test -n "$xrpath" && \
+	func_warning "'-R' is ignored for objects"
+
+      test -n "$vinfo" && \
+	func_warning "'-version-info' is ignored for objects"
+
+      test -n "$release" && \
+	func_warning "'-release' is ignored for objects"
+
+      case $output in
+      *.lo)
+	test -n "$objs$old_deplibs" && \
+	  func_fatal_error "cannot build library object '$output' from non-libtool objects"
+
+	libobj=$output
+	func_lo2o "$libobj"
+	obj=$func_lo2o_result
+	;;
+      *)
+	libobj=
+	obj=$output
+	;;
+      esac
+
+      # Delete the old objects.
+      $opt_dry_run || $RM $obj $libobj
+
+      # Objects from convenience libraries.  This assumes
+      # single-version convenience libraries.  Whenever we create
+      # different ones for PIC/non-PIC, this we'll have to duplicate
+      # the extraction.
+      reload_conv_objs=
+      gentop=
+      # if reload_cmds runs $LD directly, get rid of -Wl from
+      # whole_archive_flag_spec and hope we can get by with turning comma
+      # into space.
+      case $reload_cmds in
+        *\$LD[\ \$]*) wl= ;;
+      esac
+      if test -n "$convenience"; then
+	if test -n "$whole_archive_flag_spec"; then
+	  eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\"
+	  test -n "$wl" || tmp_whole_archive_flags=`$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'`
+	  reload_conv_objs=$reload_objs\ $tmp_whole_archive_flags
+	else
+	  gentop=$output_objdir/${obj}x
+	  func_append generated " $gentop"
+
+	  func_extract_archives $gentop $convenience
+	  reload_conv_objs="$reload_objs $func_extract_archives_result"
+	fi
+      fi
+
+      # If we're not building shared, we need to use non_pic_objs
+      test yes = "$build_libtool_libs" || libobjs=$non_pic_objects
+
+      # Create the old-style object.
+      reload_objs=$objs$old_deplibs' '`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; /\.lib$/d; $lo2o" | $NL2SP`' '$reload_conv_objs
+
+      output=$obj
+      func_execute_cmds "$reload_cmds" 'exit $?'
+
+      # Exit if we aren't doing a library object file.
+      if test -z "$libobj"; then
+	if test -n "$gentop"; then
+	  func_show_eval '${RM}r "$gentop"'
+	fi
+
+	exit $EXIT_SUCCESS
+      fi
+
+      test yes = "$build_libtool_libs" || {
+	if test -n "$gentop"; then
+	  func_show_eval '${RM}r "$gentop"'
+	fi
+
+	# Create an invalid libtool object if no PIC, so that we don't
+	# accidentally link it into a program.
+	# $show "echo timestamp > $libobj"
+	# $opt_dry_run || eval "echo timestamp > $libobj" || exit $?
+	exit $EXIT_SUCCESS
+      }
+
+      if test -n "$pic_flag" || test default != "$pic_mode"; then
+	# Only do commands if we really have different PIC objects.
+	reload_objs="$libobjs $reload_conv_objs"
+	output=$libobj
+	func_execute_cmds "$reload_cmds" 'exit $?'
+      fi
+
+      if test -n "$gentop"; then
+	func_show_eval '${RM}r "$gentop"'
+      fi
+
+      exit $EXIT_SUCCESS
+      ;;
+
+    prog)
+      case $host in
+	*cygwin*) func_stripname '' '.exe' "$output"
+	          output=$func_stripname_result.exe;;
+      esac
+      test -n "$vinfo" && \
+	func_warning "'-version-info' is ignored for programs"
+
+      test -n "$release" && \
+	func_warning "'-release' is ignored for programs"
+
+      $preload \
+	&& test unknown,unknown,unknown = "$dlopen_support,$dlopen_self,$dlopen_self_static" \
+	&& func_warning "'LT_INIT([dlopen])' not used. Assuming no dlopen support."
+
+      case $host in
+      *-*-rhapsody* | *-*-darwin1.[012])
+	# On Rhapsody replace the C library is the System framework
+	compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'`
+	finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'`
+	;;
+      esac
+
+      case $host in
+      *-*-darwin*)
+	# Don't allow lazy linking, it breaks C++ global constructors
+	# But is supposedly fixed on 10.4 or later (yay!).
+	if test CXX = "$tagname"; then
+	  case ${MACOSX_DEPLOYMENT_TARGET-10.0} in
+	    10.[0123])
+	      func_append compile_command " $wl-bind_at_load"
+	      func_append finalize_command " $wl-bind_at_load"
+	    ;;
+	  esac
+	fi
+	# Time to change all our "foo.ltframework" stuff back to "-framework foo"
+	compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	;;
+      esac
+
+
+      # move library search paths that coincide with paths to not yet
+      # installed libraries to the beginning of the library search list
+      new_libs=
+      for path in $notinst_path; do
+	case " $new_libs " in
+	*" -L$path/$objdir "*) ;;
+	*)
+	  case " $compile_deplibs " in
+	  *" -L$path/$objdir "*)
+	    func_append new_libs " -L$path/$objdir" ;;
+	  esac
+	  ;;
+	esac
+      done
+      for deplib in $compile_deplibs; do
+	case $deplib in
+	-L*)
+	  case " $new_libs " in
+	  *" $deplib "*) ;;
+	  *) func_append new_libs " $deplib" ;;
+	  esac
+	  ;;
+	*) func_append new_libs " $deplib" ;;
+	esac
+      done
+      compile_deplibs=$new_libs
+
+
+      func_append compile_command " $compile_deplibs"
+      func_append finalize_command " $finalize_deplibs"
+
+      if test -n "$rpath$xrpath"; then
+	# If the user specified any rpath flags, then add them.
+	for libdir in $rpath $xrpath; do
+	  # This is the magic to use -rpath.
+	  case "$finalize_rpath " in
+	  *" $libdir "*) ;;
+	  *) func_append finalize_rpath " $libdir" ;;
+	  esac
+	done
+      fi
+
+      # Now hardcode the library paths
+      rpath=
+      hardcode_libdirs=
+      for libdir in $compile_rpath $finalize_rpath; do
+	if test -n "$hardcode_libdir_flag_spec"; then
+	  if test -n "$hardcode_libdir_separator"; then
+	    if test -z "$hardcode_libdirs"; then
+	      hardcode_libdirs=$libdir
+	    else
+	      # Just accumulate the unique libdirs.
+	      case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+	      *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+		;;
+	      *)
+		func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
+		;;
+	      esac
+	    fi
+	  else
+	    eval flag=\"$hardcode_libdir_flag_spec\"
+	    func_append rpath " $flag"
+	  fi
+	elif test -n "$runpath_var"; then
+	  case "$perm_rpath " in
+	  *" $libdir "*) ;;
+	  *) func_append perm_rpath " $libdir" ;;
+	  esac
+	fi
+	case $host in
+	*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*)
+	  testbindir=`$ECHO "$libdir" | $SED -e 's*/lib$*/bin*'`
+	  case :$dllsearchpath: in
+	  *":$libdir:"*) ;;
+	  ::) dllsearchpath=$libdir;;
+	  *) func_append dllsearchpath ":$libdir";;
+	  esac
+	  case :$dllsearchpath: in
+	  *":$testbindir:"*) ;;
+	  ::) dllsearchpath=$testbindir;;
+	  *) func_append dllsearchpath ":$testbindir";;
+	  esac
+	  ;;
+	esac
+      done
+      # Substitute the hardcoded libdirs into the rpath.
+      if test -n "$hardcode_libdir_separator" &&
+	 test -n "$hardcode_libdirs"; then
+	libdir=$hardcode_libdirs
+	eval rpath=\" $hardcode_libdir_flag_spec\"
+      fi
+      compile_rpath=$rpath
+
+      rpath=
+      hardcode_libdirs=
+      for libdir in $finalize_rpath; do
+	if test -n "$hardcode_libdir_flag_spec"; then
+	  if test -n "$hardcode_libdir_separator"; then
+	    if test -z "$hardcode_libdirs"; then
+	      hardcode_libdirs=$libdir
+	    else
+	      # Just accumulate the unique libdirs.
+	      case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+	      *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+		;;
+	      *)
+		func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
+		;;
+	      esac
+	    fi
+	  else
+	    eval flag=\"$hardcode_libdir_flag_spec\"
+	    func_append rpath " $flag"
+	  fi
+	elif test -n "$runpath_var"; then
+	  case "$finalize_perm_rpath " in
+	  *" $libdir "*) ;;
+	  *) func_append finalize_perm_rpath " $libdir" ;;
+	  esac
+	fi
+      done
+      # Substitute the hardcoded libdirs into the rpath.
+      if test -n "$hardcode_libdir_separator" &&
+	 test -n "$hardcode_libdirs"; then
+	libdir=$hardcode_libdirs
+	eval rpath=\" $hardcode_libdir_flag_spec\"
+      fi
+      finalize_rpath=$rpath
+
+      if test -n "$libobjs" && test yes = "$build_old_libs"; then
+	# Transform all the library objects into standard objects.
+	compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP`
+	finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP`
+      fi
+
+      func_generate_dlsyms "$outputname" "@PROGRAM@" false
+
+      # template prelinking step
+      if test -n "$prelink_cmds"; then
+	func_execute_cmds "$prelink_cmds" 'exit $?'
+      fi
+
+      wrappers_required=:
+      case $host in
+      *cegcc* | *mingw32ce*)
+        # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway.
+        wrappers_required=false
+        ;;
+      *cygwin* | *mingw* )
+        test yes = "$build_libtool_libs" || wrappers_required=false
+        ;;
+      *)
+        if test no = "$need_relink" || test yes != "$build_libtool_libs"; then
+          wrappers_required=false
+        fi
+        ;;
+      esac
+      $wrappers_required || {
+	# Replace the output file specification.
+	compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'`
+	link_command=$compile_command$compile_rpath
+
+	# We have no uninstalled library dependencies, so finalize right now.
+	exit_status=0
+	func_show_eval "$link_command" 'exit_status=$?'
+
+	if test -n "$postlink_cmds"; then
+	  func_to_tool_file "$output"
+	  postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'`
+	  func_execute_cmds "$postlink_cmds" 'exit $?'
+	fi
+
+	# Delete the generated files.
+	if test -f "$output_objdir/${outputname}S.$objext"; then
+	  func_show_eval '$RM "$output_objdir/${outputname}S.$objext"'
+	fi
+
+	exit $exit_status
+      }
+
+      if test -n "$compile_shlibpath$finalize_shlibpath"; then
+	compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command"
+      fi
+      if test -n "$finalize_shlibpath"; then
+	finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command"
+      fi
+
+      compile_var=
+      finalize_var=
+      if test -n "$runpath_var"; then
+	if test -n "$perm_rpath"; then
+	  # We should set the runpath_var.
+	  rpath=
+	  for dir in $perm_rpath; do
+	    func_append rpath "$dir:"
+	  done
+	  compile_var="$runpath_var=\"$rpath\$$runpath_var\" "
+	fi
+	if test -n "$finalize_perm_rpath"; then
+	  # We should set the runpath_var.
+	  rpath=
+	  for dir in $finalize_perm_rpath; do
+	    func_append rpath "$dir:"
+	  done
+	  finalize_var="$runpath_var=\"$rpath\$$runpath_var\" "
+	fi
+      fi
+
+      if test yes = "$no_install"; then
+	# We don't need to create a wrapper script.
+	link_command=$compile_var$compile_command$compile_rpath
+	# Replace the output file specification.
+	link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'`
+	# Delete the old output file.
+	$opt_dry_run || $RM $output
+	# Link the executable and exit
+	func_show_eval "$link_command" 'exit $?'
+
+	if test -n "$postlink_cmds"; then
+	  func_to_tool_file "$output"
+	  postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'`
+	  func_execute_cmds "$postlink_cmds" 'exit $?'
+	fi
+
+	exit $EXIT_SUCCESS
+      fi
+
+      case $hardcode_action,$fast_install in
+        relink,*)
+	  # Fast installation is not supported
+	  link_command=$compile_var$compile_command$compile_rpath
+	  relink_command=$finalize_var$finalize_command$finalize_rpath
+
+	  func_warning "this platform does not like uninstalled shared libraries"
+	  func_warning "'$output' will be relinked during installation"
+	  ;;
+        *,yes)
+	  link_command=$finalize_var$compile_command$finalize_rpath
+	  relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'`
+          ;;
+	*,no)
+	  link_command=$compile_var$compile_command$compile_rpath
+	  relink_command=$finalize_var$finalize_command$finalize_rpath
+          ;;
+	*,needless)
+	  link_command=$finalize_var$compile_command$finalize_rpath
+	  relink_command=
+          ;;
+      esac
+
+      # Replace the output file specification.
+      link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
+
+      # Delete the old output files.
+      $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname
+
+      func_show_eval "$link_command" 'exit $?'
+
+      if test -n "$postlink_cmds"; then
+	func_to_tool_file "$output_objdir/$outputname"
+	postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'`
+	func_execute_cmds "$postlink_cmds" 'exit $?'
+      fi
+
+      # Now create the wrapper script.
+      func_verbose "creating $output"
+
+      # Quote the relink command for shipping.
+      if test -n "$relink_command"; then
+	# Preserve any variables that may affect compiler behavior
+	for var in $variables_saved_for_relink; do
+	  if eval test -z \"\${$var+set}\"; then
+	    relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command"
+	  elif eval var_value=\$$var; test -z "$var_value"; then
+	    relink_command="$var=; export $var; $relink_command"
+	  else
+	    func_quote_for_eval "$var_value"
+	    relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command"
+	  fi
+	done
+	relink_command="(cd `pwd`; $relink_command)"
+	relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"`
+      fi
+
+      # Only actually do things if not in dry run mode.
+      $opt_dry_run || {
+	# win32 will think the script is a binary if it has
+	# a .exe suffix, so we strip it off here.
+	case $output in
+	  *.exe) func_stripname '' '.exe' "$output"
+	         output=$func_stripname_result ;;
+	esac
+	# test for cygwin because mv fails w/o .exe extensions
+	case $host in
+	  *cygwin*)
+	    exeext=.exe
+	    func_stripname '' '.exe' "$outputname"
+	    outputname=$func_stripname_result ;;
+	  *) exeext= ;;
+	esac
+	case $host in
+	  *cygwin* | *mingw* )
+	    func_dirname_and_basename "$output" "" "."
+	    output_name=$func_basename_result
+	    output_path=$func_dirname_result
+	    cwrappersource=$output_path/$objdir/lt-$output_name.c
+	    cwrapper=$output_path/$output_name.exe
+	    $RM $cwrappersource $cwrapper
+	    trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15
+
+	    func_emit_cwrapperexe_src > $cwrappersource
+
+	    # The wrapper executable is built using the $host compiler,
+	    # because it contains $host paths and files. If cross-
+	    # compiling, it, like the target executable, must be
+	    # executed on the $host or under an emulation environment.
+	    $opt_dry_run || {
+	      $LTCC $LTCFLAGS -o $cwrapper $cwrappersource
+	      $STRIP $cwrapper
+	    }
+
+	    # Now, create the wrapper script for func_source use:
+	    func_ltwrapper_scriptname $cwrapper
+	    $RM $func_ltwrapper_scriptname_result
+	    trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15
+	    $opt_dry_run || {
+	      # note: this script will not be executed, so do not chmod.
+	      if test "x$build" = "x$host"; then
+		$cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result
+	      else
+		func_emit_wrapper no > $func_ltwrapper_scriptname_result
+	      fi
+	    }
+	  ;;
+	  * )
+	    $RM $output
+	    trap "$RM $output; exit $EXIT_FAILURE" 1 2 15
+
+	    func_emit_wrapper no > $output
+	    chmod +x $output
+	  ;;
+	esac
+      }
+      exit $EXIT_SUCCESS
+      ;;
+    esac
+
+    # See if we need to build an old-fashioned archive.
+    for oldlib in $oldlibs; do
+
+      case $build_libtool_libs in
+        convenience)
+	  oldobjs="$libobjs_save $symfileobj"
+	  addlibs=$convenience
+	  build_libtool_libs=no
+	  ;;
+	module)
+	  oldobjs=$libobjs_save
+	  addlibs=$old_convenience
+	  build_libtool_libs=no
+          ;;
+	*)
+	  oldobjs="$old_deplibs $non_pic_objects"
+	  $preload && test -f "$symfileobj" \
+	    && func_append oldobjs " $symfileobj"
+	  addlibs=$old_convenience
+	  ;;
+      esac
+
+      if test -n "$addlibs"; then
+	gentop=$output_objdir/${outputname}x
+	func_append generated " $gentop"
+
+	func_extract_archives $gentop $addlibs
+	func_append oldobjs " $func_extract_archives_result"
+      fi
+
+      # Do each command in the archive commands.
+      if test -n "$old_archive_from_new_cmds" && test yes = "$build_libtool_libs"; then
+	cmds=$old_archive_from_new_cmds
+      else
+
+	# Add any objects from preloaded convenience libraries
+	if test -n "$dlprefiles"; then
+	  gentop=$output_objdir/${outputname}x
+	  func_append generated " $gentop"
+
+	  func_extract_archives $gentop $dlprefiles
+	  func_append oldobjs " $func_extract_archives_result"
+	fi
+
+	# POSIX demands no paths to be encoded in archives.  We have
+	# to avoid creating archives with duplicate basenames if we
+	# might have to extract them afterwards, e.g., when creating a
+	# static archive out of a convenience library, or when linking
+	# the entirety of a libtool archive into another (currently
+	# not supported by libtool).
+	if (for obj in $oldobjs
+	    do
+	      func_basename "$obj"
+	      $ECHO "$func_basename_result"
+	    done | sort | sort -uc >/dev/null 2>&1); then
+	  :
+	else
+	  echo "copying selected object files to avoid basename conflicts..."
+	  gentop=$output_objdir/${outputname}x
+	  func_append generated " $gentop"
+	  func_mkdir_p "$gentop"
+	  save_oldobjs=$oldobjs
+	  oldobjs=
+	  counter=1
+	  for obj in $save_oldobjs
+	  do
+	    func_basename "$obj"
+	    objbase=$func_basename_result
+	    case " $oldobjs " in
+	    " ") oldobjs=$obj ;;
+	    *[\ /]"$objbase "*)
+	      while :; do
+		# Make sure we don't pick an alternate name that also
+		# overlaps.
+		newobj=lt$counter-$objbase
+		func_arith $counter + 1
+		counter=$func_arith_result
+		case " $oldobjs " in
+		*[\ /]"$newobj "*) ;;
+		*) if test ! -f "$gentop/$newobj"; then break; fi ;;
+		esac
+	      done
+	      func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj"
+	      func_append oldobjs " $gentop/$newobj"
+	      ;;
+	    *) func_append oldobjs " $obj" ;;
+	    esac
+	  done
+	fi
+	func_to_tool_file "$oldlib" func_convert_file_msys_to_w32
+	tool_oldlib=$func_to_tool_file_result
+	eval cmds=\"$old_archive_cmds\"
+
+	func_len " $cmds"
+	len=$func_len_result
+	if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then
+	  cmds=$old_archive_cmds
+	elif test -n "$archiver_list_spec"; then
+	  func_verbose "using command file archive linking..."
+	  for obj in $oldobjs
+	  do
+	    func_to_tool_file "$obj"
+	    $ECHO "$func_to_tool_file_result"
+	  done > $output_objdir/$libname.libcmd
+	  func_to_tool_file "$output_objdir/$libname.libcmd"
+	  oldobjs=" $archiver_list_spec$func_to_tool_file_result"
+	  cmds=$old_archive_cmds
+	else
+	  # the command line is too long to link in one step, link in parts
+	  func_verbose "using piecewise archive linking..."
+	  save_RANLIB=$RANLIB
+	  RANLIB=:
+	  objlist=
+	  concat_cmds=
+	  save_oldobjs=$oldobjs
+	  oldobjs=
+	  # Is there a better way of finding the last object in the list?
+	  for obj in $save_oldobjs
+	  do
+	    last_oldobj=$obj
+	  done
+	  eval test_cmds=\"$old_archive_cmds\"
+	  func_len " $test_cmds"
+	  len0=$func_len_result
+	  len=$len0
+	  for obj in $save_oldobjs
+	  do
+	    func_len " $obj"
+	    func_arith $len + $func_len_result
+	    len=$func_arith_result
+	    func_append objlist " $obj"
+	    if test "$len" -lt "$max_cmd_len"; then
+	      :
+	    else
+	      # the above command should be used before it gets too long
+	      oldobjs=$objlist
+	      if test "$obj" = "$last_oldobj"; then
+		RANLIB=$save_RANLIB
+	      fi
+	      test -z "$concat_cmds" || concat_cmds=$concat_cmds~
+	      eval concat_cmds=\"\$concat_cmds$old_archive_cmds\"
+	      objlist=
+	      len=$len0
+	    fi
+	  done
+	  RANLIB=$save_RANLIB
+	  oldobjs=$objlist
+	  if test -z "$oldobjs"; then
+	    eval cmds=\"\$concat_cmds\"
+	  else
+	    eval cmds=\"\$concat_cmds~\$old_archive_cmds\"
+	  fi
+	fi
+      fi
+      func_execute_cmds "$cmds" 'exit $?'
+    done
+
+    test -n "$generated" && \
+      func_show_eval "${RM}r$generated"
+
+    # Now create the libtool archive.
+    case $output in
+    *.la)
+      old_library=
+      test yes = "$build_old_libs" && old_library=$libname.$libext
+      func_verbose "creating $output"
+
+      # Preserve any variables that may affect compiler behavior
+      for var in $variables_saved_for_relink; do
+	if eval test -z \"\${$var+set}\"; then
+	  relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command"
+	elif eval var_value=\$$var; test -z "$var_value"; then
+	  relink_command="$var=; export $var; $relink_command"
+	else
+	  func_quote_for_eval "$var_value"
+	  relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command"
+	fi
+      done
+      # Quote the link command for shipping.
+      relink_command="(cd `pwd`; $SHELL \"$progpath\" $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)"
+      relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"`
+      if test yes = "$hardcode_automatic"; then
+	relink_command=
+      fi
+
+      # Only create the output if not a dry run.
+      $opt_dry_run || {
+	for installed in no yes; do
+	  if test yes = "$installed"; then
+	    if test -z "$install_libdir"; then
+	      break
+	    fi
+	    output=$output_objdir/${outputname}i
+	    # Replace all uninstalled libtool libraries with the installed ones
+	    newdependency_libs=
+	    for deplib in $dependency_libs; do
+	      case $deplib in
+	      *.la)
+		func_basename "$deplib"
+		name=$func_basename_result
+		func_resolve_sysroot "$deplib"
+		eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result`
+		test -z "$libdir" && \
+		  func_fatal_error "'$deplib' is not a valid libtool archive"
+		func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name"
+		;;
+	      -L*)
+		func_stripname -L '' "$deplib"
+		func_replace_sysroot "$func_stripname_result"
+		func_append newdependency_libs " -L$func_replace_sysroot_result"
+		;;
+	      -R*)
+		func_stripname -R '' "$deplib"
+		func_replace_sysroot "$func_stripname_result"
+		func_append newdependency_libs " -R$func_replace_sysroot_result"
+		;;
+	      *) func_append newdependency_libs " $deplib" ;;
+	      esac
+	    done
+	    dependency_libs=$newdependency_libs
+	    newdlfiles=
+
+	    for lib in $dlfiles; do
+	      case $lib in
+	      *.la)
+	        func_basename "$lib"
+		name=$func_basename_result
+		eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
+		test -z "$libdir" && \
+		  func_fatal_error "'$lib' is not a valid libtool archive"
+		func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name"
+		;;
+	      *) func_append newdlfiles " $lib" ;;
+	      esac
+	    done
+	    dlfiles=$newdlfiles
+	    newdlprefiles=
+	    for lib in $dlprefiles; do
+	      case $lib in
+	      *.la)
+		# Only pass preopened files to the pseudo-archive (for
+		# eventual linking with the app. that links it) if we
+		# didn't already link the preopened objects directly into
+		# the library:
+		func_basename "$lib"
+		name=$func_basename_result
+		eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
+		test -z "$libdir" && \
+		  func_fatal_error "'$lib' is not a valid libtool archive"
+		func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name"
+		;;
+	      esac
+	    done
+	    dlprefiles=$newdlprefiles
+	  else
+	    newdlfiles=
+	    for lib in $dlfiles; do
+	      case $lib in
+		[\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;;
+		*) abs=`pwd`"/$lib" ;;
+	      esac
+	      func_append newdlfiles " $abs"
+	    done
+	    dlfiles=$newdlfiles
+	    newdlprefiles=
+	    for lib in $dlprefiles; do
+	      case $lib in
+		[\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;;
+		*) abs=`pwd`"/$lib" ;;
+	      esac
+	      func_append newdlprefiles " $abs"
+	    done
+	    dlprefiles=$newdlprefiles
+	  fi
+	  $RM $output
+	  # place dlname in correct position for cygwin
+	  # In fact, it would be nice if we could use this code for all target
+	  # systems that can't hard-code library paths into their executables
+	  # and that have no shared library path variable independent of PATH,
+	  # but it turns out we can't easily determine that from inspecting
+	  # libtool variables, so we have to hard-code the OSs to which it
+	  # applies here; at the moment, that means platforms that use the PE
+	  # object format with DLL files.  See the long comment at the top of
+	  # tests/bindir.at for full details.
+	  tdlname=$dlname
+	  case $host,$output,$installed,$module,$dlname in
+	    *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll)
+	      # If a -bindir argument was supplied, place the dll there.
+	      if test -n "$bindir"; then
+		func_relative_path "$install_libdir" "$bindir"
+		tdlname=$func_relative_path_result/$dlname
+	      else
+		# Otherwise fall back on heuristic.
+		tdlname=../bin/$dlname
+	      fi
+	      ;;
+	  esac
+	  $ECHO > $output "\
+# $outputname - a libtool library file
+# Generated by $PROGRAM (GNU $PACKAGE) $VERSION
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname='$tdlname'
+
+# Names of this library.
+library_names='$library_names'
+
+# The name of the static archive.
+old_library='$old_library'
+
+# Linker flags that cannot go in dependency_libs.
+inherited_linker_flags='$new_inherited_linker_flags'
+
+# Libraries that this one depends upon.
+dependency_libs='$dependency_libs'
+
+# Names of additional weak libraries provided by this library
+weak_library_names='$weak_libs'
+
+# Version information for $libname.
+current=$current
+age=$age
+revision=$revision
+
+# Is this an already installed library?
+installed=$installed
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=$module
+
+# Files to dlopen/dlpreopen
+dlopen='$dlfiles'
+dlpreopen='$dlprefiles'
+
+# Directory that this library needs to be installed in:
+libdir='$install_libdir'"
+	  if test no,yes = "$installed,$need_relink"; then
+	    $ECHO >> $output "\
+relink_command=\"$relink_command\""
+	  fi
+	done
+      }
+
+      # Do a symbolic link so that the libtool archive can be found in
+      # LD_LIBRARY_PATH before the program is installed.
+      func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?'
+      ;;
+    esac
+    exit $EXIT_SUCCESS
+}
+
+if test link = "$opt_mode" || test relink = "$opt_mode"; then
+  func_mode_link ${1+"$@"}
+fi
+
+
+# func_mode_uninstall arg...
+func_mode_uninstall ()
+{
+    $debug_cmd
+
+    RM=$nonopt
+    files=
+    rmforce=false
+    exit_status=0
+
+    # This variable tells wrapper scripts just to set variables rather
+    # than running their programs.
+    libtool_install_magic=$magic
+
+    for arg
+    do
+      case $arg in
+      -f) func_append RM " $arg"; rmforce=: ;;
+      -*) func_append RM " $arg" ;;
+      *) func_append files " $arg" ;;
+      esac
+    done
+
+    test -z "$RM" && \
+      func_fatal_help "you must specify an RM program"
+
+    rmdirs=
+
+    for file in $files; do
+      func_dirname "$file" "" "."
+      dir=$func_dirname_result
+      if test . = "$dir"; then
+	odir=$objdir
+      else
+	odir=$dir/$objdir
+      fi
+      func_basename "$file"
+      name=$func_basename_result
+      test uninstall = "$opt_mode" && odir=$dir
+
+      # Remember odir for removal later, being careful to avoid duplicates
+      if test clean = "$opt_mode"; then
+	case " $rmdirs " in
+	  *" $odir "*) ;;
+	  *) func_append rmdirs " $odir" ;;
+	esac
+      fi
+
+      # Don't error if the file doesn't exist and rm -f was used.
+      if { test -L "$file"; } >/dev/null 2>&1 ||
+	 { test -h "$file"; } >/dev/null 2>&1 ||
+	 test -f "$file"; then
+	:
+      elif test -d "$file"; then
+	exit_status=1
+	continue
+      elif $rmforce; then
+	continue
+      fi
+
+      rmfiles=$file
+
+      case $name in
+      *.la)
+	# Possibly a libtool archive, so verify it.
+	if func_lalib_p "$file"; then
+	  func_source $dir/$name
+
+	  # Delete the libtool libraries and symlinks.
+	  for n in $library_names; do
+	    func_append rmfiles " $odir/$n"
+	  done
+	  test -n "$old_library" && func_append rmfiles " $odir/$old_library"
+
+	  case $opt_mode in
+	  clean)
+	    case " $library_names " in
+	    *" $dlname "*) ;;
+	    *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;;
+	    esac
+	    test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i"
+	    ;;
+	  uninstall)
+	    if test -n "$library_names"; then
+	      # Do each command in the postuninstall commands.
+	      func_execute_cmds "$postuninstall_cmds" '$rmforce || exit_status=1'
+	    fi
+
+	    if test -n "$old_library"; then
+	      # Do each command in the old_postuninstall commands.
+	      func_execute_cmds "$old_postuninstall_cmds" '$rmforce || exit_status=1'
+	    fi
+	    # FIXME: should reinstall the best remaining shared library.
+	    ;;
+	  esac
+	fi
+	;;
+
+      *.lo)
+	# Possibly a libtool object, so verify it.
+	if func_lalib_p "$file"; then
+
+	  # Read the .lo file
+	  func_source $dir/$name
+
+	  # Add PIC object to the list of files to remove.
+	  if test -n "$pic_object" && test none != "$pic_object"; then
+	    func_append rmfiles " $dir/$pic_object"
+	  fi
+
+	  # Add non-PIC object to the list of files to remove.
+	  if test -n "$non_pic_object" && test none != "$non_pic_object"; then
+	    func_append rmfiles " $dir/$non_pic_object"
+	  fi
+	fi
+	;;
+
+      *)
+	if test clean = "$opt_mode"; then
+	  noexename=$name
+	  case $file in
+	  *.exe)
+	    func_stripname '' '.exe' "$file"
+	    file=$func_stripname_result
+	    func_stripname '' '.exe' "$name"
+	    noexename=$func_stripname_result
+	    # $file with .exe has already been added to rmfiles,
+	    # add $file without .exe
+	    func_append rmfiles " $file"
+	    ;;
+	  esac
+	  # Do a test to see if this is a libtool program.
+	  if func_ltwrapper_p "$file"; then
+	    if func_ltwrapper_executable_p "$file"; then
+	      func_ltwrapper_scriptname "$file"
+	      relink_command=
+	      func_source $func_ltwrapper_scriptname_result
+	      func_append rmfiles " $func_ltwrapper_scriptname_result"
+	    else
+	      relink_command=
+	      func_source $dir/$noexename
+	    fi
+
+	    # note $name still contains .exe if it was in $file originally
+	    # as does the version of $file that was added into $rmfiles
+	    func_append rmfiles " $odir/$name $odir/${name}S.$objext"
+	    if test yes = "$fast_install" && test -n "$relink_command"; then
+	      func_append rmfiles " $odir/lt-$name"
+	    fi
+	    if test "X$noexename" != "X$name"; then
+	      func_append rmfiles " $odir/lt-$noexename.c"
+	    fi
+	  fi
+	fi
+	;;
+      esac
+      func_show_eval "$RM $rmfiles" 'exit_status=1'
+    done
+
+    # Try to remove the $objdir's in the directories where we deleted files
+    for dir in $rmdirs; do
+      if test -d "$dir"; then
+	func_show_eval "rmdir $dir >/dev/null 2>&1"
+      fi
+    done
+
+    exit $exit_status
+}
+
+if test uninstall = "$opt_mode" || test clean = "$opt_mode"; then
+  func_mode_uninstall ${1+"$@"}
+fi
+
+test -z "$opt_mode" && {
+  help=$generic_help
+  func_fatal_help "you must specify a MODE"
+}
+
+test -z "$exec_cmd" && \
+  func_fatal_help "invalid operation mode '$opt_mode'"
+
+if test -n "$exec_cmd"; then
+  eval exec "$exec_cmd"
+  exit $EXIT_FAILURE
+fi
+
+exit $exit_status
+
+
+# The TAGs below are defined such that we never get into a situation
+# where we disable both kinds of libraries.  Given conflicting
+# choices, we go for a static library, that is the most portable,
+# since we can't tell whether shared libraries were disabled because
+# the user asked for that or because the platform doesn't support
+# them.  This is particularly important on AIX, because we don't
+# support having both static and shared libraries enabled at the same
+# time on that platform, so we default to a shared-only configuration.
+# If a disable-shared tag is given, we'll fallback to a static-only
+# configuration.  But we'll never go from static-only to shared-only.
+
+# ### BEGIN LIBTOOL TAG CONFIG: disable-shared
+build_libtool_libs=no
+build_old_libs=yes
+# ### END LIBTOOL TAG CONFIG: disable-shared
+
+# ### BEGIN LIBTOOL TAG CONFIG: disable-static
+build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac`
+# ### END LIBTOOL TAG CONFIG: disable-static
+
+# Local Variables:
+# mode:shell-script
+# sh-indentation:2
+# End:
Index: applgrid/tags/applgrid-01-06-36/README
===================================================================
--- applgrid/tags/applgrid-01-06-36/README	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/README	(revision 2010)
@@ -0,0 +1,252 @@
+APPLgrid 1.6.15
+---------------
+
+Report bugs to sutt@cern.ch
+
+Contents
+--------
+
+1. Getting started - installation
+2. Using the code
+3. Useful utilities
+4. Renormalization and Factorization Scales
+5. Generating your own grids
+6. Combining grids
+
+
+
+1. Getting started - installation
+---------------------------------
+
+To install ...
+
+ ./configure
+ make clean
+ make
+ make install
+
+These are the standard targets, so the library and headers 
+will be installed in the usual default locations. To install 
+them somewhere else, change the location with 
+
+ ./configure --prefix=<my_installation_path>
+ 
+
+You can use the option 
+
+ --without-root
+
+to compile without support for the CERN ROOT histogramming 
+package.
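+
+For example, a build without ROOT support, installed under a user-chosen 
+prefix (the path below is only an illustration), could be configured with 
+
+ ./configure --prefix=$HOME/applgrid --without-root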
+
+NB: if you use grid files ending in the ".root" extension, it
+is assumed that the files are ROOT files.
+
+By convention, the APPLgrid native file format uses the extension
+.appl, although the user is essentially free to choose their own 
+extension, as long as it is not ".root".
+
+2. Using the code
+-----------------
+
+In APPLgrid, each cross section has its own grid, stored in its 
+own grid file. For the convolution, you call the grid constructor, 
+specifying which file to use. 
+
+For instance, a set of jet measurements with 5 different differential 
+cross sections, each corresponding to a different rapidity interval, 
+will have 5 separate grids - one for each rapidity interval.
+
+After installing the APPLgrid code itself, everything needed for 
+reproducing a particular cross section is encapsulated in the grid 
+itself - there is no need to download any additional code specific 
+to that cross section.
+
+As a result, since all information is encoded within the APPLgrid grids 
+themselves, it is enough simply to link against the APPLgrid code and 
+call the convolution method for the appropriate grid,
+
+      // read the grid
+      appl::grid grid_eta1("atlas-incljets06-eta1.appl");
+      
+      // perform the convolution
+      std::vector< double > xsec_eta1 = grid_eta1.vconvolute( evolvepdf_, alphasqcd_ );
+    
+
+The convolution will use whichever PDF and alphas routines are passed 
+into it. 
+
+If you have multiple PDFs set up, no additional code modification 
+is required. For multiple grids, simply create new instances, specifying 
+the appropriate grid file in the constructor. For example
+
+      /// load the grids
+      appl::grid grid_eta1("atlas-incljets06-eta1.appl");
+      appl::grid grid_eta2("atlas-incljets06-eta2.appl");
+      
+      /// perform the convolutions
+      std::vector< double > xsec_eta1 = grid_eta1.vconvolute( evolvepdf_, alphasqcd_ );
+      std::vector< double > xsec_eta2 = grid_eta2.vconvolute( evolvepdf_, alphasqcd_ );
+    
+
+These latest downloadable grids require no additional scaling and, in 
+addition, include the non-perturbative (or additional) bin-by-bin corrections 
+discussed in the relevant papers.
+
+By default, the convolution will return just the NLO cross section without 
+applying the additional bin-by-bin corrections. In order to obtain the complete 
+cross section including the bin-by-bin corrections, the class method (shown here 
+for the grid grid_eta1)
+
+  grid_eta1.setApplyCorrections(true);
+
+should be called before performing the convolution. All subsequent convolutions 
+will also apply the corrections. To disable this for grid grid_eta1 call
+
+  grid_eta1.setApplyCorrections(false);
+
+and subsequent convolutions will not apply the corrections. 
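+
+As a minimal, self-contained sketch of the above (the grid file name is taken 
+from the earlier examples; the evolvepdf_ and alphasqcd_ declarations are 
+assumptions and should match whatever your PDF interface actually provides, 
+as declared for vconvolute in appl_grid.h):
+
+      #include <vector>
+      #include "appl_grid/appl_grid.h"
+
+      // Assumed to be supplied by the user's PDF interface (e.g. an
+      // LHAPDF-style fortran wrapper); check the prototypes expected by
+      // vconvolute in appl_grid.h.
+      extern "C" void   evolvepdf_( const double& x, const double& Q, double* xf );
+      extern "C" double alphasqcd_( const double& Q );
+
+      int main() {
+        appl::grid grid_eta1( "atlas-incljets06-eta1.appl" );  // grid file, as above
+        grid_eta1.setApplyCorrections( true );   // include the bin-by-bin corrections
+        std::vector<double> xsec = grid_eta1.vconvolute( evolvepdf_, alphasqcd_ );
+        return 0;
+      }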
+
+
+
+3. Useful utilities
+-------------------
+There is the useful
+
+      applgrid-config
+
+utility, which provides the locations of the installation directory, useful include 
+and linker flags, etc., eg
+
+      % applgrid-config --help
+      applgrid-config: configuration tool for the APPLgrid
+      fast cross section convolution code
+      http://projects.hepforge.org/applgrid/
+      
+      Usage: applgrid-config [[--help|-h] | [--prefix] | [...]]
+      Options:
+      --help | -h    : this help
+      
+      --prefix       : installation prefix (cf. autoconf)
+      --incdir       : path to the APPLgrid header directory
+      --libdir       : path to the APPLgrid library directory
+      --cxxflags     : compiler flags for the C preprocessor
+      --ldflags      : compiler flags for the linker just for c code
+      --ldfflags     : compiler flags for the linker including the fortan interface
+      --share        : path to APPLgrid pdf conbination config files
+
+      --version      : release version number
+      
+    
+
+For general compilation and linkage with the user's own code, the header and 
+library paths can be specified to the compiler and linker using the 
+
+  applgrid-config 
+
+utility as follows
+
+      % applgrid-config --ldflags
+      -L/usr/local/lib -lAPPLgrid ...  -L/user/local/lib -lhoppet_
+      
+      % applgrid-config --cxxflags
+      -I/usr/local/include -pthread -m64 -I/cern/root/include
+    
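+For example, a user's program myprog.cxx (the file name here is only a 
+placeholder) can then be compiled and linked in a single step with 
+
+      % g++ `applgrid-config --cxxflags` myprog.cxx `applgrid-config --ldflags` -o myprog
+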
+In addition there is 
+
+  applgrid-convert
+
+which can convert between "root" and "appl" grid file formats.
+
+4. Renormalization and Factorization Scales
+-------------------------------------------
+For grids containing cross sections up to NLO, the cross sections can be computed 
+with different scale factors for the renormalisation scale, ie muR, 2muR, 4muR etc. 
+
+Any arbitrary numerical scaling factor can be specified by the user, although 
+changes to use a different scale, ie pT rather than \hat{s} etc, are not 
+currently possible. 
+
+For changes to the factorisation scale, again any arbitrary numerical factor 
+can be specified by the user, although in this case APPLgrid uses the hoppet 
+package, so hoppet needs to be installed before APPLgrid.
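+
+As a sketch only, continuing the grid_eta1 example from section 2: the exact 
+vconvolute overload used below is an assumption and should be checked against 
+the declarations in the installed appl_grid.h, but a scale variation might be 
+requested along the following lines
+
+      // nloops, rscale and fscale arguments assumed as below; verify against
+      // the vconvolute prototypes in appl_grid.h for your APPLgrid version.
+      int    nloops = 1;     // perturbative order flag (assumed: 1 selects NLO)
+      double rscale = 2.0;   // multiply the renormalisation scale muR by 2
+      double fscale = 0.5;   // multiply the factorisation scale muF by 0.5 (needs hoppet)
+      std::vector<double> xsec_scaled =
+          grid_eta1.vconvolute( evolvepdf_, alphasqcd_, nloops, rscale, fscale );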
+
+
+5. Generating your own grids
+----------------------------
+This is not for the faint of heart. It is important to remember that generating 
+a grid using a calculation will be no less trivial that running the calculation 
+itself. 
+
+Installing and running a typical NLO calculation is often far from trivial, so 
+while still not completely trivial, we have tried to take some of the pain out
+of installing, and it some casses, we believe it is often easier to install and 
+use the applgrid versions of the calcualtion code than the standard versions.
+
+Having said this, for any significant issues, we can be contacted viw email at 
+the applgrid web site applgrid.hepforge.org
+
+In principle, the technique for generating a grid is to run the applgrid 
+enabled calculation twice:
+
+The first time is used purely to fill the phase space so that it can be optimised.
+
+This means that enough weights must be generated to provide a good coverage of 
+the phase space, but not as many as would be needed for a full calculation.
+
+Following this, simply run the calculation again, with the full statistics. At 
+the start of the second run, the grid will be read in, and the phase space optimised, 
+then filled during the calculation.
+
+Following this, the grid can be used in the fast convolution.
+
+6. Combining grids
+------------------
+Should you prefer to divide your full statistics run into many different jobs, 
+running simultaneously, the grids can be added using the 
+
+  applgrid-combine 
+
+utility, which is simple to use, with some basic instructions available ... 
+
+  % applgrid-combine --help
+  Usage: applgrid-combine [OPTIONS] -o output_grid.appl  input_grid.appl [input_grid1.appl ... input_gridN.appl]
+
+    APPLgrid 'applgrid-combine' adds APPLgrid 1.4.28 grid files together into a single grid
+
+  Configuration: 
+      -o filename   	name of output grid (filename required)
+
+  Options: 
+      -s, --scale value	scale output grid by value, 
+          --verbose 	display grid documentation during add
+      -v, --version 	displays the APPLgrid version
+      -h, --help    	display this help
+
+  See http://applgrid.hepforge.org for more details
+
+  Report bugs to <sutt@cern.ch>
+
+
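+For example (the grid file names here are only placeholders), three grids 
+produced by parallel full statistics runs could be combined into a single 
+grid with 
+
+  % applgrid-combine -o combined.appl run1.appl run2.appl run3.appl
+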
+If you wish to add grids together in this way - as when adding histograms 
+together - it is important that the ranges and bin limits of the internal 
+structures are the same.
+
+To ensure this, all the jobs to produce the grids that will be added must 
+start with the *same* grid, so that the optimised grid will be identical 
+in each case.
+
+This means the approach is:
+
+ - run the calculation *once* to fill the phase space
+ - duplicate the resulting grid
+
+ - use the copies of this grid as input for any number of 
+   full statistics runs
+
+ - add the resulting grids from the full statistics runs together 
+   using the applgrid-combine utility
+
+MS 
+   
Index: applgrid/tags/applgrid-01-06-36/config.sub
===================================================================
--- applgrid/tags/applgrid-01-06-36/config.sub	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/config.sub	(revision 2010)
@@ -0,0 +1,1807 @@
+#! /bin/sh
+# Configuration validation subroutine script.
+#   Copyright 1992-2014 Free Software Foundation, Inc.
+
+timestamp='2014-12-03'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that
+# program.  This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
+
+
+# Please send patches to <config-patches@gnu.org>.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support.  The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+#	CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+#	CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS
+       $0 [OPTION] ALIAS
+
+Canonicalize a configuration name.
+
+Operation modes:
+  -h, --help         print this help, then exit
+  -t, --time-stamp   print date of last modification, then exit
+  -v, --version      print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright 1992-2014 Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions.  There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+  case $1 in
+    --time-stamp | --time* | -t )
+       echo "$timestamp" ; exit ;;
+    --version | -v )
+       echo "$version" ; exit ;;
+    --help | --h* | -h )
+       echo "$usage"; exit ;;
+    -- )     # Stop option processing
+       shift; break ;;
+    - )	# Use stdin as input.
+       break ;;
+    -* )
+       echo "$me: invalid option $1$help"
+       exit 1 ;;
+
+    *local*)
+       # First pass through any local machine types.
+       echo $1
+       exit ;;
+
+    * )
+       break ;;
+  esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+    exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+    exit 1;;
+esac
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
+case $maybe_os in
+  nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
+  linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
+  knetbsd*-gnu* | netbsd*-gnu* | \
+  kopensolaris*-gnu* | \
+  storm-chaos* | os2-emx* | rtmk-nova*)
+    os=-$maybe_os
+    basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+    ;;
+  android-linux)
+    os=-linux-android
+    basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
+    ;;
+  *)
+    basic_machine=`echo $1 | sed 's/-[^-]*$//'`
+    if [ $basic_machine != $1 ]
+    then os=`echo $1 | sed 's/.*-/-/'`
+    else os=; fi
+    ;;
+esac
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work.  We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+	-sun*os*)
+		# Prevent following clause from handling this invalid input.
+		;;
+	-dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+	-att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+	-unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+	-convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+	-c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+	-harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+	-apple | -axis | -knuth | -cray | -microblaze*)
+		os=
+		basic_machine=$1
+		;;
+	-bluegene*)
+		os=-cnk
+		;;
+	-sim | -cisco | -oki | -wec | -winbond)
+		os=
+		basic_machine=$1
+		;;
+	-scout)
+		;;
+	-wrs)
+		os=-vxworks
+		basic_machine=$1
+		;;
+	-chorusos*)
+		os=-chorusos
+		basic_machine=$1
+		;;
+	-chorusrdb)
+		os=-chorusrdb
+		basic_machine=$1
+		;;
+	-hiux*)
+		os=-hiuxwe2
+		;;
+	-sco6)
+		os=-sco5v6
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco5)
+		os=-sco3.2v5
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco4)
+		os=-sco3.2v4
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco3.2.[4-9]*)
+		os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco3.2v[4-9]*)
+		# Don't forget version if it is 3.2v4 or newer.
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco5v6*)
+		# Don't forget version if it is 3.2v4 or newer.
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco*)
+		os=-sco3.2v2
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-udk*)
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-isc)
+		os=-isc2.2
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-clix*)
+		basic_machine=clipper-intergraph
+		;;
+	-isc*)
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-lynx*178)
+		os=-lynxos178
+		;;
+	-lynx*5)
+		os=-lynxos5
+		;;
+	-lynx*)
+		os=-lynxos
+		;;
+	-ptx*)
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+		;;
+	-windowsnt*)
+		os=`echo $os | sed -e 's/windowsnt/winnt/'`
+		;;
+	-psos*)
+		os=-psos
+		;;
+	-mint | -mint[0-9]*)
+		basic_machine=m68k-atari
+		os=-mint
+		;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+	# Recognize the basic CPU types without company name.
+	# Some are omitted here because they have special meanings below.
+	1750a | 580 \
+	| a29k \
+	| aarch64 | aarch64_be \
+	| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
+	| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
+	| am33_2.0 \
+	| arc | arceb \
+	| arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \
+	| avr | avr32 \
+	| be32 | be64 \
+	| bfin \
+	| c4x | c8051 | clipper \
+	| d10v | d30v | dlx | dsp16xx \
+	| epiphany \
+	| fido | fr30 | frv \
+	| h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+	| hexagon \
+	| i370 | i860 | i960 | ia64 \
+	| ip2k | iq2000 \
+	| k1om \
+	| le32 | le64 \
+	| lm32 \
+	| m32c | m32r | m32rle | m68000 | m68k | m88k \
+	| maxq | mb | microblaze | microblazeel | mcore | mep | metag \
+	| mips | mipsbe | mipseb | mipsel | mipsle \
+	| mips16 \
+	| mips64 | mips64el \
+	| mips64octeon | mips64octeonel \
+	| mips64orion | mips64orionel \
+	| mips64r5900 | mips64r5900el \
+	| mips64vr | mips64vrel \
+	| mips64vr4100 | mips64vr4100el \
+	| mips64vr4300 | mips64vr4300el \
+	| mips64vr5000 | mips64vr5000el \
+	| mips64vr5900 | mips64vr5900el \
+	| mipsisa32 | mipsisa32el \
+	| mipsisa32r2 | mipsisa32r2el \
+	| mipsisa32r6 | mipsisa32r6el \
+	| mipsisa64 | mipsisa64el \
+	| mipsisa64r2 | mipsisa64r2el \
+	| mipsisa64r6 | mipsisa64r6el \
+	| mipsisa64sb1 | mipsisa64sb1el \
+	| mipsisa64sr71k | mipsisa64sr71kel \
+	| mipsr5900 | mipsr5900el \
+	| mipstx39 | mipstx39el \
+	| mn10200 | mn10300 \
+	| moxie \
+	| mt \
+	| msp430 \
+	| nds32 | nds32le | nds32be \
+	| nios | nios2 | nios2eb | nios2el \
+	| ns16k | ns32k \
+	| open8 | or1k | or1knd | or32 \
+	| pdp10 | pdp11 | pj | pjl \
+	| powerpc | powerpc64 | powerpc64le | powerpcle \
+	| pyramid \
+	| riscv32 | riscv64 \
+	| rl78 | rx \
+	| score \
+	| sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+	| sh64 | sh64le \
+	| sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
+	| sparcv8 | sparcv9 | sparcv9b | sparcv9v \
+	| spu \
+	| tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
+	| ubicom32 \
+	| v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
+	| visium \
+	| we32k \
+	| x86 | xc16x | xstormy16 | xtensa \
+	| z8k | z80)
+		basic_machine=$basic_machine-unknown
+		;;
+	c54x)
+		basic_machine=tic54x-unknown
+		;;
+	c55x)
+		basic_machine=tic55x-unknown
+		;;
+	c6x)
+		basic_machine=tic6x-unknown
+		;;
+	leon|leon[3-9])
+		basic_machine=sparc-$basic_machine
+		;;
+	m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip)
+		basic_machine=$basic_machine-unknown
+		os=-none
+		;;
+	m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+		;;
+	ms1)
+		basic_machine=mt-unknown
+		;;
+
+	strongarm | thumb | xscale)
+		basic_machine=arm-unknown
+		;;
+	xgate)
+		basic_machine=$basic_machine-unknown
+		os=-none
+		;;
+	xscaleeb)
+		basic_machine=armeb-unknown
+		;;
+
+	xscaleel)
+		basic_machine=armel-unknown
+		;;
+
+	# We use `pc' rather than `unknown'
+	# because (1) that's what they normally are, and
+	# (2) the word "unknown" tends to confuse beginning users.
+	i*86 | x86_64)
+	  basic_machine=$basic_machine-pc
+	  ;;
+	# Object if more than one company name word.
+	*-*-*)
+		echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+		exit 1
+		;;
+	# Recognize the basic CPU types with company name.
+	580-* \
+	| a29k-* \
+	| aarch64-* | aarch64_be-* \
+	| alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
+	| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
+	| alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \
+	| arm-*  | armbe-* | armle-* | armeb-* | armv*-* \
+	| avr-* | avr32-* \
+	| be32-* | be64-* \
+	| bfin-* | bs2000-* \
+	| c[123]* | c30-* | [cjt]90-* | c4x-* \
+	| c8051-* | clipper-* | craynv-* | cydra-* \
+	| d10v-* | d30v-* | dlx-* \
+	| elxsi-* \
+	| f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
+	| h8300-* | h8500-* \
+	| hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+	| hexagon-* \
+	| i*86-* | i860-* | i960-* | ia64-* \
+	| ip2k-* | iq2000-* \
+	| k1om-* \
+	| le32-* | le64-* \
+	| lm32-* \
+	| m32c-* | m32r-* | m32rle-* \
+	| m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
+	| m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
+	| microblaze-* | microblazeel-* \
+	| mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
+	| mips16-* \
+	| mips64-* | mips64el-* \
+	| mips64octeon-* | mips64octeonel-* \
+	| mips64orion-* | mips64orionel-* \
+	| mips64r5900-* | mips64r5900el-* \
+	| mips64vr-* | mips64vrel-* \
+	| mips64vr4100-* | mips64vr4100el-* \
+	| mips64vr4300-* | mips64vr4300el-* \
+	| mips64vr5000-* | mips64vr5000el-* \
+	| mips64vr5900-* | mips64vr5900el-* \
+	| mipsisa32-* | mipsisa32el-* \
+	| mipsisa32r2-* | mipsisa32r2el-* \
+	| mipsisa32r6-* | mipsisa32r6el-* \
+	| mipsisa64-* | mipsisa64el-* \
+	| mipsisa64r2-* | mipsisa64r2el-* \
+	| mipsisa64r6-* | mipsisa64r6el-* \
+	| mipsisa64sb1-* | mipsisa64sb1el-* \
+	| mipsisa64sr71k-* | mipsisa64sr71kel-* \
+	| mipsr5900-* | mipsr5900el-* \
+	| mipstx39-* | mipstx39el-* \
+	| mmix-* \
+	| mt-* \
+	| msp430-* \
+	| nds32-* | nds32le-* | nds32be-* \
+	| nios-* | nios2-* | nios2eb-* | nios2el-* \
+	| none-* | np1-* | ns16k-* | ns32k-* \
+	| open8-* \
+	| or1k*-* \
+	| orion-* \
+	| pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
+	| powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
+	| pyramid-* \
+	| rl78-* | romp-* | rs6000-* | rx-* \
+	| sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
+	| shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
+	| sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
+	| sparclite-* \
+	| sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \
+	| tahoe-* \
+	| tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
+	| tile*-* \
+	| tron-* \
+	| ubicom32-* \
+	| v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
+	| vax-* \
+	| visium-* \
+	| we32k-* \
+	| x86-* | x86_64-* | xc16x-* | xps100-* \
+	| xstormy16-* | xtensa*-* \
+	| ymp-* \
+	| z8k-* | z80-*)
+		;;
+	# Recognize the basic CPU types without company name, with glob match.
+	xtensa*)
+		basic_machine=$basic_machine-unknown
+		;;
+	# Recognize the various machine names and aliases which stand
+	# for a CPU type and a company and sometimes even an OS.
+	386bsd)
+		basic_machine=i386-unknown
+		os=-bsd
+		;;
+	3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+		basic_machine=m68000-att
+		;;
+	3b*)
+		basic_machine=we32k-att
+		;;
+	a29khif)
+		basic_machine=a29k-amd
+		os=-udi
+		;;
+	abacus)
+		basic_machine=abacus-unknown
+		;;
+	adobe68k)
+		basic_machine=m68010-adobe
+		os=-scout
+		;;
+	alliant | fx80)
+		basic_machine=fx80-alliant
+		;;
+	altos | altos3068)
+		basic_machine=m68k-altos
+		;;
+	am29k)
+		basic_machine=a29k-none
+		os=-bsd
+		;;
+	amd64)
+		basic_machine=x86_64-pc
+		;;
+	amd64-*)
+		basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	amdahl)
+		basic_machine=580-amdahl
+		os=-sysv
+		;;
+	amiga | amiga-*)
+		basic_machine=m68k-unknown
+		;;
+	amigaos | amigados)
+		basic_machine=m68k-unknown
+		os=-amigaos
+		;;
+	amigaunix | amix)
+		basic_machine=m68k-unknown
+		os=-sysv4
+		;;
+	apollo68)
+		basic_machine=m68k-apollo
+		os=-sysv
+		;;
+	apollo68bsd)
+		basic_machine=m68k-apollo
+		os=-bsd
+		;;
+	aros)
+		basic_machine=i386-pc
+		os=-aros
+		;;
+	aux)
+		basic_machine=m68k-apple
+		os=-aux
+		;;
+	balance)
+		basic_machine=ns32k-sequent
+		os=-dynix
+		;;
+	blackfin)
+		basic_machine=bfin-unknown
+		os=-linux
+		;;
+	blackfin-*)
+		basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
+		os=-linux
+		;;
+	bluegene*)
+		basic_machine=powerpc-ibm
+		os=-cnk
+		;;
+	c54x-*)
+		basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	c55x-*)
+		basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	c6x-*)
+		basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	c90)
+		basic_machine=c90-cray
+		os=-unicos
+		;;
+	cegcc)
+		basic_machine=arm-unknown
+		os=-cegcc
+		;;
+	convex-c1)
+		basic_machine=c1-convex
+		os=-bsd
+		;;
+	convex-c2)
+		basic_machine=c2-convex
+		os=-bsd
+		;;
+	convex-c32)
+		basic_machine=c32-convex
+		os=-bsd
+		;;
+	convex-c34)
+		basic_machine=c34-convex
+		os=-bsd
+		;;
+	convex-c38)
+		basic_machine=c38-convex
+		os=-bsd
+		;;
+	cray | j90)
+		basic_machine=j90-cray
+		os=-unicos
+		;;
+	craynv)
+		basic_machine=craynv-cray
+		os=-unicosmp
+		;;
+	cr16 | cr16-*)
+		basic_machine=cr16-unknown
+		os=-elf
+		;;
+	crds | unos)
+		basic_machine=m68k-crds
+		;;
+	crisv32 | crisv32-* | etraxfs*)
+		basic_machine=crisv32-axis
+		;;
+	cris | cris-* | etrax*)
+		basic_machine=cris-axis
+		;;
+	crx)
+		basic_machine=crx-unknown
+		os=-elf
+		;;
+	da30 | da30-*)
+		basic_machine=m68k-da30
+		;;
+	decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+		basic_machine=mips-dec
+		;;
+	decsystem10* | dec10*)
+		basic_machine=pdp10-dec
+		os=-tops10
+		;;
+	decsystem20* | dec20*)
+		basic_machine=pdp10-dec
+		os=-tops20
+		;;
+	delta | 3300 | motorola-3300 | motorola-delta \
+	      | 3300-motorola | delta-motorola)
+		basic_machine=m68k-motorola
+		;;
+	delta88)
+		basic_machine=m88k-motorola
+		os=-sysv3
+		;;
+	dicos)
+		basic_machine=i686-pc
+		os=-dicos
+		;;
+	djgpp)
+		basic_machine=i586-pc
+		os=-msdosdjgpp
+		;;
+	dpx20 | dpx20-*)
+		basic_machine=rs6000-bull
+		os=-bosx
+		;;
+	dpx2* | dpx2*-bull)
+		basic_machine=m68k-bull
+		os=-sysv3
+		;;
+	ebmon29k)
+		basic_machine=a29k-amd
+		os=-ebmon
+		;;
+	elxsi)
+		basic_machine=elxsi-elxsi
+		os=-bsd
+		;;
+	encore | umax | mmax)
+		basic_machine=ns32k-encore
+		;;
+	es1800 | OSE68k | ose68k | ose | OSE)
+		basic_machine=m68k-ericsson
+		os=-ose
+		;;
+	fx2800)
+		basic_machine=i860-alliant
+		;;
+	genix)
+		basic_machine=ns32k-ns
+		;;
+	gmicro)
+		basic_machine=tron-gmicro
+		os=-sysv
+		;;
+	go32)
+		basic_machine=i386-pc
+		os=-go32
+		;;
+	h3050r* | hiux*)
+		basic_machine=hppa1.1-hitachi
+		os=-hiuxwe2
+		;;
+	h8300hms)
+		basic_machine=h8300-hitachi
+		os=-hms
+		;;
+	h8300xray)
+		basic_machine=h8300-hitachi
+		os=-xray
+		;;
+	h8500hms)
+		basic_machine=h8500-hitachi
+		os=-hms
+		;;
+	harris)
+		basic_machine=m88k-harris
+		os=-sysv3
+		;;
+	hp300-*)
+		basic_machine=m68k-hp
+		;;
+	hp300bsd)
+		basic_machine=m68k-hp
+		os=-bsd
+		;;
+	hp300hpux)
+		basic_machine=m68k-hp
+		os=-hpux
+		;;
+	hp3k9[0-9][0-9] | hp9[0-9][0-9])
+		basic_machine=hppa1.0-hp
+		;;
+	hp9k2[0-9][0-9] | hp9k31[0-9])
+		basic_machine=m68000-hp
+		;;
+	hp9k3[2-9][0-9])
+		basic_machine=m68k-hp
+		;;
+	hp9k6[0-9][0-9] | hp6[0-9][0-9])
+		basic_machine=hppa1.0-hp
+		;;
+	hp9k7[0-79][0-9] | hp7[0-79][0-9])
+		basic_machine=hppa1.1-hp
+		;;
+	hp9k78[0-9] | hp78[0-9])
+		# FIXME: really hppa2.0-hp
+		basic_machine=hppa1.1-hp
+		;;
+	hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+		# FIXME: really hppa2.0-hp
+		basic_machine=hppa1.1-hp
+		;;
+	hp9k8[0-9][13679] | hp8[0-9][13679])
+		basic_machine=hppa1.1-hp
+		;;
+	hp9k8[0-9][0-9] | hp8[0-9][0-9])
+		basic_machine=hppa1.0-hp
+		;;
+	hppa-next)
+		os=-nextstep3
+		;;
+	hppaosf)
+		basic_machine=hppa1.1-hp
+		os=-osf
+		;;
+	hppro)
+		basic_machine=hppa1.1-hp
+		os=-proelf
+		;;
+	i370-ibm* | ibm*)
+		basic_machine=i370-ibm
+		;;
+	i*86v32)
+		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+		os=-sysv32
+		;;
+	i*86v4*)
+		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+		os=-sysv4
+		;;
+	i*86v)
+		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+		os=-sysv
+		;;
+	i*86sol2)
+		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+		os=-solaris2
+		;;
+	i386mach)
+		basic_machine=i386-mach
+		os=-mach
+		;;
+	i386-vsta | vsta)
+		basic_machine=i386-unknown
+		os=-vsta
+		;;
+	iris | iris4d)
+		basic_machine=mips-sgi
+		case $os in
+		    -irix*)
+			;;
+		    *)
+			os=-irix4
+			;;
+		esac
+		;;
+	isi68 | isi)
+		basic_machine=m68k-isi
+		os=-sysv
+		;;
+	leon-*|leon[3-9]-*)
+		basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'`
+		;;
+	m68knommu)
+		basic_machine=m68k-unknown
+		os=-linux
+		;;
+	m68knommu-*)
+		basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
+		os=-linux
+		;;
+	m88k-omron*)
+		basic_machine=m88k-omron
+		;;
+	magnum | m3230)
+		basic_machine=mips-mips
+		os=-sysv
+		;;
+	merlin)
+		basic_machine=ns32k-utek
+		os=-sysv
+		;;
+	microblaze*)
+		basic_machine=microblaze-xilinx
+		;;
+	mingw64)
+		basic_machine=x86_64-pc
+		os=-mingw64
+		;;
+	mingw32)
+		basic_machine=i686-pc
+		os=-mingw32
+		;;
+	mingw32ce)
+		basic_machine=arm-unknown
+		os=-mingw32ce
+		;;
+	miniframe)
+		basic_machine=m68000-convergent
+		;;
+	*mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+		basic_machine=m68k-atari
+		os=-mint
+		;;
+	mips3*-*)
+		basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+		;;
+	mips3*)
+		basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
+		;;
+	monitor)
+		basic_machine=m68k-rom68k
+		os=-coff
+		;;
+	morphos)
+		basic_machine=powerpc-unknown
+		os=-morphos
+		;;
+	moxiebox)
+		basic_machine=moxie-unknown
+		os=-moxiebox
+		;;
+	msdos)
+		basic_machine=i386-pc
+		os=-msdos
+		;;
+	ms1-*)
+		basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
+		;;
+	msys)
+		basic_machine=i686-pc
+		os=-msys
+		;;
+	mvs)
+		basic_machine=i370-ibm
+		os=-mvs
+		;;
+	nacl)
+		basic_machine=le32-unknown
+		os=-nacl
+		;;
+	ncr3000)
+		basic_machine=i486-ncr
+		os=-sysv4
+		;;
+	netbsd386)
+		basic_machine=i386-unknown
+		os=-netbsd
+		;;
+	netwinder)
+		basic_machine=armv4l-rebel
+		os=-linux
+		;;
+	news | news700 | news800 | news900)
+		basic_machine=m68k-sony
+		os=-newsos
+		;;
+	news1000)
+		basic_machine=m68030-sony
+		os=-newsos
+		;;
+	news-3600 | risc-news)
+		basic_machine=mips-sony
+		os=-newsos
+		;;
+	necv70)
+		basic_machine=v70-nec
+		os=-sysv
+		;;
+	next | m*-next )
+		basic_machine=m68k-next
+		case $os in
+		    -nextstep* )
+			;;
+		    -ns2*)
+		      os=-nextstep2
+			;;
+		    *)
+		      os=-nextstep3
+			;;
+		esac
+		;;
+	nh3000)
+		basic_machine=m68k-harris
+		os=-cxux
+		;;
+	nh[45]000)
+		basic_machine=m88k-harris
+		os=-cxux
+		;;
+	nindy960)
+		basic_machine=i960-intel
+		os=-nindy
+		;;
+	mon960)
+		basic_machine=i960-intel
+		os=-mon960
+		;;
+	nonstopux)
+		basic_machine=mips-compaq
+		os=-nonstopux
+		;;
+	np1)
+		basic_machine=np1-gould
+		;;
+	neo-tandem)
+		basic_machine=neo-tandem
+		;;
+	nse-tandem)
+		basic_machine=nse-tandem
+		;;
+	nsr-tandem)
+		basic_machine=nsr-tandem
+		;;
+	op50n-* | op60c-*)
+		basic_machine=hppa1.1-oki
+		os=-proelf
+		;;
+	openrisc | openrisc-*)
+		basic_machine=or32-unknown
+		;;
+	os400)
+		basic_machine=powerpc-ibm
+		os=-os400
+		;;
+	OSE68000 | ose68000)
+		basic_machine=m68000-ericsson
+		os=-ose
+		;;
+	os68k)
+		basic_machine=m68k-none
+		os=-os68k
+		;;
+	pa-hitachi)
+		basic_machine=hppa1.1-hitachi
+		os=-hiuxwe2
+		;;
+	paragon)
+		basic_machine=i860-intel
+		os=-osf
+		;;
+	parisc)
+		basic_machine=hppa-unknown
+		os=-linux
+		;;
+	parisc-*)
+		basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
+		os=-linux
+		;;
+	pbd)
+		basic_machine=sparc-tti
+		;;
+	pbb)
+		basic_machine=m68k-tti
+		;;
+	pc532 | pc532-*)
+		basic_machine=ns32k-pc532
+		;;
+	pc98)
+		basic_machine=i386-pc
+		;;
+	pc98-*)
+		basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	pentium | p5 | k5 | k6 | nexgen | viac3)
+		basic_machine=i586-pc
+		;;
+	pentiumpro | p6 | 6x86 | athlon | athlon_*)
+		basic_machine=i686-pc
+		;;
+	pentiumii | pentium2 | pentiumiii | pentium3)
+		basic_machine=i686-pc
+		;;
+	pentium4)
+		basic_machine=i786-pc
+		;;
+	pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
+		basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	pentiumpro-* | p6-* | 6x86-* | athlon-*)
+		basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
+		basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	pentium4-*)
+		basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	pn)
+		basic_machine=pn-gould
+		;;
+	power)	basic_machine=power-ibm
+		;;
+	ppc | ppcbe)	basic_machine=powerpc-unknown
+		;;
+	ppc-* | ppcbe-*)
+		basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	ppcle | powerpclittle | ppc-le | powerpc-little)
+		basic_machine=powerpcle-unknown
+		;;
+	ppcle-* | powerpclittle-*)
+		basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	ppc64)	basic_machine=powerpc64-unknown
+		;;
+	ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	ppc64le | powerpc64little | ppc64-le | powerpc64-little)
+		basic_machine=powerpc64le-unknown
+		;;
+	ppc64le-* | powerpc64little-*)
+		basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	ps2)
+		basic_machine=i386-ibm
+		;;
+	pw32)
+		basic_machine=i586-unknown
+		os=-pw32
+		;;
+	rdos | rdos64)
+		basic_machine=x86_64-pc
+		os=-rdos
+		;;
+	rdos32)
+		basic_machine=i386-pc
+		os=-rdos
+		;;
+	rom68k)
+		basic_machine=m68k-rom68k
+		os=-coff
+		;;
+	rm[46]00)
+		basic_machine=mips-siemens
+		;;
+	rtpc | rtpc-*)
+		basic_machine=romp-ibm
+		;;
+	s390 | s390-*)
+		basic_machine=s390-ibm
+		;;
+	s390x | s390x-*)
+		basic_machine=s390x-ibm
+		;;
+	sa29200)
+		basic_machine=a29k-amd
+		os=-udi
+		;;
+	sb1)
+		basic_machine=mipsisa64sb1-unknown
+		;;
+	sb1el)
+		basic_machine=mipsisa64sb1el-unknown
+		;;
+	sde)
+		basic_machine=mipsisa32-sde
+		os=-elf
+		;;
+	sei)
+		basic_machine=mips-sei
+		os=-seiux
+		;;
+	sequent)
+		basic_machine=i386-sequent
+		;;
+	sh)
+		basic_machine=sh-hitachi
+		os=-hms
+		;;
+	sh5el)
+		basic_machine=sh5le-unknown
+		;;
+	sh64)
+		basic_machine=sh64-unknown
+		;;
+	sparclite-wrs | simso-wrs)
+		basic_machine=sparclite-wrs
+		os=-vxworks
+		;;
+	sps7)
+		basic_machine=m68k-bull
+		os=-sysv2
+		;;
+	spur)
+		basic_machine=spur-unknown
+		;;
+	st2000)
+		basic_machine=m68k-tandem
+		;;
+	stratus)
+		basic_machine=i860-stratus
+		os=-sysv4
+		;;
+	strongarm-* | thumb-*)
+		basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	sun2)
+		basic_machine=m68000-sun
+		;;
+	sun2os3)
+		basic_machine=m68000-sun
+		os=-sunos3
+		;;
+	sun2os4)
+		basic_machine=m68000-sun
+		os=-sunos4
+		;;
+	sun3os3)
+		basic_machine=m68k-sun
+		os=-sunos3
+		;;
+	sun3os4)
+		basic_machine=m68k-sun
+		os=-sunos4
+		;;
+	sun4os3)
+		basic_machine=sparc-sun
+		os=-sunos3
+		;;
+	sun4os4)
+		basic_machine=sparc-sun
+		os=-sunos4
+		;;
+	sun4sol2)
+		basic_machine=sparc-sun
+		os=-solaris2
+		;;
+	sun3 | sun3-*)
+		basic_machine=m68k-sun
+		;;
+	sun4)
+		basic_machine=sparc-sun
+		;;
+	sun386 | sun386i | roadrunner)
+		basic_machine=i386-sun
+		;;
+	sv1)
+		basic_machine=sv1-cray
+		os=-unicos
+		;;
+	symmetry)
+		basic_machine=i386-sequent
+		os=-dynix
+		;;
+	t3e)
+		basic_machine=alphaev5-cray
+		os=-unicos
+		;;
+	t90)
+		basic_machine=t90-cray
+		os=-unicos
+		;;
+	tile*)
+		basic_machine=$basic_machine-unknown
+		os=-linux-gnu
+		;;
+	tx39)
+		basic_machine=mipstx39-unknown
+		;;
+	tx39el)
+		basic_machine=mipstx39el-unknown
+		;;
+	toad1)
+		basic_machine=pdp10-xkl
+		os=-tops20
+		;;
+	tower | tower-32)
+		basic_machine=m68k-ncr
+		;;
+	tpf)
+		basic_machine=s390x-ibm
+		os=-tpf
+		;;
+	udi29k)
+		basic_machine=a29k-amd
+		os=-udi
+		;;
+	ultra3)
+		basic_machine=a29k-nyu
+		os=-sym1
+		;;
+	v810 | necv810)
+		basic_machine=v810-nec
+		os=-none
+		;;
+	vaxv)
+		basic_machine=vax-dec
+		os=-sysv
+		;;
+	vms)
+		basic_machine=vax-dec
+		os=-vms
+		;;
+	vpp*|vx|vx-*)
+		basic_machine=f301-fujitsu
+		;;
+	vxworks960)
+		basic_machine=i960-wrs
+		os=-vxworks
+		;;
+	vxworks68)
+		basic_machine=m68k-wrs
+		os=-vxworks
+		;;
+	vxworks29k)
+		basic_machine=a29k-wrs
+		os=-vxworks
+		;;
+	w65*)
+		basic_machine=w65-wdc
+		os=-none
+		;;
+	w89k-*)
+		basic_machine=hppa1.1-winbond
+		os=-proelf
+		;;
+	xbox)
+		basic_machine=i686-pc
+		os=-mingw32
+		;;
+	xps | xps100)
+		basic_machine=xps100-honeywell
+		;;
+	xscale-* | xscalee[bl]-*)
+		basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'`
+		;;
+	ymp)
+		basic_machine=ymp-cray
+		os=-unicos
+		;;
+	z8k-*-coff)
+		basic_machine=z8k-unknown
+		os=-sim
+		;;
+	z80-*-coff)
+		basic_machine=z80-unknown
+		os=-sim
+		;;
+	none)
+		basic_machine=none-none
+		os=-none
+		;;
+
+# Here we handle the default manufacturer of certain CPU types.  It is in
+# some cases the only manufacturer, in others, it is the most popular.
+	w89k)
+		basic_machine=hppa1.1-winbond
+		;;
+	op50n)
+		basic_machine=hppa1.1-oki
+		;;
+	op60c)
+		basic_machine=hppa1.1-oki
+		;;
+	romp)
+		basic_machine=romp-ibm
+		;;
+	mmix)
+		basic_machine=mmix-knuth
+		;;
+	rs6000)
+		basic_machine=rs6000-ibm
+		;;
+	vax)
+		basic_machine=vax-dec
+		;;
+	pdp10)
+		# there are many clones, so DEC is not a safe bet
+		basic_machine=pdp10-unknown
+		;;
+	pdp11)
+		basic_machine=pdp11-dec
+		;;
+	we32k)
+		basic_machine=we32k-att
+		;;
+	sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
+		basic_machine=sh-unknown
+		;;
+	sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
+		basic_machine=sparc-sun
+		;;
+	cydra)
+		basic_machine=cydra-cydrome
+		;;
+	orion)
+		basic_machine=orion-highlevel
+		;;
+	orion105)
+		basic_machine=clipper-highlevel
+		;;
+	mac | mpw | mac-mpw)
+		basic_machine=m68k-apple
+		;;
+	pmac | pmac-mpw)
+		basic_machine=powerpc-apple
+		;;
+	*-unknown)
+		# Make sure to match an already-canonicalized machine name.
+		;;
+	*)
+		echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+		exit 1
+		;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+	*-digital*)
+		basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+		;;
+	*-commodore*)
+		basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+		;;
+	*)
+		;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if [ x"$os" != x"" ]
+then
+case $os in
+	# First match some system type aliases
+	# that might get confused with valid system types.
+	# -solaris* is a basic system type, with this one exception.
+	-auroraux)
+		os=-auroraux
+		;;
+	-solaris1 | -solaris1.*)
+		os=`echo $os | sed -e 's|solaris1|sunos4|'`
+		;;
+	-solaris)
+		os=-solaris2
+		;;
+	-svr4*)
+		os=-sysv4
+		;;
+	-unixware*)
+		os=-sysv4.2uw
+		;;
+	-gnu/linux*)
+		os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+		;;
+	# First accept the basic system types.
+	# The portable systems come first.
+	# Each alternative MUST END IN A *, to match a version number.
+	# -sysv* is not here because it comes later, after sysvr4.
+	-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+	      | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
+	      | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
+	      | -sym* | -kopensolaris* | -plan9* \
+	      | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+	      | -aos* | -aros* \
+	      | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+	      | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+	      | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
+	      | -bitrig* | -openbsd* | -solidbsd* \
+	      | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
+	      | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+	      | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+	      | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+	      | -chorusos* | -chorusrdb* | -cegcc* \
+	      | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+	      | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
+	      | -linux-newlib* | -linux-musl* | -linux-uclibc* \
+	      | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \
+	      | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
+	      | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
+	      | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
+	      | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
+	      | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
+	      | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
+	      | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* | -tirtos*)
+	# Remember, each alternative MUST END IN *, to match a version number.
+		;;
+	-qnx*)
+		case $basic_machine in
+		    x86-* | i*86-*)
+			;;
+		    *)
+			os=-nto$os
+			;;
+		esac
+		;;
+	-nto-qnx*)
+		;;
+	-nto*)
+		os=`echo $os | sed -e 's|nto|nto-qnx|'`
+		;;
+	-sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
+	      | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
+	      | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+		;;
+	-mac*)
+		os=`echo $os | sed -e 's|mac|macos|'`
+		;;
+	-linux-dietlibc)
+		os=-linux-dietlibc
+		;;
+	-linux*)
+		os=`echo $os | sed -e 's|linux|linux-gnu|'`
+		;;
+	-sunos5*)
+		os=`echo $os | sed -e 's|sunos5|solaris2|'`
+		;;
+	-sunos6*)
+		os=`echo $os | sed -e 's|sunos6|solaris3|'`
+		;;
+	-opened*)
+		os=-openedition
+		;;
+	-os400*)
+		os=-os400
+		;;
+	-wince*)
+		os=-wince
+		;;
+	-osfrose*)
+		os=-osfrose
+		;;
+	-osf*)
+		os=-osf
+		;;
+	-utek*)
+		os=-bsd
+		;;
+	-dynix*)
+		os=-bsd
+		;;
+	-acis*)
+		os=-aos
+		;;
+	-atheos*)
+		os=-atheos
+		;;
+	-syllable*)
+		os=-syllable
+		;;
+	-386bsd)
+		os=-bsd
+		;;
+	-ctix* | -uts*)
+		os=-sysv
+		;;
+	-nova*)
+		os=-rtmk-nova
+		;;
+	-ns2 )
+		os=-nextstep2
+		;;
+	-nsk*)
+		os=-nsk
+		;;
+	# Preserve the version number of sinix5.
+	-sinix5.*)
+		os=`echo $os | sed -e 's|sinix|sysv|'`
+		;;
+	-sinix*)
+		os=-sysv4
+		;;
+	-tpf*)
+		os=-tpf
+		;;
+	-triton*)
+		os=-sysv3
+		;;
+	-oss*)
+		os=-sysv3
+		;;
+	-svr4)
+		os=-sysv4
+		;;
+	-svr3)
+		os=-sysv3
+		;;
+	-sysvr4)
+		os=-sysv4
+		;;
+	# This must come after -sysvr4.
+	-sysv*)
+		;;
+	-ose*)
+		os=-ose
+		;;
+	-es1800*)
+		os=-ose
+		;;
+	-xenix)
+		os=-xenix
+		;;
+	-*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+		os=-mint
+		;;
+	-aros*)
+		os=-aros
+		;;
+	-zvmoe)
+		os=-zvmoe
+		;;
+	-dicos*)
+		os=-dicos
+		;;
+	-nacl*)
+		;;
+	-none)
+		;;
+	*)
+		# Get rid of the `-' at the beginning of $os.
+		os=`echo $os | sed 's/[^-]*-//'`
+		echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+		exit 1
+		;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine or put another way, the most popular os provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system.  Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
+
+case $basic_machine in
+	score-*)
+		os=-elf
+		;;
+	spu-*)
+		os=-elf
+		;;
+	*-acorn)
+		os=-riscix1.2
+		;;
+	arm*-rebel)
+		os=-linux
+		;;
+	arm*-semi)
+		os=-aout
+		;;
+	c4x-* | tic4x-*)
+		os=-coff
+		;;
+	c8051-*)
+		os=-elf
+		;;
+	hexagon-*)
+		os=-elf
+		;;
+	tic54x-*)
+		os=-coff
+		;;
+	tic55x-*)
+		os=-coff
+		;;
+	tic6x-*)
+		os=-coff
+		;;
+	# This must come before the *-dec entry.
+	pdp10-*)
+		os=-tops20
+		;;
+	pdp11-*)
+		os=-none
+		;;
+	*-dec | vax-*)
+		os=-ultrix4.2
+		;;
+	m68*-apollo)
+		os=-domain
+		;;
+	i386-sun)
+		os=-sunos4.0.2
+		;;
+	m68000-sun)
+		os=-sunos3
+		;;
+	m68*-cisco)
+		os=-aout
+		;;
+	mep-*)
+		os=-elf
+		;;
+	mips*-cisco)
+		os=-elf
+		;;
+	mips*-*)
+		os=-elf
+		;;
+	or32-*)
+		os=-coff
+		;;
+	*-tti)	# must be before sparc entry or we get the wrong os.
+		os=-sysv3
+		;;
+	sparc-* | *-sun)
+		os=-sunos4.1.1
+		;;
+	*-be)
+		os=-beos
+		;;
+	*-haiku)
+		os=-haiku
+		;;
+	*-ibm)
+		os=-aix
+		;;
+	*-knuth)
+		os=-mmixware
+		;;
+	*-wec)
+		os=-proelf
+		;;
+	*-winbond)
+		os=-proelf
+		;;
+	*-oki)
+		os=-proelf
+		;;
+	*-hp)
+		os=-hpux
+		;;
+	*-hitachi)
+		os=-hiux
+		;;
+	i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+		os=-sysv
+		;;
+	*-cbm)
+		os=-amigaos
+		;;
+	*-dg)
+		os=-dgux
+		;;
+	*-dolphin)
+		os=-sysv3
+		;;
+	m68k-ccur)
+		os=-rtu
+		;;
+	m88k-omron*)
+		os=-luna
+		;;
+	*-next )
+		os=-nextstep
+		;;
+	*-sequent)
+		os=-ptx
+		;;
+	*-crds)
+		os=-unos
+		;;
+	*-ns)
+		os=-genix
+		;;
+	i370-*)
+		os=-mvs
+		;;
+	*-next)
+		os=-nextstep3
+		;;
+	*-gould)
+		os=-sysv
+		;;
+	*-highlevel)
+		os=-bsd
+		;;
+	*-encore)
+		os=-bsd
+		;;
+	*-sgi)
+		os=-irix
+		;;
+	*-siemens)
+		os=-sysv4
+		;;
+	*-masscomp)
+		os=-rtu
+		;;
+	f30[01]-fujitsu | f700-fujitsu)
+		os=-uxpv
+		;;
+	*-rom68k)
+		os=-coff
+		;;
+	*-*bug)
+		os=-coff
+		;;
+	*-apple)
+		os=-macos
+		;;
+	*-atari*)
+		os=-mint
+		;;
+	*)
+		os=-none
+		;;
+esac
+fi
+
+# Here we handle the case where we know the os, and the CPU type, but not the
+# manufacturer.  We pick the logical manufacturer.
+vendor=unknown
+case $basic_machine in
+	*-unknown)
+		case $os in
+			-riscix*)
+				vendor=acorn
+				;;
+			-sunos*)
+				vendor=sun
+				;;
+			-cnk*|-aix*)
+				vendor=ibm
+				;;
+			-beos*)
+				vendor=be
+				;;
+			-hpux*)
+				vendor=hp
+				;;
+			-mpeix*)
+				vendor=hp
+				;;
+			-hiux*)
+				vendor=hitachi
+				;;
+			-unos*)
+				vendor=crds
+				;;
+			-dgux*)
+				vendor=dg
+				;;
+			-luna*)
+				vendor=omron
+				;;
+			-genix*)
+				vendor=ns
+				;;
+			-mvs* | -opened*)
+				vendor=ibm
+				;;
+			-os400*)
+				vendor=ibm
+				;;
+			-ptx*)
+				vendor=sequent
+				;;
+			-tpf*)
+				vendor=ibm
+				;;
+			-vxsim* | -vxworks* | -windiss*)
+				vendor=wrs
+				;;
+			-aux*)
+				vendor=apple
+				;;
+			-hms*)
+				vendor=hitachi
+				;;
+			-mpw* | -macos*)
+				vendor=apple
+				;;
+			-*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+				vendor=atari
+				;;
+			-vos*)
+				vendor=stratus
+				;;
+		esac
+		basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+		;;
+esac
+
+echo $basic_machine$os
+exit
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:

Property changes on: applgrid/tags/applgrid-01-06-36/config.sub
___________________________________________________________________
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Index: applgrid/tags/applgrid-01-06-36/config.h.in
===================================================================
--- applgrid/tags/applgrid-01-06-36/config.h.in	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/config.h.in	(revision 2010)
@@ -0,0 +1,103 @@
+/* config.h.in.  Generated from configure.ac by autoheader.  */
+
+/* Define to 1 if you have the `exp' function. */
+#undef HAVE_EXP
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#undef HAVE_GETTIMEOFDAY
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#undef HAVE_INTTYPES_H
+
+/* Define to 1 if your system has a GNU libc compatible `malloc' function, and
+   to 0 otherwise. */
+#undef HAVE_MALLOC
+
+/* Define to 1 if you have the <memory.h> header file. */
+#undef HAVE_MEMORY_H
+
+/* Define to 1 if you have the `mkdir' function. */
+#undef HAVE_MKDIR
+
+/* Define to 1 if you have the `pow' function. */
+#undef HAVE_POW
+
+/* Define to 1 if you have the `sqrt' function. */
+#undef HAVE_SQRT
+
+/* Define to 1 if `stat' has the bug that it succeeds when given the
+   zero-length file name argument. */
+#undef HAVE_STAT_EMPTY_STRING_BUG
+
+/* Define to 1 if stdbool.h conforms to C99. */
+#undef HAVE_STDBOOL_H
+
+/* Define to 1 if you have the <stddef.h> header file. */
+#undef HAVE_STDDEF_H
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#undef HAVE_STDINT_H
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#undef HAVE_STDLIB_H
+
+/* Define to 1 if you have the <strings.h> header file. */
+#undef HAVE_STRINGS_H
+
+/* Define to 1 if you have the <string.h> header file. */
+#undef HAVE_STRING_H
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#undef HAVE_SYS_STAT_H
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#undef HAVE_SYS_TIME_H
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#undef HAVE_SYS_TYPES_H
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#undef HAVE_UNISTD_H
+
+/* Define to 1 if the system has the type `_Bool'. */
+#undef HAVE__BOOL
+
+/* Define to 1 if `lstat' dereferences a symlink specified with a trailing
+   slash. */
+#undef LSTAT_FOLLOWS_SLASHED_SYMLINK
+
+/* Define to the address where bug reports for this package should be sent. */
+#undef PACKAGE_BUGREPORT
+
+/* Define to the full name of this package. */
+#undef PACKAGE_NAME
+
+/* Define to the full name and version of this package. */
+#undef PACKAGE_STRING
+
+/* Define to the one symbol short name of this package. */
+#undef PACKAGE_TARNAME
+
+/* Define to the version of this package. */
+#undef PACKAGE_VERSION
+
+/* Define to 1 if you have the ANSI C header files. */
+#undef STDC_HEADERS
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#undef TIME_WITH_SYS_TIME
+
+/* Define to empty if `const' does not conform to ANSI C. */
+#undef const
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+   calls it, or to nothing if 'inline' is not supported under any name.  */
+#ifndef __cplusplus
+#undef inline
+#endif
+
+/* Define to rpl_malloc if the replacement function should be used. */
+#undef malloc
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+#undef size_t
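The HAVE_* tokens above are only a template: configure rewrites config.h.in into config.h, turning each #undef into a #define for whatever it detects on the build machine. As a minimal sketch, not part of the patch, client code would typically guard on those macros as below; the specific macros used are taken from the list above.

#include "config.h"

#include <iostream>

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

int main() {
#ifdef HAVE_GETTIMEOFDAY
  struct timeval tv;
  gettimeofday( &tv, NULL );       // detected by configure, so safe to call
  std::cout << "seconds since the epoch: " << tv.tv_sec << std::endl;
#else
  std::cout << "gettimeofday() was not detected by configure" << std::endl;
#endif
  return 0;
}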
Index: applgrid/tags/applgrid-01-06-36/appl_grid/appl_TH1D.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/appl_TH1D.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/appl_TH1D.h	(revision 2010)
@@ -0,0 +1,175 @@
+/* emacs: this is -*- c++ -*- */
+/**
+ **   @file    appl_TH1D.h
+ **       
+ **            wrapper around our own histogram class to make it look 
+ **            and behave like the egregious ROOT class        
+ **                   
+ **   @author  sutt
+ **   @date    Wed 30 Dec 2020 21:12:51 GMT
+ **
+ **   $Id: appl_TH1D.h, v0.0   Wed 30 Dec 2020 21:12:51 GMT sutt $
+ **
+ **   Copyright (C) 2020 sutt (sutt@cern.ch)    
+ **
+ **/
+
+
+#ifndef  APPL_TH1D_H
+#define  APPL_TH1D_H
+
+#include <iostream>
+#include "histogram.h"
+
+#ifdef USEROOT
+
+#include "TH1D.h"
+#include "TDirectory.h"
+#endif
+
+namespace appl { 
+class TH1D;
+}
+
+#ifdef USEROOT
+TH1D*       convert( const  appl::TH1D* h );
+appl::TH1D* convert( const        TH1D* h );
+#endif
+
+namespace appl { 
+
+class TH1D : public histogram {
+
+public:
+
+  TH1D() { }   
+
+  TH1D( const histogram& h ) : histogram(h) { } 
+
+  TH1D( const std::string& name, const std::string& title, int nbins, double lo, double hi ) : 
+    histogram( name, nbins, lo, hi ), mtitle(title) { } 
+
+  TH1D( const std::string& name, const std::string& title, int nbins, const double* limits ) : 
+    histogram( name, nbins, limits ), mtitle(title) { } 
+
+  virtual ~TH1D() { } 
+
+  double GetBinContent( int i ) const { return my[i-1]; } 
+  double   GetBinError( int i ) const { return mye[i-1]; } 
+
+  void SetBinContent( int i, double d ) { my[i-1]=d; } 
+  void   SetBinError( int i, double d ) { mye[i-1]=d; } 
+
+  void SetBinContents( const std::vector<double>& v ) { if ( v.size() ==  my.size() )  my = v; }
+  void   SetBinErrors( const std::vector<double>& v ) { if ( v.size() == mye.size() ) mye = v; } 
+
+  double GetBinLowEdge( int i ) const { return mxlimits[i-1]; }
+  double  GetBinCenter( int i ) const { return mx[i-1]; }
+
+  double GetBinWidth( int i ) const { return mxlimits[i]-mxlimits[i-1]; } 
+
+  int GetNbinsX() const { return int(mx.size()); } 
+  
+  void Fill( double x, double w=1 ) { fill(x, w ); }    
+
+  int FindBin( double x ) const { return index(x)+1; }
+
+  void SetDirectory( int =0 ) const { } 
+
+  TH1D* Clone( const std::string& n="" ) { 
+    TH1D* h = new TH1D(*this);
+    h->SetName( n );
+    return h;
+  }
+
+  void Scale( double d ) { operator*=(d); }  
+
+  void Add( const TH1D& h ) { operator+=(h); }
+
+  void Sumw2() const { } 
+
+  void        SetName( const std::string& n ) { mname=n; } 
+  std::string GetName() const { return mname; } 
+
+  std::string GetTitle() const { return mtitle; } 
+  void        SetTitle( const std::string& s ) { mtitle=s; } 
+
+  void Reset() { 
+    size_t n = my.size();
+    my  = std::vector<double>(n,0);
+    mye = std::vector<double>(n,0);
+  } 
+
+#ifdef USEROOT
+  void Write() const { 
+    ::TH1D* h = convert( this );
+    h->Write(); 
+    h->SetDirectory(0);
+    /// make sure this doesn't leak memory ...  
+    delete h;
+  } 
+#endif
+
+protected:
+
+  std::string mtitle;
+  
+};
+
+}
+
+// converters to and from ROOT histograms
+
+#ifdef USEROOT
+inline appl::TH1D* convert( const ::TH1D* h ) { 
+  std::vector<double> limits(h->GetNbinsX()+1);
+  for ( size_t i=0 ; i<limits.size() ; i++ ) limits[i] = h->GetBinLowEdge(i+1);
+  appl::TH1D* hist = new appl::TH1D( h->GetName(), h->GetTitle(), limits.size()-1, &limits[0] );
+  for ( size_t i=0 ; i<hist->size() ; i++ ) { 
+    hist->y(i)  = h->GetBinContent(i+1);
+    hist->ye(i) = h->GetBinError(i+1);
+  }
+  return hist;
+}
+
+
+inline ::TH1D* convert( const appl::TH1D* h ) { 
+  ::TH1D* hist = new ::TH1D( h->name().c_str(), h->GetTitle().c_str(), h->size(), &(h->xlimits()[0]) );
+  for ( size_t i=0 ; i<h->size() ; i++ ) { 
+    hist->SetBinContent(i+1, h->y(i) ); 
+    hist->SetBinError(i+1, h->ye(i) ); 
+  }
+  return hist;
+}
+
+
+inline ::TH1D* convert( const appl::TH1D& h ) { 
+  ::TH1D* hist = new ::TH1D( h.name().c_str(), h.GetTitle().c_str(), h.size(), &(h.xlimits()[0]) );
+  for ( size_t i=h.size() ; i-- ; ) { 
+    hist->SetBinContent(i+1, h.y(i) ); 
+    hist->SetBinError(i+1, h.ye(i) ); 
+  }
+  return hist;
+}
+#endif
+
+
+inline std::ostream& operator<<( std::ostream& s, const appl::TH1D& a ) { 
+  return s << *dynamic_cast<const histogram*>(&a);
+}
+
+
+typedef appl::TH1D aTH1D;
+
+
+#endif  // APPL_TH1D_H 
+
+
+
+
+
+
+
+
+
+
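To make the intent of the wrapper above concrete, here is a short illustrative sketch, not part of the patch, showing the ROOT-like interface in use. It assumes the appl_grid/ include path used elsewhere in these headers and that the histogram base class provides the fill() and index() calls the wrapper delegates to; bin numbering starts at 1, as in ROOT.

#include <iostream>
#include "appl_grid/appl_TH1D.h"

int main() {
  appl::TH1D h( "xsec", "toy differential cross section", 4, 0.0, 100.0 );

  h.Fill( 12.5 );          // unweighted entry, lands in bin 1
  h.Fill( 37.5, 2.0 );     // weighted entry, lands in bin 2
  h.Scale( 0.5 );          // scale contents and errors

  for ( int i=1 ; i<=h.GetNbinsX() ; i++ ) {
    std::cout << "bin " << i
              << "  [" << h.GetBinLowEdge(i)
              << ", "  << h.GetBinLowEdge(i)+h.GetBinWidth(i) << ")"
              << "  content " << h.GetBinContent(i) << std::endl;
  }
  return 0;
}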
Index: applgrid/tags/applgrid-01-06-36/appl_grid/vector_stream.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/vector_stream.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/vector_stream.h	(revision 2010)
@@ -0,0 +1,36 @@
+/* emacs: this is -*- c++ -*- */
+/**
+ **   @file    vector_stream.h        
+ **                   
+ **   @author  sutt
+ **   @date    Fri  1 Jan 2021 11:37:27 GMT
+ **
+ **   $Id: vector_stream.h, v0.0   Fri  1 Jan 2021 11:37:27 GMT sutt $
+ **
+ **   Copyright (C) 2021 sutt (sutt@cern.ch)    
+ **
+ **/
+
+
+#ifndef  VECTOR_STREAM_H
+#define  VECTOR_STREAM_H
+
+#include <iostream>
+#include <vector>
+
+template<typename T>
+std::ostream& operator<<(std::ostream& s, const std::vector<T>& v) {
+  for ( unsigned i=0 ; i<v.size() ; i++ ) s << "\t" << v[i];
+  return s;
+}
+
+#endif  // VECTOR_STREAM_H 
+
+
+
+
+
+
+
+
+
+
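A minimal usage sketch, not in the patch, of the templated stream operator above; it prints any std::vector<T> as tab-separated values.

#include <iostream>
#include <vector>
#include "appl_grid/vector_stream.h"

int main() {
  std::vector<double> edges = { 0.0, 25.0, 50.0, 100.0 };
  std::cout << "bin edges:" << edges << std::endl;   // each value preceded by a tab
  return 0;
}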
Index: applgrid/tags/applgrid-01-06-36/appl_grid/stream_vector.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/stream_vector.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/stream_vector.h	(revision 2010)
@@ -0,0 +1,137 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **   @file    stream_vector.h        
+ **                   
+ **            template class to provide a key :: vector pair, 
+ **            including serialisation to, and deserialisation 
+ **            from, a flat vector 
+ **  
+ **   @author  sutt
+ **   @date    Mon 28 Dec 2020 21:23:22 GMT
+ **
+ **   $Id: stream_vector.h, v0.0   Mon 28 Dec 2020 21:23:22 GMT sutt $
+ **
+ **   Copyright (C) 2020 sutt (sutt@cern.ch)    
+ **
+ **/
+
+
+#ifndef  STREAM_VECTOR_H
+#define  STREAM_VECTOR_H
+
+#include <iostream>
+
+#include "serialise_base.h"
+#include "appl_grid/serialisable.h"
+
+
+template<typename T>
+class stream_vector : public serialisable {
+
+public:
+
+  /// constructors ...
+  
+  stream_vector() { }
+
+  stream_vector( const std::string& name ) : mname(name) { }
+ 
+  stream_vector( const std::string& name, const std::vector<T>& v ) : mname(name), mpayload(v) { } 
+
+  /// constructor from a stream 
+  stream_vector( const std::vector<SB::TYPE>& v ) {  deserialise( v ); }
+
+  stream_vector( const stream_vector& sv ) :
+    mname(sv.mname),
+    mpayload(sv.mpayload) 
+  { } 
+
+  stream_vector& operator=( stream_vector&& sv ) { 
+    std::swap( mname, sv.mname );
+    std::swap( mpayload, sv.mpayload );
+    return *this;
+  }
+
+  stream_vector& operator=( const stream_vector& sv ) { 
+    mname    = sv.mname;
+    mpayload = sv.mpayload;
+    return *this;
+  }
+  
+
+  /// destructor
+
+  virtual ~stream_vector() { } 
+
+
+
+  /// accessors ...
+
+  std::string name() const { return mname; }
+
+  size_t size() const { return mpayload.size(); }
+
+  std::vector<T>&       payload()       { return mpayload; }
+  const std::vector<T>& payload() const { return mpayload; }
+
+  T  operator[]( int i ) const { return mpayload.at(i); }
+  T& operator[]( int i )       { return mpayload.at(i); }
+
+  T  at( int i ) const { return mpayload.at(i); }
+  T& at( int i )       { return mpayload.at(i); }
+
+  void push_back( const T& t ) { mpayload.push_back(t); }
+  void add( const T& t )       { mpayload.push_back(t); }
+
+  typename std::vector<T>::iterator begin() { return mpayload.begin(); }
+  typename std::vector<T>::iterator end()   { return mpayload.end(); }
+
+  typename std::vector<T>::const_iterator begin() const { return mpayload.begin(); }
+  typename std::vector<T>::const_iterator end()   const { return mpayload.end(); }
+
+
+  /// serialise ...
+
+  void serialise_internal( std::vector<SB::TYPE>& s ) const { 
+    SB::serialise( s, name() );
+    SB::serialise( s, payload() );
+  }
+
+
+  /// deserialise ...
+
+  void deserialise_internal( std::vector<SB::TYPE>::const_iterator& itr ) { 
+    SB::deserialise( itr, mname ); 
+    SB::deserialise( itr, mpayload );
+  }
+  
+ 
+private:
+
+  std::string    mname;
+  std::vector<T> mpayload;
+
+};
+
+#include "appl_grid/vector_stream.h"
+
+
+template<typename T> 
+std::ostream& operator<<( std::ostream& s, const stream_vector<T>& sv ) { 
+  s << "key: " << sv.name() << std::endl;
+  for ( size_t i=0 ; i<sv.size() ; i++ ) s << "  " << sv[i] << std::endl;
+  return s;
+}
+
+
+#endif  // STREAM_VECTOR_H 
+
+
+
+
+
+
+
+
+
+
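The following sketch, not part of the patch, shows the intended round trip for the key :: vector pair above. It assumes that serialise() returning a std::vector<SB::TYPE> is provided by the serialisable base class, as the appl::file code later in this patch suggests.

#include <iostream>
#include <vector>
#include "appl_grid/stream_vector.h"

int main() {
  stream_vector<double> weights( "weights" );
  weights.push_back( 1.5 );
  weights.push_back( 2.5 );

  // flatten to the generic SB::TYPE stream (name followed by payload) ...
  std::vector<SB::TYPE> stream = weights.serialise();

  // ... and rebuild an equivalent object from that stream
  stream_vector<double> copy( stream );
  std::cout << copy;      // prints "key: weights" and then the two entries

  return 0;
}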
Index: applgrid/tags/applgrid-01-06-36/appl_grid/appl_pdf.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/appl_pdf.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/appl_pdf.h	(revision 2010)
@@ -0,0 +1,247 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    appl_pdf.h
+ **
+ **     @brief   pdf transform functions header                  
+ **
+ **     @author  mark sutton
+ **     @date    Fri Dec 21 22:19:50 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: appl_pdf.h, v   Fri Dec 21 22:19:50 GMT 2007 sutt $
+ **
+ **/
+
+
+#ifndef __APPL_PDF_H
+#define __APPL_PDF_H
+
+#include <iostream>
+#include <fstream>
+#include <sstream>
+
+#include <vector> 
+#include <map> 
+#include <string> 
+
+#include <exception> 
+
+#include "vector_stream.h"
+
+namespace appl { 
+
+
+class appl_pdf;
+
+typedef std::map<const std::string, appl_pdf*> pdfmap;
+
+
+// this is a *maybe* nice class, a base class for pdf 
+// functions
+//
+// it has a virtual evaluate() method to be defined in 
+// the derived class, and a static std::map of the names of 
+// all instances of the derived classes
+//
+// when a new instance of the class is created, it 
+// automatically adds its name to the std::map, so the user 
+// doesn't need to worry about consistency, and it removes 
+// itself when the derived instance is deleted;
+// consequently, if you want to use pdfs added to the database, 
+// you need to make sure that they do not go out of scope 
+// NB: copying an appl_pdf can leave things in a mess, since 
+//     the names and appl_pdf pointers are kept in a map and 
+//     duplicate keys are not allowed, so the copy may never 
+//     have been added to the map, whereas the original was 
+//     added but was then removed when it went out of scope, 
+//     and things might not behave as you would expect if you 
+//     try to use the copy 
+ 
+
+class appl_pdf { 
+
+public:
+
+  // pdf error exception
+  class exception : public std::exception { 
+  public: 
+    exception(const std::string& s="") { std::cerr << what() << " " << s << std::endl; }; 
+    const char* what() const throw() { return "appl::appl_pdf::exception "; }
+  };
+  
+public:
+
+  /// constructor and destructor
+  /// include a flag to determine whether we want this appl_pdf registered 
+  /// directly, or defer, and register the lumi_pdf derived class - a needlessly 
+  /// complicated mechanism but nevertheless ...
+  appl_pdf(const std::string& name, bool base=false);
+
+  virtual ~appl_pdf();
+
+  /// retrieve an instance from the std::map 
+  static appl_pdf* getpdf(const std::string& s, bool printout=true);
+  
+  /// print out the pdf std::map
+  static void printmap(std::ostream& s=std::cout) {
+    pdfmap::iterator itr = __pdfmap.begin();
+    while ( itr!=__pdfmap.end() )  {
+      s << "pdfmap " << itr->first << "\t\t" << itr->second << std::endl;
+      itr++;
+    } 
+  }
+
+  /// initialise the factory  
+  static bool create_map(); 
+
+  virtual void evaluate(const double* fA, const double* fB, double* H) const = 0; 
+
+  virtual int decideSubProcess( const int , const int  ) const;
+
+  std::string   name() const { return m_name;  }
+
+  int     Nproc() const { return m_Nproc; } 
+  int     size()  const { return m_Nproc; } 
+
+
+
+  std::string rename(const std::string& name) { 
+    /// remove my entry from the std::map, and add me again with my new name
+    if ( __pdfmap.find(m_name)!=__pdfmap.end() ) { 
+      __pdfmap.erase(__pdfmap.find(m_name));
+    }
+    else { 
+      std::cout << "appl_pdf::rename() " << m_name << " not in std::map" << std::endl;
+    }
+    m_name = name;
+    addtopdfmap(m_name, this);
+    return m_name;
+  }
+
+
+  /// code to allow optional std::vector of subprocess contribution names
+
+  const std::vector<std::string>& subnames() const { return m_subnames; }
+
+  void addSubnames( const std::vector<std::string>& subnames ) { m_subnames = subnames; }
+
+  void  addSubname( const std::string& subname ) { 
+    if ( int(m_subnames.size())<m_Nproc-1 ) m_subnames.push_back(subname); 
+  }
+
+
+
+  /// is this a W+ or a W- pdf combination? or neither?
+  int getckmcharge() const { return m_ckmcharge; }
+
+  /// access the ckm matrices - if no matrices are required these std::vectors have 
+  /// zero size
+
+  const std::vector<double>&               getckmsum() const { return m_ckmsum; }
+  const std::vector<std::vector<double> >& getckm2()   const { return m_ckm2; }
+  const std::vector<std::vector<double> >& getckm()    const { return m_ckm; }
+  
+  /// set the ckm matrices from external values
+  void setckm( const std::vector<std::vector<double> >& ckm ); 
+  void setckm2( const std::vector<std::vector<double> >& ckm2 ); 
+
+  /// code to create the ckm matrices using the hardcoded default 
+  /// values if required
+  /// takes a bool input - if true creates the ckm for Wplus, 
+  /// false for Wminus
+  void make_ckm( bool Wp=true );
+
+  void SetNProc(int Nsub){ m_Nproc=Nsub; return;};
+
+  /// set some useful names for the different subprocesses
+  void setnames( const std::vector<std::string>& names) { m_names = names; } 
+  std::vector<std::string> getnames() const  { return m_names; } 
+
+  /// basic comparisons of two appl_pdfs - for the base class there is
+  /// little to be done except test the name - really, each derived 
+  /// class should implement its own test
+
+  virtual bool operator==( const appl_pdf& pdf ) const { return ( name() == pdf.name() );  }
+
+  virtual bool operator!=( const appl_pdf& pdf ) const { return !operator==(pdf); }
+  
+  std::vector<double>               ckmsum() const { return m_ckmsum; }
+  std::vector<std::vector<double> > ckm2()   const { return m_ckm2; } /// squared N x N matrix
+  std::vector<std::vector<double> > ckm()    const { return m_ckm; } 
+
+public:
+
+  static bool overwrites() { return ALLOW_OVERWRITES; }
+
+  static void overwrites( bool t ) { ALLOW_OVERWRITES = t; }
+
+protected:
+
+  /// search the path for configuration files
+  static  std::ifstream& openpdf( const std::string& filename ); 
+
+  static void addtopdfmap(const std::string& s, appl_pdf* f) { 
+    if ( __pdfmap.find(s)==__pdfmap.end() ) { 
+      __pdfmap.insert( pdfmap::value_type( s, f ) );
+      // std::cout << "appl_pdf::addtomap() registering " << s << " in std::map addr \t" << f << std::endl;
+    }
+    else { 
+
+      std::stringstream s_; 
+      s_ << "appl_pdf::addtopdfmap() " << s << " already in std::map\t0x" << __pdfmap.find(s)->second;
+
+      /// check if the pdf that is already in map is the same as the new pdf, 
+      /// if it is then do nothing, but if it is different only *then* throw an 
+      /// exception 
+      if ( (*f) != (*__pdfmap.find(s)->second) ) {
+	s_ << "\nappl_pdf::addtopdfmap() mismatch with appl_pdf in map";
+        throw exception( s_.str() );
+      }
+    }
+  }
+  
+protected:
+
+  int         m_Nproc;
+  std::string m_name;
+
+  std::vector<std::string> m_subnames;
+
+  /// ckm matrix related information 
+  /// W+, W- or neither?
+  int  m_ckmcharge;
+
+  // ckm matrices
+  std::vector<double>               m_ckmsum;
+  std::vector<std::vector<double> > m_ckm2; /// squared N x N matrix
+  std::vector<std::vector<double> > m_ckm;  /// simple 3 x 3
+
+  /// some strings for more useful name if required
+  std::vector<std::string>           m_names;
+
+  static pdfmap                     __pdfmap;
+  static std::vector<std::string>   __pdfpath;
+
+  static bool                       ALLOW_OVERWRITES;
+
+};
+
+}
+
+
+
+
+std::ostream& operator<<( std::ostream& s, const appl::appl_pdf& p );
+
+#endif  // __APPL_PDF_H 
+
+
+
+
+
+
+
+
+
+
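As an illustration of the self-registering mechanism described in the class comment above, here is a hedged sketch, not part of the patch, of a derived pdf combination. The constructor and evaluate() signatures are taken from the declaration; the subprocess combination itself, the 13-element LHAPDF-style flavour indexing, and the include path are assumptions for the example only.

#include <iostream>
#include "appl_grid/appl_pdf.h"

// a toy two-subprocess combination; the base class constructor registers
// the instance under the name "toy_pdf" in the static pdf map
class toy_pdf : public appl::appl_pdf {
public:
  toy_pdf() : appl::appl_pdf("toy_pdf") { SetNProc(2); }

  // fA, fB: parton densities for the two beams (assumed LHAPDF-style,
  // 13 flavours with the gluon at index 6); H: the Nproc combinations
  void evaluate( const double* fA, const double* fB, double* H ) const {
    H[0] = fA[6]*fB[6];                      // gluon-gluon
    H[1] = fA[7]*fB[5] + fA[5]*fB[7];        // a toy quark-antiquark combination
  }
};

// keep a long-lived instance so the entry stays in the map,
// then it can be retrieved by name elsewhere
static toy_pdf toy_instance;

void lookup_example() {
  appl::appl_pdf* pdf = appl::appl_pdf::getpdf( "toy_pdf" );
  if ( pdf ) std::cout << "found " << pdf->name()
                       << " with " << pdf->Nproc() << " subprocesses" << std::endl;
}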
Index: applgrid/tags/applgrid-01-06-36/appl_grid/simpletimer.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/simpletimer.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/simpletimer.h	(revision 2010)
@@ -0,0 +1,59 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    simpletimer.h
+ **
+ **     @brief   these functions have a precision of about 0.001 ms
+ **
+ **     @author  mark sutton
+ **     @date    Thu 22 Jan 2009 15:51:52 GMT 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: simpletimer.h, v0.0   Thu 22 Jan 2009 15:51:52 GMT sutt $
+ **
+ **/
+
+
+#ifndef SIMPLETIMER_H
+#define SIMPLETIMER_H
+
+#include <time.h>
+#include <sys/time.h>
+
+//#ifdef __cplusplus
+//extern "C" {
+//#endif
+
+
+inline struct timeval simpletimer_start(void) {
+  struct timeval start_time;
+  gettimeofday(&start_time, NULL);            
+  return start_time;
+}
+
+inline double simpletimer_stop(const struct timeval& start_time)
+{
+  struct timeval stop_time;
+  struct timeval diff_time;
+  gettimeofday(&stop_time, NULL);            
+  diff_time.tv_sec  = stop_time.tv_sec  - start_time.tv_sec;
+  diff_time.tv_usec = stop_time.tv_usec - start_time.tv_usec;
+  return (diff_time.tv_sec*1000.0) + (diff_time.tv_usec/1000.0);
+}
+
+
+//#ifdef __cplusplus
+//}
+//#endif
+
+#endif /* SIMPLETIMER_H */
+
+
+
+
+
+
+
+
+
+
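A short usage sketch, not in the patch, of the timer helpers above; simpletimer_stop() returns the elapsed time in milliseconds, so the printed value is directly comparable with the quoted ~0.001 ms precision.

#include <iostream>
#include <cmath>
#include "appl_grid/simpletimer.h"

int main() {
  struct timeval start = simpletimer_start();

  double sum = 0;
  for ( int i=0 ; i<1000000 ; i++ ) sum += std::sqrt( double(i) );   // something worth timing

  double ms = simpletimer_stop( start );
  std::cout << "loop took " << ms << " ms  (sum = " << sum << ")" << std::endl;

  return 0;
}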
Index: applgrid/tags/applgrid-01-06-36/appl_grid/appl_file.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/appl_file.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/appl_file.h	(revision 2010)
@@ -0,0 +1,168 @@
+/* emacs: this is -*- c++ -*- */
+/**
+ **   @file    appl_file.h        
+ **                   
+ **   @author  sutt
+ **   @date    Wed 30 Dec 2020 15:25:42 GMT
+ **
+ **   $Id: appl_file.h, v0.0   Wed 30 Dec 2020 15:25:42 GMT sutt $
+ **
+ **   Copyright (C) 2020 sutt (sutt@cern.ch)    
+ **
+ **/
+
+
+#ifndef  APPL_FILE_H
+#define  APPL_FILE_H
+
+#include <iostream>
+#include <zlib.h>   // gzFile, gzread, gzwrite, gzseek
+
+#include "appl_grid/serialise_base.h"
+#include "appl_grid/stream_vector.h"
+#include "appl_grid/file_index.h"
+
+// #include <filesystem>
+
+namespace appl {
+
+class file { 
+
+public: 
+
+  file( const std::string& name, const std::string& option="rb" ); 
+
+  ~file();
+
+  void Close();
+
+  void close() { Close(); }
+
+  /// accessors
+
+  std::string filename() const { return mfilename; }
+
+  bool isopen() const { return mopen; }
+
+  ///  provide the index
+
+  const file_index& index() const { return mindex; } 
+
+  /// object writing ...
+
+  template<typename T>
+  void Write( const T& t ) {
+    if ( !isopen() ) return;
+    std::vector<SB::TYPE> v = t.serialise();
+    size_t bytes = gzwrite( mfile, (void*)(&v[0]), sizeof(SB::TYPE)*v.size() );
+    if ( bytes!=sizeof(SB::TYPE)*v.size() ) std::cerr << "issue writing object " << t.name() << std::endl;
+    msize += bytes;
+    mindex.add( t.name(), bytes );
+  }
+
+
+  template<typename T>
+  void Write( const std::string& name, const std::vector<T>& t ) {
+    stream_vector<T> sv( name, t );
+    Write( sv );
+  }
+
+  /// object reading ...
+
+  template<typename T>
+  void Read( std::vector<T>& v ) {
+    if ( !isopen() ) return; 
+    double vsize = 0;
+    double Tsize = 0;
+    gzread( mfile, &vsize, sizeof(SB::TYPE) );
+    gzread( mfile, &Tsize, sizeof(SB::TYPE) );
+    v.resize(vsize);
+    gzread( mfile, &v[0], Tsize*vsize );
+  }
+
+
+
+  template<typename T>
+  void Read( T& t ) { 
+    if ( !isopen() ) return;
+    std::vector<SB::TYPE> v(2);
+    gzread( mfile, &v[0], sizeof(SB::TYPE)*2 );
+    v.resize( v[1] );
+    size_t bytes = gzread( mfile, &v[2], sizeof(SB::TYPE)*(v[1]-2) );
+    if ( bytes!=(sizeof(SB::TYPE)*(v[1]-2)) ) std::cerr << "issue reading object " << t.name() << std::endl;
+    t = T(v);
+  }
+
+
+  template<typename T>
+  T Read() {
+    T t; 
+    Read(t);
+    return t;
+  }
+
+  template<typename T> 
+  T Read( const std::string& name ) {
+    return Get<T>(name);
+  }
+
+  /// non-sequential access using the index ...
+  
+  template<typename T> 
+  T Get( const std::string& name ) {
+    if ( isopen() ) { 
+      file_index::entry e = mindex.find( name );
+      if ( e.size>0 ) { 
+	// gzrewind( mfile );
+	gzseek( mfile, e.offset, SEEK_SET );
+	return Read<T>();
+      }
+    }    
+    std::cerr << "WARNING: could not retrieve object: " << name << std::endl; 
+    return T();
+  }
+
+
+private:
+
+  /// filename
+  std::string mfilename;
+
+  /// options, read write, etc
+  std::string mopt;
+  
+  /// the actual file
+  gzFile      mfile;
+
+  /// is it open?
+  bool        mopen;
+
+  /// the number of bytes written to the file so far
+  size_t      msize;
+
+  /// file index 
+  file_index  mindex;
+  
+};
+
+}
+
+
+inline std::ostream& operator<<( std::ostream& s, const appl::file& t ) { 
+  s << "file: " << t.filename() << "\t";
+  if ( !t.isopen() ) s <<  " file is not open";
+  else s << t.index();
+  return s;
+}
+
+
+#endif  // APPL_FILE_H 
+
+
+
+
+
+
+
+
+
+
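A hedged sketch, not part of the patch, of the write-then-read cycle suggested by the interface above. The gzopen-style "wb" mode string, the file name, and the assumption that opening in the default "rb" mode restores the index used by Get<>() are illustrative only; the constructor and index handling live in the .cxx files, which are not shown here.

#include <iostream>
#include <vector>
#include "appl_grid/appl_file.h"

int main() {
  std::vector<double> edges = { 0.0, 25.0, 50.0, 100.0 };

  appl::file out( "toy.appl", "wb" );   // "wb": gzip write mode (assumed)
  out.Write( "edges", edges );          // wrapped in a stream_vector<double> internally
  out.Close();

  appl::file in( "toy.appl" );          // default option "rb"
  stream_vector<double> back = in.Get< stream_vector<double> >( "edges" );
  std::cout << back << std::endl;       // prints "key: edges" and the four values

  return 0;
}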
Index: applgrid/tags/applgrid-01-06-36/appl_grid/appl_timer.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/appl_timer.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/appl_timer.h	(revision 2010)
@@ -0,0 +1,56 @@
+/** emacs: this is -*- c++ -*- **/
+/**************************************************************************
+ **
+ **   File:         appl_timer.h  
+ **
+ **   Description:  small timer wrapper  
+ **                   
+ **                   
+ ** 
+ **   Author:       M.Sutton  
+ **
+ **   Created:      Fri Jun 29 18:59:09 BST 2001
+ **   Modified:     
+ **                   
+ **                   
+ **
+ **************************************************************************/ 
+
+
+#ifndef __APPL_TIMER
+#define __APPL_TIMER
+
+#include <time.h>
+#include <sys/time.h>
+
+#define APPL_TIMER_START  0
+#define APPL_TIMER_STOP   1
+#define APPL_TIMER_SINCE -1
+
+#ifdef __cplusplus
+// namespace appl { 
+extern "C" {
+#endif
+
+
+double appl_timer(int   );
+void   appl_timer_initkey(void);
+struct timeval appl_timer_start(void);
+double         appl_timer_stop(struct timeval );
+
+#ifdef __cplusplus
+}
+// };
+#endif
+
+#endif /* __APPL_TIMER */
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/appl_grid/histogram.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/histogram.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/histogram.h	(revision 2010)
@@ -0,0 +1,275 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **   @file    histogram.h        
+ **                   
+ **   @author  sutt
+ **   @date    Mon 28 Dec 2020 19:33:12 GMT
+ **
+ **   $Id: histogram.h, v0.0   Mon 28 Dec 2020 19:33:12 GMT sutt $
+ **
+ **   Copyright (C) 2020 sutt (sutt@cern.ch)    
+ **
+ **/
+
+
+#ifndef  HISTOGRAM_H
+#define  HISTOGRAM_H
+
+#include <string>
+#include <vector>
+#include <cmath>
+#include <iostream>
+#include <exception>
+
+
+#include "binary_search.h"
+#include "serialise_base.h"
+#include "appl_grid/serialisable.h"
+
+
+
+class histogram : public serialisable {
+
+public:
+
+  // histogram error exception
+  class exception : public std::exception { 
+  public:
+    exception(const std::string& s) { std::cerr << what() << " " << s << std::endl; }; 
+    virtual const char* what() const throw() { return ""; }
+  };
+
+public:
+
+  histogram( const std::string& name="" );
+
+  histogram( const std::string& name, size_t nbins, const double* limits );
+
+  histogram( const std::string& name, const std::vector<double>& limits );
+
+  histogram( const std::string& name, size_t nbins, double lo, double hi );
+
+  histogram( const std::vector<SB::TYPE> stream );
+
+  histogram( const histogram& h );
+
+  virtual ~histogram() { } 
+
+  histogram& operator=( histogram&& h );
+
+  histogram& operator=( const histogram& h );
+
+  /// accessors 
+
+  std::string name() const { return mname; }
+  std::string& name()      { return mname; }
+
+  size_t size() const { return mx.size(); }
+
+  double  operator[]( int i ) const { return my.at(i); }
+  double& operator[]( int i )       { return my[i]; }
+
+  double&    y( int i ) { return my.at(i);  }
+  double&   ye( int i ) { return mye.at(i); }
+  double& yelo( int i ) { 
+    if ( myelo.size()==0 ) myelo = std::vector<double>( mye.begin(), mye.end() );
+    return myelo.at(i); 
+  }
+
+  double    y( int i ) const { return my.at(i);  }
+  double   ye( int i ) const { return mye.at(i); }
+  double yelo( int i ) const { 
+    if ( myelo.size()==0 ) return mye.at(i);
+    return myelo.at(i); 
+  }
+
+  void set( const std::vector<double>& v, 
+	    const std::vector<double>& ve=std::vector<double>(),
+	    const std::vector<double>& velo=std::vector<double>() );
+
+  void set_errors( const std::vector<double>& ve,
+		   const std::vector<double>& velo=std::vector<double>() );
+
+  std::vector<double>& y()    { return my;  }
+  std::vector<double>& ye()   { return mye;  }
+  std::vector<double>& yelo() { return myelo;  }
+
+  const std::vector<double>& y()  const { return my;  }
+  const std::vector<double>& ye() const { return mye;  }
+  /// const std::vector<double>& yelo() const { return myelo; }
+  /// or, if there are no asymmetric uncertainties  
+  /// just return the symmetric ones
+  const std::vector<double>& yelo() const { 
+    if ( myelo.size()>0 ) return myelo;
+    else                  return mye;
+  }
+
+  double lo( int i ) const { return mxlimits.at(i); }
+  double hi( int i ) const { return mxlimits.at(i+1); }
+
+  const std::vector<double>& xlimits() const { return mxlimits; } 
+  std::vector<double>&       xlimits()       { return mxlimits; } 
+  double&                    xlimit( int i)  { return mxlimits.at(i); } 
+
+  /// logical operators - 
+  /// NB: these only test on the BIN LIMITS, NOT THE BIN CONTENTS !!
+  
+  bool operator==( const histogram& h ) {
+    if ( size()!=h.size() ) return false;
+    if ( size()==0 ) return true;
+    double delta = mxlimits[1]-mxlimits[0];
+    for ( size_t i=mxlimits.size() ; i-- ; ) if ( std::fabs(mxlimits[i]-h.mxlimits[i])>delta*1e-10 ) return false;
+    return true;
+  }
+
+  bool operator!=( const histogram& h ) { 
+    return !(operator==(h));
+  }
+
+  /// mathematical operations ...
+
+  histogram& operator*=( double d ) { 
+    for ( size_t i=size() ; i-- ; ) { 
+      my[i]  *= d;
+      mye[i] *= d;
+    }
+    if ( myelo.size()>0 ) {
+      for ( size_t i=size() ; i-- ; ) myelo[i] *= d;
+    }
+
+    return *this;
+  }
+
+  histogram operator*( double d ) const {
+    histogram h(*this);
+    return (h*=d);
+  }
+
+  histogram& operator*=( const std::vector<double>& v ) {
+    if ( v.size()!=size() ) throw exception(  "histogram: number of histogram and value bins don't match" );
+    
+    for ( size_t i=size() ; i-- ; ) { 
+      my[i]  *= v[i];
+      mye[i] *= v[i];
+    }
+    
+    if ( myelo.size()==v.size() ) {  
+      for ( size_t i=size() ; i-- ; ) myelo[i] *= v[i];
+    }
+    
+    return *this;
+  }
+
+
+  histogram operator*( const std::vector<double>& v ) const {
+    histogram h(*this);
+    return (h*=v);
+  }
+  
+
+  histogram& operator/=( double d ) {
+    return operator*=(1/d);
+  }
+
+  histogram operator/( double d ) const {
+    histogram h(*this);
+    return h/=d;
+  }
+
+  histogram& operator/=( const histogram& h);
+  histogram& operator*=( const histogram& h);
+  histogram& operator+=( const histogram& h);
+  
+  histogram operator/( const histogram& h) const {
+    histogram hr(*this);
+    return hr/=h;
+  }
+
+  histogram operator*( const histogram& h) const {
+    histogram hr(*this);
+    return hr*=h;
+  }
+
+  histogram operator+( histogram h ) const { 
+    return ( h += (*this) );
+  }
+
+  histogram operator-=( const histogram& h ) { 
+    return operator+=( h*(-1) );
+  }
+
+  histogram operator-( const histogram& h ) const { 
+    histogram hi(*this);
+    return hi-=h;
+  }
+  
+
+  /// filling ...
+
+  int index( double x ) const { return find_index( x, mxlimits ); }
+
+  void fill( double x, double w=1 );  
+
+  void clear(); 
+
+  /// serialisation and deserialisation ...
+
+  ///  void serialise_internal( std::vector<SB::TYPE>& s ) const;
+
+  ///  void deserialise_internal( const std::vector<SB::TYPE>& v );
+
+  virtual void serialise_internal( std::vector<SB::TYPE>& s ) const; 
+  virtual void deserialise_internal( std::vector<SB::TYPE>::const_iterator& itr );
+
+  /// merge bins i and bin i+1
+  void merge_bins( size_t i, bool norm=true );
+
+protected:
+
+  void create( size_t nbins, const double* limits );
+ 
+protected:
+  
+  std::string mname;              ///! serialise_var
+
+  std::vector<double> mxlimits;  ///! serialise_var
+  
+  std::vector<double> mx;        
+  
+  std::vector<double> my;        ///! serialise_var
+  std::vector<double> mye;       ///! serialise_var
+  std::vector<double> myelo;     /// possible lower limit errors for asymmetric uncertainties
+
+};
+
+
+template<typename T>
+histogram operator*( const T& t, histogram h ) { return (h*=t); }
+
+  
+
+
+inline std::ostream& operator<<( std::ostream& s, const histogram& h ) { 
+  s << "key: " << h.name() << std::endl;
+  for ( size_t i=0 ; i<h.size() ; i++ ) { 
+    s << "  " << h.lo(i) << "\t- " << h.hi(i) 
+      << "\t: " << h.y(i) << "\t+- " 
+      << h.ye(i) << "\t( " << h.yelo(i) << " )";
+    // if ( h.y(i)!=0 ) s << "\t( +- " << 100*h.ye(i)/h.y(i) << " % )";
+    s << std::endl; 
+  }
+  return s;
+}
+
+
+#endif  // HISTOGRAM_H 
+
+
+
+
+
+
+
+
+
+
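
A short sketch of how the histogram class above might be filled, scaled and round-tripped through the serialisation wrappers it inherits from serialisable; the bin choices and the name are illustrative only, and it compiles only against the library added by this patch.

    #include <iostream>
    #include <vector>
    #include "appl_grid/histogram.h"

    int main() {
      // 10 uniform bins on [0,100) - constructor as declared above
      histogram h( "pt", 10, 0, 100 );

      h.fill( 25 );          // unit weight
      h.fill( 25, 0.5 );     // explicit weight
      h *= 2;                // scale contents and uncertainties bin by bin

      std::cout << h << std::endl;                // streaming operator declared above

      // round trip through the serialisable base class wrappers
      std::vector<SB::TYPE> s = h.serialise();
      histogram h2( s );                          // reconstruct from the serialised stream

      return 0;
    }
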
Index: applgrid/tags/applgrid-01-06-36/appl_grid/serialisable.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/serialisable.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/serialisable.h	(revision 2010)
@@ -0,0 +1,92 @@
+/* emacs: this is -*- c++ -*- */
+/**
+ **   @file    serialisable.h        
+ **                   
+ **   @author  sutt
+ **   @date    Sat  9 Jan 2021 02:12:03 GMT
+ **
+ **   $Id: serialisable.h, v0.0   Sat  9 Jan 2021 02:12:03 GMT sutt $
+ **
+ **   Copyright (C) 2021 sutt (sutt@cern.ch)    
+ **
+ **/
+
+
+#ifndef  SERIALISABLE_H
+#define  SERIALISABLE_H
+
+#include <iostream>
+
+#include "appl_grid/serialise_base.h"
+
+class serialisable {
+
+public:
+
+  // histogram error exception
+  class exception : public std::exception { 
+  public:
+    exception(const std::string& s) { std::cerr << what() << " " << s << std::endl; }; 
+    virtual const char* what() const throw() { return ""; }
+  };
+
+public:
+
+  serialisable() { } 
+
+  virtual ~serialisable() { } 
+
+  virtual void serialise_internal( std::vector<SB::TYPE>& s ) const = 0;
+ 
+  virtual void deserialise_internal( std::vector<SB::TYPE>::const_iterator& itr ) = 0;
+
+  /// Serialisation and deserialisation wrappers ...
+
+  std::vector<SB::TYPE> serialise() const { 
+   
+    std::vector<SB::TYPE> s;
+
+    s.push_back( SB::WRITEGUARD );
+    s.push_back(0); // overall size
+    
+    serialise_internal( s );
+    
+    s.push_back( SB::WRITEGUARD );
+    s[1] = s.size();
+
+    return s;
+  }
+  
+  
+  void deserialise( const std::vector<SB::TYPE>& v ) { 
+  
+    std::vector<SB::TYPE>::const_iterator itr = v.begin();    
+    if ( (*itr++) != SB::WRITEGUARD ) throw exception("read error");
+    (*itr++);
+
+    deserialise_internal( itr );
+
+    SB::TYPE var = (*itr++);
+    if ( var != SB::WRITEGUARD ) throw exception("read error");   
+
+  } 
+
+
+};
+
+inline std::ostream& operator<<( std::ostream& s, const serialisable&  ) { 
+  return s;
+}
+
+
+#endif  // SERIALISABLE_H 
+
+
+
+
+
+
+
+
+
+
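
To illustrate the interface, a minimal sketch of a user class deriving from serialisable. The payload class itself is hypothetical, and the SB::serialise / SB::deserialise helpers for strings and double vectors are assumed to be those from serialise_base.h, as used by the other classes in this patch.

    #include <string>
    #include <vector>
    #include "appl_grid/serialisable.h"

    // hypothetical payload: a named vector of values
    class payload : public serialisable {
    public:
      payload( const std::string& name="", const std::vector<double>& v=std::vector<double>() )
        : mname(name), mvalues(v) { }

      virtual void serialise_internal( std::vector<SB::TYPE>& s ) const {
        SB::serialise( s, mname );     // helpers assumed from serialise_base.h
        SB::serialise( s, mvalues );
      }

      virtual void deserialise_internal( std::vector<SB::TYPE>::const_iterator& itr ) {
        SB::deserialise( itr, mname );
        SB::deserialise( itr, mvalues );
      }

    private:
      std::string         mname;
      std::vector<double> mvalues;
    };
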
Index: applgrid/tags/applgrid-01-06-36/appl_grid/lumi_pdf.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/lumi_pdf.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/lumi_pdf.h	(revision 2010)
@@ -0,0 +1,187 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    lumi_pdf.h
+ **
+ **     @brief   a lumi pdf type - to read in the combinations
+ **              for the subprocesses from a file
+ **
+ **              the file format is as written by madgraph: nproc rows ...  
+ **     
+ **                index  npairs   p1 p2   p3 p4   p5 p6 ...  
+ **                index  npairs   ...
+ **                ...
+ **
+ **              where  npairs is the number of parton-parton pairs
+ **              in this subprocess
+ **
+ **     @author  mark sutton
+ **     @date    Mon 28 Jan 2013 15:41:10 GMT 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: lumi_pdf.h, v0.0   Mon 28 Jan 2013 15:41:10 GMT sutt $
+ **
+ **/
+
+
+#ifndef  LUMI_PDF_H
+#define  LUMI_PDF_H
+
+#include <vector>
+#include <map>
+#include <iostream>
+
+#include "appl_grid/appl_pdf.h" 
+#include "appl_grid/combination.h"
+
+
+class lumi_pdf : public appl::appl_pdf {
+
+public:
+
+  lumi_pdf(const std::string& s="", const std::vector<int>& combinations=std::vector<int>() );   // , int Wcharge=0 );
+
+  lumi_pdf(const std::string& s, const std::vector<combination>& combinations, int ckmcharge=0 );             // , int Wcharge=0 );
+
+  virtual ~lumi_pdf() {   } 
+
+  void evaluate(const double* _fA, const double* _fB, double* H) const;
+
+  /// additional user defined functions to actually initialise 
+  /// based on the input file
+
+  /// how many combinations of subprocesses are there ?
+  unsigned size() const { return m_combinations.size(); }
+ 
+  //  void initialise(const std::string& filename);
+
+  //  bool initialised() const { return m_initialised; }
+
+  const combination& operator[](int i) const { return m_combinations.at(i); }  
+  combination&       operator[](int i)       { return m_combinations.at(i); }  
+
+  const combination& at(int i)         const { return m_combinations.at(i); }  
+  combination&       at(int i)               { return m_combinations.at(i); }  
+
+  int  decideSubProcess(const int iflav1, const int iflav2) const ;
+
+  size_t  nSubProcesses(const int iflav1, const int iflav2) const ;
+
+  std::vector<int> decideSubProcesses(const int iflav1, const int iflav2) const ;
+
+
+  int  decideSubProcess(const int iproc ) const;
+
+
+  /// serialise into a single vector
+  std::vector<int>               serialise() const;
+
+  /// ... or as a vector of vectors ...
+  std::vector<std::vector<int> > vectorise() const;
+
+  /// write to a file ..
+  void write(const std::string& filename) const;
+ 
+  /// write to a stream ...
+  void write(std::ostream& s=std::cout) const; 
+
+  //  std::string summary(std::ostream& s=std::cout) const; 
+  std::string summary() const; 
+
+  /// does this pdf contain a specific species, i
+  bool contains( int i ) const; 
+
+
+  // private:
+
+  /// add a combination
+  void add(const combination& c) {  
+    m_combinations.push_back(c);
+    m_Nproc = m_combinations.size();
+  }
+
+  /// delete a combination
+  void remove( int i );
+
+
+  void create_lookup();
+  
+  void clear_lookup() { m_lookup.clear(); } 
+
+  void removeDuplicates();
+
+  void restoreDuplicates();
+
+  /// detailed comparison of two lumi pdfs - checks names and 
+  /// all the constituent combinations
+  virtual bool operator==( const appl_pdf& pdf ) const {
+    const lumi_pdf* p = dynamic_cast<const lumi_pdf*>( &pdf );
+    if ( p==0 ) return false;
+    if (  name() !=  p->name() ) return false;
+    if ( Nproc() != p->Nproc() ) return false;
+    for ( int i=0 ; i<Nproc() ; i++ ) if ( !(this->at(i) == p->at(i)) ) return false;
+    return true;
+  } 
+  
+  virtual bool operator!=( const appl_pdf& pdf ) const { return !operator==(pdf); }
+
+
+public: 
+
+  static void runlatex( bool b=true ) { m_runlatex=b; } 
+
+private:
+
+  /// this might eventually become a std::string encoding the grid
+  std::string m_filename;  
+
+  /// has this been initialised yet?
+  //  bool m_initialised;
+
+  /// ckm matrices should they be needed ...
+  //std::vector<double>                m_ckmsum;
+  //std::vector<std::vector<double> >  m_ckm2;
+
+  std::vector<combination> m_combinations;
+
+  /// flag that this is an amcatnlo pdf
+  //  bool m_amcflag;
+
+  /// lookup table for decideSubprocess
+  std::vector<std::vector<std::vector<int> > >  m_lookup;
+
+  /// NNLO code process lookup 
+  std::map<int, int>  m_proclookup;
+
+private:
+
+  static bool m_runlatex;
+
+};
+
+
+
+inline std::ostream& operator<<( std::ostream& s, const lumi_pdf& _g ) { 
+  s << "lumi_pdf:   " << _g.name() << "\t processes " << _g.Nproc() << "\n";
+  for ( int i=0 ; i<_g.Nproc() ; i++ ) s << _g[i] << std::endl;
+  return s;
+}
+
+
+void latex( const lumi_pdf& p, const std::string& d="" );
+
+
+
+#endif  // LUMI_PDF_H 
+
+
+
+
+
+
+
+
+
+
+
+
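
A sketch of reading a combination file in the madgraph-style format described in the header comment above and querying it; the file name and the flavour-code convention passed to decideSubProcess are assumptions for illustration only.

    #include <iostream>
    #include "appl_grid/lumi_pdf.h"

    int main() {
      // read the madgraph-style combination file described in the header comment
      // ("combinations.config" is a hypothetical file name)
      lumi_pdf pdf( "combinations.config" );

      std::cout << pdf.summary() << std::endl;

      // which subprocess does this initial state belong to?
      // (flavour-code convention as used by the library is assumed here)
      int iproc = pdf.decideSubProcess( 21, 1 );
      std::cout << "g d -> subprocess " << iproc << std::endl;
      return 0;
    }
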
Index: applgrid/tags/applgrid-01-06-36/appl_grid/stream_grid.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/stream_grid.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/stream_grid.h	(revision 2010)
@@ -0,0 +1,194 @@
+/* emacs: this is -*- c++ -*- */
+/**
+ **   @file    stream_grid.h        
+ **                   
+ **   @author  sutt
+ **   @date    Wed 30 Dec 2020 18:06:03 GMT
+ **
+ **   $Id: stream_grid.h, v0.0   Wed 30 Dec 2020 18:06:03 GMT sutt $
+ **
+ **   Copyright (C) 2020 sutt (sutt@cern.ch)    
+ **
+ **/
+
+
+#ifndef  STREAM_GRID_H
+#define  STREAM_GRID_H
+
+#include <iostream>
+#include <vector>
+
+#include "appl_grid/vector_stream.h"
+#include "appl_grid/serialise_base.h"
+#include "appl_grid/serialisable.h"
+
+
+
+class stream_grid : public serialisable {
+
+public:
+
+  // grid error exception
+  class exception : public std::exception { 
+  public:
+    exception(const std::string& s) { std::cerr << what() << " " << s << std::endl; }; 
+    virtual const char* what() const throw() { return ""; }
+  };
+
+public:
+
+  stream_grid() { }
+
+  stream_grid( const std::string& name, 
+	       const std::vector<double>& xv, 
+	       const std::vector<double>& yv, 
+	       const std::vector<double>& zv ) :
+    mname(name),
+    mxaxis( xv ), 
+    myaxis( yv ), 
+    mzaxis( zv ),
+    mnz(zv.size()),  
+    mnyz(zv.size()*yv.size())
+  { 
+    //    mnodes = std::vector<std::vector<std::vector<double> > >( xv.size(), std::vector<std::vector<double> >( yv.size(), std::vector<double>(zv.size(), 0 ) ) );
+    mnodes = std::vector<double>(xv.size()*yv.size()*zv.size(), 0 );
+  } 
+
+  stream_grid( const stream_grid& sg ) :
+    mname(sg.mname),
+    mxaxis(sg.mxaxis),
+    myaxis(sg.myaxis),
+    mzaxis(sg.mzaxis),
+    mnz(sg.mnz),
+    mnyz(sg.mnyz),
+    mnodes(sg.mnodes) 
+  { } 
+
+
+  stream_grid& operator=( stream_grid&& sg ) { 
+    std::swap(mname,  sg.mname);
+    std::swap(mxaxis, sg.mxaxis);
+    std::swap(myaxis, sg.myaxis);
+    std::swap(mzaxis, sg.mzaxis);
+    std::swap(mnz,    sg.mnz);
+    std::swap(mnyz,   sg.mnyz);
+    std::swap(mnodes, sg.mnodes);
+    return *this;
+  }
+
+
+  stream_grid( const std::vector<SB::TYPE>& v ) {
+    deserialise( v );
+    mnz  = mzaxis.size();
+    mnyz = mzaxis.size()*myaxis.size();
+  } 
+
+  virtual ~stream_grid() { } 
+
+  std::string name() const { return mname; }
+
+  //  void set( int ix, int iy, int iz, double d ) { mnodes[ix+mnx*iy+mnxy*iz] = d; } 
+  void set( int ix, int iy, int iz, double d ) { mnodes[ix*mnyz+iy*mnz+iz] = d; } 
+
+  //  double& get( int ix, int iy, int iz )       { return mnodes[ix+mnx*iy+mnxy*iz]; } 
+  //  double  get( int ix, int iy, int iz ) const { return mnodes[ix+mnx*iy+mnxy*iz]; } 
+
+  //  double& operator()( int ix, int iy, int iz )       { return mnodes[ix+mnx*iy+mnxy*iz]; } 
+  //  double  operator()( int ix, int iy, int iz ) const { return mnodes[ix+mnx*iy+mnxy*iz]; } 
+
+  double& get( int ix, int iy, int iz )       { return mnodes[ix*mnyz+iy*mnz+iz]; }
+  double  get( int ix, int iy, int iz ) const { return mnodes[ix*mnyz+iy*mnz+iz]; }
+
+  double& operator()( int ix, int iy, int iz )       { return mnodes[ix*mnyz+iy*mnz+iz]; }
+  double  operator()( int ix, int iy, int iz ) const { return mnodes[ix*mnyz+iy*mnz+iz]; }
+
+  const std::vector<double>& xaxis() const { return mxaxis; }
+  const std::vector<double>& yaxis() const { return myaxis; }
+  const std::vector<double>& zaxis() const { return mzaxis; }
+
+  const double& xaxis(int i) const { return mxaxis[i]; }
+  const double& yaxis(int i) const { return myaxis[i]; }
+  const double& zaxis(int i) const { return mzaxis[i]; }
+
+
+  /// serialise and deserialise
+
+  virtual void serialise_internal( std::vector<SB::TYPE>& s ) const { 
+    SB::serialise( s, name() );
+    SB::serialise( s, mxaxis );
+    SB::serialise( s, myaxis );
+    SB::serialise( s, mzaxis );
+    SB::serialise( s, mnodes );
+  }
+
+
+  virtual void deserialise_internal( std::vector<SB::TYPE>::const_iterator& itr ) { 
+    SB::deserialise( itr, mname );
+    SB::deserialise( itr, mxaxis );
+    SB::deserialise( itr, myaxis );
+    SB::deserialise( itr, mzaxis );
+    SB::deserialise( itr, mnodes );
+  }
+
+
+private:
+
+  std::string mname;
+  
+  std::vector<double> mxaxis;
+  std::vector<double> myaxis;
+  std::vector<double> mzaxis;
+
+  size_t mnz;
+  size_t mnyz;
+
+  std::vector<double> mnodes;
+
+};
+
+
+inline std::ostream& operator<<( std::ostream& s, const stream_grid& sg ) { 
+
+  s << "[ x axis: " << sg.xaxis().size() << "\t" << sg.xaxis()[0] << " .. " << sg.xaxis()[sg.xaxis().size()-1] << " ]\n"; 
+  s << "[ y axis: " << sg.yaxis().size() << "\t" << sg.yaxis()[0] << " .. " << sg.yaxis()[sg.yaxis().size()-1] << " ]\n"; 
+  s << "[ z axis: " << sg.zaxis().size() << "\t" << sg.zaxis()[0] << " .. " << sg.zaxis()[sg.zaxis().size()-1] << " ]\n"; 
+
+
+#if 0
+"\n";
+  s << "xaxis: " << sg.xaxis() << "\n";
+  s << "yaxis: " << sg.yaxis() << "\n";
+  s << "zaxis: " << sg.zaxis() << "\n";
+
+  std::vector<double> x = sg.xaxis();
+  std::vector<double> y = sg.yaxis();
+  std::vector<double> z = sg.zaxis();
+
+  for ( size_t iz=0 ; iz<z.size() ; iz++ ) { 
+    for ( size_t iy=0 ; iy<y.size() ; iy++ ) { 
+      for ( size_t ix=0 ; ix<x.size() ; ix++ ) { 
+        double v = sg( ix, iy, iz );
+	if ( v==0 ) s << "   ";
+	else        s << int(3*v*1e-3) << " ";
+      }
+      std::cout << "\n";
+    }
+    std::cout << "\n";
+  }
+#endif
+
+  return s;
+}
+
+
+#endif  // STREAM_GRID_H 
+
+
+
+
+
+
+
+
+
+
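
A short sketch of the flat storage used by stream_grid: the (ix,iy,iz) node is kept at index ix*Ny*Nz + iy*Nz + iz of a single vector, as the accessors above show. The axis values and the grid name here are illustrative only.

    #include <iostream>
    #include <vector>
    #include "appl_grid/stream_grid.h"

    int main() {
      // node coordinates are illustrative only
      std::vector<double> x = { 0.0, 0.5, 1.0 };
      std::vector<double> y = { 10., 20. };
      std::vector<double> z = { 1., 2., 3., 4. };

      stream_grid g( "demo", x, y, z );

      // flat index = ix*(Ny*Nz) + iy*Nz + iz
      g.set( 2, 1, 3, 42 );
      std::cout << g( 2, 1, 3 ) << std::endl;    // prints 42

      // round trip through the serialisation wrappers from the serialisable base class
      std::vector<SB::TYPE> s = g.serialise();
      stream_grid g2( s );
      std::cout << g2( 2, 1, 3 ) << std::endl;
      return 0;
    }
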
Index: applgrid/tags/applgrid-01-06-36/appl_grid/appl_grid.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/appl_grid.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/appl_grid.h	(revision 2010)
@@ -0,0 +1,912 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    appl_grid.h
+ **
+ **     @author  mark sutton
+ **     @date    16th Oct 2007 17:01:39
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: appl_grid.h, v1.00 2007/10/16 17:01:39 sutt $     
+ **
+ **/
+
+
+// Fixme: this needs to be tidied up. e.g. there are too many different, 
+//        and too many versions of, accessors for x/y, Q2/tau etc. There 
+//        should be only one set, for x and Q2 *or* y and tau, but 
+//        not both. In fact they should be for x and Q2, since y and tau 
+//        should be purely an internal grid issue of no concern for the 
+//        user.
+
+#ifndef __APPL_GRID_H
+#define __APPL_GRID_H
+
+#include <vector>
+#include <iostream>
+#include <cmath>
+#include <string>
+#include <exception>
+
+#include "appl_grid/appl_root.h"
+
+#include "appl_grid/correction.h"
+#include "appl_grid/appl_TH1D.h"
+
+
+double _fy(double x);
+double _fx(double y);
+double _fun(double y);
+
+
+
+
+
+namespace appl { 
+
+std::string date();
+
+/// forward declarations - full definitions included 
+/// from appl_grid.cxx 
+class igrid;
+class appl_pdf;
+
+
+const int MAXGRIDS = 5;
+
+std::string compiled();
+std::string version();
+
+bool file_exists(const std::string& s);
+
+
+class RefStore { 
+
+public:
+
+    appl::TH1D  m_ref_order[MAXGRIDS];
+
+};
+
+
+
+/// externally visible grid class
+class grid {
+
+public:
+
+  // grid error exception
+  class exception : public std::exception { 
+  public:
+    exception(const std::string& s) { std::cerr << what() << " " << s << std::endl; }; 
+    //    exception(std::stringstream& s) { std::cerr << what() << " " << s.str() << std::endl; }; 
+    virtual const char* what() const throw() { return "appl::grid::exception"; }
+  };
+
+  typedef enum { STANDARD=0, AMCATNLO=1, SHERPA=2, LAST_TYPE=3 } CALCULATION; 
+
+public:
+
+  grid(int NQ2=50,  double Q2min=10000.0, double Q2max=25000000.0,  int Q2order=5,  
+       int Nx=50,   double xmin=1e-5,     double xmax=0.9,          int xorder=5,
+       int Nobs=20, double obsmin=100.0,  double obsmax=7000.0, 
+       std::string genpdf="mcfm_pdf", 
+       int leading_order=0, int _nloops=1, 
+       std::string transform="f2", 
+       std::string qtransform="h0" );
+
+  grid( int Nobs, const double* obsbins,
+	int NQ2=50,  double Q2min=10000.0, double Q2max=25000000.0, int Q2order=5,
+        int Nx=50,   double xmin=1e-5,     double xmax=0.9,         int xorder=5, 
+	std::string genpdf="mcfm_pdf",
+	int leading_order=0, int _nloops=1, 
+	std::string transform="f2", 
+	std::string qtransform="h0", 
+	bool isdis=false );
+
+  grid( const std::vector<double>& obs,
+	int NQ2=50,  double Q2min=10000.0, double Q2max=25000000.0,   int Q2order=5, 
+        int Nx=50,   double xmin=1e-5,     double xmax=0.9,           int xorder=5, 
+	std::string genpdf="mcfm_pdf", 
+	int leading_order=0, int _nloops=1, 
+	std::string transform="f2", 
+	std::string qtransform="h0" );
+
+  // build a grid but don't build the internal igrids - these can be added later
+  grid( const std::vector<double>& obs,
+	std::string genpdf="nlojet_pdf", 
+	int leading_order=0, int _nloops=1, 
+	std::string transform="f2", 
+	std::string qtransform="h0" );
+
+  // copy constructor
+  grid(const grid& g);
+
+  // read from a file
+  //  grid(const std::string& filename, bool, const std::string dirname="grid");
+
+  // read from a file
+  grid(const std::string& filename, const std::string& dirname="grid" );
+
+#ifdef USEROOT
+  void construct_root( const std::string& filename, const std::string& dirname );
+#endif
+
+  void construct_appl( const std::string& filename );
+
+  // add an igrid for a given bin and a given order 
+  void add_igrid(int bin, int order, igrid* g);
+
+  virtual ~grid();
+  
+  // update grid with one set of event weights
+  void fill(const double x1, const double x2, const double Q2, 
+	    const double obs, 
+	    const double* weight, const int iorder);
+  
+  
+  void fill_phasespace(const double x1, const double x2, const double Q2, 
+		       const double obs, 
+		       const double* weight, const int iorder);
+  
+  
+  void fill_grid(const double x1, const double x2, const double Q2, 
+		 const double obs, 
+		 const double* weight, const int iorder)  {
+    if (isOptimised())   fill(x1, x2, Q2, obs, weight, iorder);
+    else                 fill_phasespace(x1, x2, Q2, obs, weight, iorder);
+  }
+
+
+  void fill_index(const int ix1, const int ix2, const int iQ2, 
+		  const int iobs, 
+		  const double* weight, const int iorder);
+
+
+  // trim/untrim the grid to reduce memory footprint
+  void trim(int iorder=-1);
+  void untrim(int iorder=-1);
+ 
+  // formatted output 
+  std::ostream& print(std::ostream& s=std::cout) const;
+
+  // formatted output 
+  std::ostream& printpdf(std::ostream& s=std::cout) const;
+
+  std::ostream& debug(std::ostream& s=std::cout) const; 
+
+  // don't do anything anymore
+  // void setuppdf(void (*pdf)(const double& , const double&, double* ) );
+
+  // get the interpolated pdf's
+  //  void pdfinterp(double x1, double Q2, double* f);
+
+
+  // perform the convolution to a specified number of loops
+  // nloops=-1 gives the nlo part only
+  std::vector<double>  vconvolute(void   (*pdf)(const double& , const double&, double* ), 
+				  double (*alphas)(const double& ), 
+				  int     _nloops, 
+				  double  rscale_factor=1,
+				  double  fscale_factor=1,
+				  double  Escale=1 );
+
+  std::vector<double>  vconvolute(void   (*pdf1)(const double& , const double&, double* ), 
+				  void   (*pdf2)(const double& , const double&, double* ), 
+				  double (*alphas)(const double& ), 
+				  int     _nloops, 
+				  double  rscale_factor=1,
+				  double  fscale_factor=1,
+				  double  Escale1=1,
+				  double  Escale2=1 );
+
+
+  // perform the convolution to a specified number of loops
+  // nloops=-1 gives the nlo part only
+  std::vector<double>  vconvolute(double Escale,
+				  void   (*pdf)(const double& , const double&, double* ), 
+				  double (*alphas)(const double& ), 
+				  int     _nloops, 
+				  double  rscale_factor=1,
+				  double  fscale_factor=1  ) { 
+    return vconvolute(pdf, alphas, _nloops, rscale_factor, fscale_factor, Escale); 
+  }
+
+
+  // perform the convolution to the max number of loops in grid
+  std::vector<double> vconvolute(void   (*pdf)(const double& , const double&, double* ), 
+				 double (*alphas)(const double& ) )   { 
+    return vconvolute( pdf, alphas, nloops() ); 
+  } 
+
+
+  // perform the convolution to the max number of loops in grid
+  std::vector<double> vconvolute(double Escale, 
+				 void   (*pdf)(const double& , const double&, double* ), 
+				 double (*alphas)(const double& ) )   { 
+    return vconvolute( Escale, pdf, alphas, nloops() ); 
+  } 
+
+
+  // perform the convolution to a specified number of loops 
+  // for a single sub process, nloops=-1 gives the nlo part only
+  std::vector<double> vconvolute_subproc(int subproc, 
+					 void   (*pdf)(const double& , const double&, double* ), 
+					 double (*alphas)(const double& ), 
+					 int     _nloops, 
+					 double  rscale_factor=1, double Escale=1 ); 
+
+
+  // perform the convolution to a specified number of loops 
+  // for a single sub process, nloops=-1 gives the nlo part only
+  std::vector<double> vconvolute_subproc(int subproc, 
+					 double Escale, 
+					 void   (*pdf)(const double& , const double&, double* ), 
+					 double (*alphas)(const double& ), 
+					 int     _nloops, 
+					 double  rscale_factor=1 ) { 
+    return vconvolute_subproc(subproc, pdf, alphas, _nloops, rscale_factor, Escale); 
+  } 
+
+
+  // perform the convolution to the max number of loops in grid 
+  // for a single sub process
+  std::vector<double> vconvolute_subproc(int subproc, 
+					 void   (*pdf)(const double& , const double&, double* ), 
+					 double (*alphas)(const double& ) )   { 
+    return vconvolute_subproc( subproc, pdf, alphas, nloops() ); 
+  } 
+
+  // perform the convolution to the max number of loops in grid 
+  // for a single sub process
+  std::vector<double> vconvolute_subproc(int subproc, 
+					 double Escale,
+					 void   (*pdf)(const double& , const double&, double* ), 
+					 double (*alphas)(const double& ) )   { 
+    return vconvolute_subproc( subproc, Escale, pdf, alphas, m_order-1 ); 
+  } 
+  
+
+  double vconvolute_bin( int bin, 
+			 void (*pdf)(const double& , const double&, double* ), 
+			 double (*alphas)(const double&) ); 
+				       
+
+  // perform the convolution to a specified number of loops
+  // nloops=-1 gives the nlo part only
+  appl::TH1D* aconvolute(void   (*pdf)(const double& , const double&, double* ), 
+		  double (*alphas)(const double& ), 
+		  int     _nloops, 
+		  double  rscale_factor=1,
+		  double  fscale_factor=1,
+		  double  Escale=1 );
+
+#ifdef USEROOT
+  ::TH1D* convolute(void   (*pdf)(const double& , const double&, double* ), 
+		  double (*alphas)(const double& ), 
+		  int     _nloops, 
+		  double  rscale_factor=1,
+		  double  fscale_factor=1,
+		  double  Escale=1 ) { 
+    return convert( aconvolute( pdf, alphas, _nloops, rscale_factor, fscale_factor, Escale ) );
+  }
+#endif
+
+
+  // perform the convolution to a specified number of loops
+  // nloops=-1 gives the nlo part only
+  appl::TH1D* aconvolute(void   (*pdf1)(const double& , const double&, double* ), 
+		  void   (*pdf2)(const double& , const double&, double* ), 
+		  double (*alphas)(const double& ), 
+		  int     _nloops, 
+		  double  rscale_factor=1,
+		  double  fscale_factor=1,
+		  double  Escale1=1,
+		  double  Escale2=1  );
+
+#ifdef USEROOT
+  ::TH1D* convolute(void   (*pdf1)(const double& , const double&, double* ), 
+		  void   (*pdf2)(const double& , const double&, double* ), 
+		  double (*alphas)(const double& ), 
+		  int     _nloops, 
+		  double  rscale_factor=1,
+		  double  fscale_factor=1,
+		  double  Escale1=1,
+		  double  Escale2=1  ) {
+    return convert( aconvolute( pdf1, pdf2, alphas, _nloops, rscale_factor, fscale_factor, Escale1, Escale2 ) );
+  }
+#endif
+
+
+
+
+  appl::TH1D* aconvolute(double Escale,
+		  void   (*pdf)(const double& , const double&, double* ), 
+		  double (*alphas)(const double& ), 
+		  int     _nloops, 
+		  double  rscale_factor=1,
+		  double  fscale_factor=1 ) { 
+    return aconvolute(pdf, alphas, _nloops, rscale_factor, fscale_factor, Escale); 
+  }
+
+
+  // perform the convolution to the max number of loops in grid
+  appl::TH1D* aconvolute(void   (*pdf)(const double& , const double&, double* ), 
+		  double (*alphas)(const double& ) )   { 
+    return aconvolute( pdf, alphas, nloops() ); 
+  } 
+
+  // perform the convolution to the max number of loops in grid
+  appl::TH1D* aconvolute(double Escale,
+		  void   (*pdf)(const double& , const double&, double* ), 
+		  double (*alphas)(const double& ) )   { 
+    return aconvolute( Escale, pdf, alphas, nloops() ); 
+  } 
+
+
+  // perform the convolution to a specified number of loops 
+  // for a single sub process, _nloops=-1 gives the nlo part only
+  appl::TH1D* aconvolute_subproc(int subproc, 
+			  void   (*pdf)(const double& , const double&, double* ), 
+			  double (*alphas)(const double& ), 
+			  int     _nloops, 
+			  double  rscale_factor=1, double Escale=1 );
+#ifdef USEROOT
+  ::TH1D* convolute_subproc(int subproc, 
+			  void   (*pdf)(const double& , const double&, double* ), 
+			  double (*alphas)(const double& ), 
+			  int     _nloops, 
+			  double  rscale_factor=1, double Escale=1 ) { 
+    return convert( aconvolute_subproc( subproc, pdf, alphas, _nloops, rscale_factor, Escale ) );
+  }
+#endif  
+
+
+  appl::TH1D* aconvolute_subproc(int subproc, 
+			  double Escale,
+			  void   (*pdf)(const double& , const double&, double* ), 
+			  double (*alphas)(const double& ), 
+			  int     _nloops, 
+			  double  rscale_factor=1 ) { 
+    return aconvolute_subproc( subproc, pdf, alphas, _nloops, rscale_factor, Escale);
+  }
+
+  // perform the convolution to the max number of loops in grid 
+  // for a single sub process
+  appl::TH1D* aconvolute_subproc(int subproc, 
+			  void   (*pdf)(const double& , const double&, double* ), 
+			  double (*alphas)(const double& ) )   { 
+    return aconvolute_subproc( subproc, pdf, alphas, nloops() ); 
+  } 
+
+  appl::TH1D* aconvolute_subproc(int subproc, 
+			  double Escale,
+			  void   (*pdf)(const double& , const double&, double* ), 
+			  double (*alphas)(const double& ) )   { 
+    return aconvolute_subproc( subproc, Escale, pdf, alphas, nloops() ); 
+  } 
+
+
+  // optimise the bin limits
+  void optimise(bool force=false, int extrabins=1 );
+  void optimise(int NQ2, int Nx);
+  void optimise(int NQ2, int Nx1, int Nx2);
+
+  // redefine the limits by hand
+  void redefine(int iobs, int iorder,
+		int NQ2, double Q2min, double Q2max, 
+		int Nx,  double  xmin, double  xmax);
+
+  bool setNormalised(bool t=true) { return m_normalised=t; } 
+  bool getNormalised() const      { return m_normalised; } 
+
+
+  // set the filling to be symmetric and test status
+  bool symmetrise(bool t=true) { return m_symmetrise=t; } 
+  bool isSymmetric()     const { return m_symmetrise; } 
+
+  bool reweight(bool t=false); 
+
+  // access to internal grids if need be
+  const igrid* weightgrid(int iorder, int iobs) const { return m_grids[iorder].at(iobs); }
+  
+  // save grid to specified file
+  void Write(const std::string& filename, const std::string& dirname="grid", const std::string& pdfname="" );
+
+
+  // accessors for the observable after possible bin combination
+  int    Nobs()               const { return m_ref_combined->GetNbinsX(); }
+  double obs(int iobs)        const { return m_ref_combined->GetBinCenter(iobs+1); } 
+  int    obsbin(double obs)   const { return m_ref_combined->FindBin(obs)-1; } 
+  double obslow(int iobs)     const { return m_ref_combined->GetBinLowEdge(iobs+1); }
+  double obsmin()             const { return obslow(0); } 
+  double obsmax()             const { return obslow(Nobs()); } 
+  double deltaobs(int iobs)   const { return m_ref_combined->GetBinWidth(iobs+1); }
+
+  const appl::TH1D* getReference() const { return m_ref_combined; } 
+  appl::TH1D*       getReference()       { return m_ref_combined; } 
+  
+  const appl::TH1D* getReference( int i ) const;
+  appl::TH1D*       getReference( int i );
+
+  void setReference( const appl::TH1D* h, int i=-1 ); 
+
+  std::string getGeneratedPDF()  const { return m_genwithpdf; }
+  int         getGeneratediPDF() const { return m_genwithipdf; }
+
+  void  setGeneratedPDF( const std::string& s ) { m_genwithpdf = s; }
+  void  setGeneratediPDF( int i ) { m_genwithipdf = i; }
+
+  void setGeneratedPDF( const std::string& s, int i ) { 
+    setGeneratedPDF( s );
+    setGeneratediPDF( i );
+  }
+
+  // accessors for the observable before any bin combination
+  int    Nobs_internal()               const { return m_ref->GetNbinsX(); }
+  double obs_internal(int iobs)        const { return m_ref->GetBinCenter(iobs+1); } 
+  int    obsbin_internal(double obs)   const { return m_ref->FindBin(obs)-1; } 
+  double obslow_internal(int iobs)     const { return m_ref->GetBinLowEdge(iobs+1); }
+  double deltaobs_internal(int iobs)   const { return m_ref->GetBinWidth(iobs+1); }
+  double obsmin_internal()             const { return obslow_internal(0); } 
+  double obsmax_internal()             const { return obslow_internal(Nobs_internal()); } 
+
+  const appl::TH1D* getReference_internal() const { return m_ref; } 
+  appl::TH1D*       getReference_internal()       { return m_ref; } 
+
+ 
+ 
+
+  // number of subprocesses 
+  int subProcesses(int i) const;
+
+  // general status accessors
+  double& run() { return m_run; }
+ 
+  // accessors for the status information
+  bool isOptimised() const { return m_optimised; }
+  bool isTrimmed()   const { return m_trimmed; }
+
+  // lowest order of process
+  int  leadingOrder() const { return m_leading_order; } 
+
+  /// maximum number of orders ( lo=1, nlo=2, nnlo=3 )  
+  /// but aMC@NLO uses 4 grids for the NLO, so m_order
+  /// will be 4, but really only 1 loop is still 
+  /// available, so take account of this 
+  int  nloops() const {
+    if ( m_type!=AMCATNLO ) return m_order-1;
+    else if ( m_order>1 ) return 1;
+    else                  return 0;
+    //    else return m_order;
+  } 
+
+
+  int  ninternalgrids() const { return m_order; } 
+
+  // find out which transform and which pdf combination are being used
+  std::string getTransform()  const { return m_transform; }
+  std::string getQTransform() const { return m_qtransform; }
+
+  static double transformvar();
+  static double transformvar(double v);
+
+  std::string getGenpdf()    const { return m_genpdfname; }
+
+  std::string version()      const { return m_version; } 
+
+  std::string appl_version() const;
+
+  double getCMSScale()          const { return m_cmsScale; }
+  void   setCMSScale(double cmsScale) { m_cmsScale=cmsScale; }
+
+  double getDynamicScale()          const     { return m_dynamicScale; }
+  void   setDynamicScale(double dynamicScale) { m_dynamicScale=dynamicScale; }
+
+
+  // set optimise flag on all sub grids
+  bool setOptimised(bool t=true) { 
+    return m_optimised=t;
+    //    for ( int iorder=0 ; iorder<2 ; iorder++ ) { 
+    //      for ( int iobs=0 ; iobs<Nobs() ; iobs++ ) m_grids[iorder][iobs]->setOptimised(t); 
+    //    }
+  }
+
+  // find the number of words used for storage
+  int size() const; 
+
+  // get the cross sections
+  double& crossSection()      { return m_total; } 
+  double& crossSectionError() { return m_totalerror; } 
+  
+  //  double Lambda() const { return m_Lambda2; }
+
+  // very lovely algebraic operators
+  grid& operator=(const grid& g); 
+  grid& operator*=(const double& d); 
+  grid& operator*=(const std::vector<double>& v); 
+  grid& operator*=(const std::vector<std::vector<double> >& vv); 
+  grid& operator+=(const grid& g);
+
+  /// test if grids have the same limits etc
+  bool operator==(const grid& g) const;   
+
+  // shouldn't have these, the grid is too large a structure 
+  // to be passed in a return
+  // grid operator*(const double& d) const { return grid(*this)*=d; }
+  // grid operator+(const grid& g)   const { return grid(*this)+=g; }
+  
+  void setDocumentation(const std::string& s);
+  void addDocumentation(const std::string& s);
+
+  std::string  getDocumentation() const { return m_documentation; }
+  std::string& getDocumentation()       { return m_documentation; }
+
+
+  /// set the range of the observable bins, with an optional
+  /// scaling of the observable values for changing units
+  void setBinRange(int ilower, int iupper, double xScaleFactor=1);
+  void setRange(double lower, double upper, double xScaleFactor=1);
+
+
+  /// add a correction as a std::vector
+  void addCorrection( std::vector<double>& v, const std::string& label="", bool combine=false );
+
+
+  /// add a correction by histogram
+  void addCorrection(appl::TH1D* h, const std::string& label="", double scale=1, bool combine=false );
+
+  
+  /// access the corrections
+  //  const std::vector<std::vector<double> >& corrections() const { 
+  std::vector<correction> corrections() const;
+
+
+  /// access the corrections
+  //  const std::vector<std::vector<double> >& corrections() const { 
+  const std::vector<appl::TH1D>& acorrections() const { 
+    return m_corrections;
+  }
+
+  //  const appl::TH1D& correction(int i) const { 
+  //    return m_corrections[i];
+  //  }
+
+  /// get the correction labels
+  const std::vector<std::string >& correctionLabels() const { 
+    return m_correctionLabels;
+  }
+
+  /// will the corrections be applied? 
+  bool getApplyCorrections() const { return m_applyCorrections; } 
+
+  bool setApplyCorrections(bool b) { 
+    std::cout << "appl::grid bin-by-bin corrections will " 
+	      << ( b ? "" : "not " ) << "be applied" << std::endl;
+    return m_applyCorrections=b; 
+  } 
+
+  /// apply corrections to a std::vector
+  void applyCorrections(std::vector<double>& v, std::vector<bool>& applied);
+
+
+  /// will a specific correction be applied? 
+  bool getApplyCorrection(unsigned i) const {
+    if      ( m_applyCorrections )         return true; 
+    else if ( i<m_applyCorrection.size() ) return m_applyCorrection.at(i);
+    return false; 
+  }
+ 
+  bool setApplyCorrection(unsigned i, bool b) { 
+    if ( i>=m_corrections.size() ) return false; 
+    std::cout << "appl::grid bin-by-bin correction will " 
+	      << ( b ? "" : "not " ) << "be applied for correction " << i;
+    if ( m_correctionLabels[i]!="" ) std::cout << " ("  << m_correctionLabels[i] << ")";
+    std::cout << std::endl;
+    return m_applyCorrection[i]=b;
+  } 
+  
+  /// apply corrections to a std::vector
+  bool applyCorrection(unsigned i, std::vector<double>& v);
+  
+
+  /// set the ckm matrix values if need be
+  /// takes a 3x3 matrix with the format { { Vud, Vus, Vub }, { Vcd, Vcs, Vcb }, { Vtd, Vts, Vtb } }  
+  void setckm( const std::vector<std::vector<double> >& ckm );
+
+  /// takes a flat 9 element vector (or c array) with the format { Vud, Vus, Vub, Vcd, Vcs, Vcb, Vtd, Vts, Vtb }  
+  void setckm( const std::vector<double>& ckm );
+  void setckm( const double* ckm );
+
+
+  /// set the squared ckm matrix values if need be
+  /// the squared terms for either W+ or W- production - you probably should use setckm()
+  void setckm2( const std::vector<std::vector<double> >& ckm2 );
+
+  /// get the ckm matrix and squared ckm matrix values
+  const std::vector<std::vector<double> >& getckm()  const;
+  const std::vector<std::vector<double> >& getckm2() const;
+
+
+  /// flag custom convolution routines
+
+  void sherpa()   { m_type = SHERPA;   std::cout << "appl::grid::sherpa()   using SHERPA convolution" << std::endl; }
+  void amcatnlo() { m_type = AMCATNLO; std::cout << "appl::grid::amcatnlo() using aMC@NLO convolution" << std::endl; }
+  void standard() { m_type = STANDARD; std::cout << "appl::grid::standard() using standard convolution" << std::endl; }
+
+  CALCULATION  calculation() const { return m_type; }
+
+  static std::string _calculation(CALCULATION C) { 
+    switch (C) {
+    case STANDARD:
+      return "standard";
+    case SHERPA:
+      return "sherpa";
+    case AMCATNLO:
+      return "amcatnlo";
+    case LAST_TYPE:
+      return "last_type"; // NB: shouldn't ever be used
+    }
+    return "unknown";
+  }
+
+  /// reduce number of subprocesses if possible
+  void shrink(const std::string& name, int ckmcharge=0);
+
+  /// fix broken parton luminosity duplication
+  void fixbroken(const std::string& name );
+
+  void move( int iorder, int isrc, std::vector<int>& idest );
+
+  size_t combine_nbins() const { return m_ref_combined->GetNbinsX(); }
+  size_t         nbins() const { return m_ref->GetNbinsX(); }
+
+
+  void merge_bins( size_t i );
+
+  void merge_bins( size_t i, size_t n ) { 
+    for ( size_t ii=0 ; ii<n ; ii++ ) merge_bins(i);
+  }
+
+  void merge_last_bins() { merge_bins( m_ref->size()-2 ); }
+
+  void merge( int i, int j );
+
+  /// set bins to be combined after the convolution
+  void combine( std::vector<int>& v) {
+    m_combine = v;
+    //    std::cout << "grid::combine(): " << v.size() << std::endl;
+    if ( m_combine.size() ) combineReference(true); 
+  }
+
+  std::vector<int> combine() const { return m_combine; }
+
+  /// combine the reference after the convolution
+  void combineReference(bool force=false);
+
+  void combineBins(std::vector<double>& v, int power=1 ) const;
+
+  double fx(double x) const;
+  double fy(double x) const;
+
+  const appl_pdf* genpdf(int i, bool =false ) const { return m_genpdf[i]; }
+
+  void setGenpdf( const std::string& genpdfname );  /// use with care
+
+  std::vector<double>&       userdata()       { return m_userdata; }
+  const std::vector<double>& userdata() const { return m_userdata; }
+
+  void replaceBin( int iobs, grid& ig );
+
+protected:
+
+  // internal common construct for the different types of constructor
+  void construct(int Nobs,
+		 int NQ2=50,  double Q2min=10000.0, double Q2max=25000000.0, int Q2order=4,  
+		 int Nx=50,   double xmin=1e-5,     double xmax=0.9,         int xorder=3,
+		 int order=2, 
+		 std::string transform="f", 
+		 std::string qtransform="h0",
+		 bool isdis=false );
+
+#ifdef USEROOT
+  // save grid to specified file
+  void Write_root(const std::string& filename, const std::string& dirname="grid", const std::string& pdfname="" );
+#endif
+
+  // save grid to specified file
+  void Write_appl(const std::string& filename, const std::string& dirname="grid", const std::string& pdfname="" );
+
+
+  /// not yet implemented 
+
+  int serialise( double& /*x1*/, double& /*x2*/, double& /*Q2*/, std::vector<double>& /*w*/, bool b=true ) {
+    static int i=0;
+    if ( b==true ) { 
+      i=0;
+      /// work out how many entries there are 
+      /// and return that
+      return 0;
+    }
+    /// iterate through every node, in every subprocess, in every 
+    /// observable bin
+    
+    /// get next entry
+
+    /// set x1, x2 etc ...
+
+    i++;
+
+    return i;
+  } 
+  
+protected:
+
+  /// std::string manipulators to parse the pdf names 
+
+  /// return chomped std::string
+  static std::string chomptoken(std::string& s1, const std::string& s2)
+  {
+    std::string s3 = "";
+    std::string::size_type pos = s1.find(s2);
+    if ( pos != std::string::npos ) {
+      s3 = s1.substr(0, pos);
+      s1.erase(0, pos+1);
+    }
+    else { 
+      s3 = s1.substr(0, s1.size());
+      s1.erase(0, s1.size()+1);
+    }
+    return s3;
+  } 
+ 
+  static std::vector<std::string> parse(std::string s, const std::string& key) {
+    std::vector<std::string> clauses;
+    while ( s.size() ) clauses.push_back( chomptoken(s, key) );
+    return clauses;
+  }
+  
+  /// get the required pdf combinations from those registered   
+  void findgenpdf( std::string s );
+
+  /// add a generic pdf to the data base of registered pdfs
+  void addpdf( const std::string& s, const std::vector<int>& combinations=std::vector<int>() );
+
+  appl_pdf* genpdf(int i) { return m_genpdf[i]; }
+  
+public: 
+
+  int subproc() const { return m_subproc; }
+
+  static void disable_threads(bool b=true);
+
+  void include_photon( bool b );
+
+  bool include_photon() const { return m_photon; } 
+
+  bool isDIS() const { return m_isDIS; }
+
+  /// should duplicates be removed from the lumi_pdf ?
+  bool use_duplicates() const { return m_use_duplicates; }
+
+  bool use_duplicates(bool b) { return m_use_duplicates=b; }
+
+  /// zero the grid and the reference
+  void clear();
+
+protected:
+  
+  RefStore m_refstore;
+
+  // histograms for saving the observable
+  appl::TH1D*  m_ref;
+  appl::TH1D*  m_ref_combined;
+
+  // order in alpha_s of tree level contribution 
+  int  m_leading_order; 
+
+  // how many orders in the calculation, lo, nlo, nnlo etc
+  // actually this is abused for the amc@nlo grids and is really a count
+  // of how many grids there are, so usually, there is only 1 grid per
+  // order, but amc@nlo uses 1 grid for the LO part, and 4 grids for the 
+  // NLO part 
+  int  m_order;
+
+  // the actual weight grids themselves
+  //  igrid** m_grids[MAXGRIDS]; /// up to MAXGRIDS grids LO, NLO, NNLO, Real virtual, etc 
+  std::vector<igrid*> m_grids[MAXGRIDS]; /// up to MAXGRIDS grids LO, NLO, NNLO, Real virtual, etc 
+
+  // total cross section and uncertainty
+  double m_total;
+  double m_totalerror;
+
+  // state variables
+  double   m_run;
+  bool     m_optimised;
+  bool     m_trimmed;
+
+  bool   m_normalised;
+
+  bool   m_symmetrise; 
+ 
+  // transform and pdf combination tags
+  std::string m_transform;
+ 
+  std::string m_qtransform; 
+
+  std::string m_genpdfname; 
+
+  // pdf combination class
+  appl_pdf* m_genpdf[MAXGRIDS];
+
+  static const std::string m_version;
+
+  double m_cmsScale;
+
+  double m_dynamicScale;
+
+  /// bin by bin correction factors 
+  //  std::vector<std::vector<double> > m_corrections;
+  std::vector<appl::TH1D>    m_corrections;
+  std::vector<std::string>   m_correctionLabels;
+  
+
+  /// should we apply the corrections?
+  bool m_applyCorrections;
+
+  /// flag vector to determine whether each individual 
+  /// correction should be applied
+  std::vector<bool>  m_applyCorrection;
+
+  std::string m_documentation;
+  
+  std::vector<double>                m_ckmsum;
+  std::vector<std::vector<double> >  m_ckm2;
+  std::vector<std::vector<double> >  m_ckm;
+
+  CALCULATION     m_type; 
+
+  bool            m_read;
+
+  std::vector<int> m_combine;
+
+  int  m_subproc;
+  int  m_bin;
+
+  std::vector<double> m_userdata;
+
+  /// limits on y=log(1/x) and Q=sqrt(Q2) - have as class 
+  /// variables, so we can calculate them once and for all 
+  /// when grid is read in 
+  /// NB: not defined for new grids that are still being 
+  ///     filled
+
+  double m_ymax;
+  double m_Qmax; 
+
+  /// pdf used for generation
+
+  std::string m_genwithpdf; 
+  int         m_genwithipdf; 
+
+  /// flag to determine whether this grid 
+  /// contains a photon density
+  bool m_photon;
+  
+  bool m_isDIS;
+
+  bool m_use_duplicates;
+
+};
+
+
+};
+
+// shouldn't have this, grid is too large a structure 
+// grid operator*(const double& d, const appl::grid& g) { return g*d; }
+
+std::ostream& operator<<(std::ostream& s, const appl::grid& mygrid);
+
+
+
+#endif // __APPL_GRID_H 
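
For orientation, a sketch of the basic convolution workflow with the grid class above. It is not part of the patch: the grid file name and the toy pdf and alpha_s functions (using the usual 13-flavour x*f(x,Q2) array convention, assumed here) are stand-ins supplied purely for illustration.

    #include <vector>
    #include <iostream>
    #include "appl_grid/appl_grid.h"

    // user-supplied pdf: fill xf[0..12] with x*f(x,Q2) for flavours -6..6
    // (a toy shape, not a real parton density)
    void myPDF( const double& x, const double& Q2, double* xf ) {
      for ( int i=0 ; i<13 ; i++ ) xf[i] = (1-x)*(1-x);
      (void)Q2;
    }

    // user-supplied strong coupling - a fixed toy value, not a real running coupling
    double myAlphaS( const double& Q2 ) { (void)Q2; return 0.118; }

    int main() {
      appl::grid g( "grid.root" );                       // hypothetical grid file name
      std::vector<double> xsec = g.vconvolute( myPDF, myAlphaS, g.nloops() );
      for ( size_t i=0 ; i<xsec.size() ; i++ )
        std::cout << "bin " << i << " : " << xsec[i] << std::endl;
      return 0;
    }
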
Index: applgrid/tags/applgrid-01-06-36/appl_grid/combination.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/combination.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/combination.h	(revision 2010)
@@ -0,0 +1,145 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    combination.h
+ **
+ **     @author  mark sutton
+ **     @date    Thu  4 Jul 2013 23:20:40 CEST 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: combination.h, v0.0   Thu  4 Jul 2013 23:20:40 CEST sutt $
+ **
+ **/
+
+
+#ifndef  COMBINATION_H
+#define  COMBINATION_H
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+#include <string>
+
+#include <vector>
+#include <utility>
+
+
+
+class combination { 
+
+public:
+
+  typedef std::pair<int,int> cpair;
+
+public:
+
+  combination();
+
+  /// constructors and destructor
+  combination(const std::vector<int>& v);
+
+  combination(const std::string& line);
+
+  combination(const combination& c);
+
+  combination& operator=( const combination& c );
+
+  virtual ~combination() {}
+
+  /// evaluate the actual combination
+
+  double evaluate( const double* xfA, const double* xfB,
+		   const std::vector<double>& ckmsum=std::vector<double>(), 
+		   const std::vector<std::vector<double> >& ckm2=std::vector<std::vector<double> >()  ) const;  
+
+  /// number of pairs
+  unsigned size() const { return m_pairs.size(); }  
+
+  /// do any of these remaining need to be public?
+
+  /// index
+  const std::vector<int>&  index()  const { return m_index; }
+  std::vector<int>&        index()        { return m_index; }
+
+  void add_index( int i )  { m_index.push_back(i); }
+
+  /// accessors to the pairs themselves
+  cpair  pair(int i) const { return m_pairs[i]; }
+
+  const std::vector<cpair>& pairs() const { return m_pairs; }
+
+  cpair&       operator[](int i)       { return m_pairs[i]; } 
+  const cpair& operator[](int i) const { return m_pairs[i]; } 
+
+  bool operator==( const combination& c ) const;
+
+  /// some spurious logical operators
+  bool operator<(  const combination& c ) const { return m_index[0]<c.m_index[0]; }
+  bool operator>(  const combination& c ) const { return m_index[0]>c.m_index[0]; }
+  bool operator<=(  const combination& c ) const { return m_index[0]<=c.m_index[0]; }
+
+  std::vector<int> serialise() const;
+
+  bool contains( const cpair& p ) { for ( unsigned i=0 ; i<m_pairs.size() ; i++ ) if ( m_pairs[i]==p ) return true; return false; } 
+
+private:
+
+  /// actually construct combination from the pair list
+  void construct(const std::vector<int>& v);
+
+  /// remap from the pdg to lhapdf code
+  int remap(int i) {
+    if ( i==21 ) return 0;
+    if ( i==22 ) return 7; 
+    return i; 
+  } 
+
+private:
+
+  std::vector<int>   m_index;
+  size_t             m_size;
+  
+  std::vector<cpair> m_pairs;
+
+  //  static int         gindex;
+  
+};
+
+
+
+
+inline std::ostream& operator<<( std::ostream& s, const combination& c ) { 
+  s << "[ (";
+  for ( unsigned ic=0 ; ic<c.index().size() ; ic++ ) s << " " << c.index()[ic]; 
+  s << ") : ";
+  if ( c.size()>10 ) { 
+    for ( int i=0 ; i<4 ; i++ ) s << "\t(" << c[i].first << ", " << c[i].second << ")";
+    s << "\t  ... ";
+    for ( unsigned i=c.size()-3 ; i<c.size() ; i++ ) s << "\t(" << c[i].first << ", " << c[i].second << ")";
+  }
+  else { 
+    for ( unsigned i=0 ; i<c.size() ; i++ ) s << "\t(" << c[i].first << ", " << c[i].second << ")";
+  }
+  s << " ]";
+  return s;
+}
+
+
+inline std::ostream& operator<<( std::ostream& s, const std::pair<int,int>& p ) {
+  return s << "( " << p.first << " " << p.second << ")"; 
+} 
+
+#endif  // COMBINATION_H 
+
+
+
+
+
+
+
+
+
+
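
A sketch of building a single combination from the integer layout described in lumi_pdf.h (index, npairs, then the parton pairs). Whether the vector constructor expects exactly this layout is an assumption based on that comment, and the flavour codes are illustrative.

    #include <iostream>
    #include <vector>
    #include "appl_grid/combination.h"

    int main() {
      // "index npairs p1 p2 p3 p4 ..." as described in lumi_pdf.h:
      // subprocess 0, two parton pairs: (g,g) and (u,ubar) - codes illustrative
      std::vector<int> v = { 0, 2,  21, 21,  2, -2 };
      combination c( v );

      std::cout << c << std::endl;                    // uses the streaming operator above
      std::cout << "pairs: " << c.size() << std::endl;
      return 0;
    }
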
Index: applgrid/tags/applgrid-01-06-36/appl_grid/Directory.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/Directory.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/Directory.h	(revision 2010)
@@ -0,0 +1,70 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    Directory.h
+ **
+ **     @brief   Description:  class to keep a directory for each object 
+ **              in a root sort of way, but needed to keep 
+ **              the root objects out of the actual code.   
+ **
+ **     @author  mark sutton
+ **     @date    Wed May  4 17:54:25 BST 2005
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **/
+
+
+#ifndef __DIRECTORY_H
+#define __DIRECTORY_H
+
+#include "appl_grid/appl_root.h"
+
+#ifdef USEROOT
+
+#include "TDirectory.h"
+
+// #include "utils.h"
+
+class Directory {
+
+ public:
+
+  Directory() : mDir(NULL), mPop(NULL) { } 
+  Directory(std::string n) : mDir(NULL) { 
+    mPop = gDirectory;
+    //    depunctuate(n);
+    mDir = gDirectory->mkdir(n.c_str());
+  } 
+
+  void push() { 
+    mPop = gDirectory;
+    if (mDir) mDir->cd();
+  } 
+
+  void pop() { if (mPop) mPop->cd(); }
+
+  void Write() { 
+    push();
+    mDir->Write();
+    pop();
+  } 
+
+ private:
+  
+  TDirectory* mDir;
+  TDirectory* mPop;
+
+};
+
+#endif
+
+#endif  /* __DIRECTORY_H */
+
+
+
+
+
+
+
+
+
+
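
A usage sketch for the Directory helper; it is only available when the package is built with ROOT support (USEROOT), and the file and directory names here are illustrative.

    #include "TFile.h"
    #include "appl_grid/Directory.h"

    // only meaningful when compiled with USEROOT defined
    void write_into_subdir() {
      TFile f( "out.root", "RECREATE" );
      Directory d( "grid" );   // creates "grid" inside the current ROOT directory
      d.push();                // cd into it, remembering where we came from
      // ... write objects into the subdirectory here ...
      d.pop();                 // cd back to where we started
      d.Write();
    }
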
Index: applgrid/tags/applgrid-01-06-36/appl_grid/integral.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/integral.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/integral.h	(revision 2010)
@@ -0,0 +1,39 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    integral.h
+ **
+ **     @author  mark sutton
+ **     @date    Mon 24 Mar 2014 13:17:43 CET 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: integral.h, v0.0   Mon 24 Mar 2014 13:17:43 CET sutt $
+ **
+ **/
+
+
+#ifndef  INTEGRAL_H
+#define  INTEGRAL_H
+
+#include <vector>
+
+#include "appl_grid/appl_grid.h"
+
+namespace appl { 
+
+double integral( const std::vector<double>& d, const appl::grid& g );
+double integral( const appl::TH1D* h ); //, const appl::grid& g );
+
+}
+
+#endif  // INTEGRAL_H 
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/appl_grid/generic_pdf.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/generic_pdf.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/generic_pdf.h	(revision 2010)
@@ -0,0 +1,175 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    generic_pdf.h
+ **
+ **     @brief   a generic pdf type - to read in the combinations
+ **              for the subprocesses from a file, with the file 
+ **              format determined by Tancredi (details will be 
+ **              filled in as the implementation becomes more complete)                       
+ **
+ **     @author  mark sutton
+ **     @date    Mon 28 Jan 2013 15:41:10 GMT 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: generic_pdf.h, v0.0   Mon 28 Jan 2013 15:41:10 GMT sutt $
+ **
+ **/
+
+
+#ifndef  GENERIC_PDF_H
+#define  GENERIC_PDF_H
+
+#include <iostream>
+
+#include "appl_grid/appl_pdf.h" 
+
+
+
+
+class generic_pdf : public appl::appl_pdf {
+
+public:
+
+  //  generic_pdf(const std::string& s="", const std::vector<int> combinations=std::vector<int>() );
+  generic_pdf(const std::string& s="" );
+
+  virtual ~generic_pdf() {   } 
+
+  /// actually evaluate the pdf combination
+  void evaluate(const double* _fA, const double* _fB, double* H) const;
+
+  /// additional user defined functions to actually initialise 
+  /// based on the input file
+
+  void initialise(const std::string& filename);
+
+  bool initialised() const { return m_initialised; }
+
+
+public:
+
+  void ReadSubprocessSteering(const std::string& fname);
+
+  void Print_ckm();  
+
+  void PrintSubprocess();
+
+  int  GetSubProcessNumber(){ return procname.size(); }
+
+  //double* GetGeneralisedPdf(const double *,const double *); 
+
+  int  decideSubProcess(const int iflav1, const int iflav2) const;
+
+  int  decideSubProcessSet(const int iflav1, const int iflav2) {
+    int ip = decideSubProcess( iflav1, iflav2 ); 
+    currentsubprocess=ip;
+    return ip;
+  }
+
+
+  //  void SetSubCurrentProcess(int mypro) { currentsubprocess=mypro; }
+
+  //  int GetCurrentSubProcess() { 
+  //    if ( currentsubprocess==-1 ) std::cout << " generic_pdf: current subprocess not defined ! " << std::endl;
+  //    return currentsubprocess;
+  //  }
+
+  std::string GetSubProcessName(int isub) {return procname[isub];};
+
+  //  void SetCurrentProcess(int mypro){ currentprocess=mypro; }
+  
+  //  int GetCurrentProcess() { 
+  //    if (currentprocess==-1) std::cout<<" generic_pdf: current process not defined ! "<< std::endl;
+  //    return currentprocess;
+  //  }
+  
+  int GetnQuark() { return m_nQuark; }
+
+  void SetnQuark(int nq) { m_nQuark=nq; }
+
+  void MakeCkm();
+
+  void PrintFlavourMap() {
+    std::cout << " generic_pdf: print out flavour std::map " << std::endl;
+    std::map<int,int>::iterator imap;
+    for (imap = flavourtype.begin(); imap!=flavourtype.end(); ++imap){
+      std::cout<<" Flavourtype["<<imap->first<<"]= "<<imap->second<<" "<< std::endl;
+    }
+  }
+
+  //int GetNProc() {return procname.size(); };
+
+  // generic_pdf() : appl_pdf("vrapzLO") { 
+  // m_Nproc=this->GetSubProcessNumber(); 
+  //} 
+
+
+  
+private:
+
+
+  /// this might eventually become a std::string encoding the grid
+  std::string m_filename;  
+
+  /// has this been initialised yet?
+  bool m_initialised;
+
+  /// ckm matrices should they be needed ...
+  //std::vector<double>                m_ckmsum;
+  //std::vector<std::vector<double> >  m_ckm2;
+
+  bool    m_debug;
+
+  int     m_nQuark; //Number of Quarks 
+
+  //  double* m_H; // generalised PDF defined in GetSubprocess
+
+  // std::map name to index
+  // std::maps flavour type names to iflavour <-> -6...6
+  std::map<std::string, int> iflavour; 
+  
+  // std::maps iflavour to flavour typs names -6...6 <-> name 
+  std::map<int, std::string> flavname;  
+
+  // std::maps iflavour to types -6...6 <->  -2, -1, 0, 1, 2 
+  std::map<int,int> flavourtype;
+
+  // sum of quark flavour belonging to one flavour type up,down,gluon
+  std::map<int,double> pdfA; //hadron A
+  std::map<int,double> pdfB; //hadron B
+
+  
+  // std::maps iflavour -2, -1, 0, 1, 2 of first or second proton to iprocess
+  std::map<int,int> Flav1; 
+  std::map<int,int> Flav2;
+
+  std::vector<std::string> procname; // names of subprocesses
+
+  int currentsubprocess;
+  int currentprocess;
+
+  bool m_ckmflag;
+};
+
+
+
+inline std::ostream& operator<<( std::ostream& s, const generic_pdf& _g ) { 
+  return s << "generic_pdf " << _g.name() << " " << _g.Nproc();  
+}
+
+
+
+#endif  // GENERIC_PDF_H 
+
+
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/appl_grid/file_index.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/file_index.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/file_index.h	(revision 2010)
@@ -0,0 +1,129 @@
+/* emacs: this is -*- c++ -*- */
+/**
+ **   @file    file_index.h        
+ **                   
+ **   @author  sutt
+ **   @date    Thu 31 Dec 2020 14:42:12 GMT
+ **
+ **   $Id: file_index.h, v0.0   Thu 31 Dec 2020 14:42:12 GMT sutt $
+ **
+ **   Copyright (C) 2020 sutt (sutt@cern.ch)    
+ **
+ **/
+
+
+#ifndef  FILE_INDEX_H
+#define  FILE_INDEX_H
+
+#include <iostream>
+#include <vector>
+#include <map>
+#include <string>
+
+#include "appl_grid/serialise_base.h"
+#include "appl_grid/serialisable.h"
+
+
+class file_index : public serialisable {
+
+public: 
+
+  // histogram error exception
+  class exception : public std::exception { 
+  public:
+    exception(const std::string& s) { std::cerr << what() << " " << s << std::endl; }; 
+    virtual const char* what() const throw() { return ""; }
+  };
+
+  struct entry {
+    entry( int sz=0, int off=0 ) : size(sz), offset(off) { } 
+    double size; 
+    double offset;
+  };
+
+  typedef std::map<std::string, entry> map_t;
+  typedef std::map<int, std::string>   rmap_t;
+ 
+public:
+
+  file_index();
+
+  virtual ~file_index();
+
+  std::string name() const { return mname; }
+
+  size_t size() const { return mkeys.size(); }
+
+  std::vector<std::string>::const_iterator begin() const { return mkeys.begin(); }
+  std::vector<std::string>::const_iterator   end() const { return mkeys.end(); }
+
+  entry find( const std::string& s ) const { 
+    map_t::const_iterator itr = mmap.find( s ); 
+    if ( itr!=mmap.end() ) return itr->second; 
+    return entry();
+  }
+  
+  void clear() { 
+    mkeys.clear();
+    mmap.clear();
+    mrmap.clear();
+  }
+
+  void add( const std::string& _s, int bytes, int offset=-1, int counter=0 );
+
+protected:
+
+  void check_for_duplicates( std::string& s, int& counter );
+
+  /// serialisation and deserialisation ...
+
+  virtual void serialise_internal( std::vector<SB::TYPE>& s ) const;
+  
+  virtual void deserialise_internal( std::vector<SB::TYPE>::const_iterator& itr );
+
+private:
+  
+  std::string mname;
+
+  std::vector<std::string> mkeys;
+
+  map_t  mmap;
+  rmap_t mrmap;
+
+  int    mrunning;
+
+};
+
+
+inline std::ostream& operator<<( std::ostream& s, const file_index::entry& e ) { 
+  std::string space = "";
+  if ( e.offset<0x10000 ) space += "\t"; 
+  return s << "index: 0x" << std::hex << int(e.offset) << std::dec << space << "  size: " << int(e.size);
+}
+
+
+inline std::ostream& operator<<( std::ostream& s, const file_index& f ) { 
+  s << "file index: " << f.size() << " keys\n";
+  for ( std::vector<std::string>::const_iterator itr=f.begin() ; itr!=f.end() ; itr++ ) { 
+    std::string space = "";
+    if ( itr->size()<31 ) space += "\t"; 
+    if ( itr->size()<23 ) space += "\t"; 
+    if ( itr->size()<15 ) space += "\t"; 
+    if ( itr->size()<7 )  space += "\t"; 
+    s << "    Key: " << *itr << space << "  " << f.find(*itr) << "\n"; 
+  } 
+  return s;
+}
+
+
+#endif  // FILE_INDEX_H 
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/appl_grid/fastnlo.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/fastnlo.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/fastnlo.h	(revision 2010)
@@ -0,0 +1,74 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    fastnlo.h
+ **
+ **     @brief   native fastnlo interface for appl_grid                    
+ **
+ **     @author  mark sutton
+ **     @date    Thu  8 Oct 2009 23:04:35 BST 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: fastnlo.h, v0.0   Thu  8 Oct 2009 23:04:35 BST sutt $
+ **
+ **/
+
+
+#ifndef __FASTNLO_H
+#define __FASTNLO_H
+
+#include <iostream>
+#include <vector>
+#include <string>
+
+#include "appl_grid/appl_grid.h"
+#include "appl_grid/appl_pdf.h"
+#include "appl_grid/appl_timer.h"
+
+
+class fastnlo {
+  
+public:
+  
+  fastnlo(const std::string& filename ); 
+  
+  virtual ~fastnlo() { 
+    if ( m_manage_grids ) { 
+      for ( int i=m_grid.size() ; i-- ; ) { delete m_grid[i]; m_grid[i]=NULL; } 
+    }
+  } 
+
+  unsigned size() const { return m_grid.size(); }
+
+  std::vector<appl::grid*> grids() { return m_grid; }
+
+  const appl::grid* operator[](int i) const { return m_grid[i]; }
+
+  void manageGrids(bool b=true) { m_manage_grids=b; }  
+
+private:
+  
+  bool m_manage_grids;
+
+  std::vector<appl::grid*> m_grid;
+  
+};
+
+
+inline std::ostream& operator<<( std::ostream& s, const fastnlo& _f) {
+  for ( unsigned i=0 ; i<_f.size() ; i++ ) s << _f[i] << "\n"; 
+  return s;
+}
+
+
+#endif  // __FASTNLO_H 
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/appl_grid/correction.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/correction.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/correction.h	(revision 2010)
@@ -0,0 +1,78 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    correction.h
+ **
+ **     @brief   class to store the multipliciative post processing
+ **              corrections to be applied, only basic for the time 
+ **              but will be extended as appropriate
+ **
+ **     @author  mark sutton
+ **     @date    Sun 23 Mar 2014 09:08:46 CET 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: correction.h, v0.0   Sun 23 Mar 2014 09:08:46 CET sutt $
+ **
+ **/
+
+
+#ifndef  CORRECTION_H
+#define  CORRECTION_H
+
+#include <iostream>
+#include <vector>
+#include <string>
+
+
+// typedef std::vector<double> correction;
+
+
+class correction {
+
+public:
+
+  correction(const std::vector<double>& v, const std::string& s="" ) : mlabel(s), mv(v) { } 
+
+  virtual ~correction() { } 
+
+  std::string label() const { return mlabel; }
+
+  unsigned size() const { return mv.size(); }
+
+  double& operator[](int i)       { return mv[i]; }
+  double  operator[](int i) const { return mv[i]; }
+
+  operator std::vector<double>&() { return mv; } 
+
+  std::vector<double>& v() { return mv; } 
+  const std::vector<double>& v() const { return mv; } 
+
+  correction operator=(const std::vector<double>& v) { mv=v; return *this; } 
+
+private:
+
+  std::string         mlabel;   
+  std::vector<double> mv;  
+
+};
+
+
+inline std::ostream& operator<<( std::ostream& s, const correction& c ) { 
+  s << "correction: " << c.label() << "\n";
+  for ( size_t i=0 ; i<c.size() ; i++ ) s << "  " << c[i];
+  return s;
+}
+
+
+
+#endif  // CORRECTION_H 
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/appl_grid/binary_search.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/binary_search.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/binary_search.h	(revision 2010)
@@ -0,0 +1,63 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **   @file    binary.h        
+ **                   
+ **            fast binary search algorithm
+ **   
+ **   @author  sutt
+ **   @date    Tue 29 Dec 2020 15:53:45 GMT
+ **
+ **   $Id: binary.h, v0.0   Tue 29 Dec 2020 15:53:45 GMT sutt $
+ **
+ **   Copyright (C) 2020 sutt (sutt@cern.ch)    
+ **
+ **/
+
+
+#ifndef  BINARY_H
+#define  BINARY_H
+
+#include <algorithm>
+
+inline int find_index( double x, const std::vector<double>& v ) { 
+
+  if ( x<v[0] || x>=v.back() ) return -1;
+
+  int a = 0;
+  int b = v.size()-1;
+  
+  while ( b>(a+1) ) { 
+
+    /// fast integer division by two 
+    int c = ( a+b ) >> 1;
+    
+    if ( x<v[c] ) b = c;
+    else          a = c;
+   
+  }
+
+  return a;
+}
+
+
+/// simpler binary search std::lower_bound but it returns an iterator 
+/// to the list, but we want to find the index to the bin - ie, binary search 
+/// on one vector, and then access the corresponding element in a different vector
+/// would be better with a vector of bins, rather than indepent vectors etc
+
+// std::vector<double>::const_iterator find_lowerbound( double x, const std::vector<double>& v ) { 
+//   return std::lower_bound<std::vector<double>::const_iterator>( v.begin(), v.end(), x );
+// }
+
+
+#endif  // BINARY_H 
+
+
+
+
+
+
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/appl_grid/serialise_base.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/serialise_base.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/serialise_base.h	(revision 2010)
@@ -0,0 +1,86 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **   @file    serialise_base.h        
+ **                   
+ **   @author  sutt
+ **   @date    Tue 29 Dec 2020 13:57:14 GMT
+ **
+ **   $Id: serialise_base.h, v0.0   Tue 29 Dec 2020 13:57:14 GMT sutt $
+ **
+ **   Copyright (C) 2020 sutt (sutt@cern.ch)    
+ **
+ **/
+
+#ifndef  SERIALISE_BASE_H
+#define  SERIALISE_BASE_H
+
+#include <vector>
+#include <string>
+#include <iostream>
+#include <cstdint>
+#include <cstdlib>
+
+#include <zlib.h>
+
+
+namespace SB { 
+
+  /// serialisation
+
+  const double WRITEGUARD = 1234567890123456; 
+
+  typedef uint64_t TYPE;  
+
+  inline void serialise( std::vector<TYPE>& strm, const std::string& s ) { 
+    strm.push_back( s.size() );
+    for ( size_t i=0 ; i<s.size() ; i++ ) strm.push_back( s[i] ); 
+  } 
+
+  template<typename T>
+  void serialise( std::vector<TYPE>& strm, const T& t ) { 
+    strm.push_back( *((SB::TYPE*)&t) );
+  } 
+
+  template<typename T>
+  void serialise( std::vector<TYPE>& strm, const std::vector<T>& v ) { 
+    strm.push_back( v.size() );
+    for ( size_t i=0 ; i<v.size() ; i++ ) serialise( strm, v[i] );
+  } 
+
+#if 0
+  /// doesn't work for some reason
+  inline void serialise( std::vector<TYPE>& strm, const std::string& s ) { 
+    serialise( strm, std::vector<char>(s.begin(),s.end()) );
+  }
+#endif
+
+
+  /// deserialise 
+
+  inline void deserialise( std::vector<TYPE>::const_iterator& itr, std::string& s ) { 
+    s.clear();
+    size_t n = (*itr++);
+    for ( size_t i=0 ; i<n ; i++ ) s += char(*itr++);
+  }
+
+  template<typename T>
+  void deserialise( std::vector<TYPE>::const_iterator& itr, T& t ) { 
+    t = *((T*)&(*itr++));
+  }
+
+  template<typename T> 
+  void deserialise( std::vector<TYPE>::const_iterator& itr, std::vector<T>& v ) { 
+    size_t n = (*itr++);
+    v.clear();
+    v.resize(n);
+    for ( size_t i=0 ; i<n ; i++ ) deserialise( itr, v[i] );
+  }
+
+}
+
+
+
+#endif  // SERIALISE_BASE_H 
+
+
+
Index: applgrid/tags/applgrid-01-06-36/appl_grid/basic_pdf.h
===================================================================
--- applgrid/tags/applgrid-01-06-36/appl_grid/basic_pdf.h	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/appl_grid/basic_pdf.h	(revision 2010)
@@ -0,0 +1,88 @@
+/** emacs: this is -*- c++ -*- **/
+/**
+ **     @file    basic_pdf.h
+ **
+ **     @brief   incoming parton combinations (not including a top 
+ **              density) the 121 different sub processes correspond 
+ **              to flavour combinations of where the first parton 
+ **              if from proton 1 and the second from proton 2 of
+ **
+ **                0 - bbar-bbar
+ **                1 - bbar-cbar
+ **                2 - bbar-sbar 
+ **                ...
+ **                120 - b-b
+ **
+ **              The flavours are in the standard lhapdf convention:
+ **
+ **                -6 (tbar) to +6 (top) with the gluon, flavour 0  
+ **
+ **     @author  mark sutton
+ **     @date    Mon Dec 10 01:36:04 GMT 2007 
+ **
+ **     @copyright (C) 2002-2019 mark sutton (sutt @ cern.ch) 
+ **
+ **     $Id: basic_pdf.h, v1.0   Mon Dec 10 01:36:04 GMT 2007 sutt $
+ **
+ **/
+
+
+
+#ifndef BASIC_PDF_H
+#define BASIC_PDF_H
+
+#include <cmath>
+
+#include "appl_grid/appl_pdf.h" 
+
+
+// basic pdf combination class
+class basic_pdf : public appl::appl_pdf {
+
+public:
+
+  basic_pdf() : appl::appl_pdf("basic") { m_Nproc=121; } 
+
+  void evaluate(const double* _fA, const double* _fB, double* H) const;
+
+  int  decideSubProcess(const int iflav1, const int iflav2) const;
+
+};  
+  
+
+
+#if 0
+
+inline void basic_pdf::evaluate(const double* _fA, const double* _fB, double* H) const {  
+
+  // remapping from pdg -6..6 convention to u..t ubar..tbar g internal
+  // basic convention
+
+  const double* f1 = _fA+6;
+  const double* f2 = _fB+6;
+
+  ///  double H[121]; /// do not include top: 11x11 rather than 13x13
+  int ih=0;
+  /// from -b to b rather than -t to t
+  for ( int i=-5 ; i<=5 ; i++ )  { 
+    for ( int j=-5 ; j<=5 ; j++ )  { 
+      /// f1 partons first !!! 
+      H[ih++] = f1[i]*f2[j];
+    }
+  }
+}
+  
+
+inline int  basic_pdf::decideSubProcess(const int iflav1, const int iflav2) const { 
+  if ( std::fabs(iflav1)>5 || std::fabs(iflav2)>5 ) return -1;
+  return (iflav1+5)*11+(iflav2+5);
+}
+
+#endif
+
+// fortran callable wrapper
+extern "C" void fbasic_pdf__(const double* fA, const double* fB, double* H);
+
+
+#endif // BASIC_PDF_H
+
Index: applgrid/tags/applgrid-01-06-36/share/photonNLO.config
===================================================================
--- applgrid/tags/applgrid-01-06-36/share/photonNLO.config	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/share/photonNLO.config	(revision 2010)
@@ -0,0 +1,34 @@
+0
+0 3   -5 -5  -3 -3  -1 -1
+1 6   -5 -4  -5 -2  -3 -4  -3 -2  -1 -4  -1 -2
+2 6   -5 -3  -5 -1  -3 -5  -3 -1  -1 -5  -1 -3
+3 3   -5 0  -3 0  -1 0
+4 6   -5 1  -5 3  -3 1  -3 5  -1 3  -1 5
+5 6   -5 2  -5 4  -3 2  -3 4  -1 2  -1 4
+6 3   -5 5  -3 3  -1 1
+7 6   -4 -5  -4 -3  -4 -1  -2 -5  -2 -3  -2 -1
+8 2   -4 -4  -2 -2
+9 2   -4 -2  -2 -4
+10 2   -4 0  -2 0
+11 6   -4 1  -4 3  -4 5  -2 1  -2 3  -2 5
+12 2   -4 2  -2 4
+13 2   -4 4  -2 2
+14 3   0 -5  0 -3  0 -1
+15 2   0 -4  0 -2
+16 1   0 0
+17 3   0 1  0 3  0 5
+18 2   0 2  0 4
+19 6   1 -5  1 -3  3 -5  3 -1  5 -3  5 -1
+20 6   1 -4  1 -2  3 -4  3 -2  5 -4  5 -2
+21 3   1 -1  3 -3  5 -5
+22 3   1 0  3 0  5 0
+23 3   1 1  3 3  5 5
+24 6   1 2  1 4  3 2  3 4  5 2  5 4
+25 6   1 3  1 5  3 1  3 5  5 1  5 3
+26 6   2 -5  2 -3  2 -1  4 -5  4 -3  4 -1
+27 2   2 -4  4 -2
+28 2   2 -2  4 -4
+29 2   2 0  4 0
+30 6   2 1  2 3  2 5  4 1  4 3  4 5
+31 2   2 2  4 4
+32 2   2 4  4 2
Index: applgrid/tags/applgrid-01-06-36/share/basic.config
===================================================================
--- applgrid/tags/applgrid-01-06-36/share/basic.config	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/share/basic.config	(revision 2010)
@@ -0,0 +1,122 @@
+0
+0	1	-5  -5
+1	1	-5  -4
+2	1	-5  -3
+3	1	-5  -2
+4	1	-5  -1
+5	1	-5   0
+6	1	-5   1
+7	1	-5   2
+8	1	-5   3
+9	1	-5   4
+10	1	-5   5
+11	1	-4  -5
+12	1	-4  -4
+13	1	-4  -3
+14	1	-4  -2
+15	1	-4  -1
+16	1	-4   0
+17	1	-4   1
+18	1	-4   2
+19	1	-4   3
+20	1	-4   4
+21	1	-4   5
+22	1	-3  -5
+23	1	-3  -4
+24	1	-3  -3
+25	1	-3  -2
+26	1	-3  -1
+27	1	-3   0
+28	1	-3   1
+29	1	-3   2
+30	1	-3   3
+31	1	-3   4
+32	1	-3   5
+33	1	-2  -5
+34	1	-2  -4
+35	1	-2  -3
+36	1	-2  -2
+37	1	-2  -1
+38	1	-2   0
+39	1	-2   1
+40	1	-2   2
+41	1	-2   3
+42	1	-2   4
+43	1	-2   5
+44	1	-1  -5
+45	1	-1  -4
+46	1	-1  -3
+47	1	-1  -2
+48	1	-1  -1
+49	1	-1   0
+50	1	-1   1
+51	1	-1   2
+52	1	-1   3
+53	1	-1   4
+54	1	-1   5
+55	1	 0  -5
+56	1	 0  -4
+57	1	 0  -3
+58	1	 0  -2
+59	1	 0  -1
+60	1	 0   0
+61	1	 0   1
+62	1	 0   2
+63	1	 0   3
+64	1	 0   4
+65	1	 0   5
+66	1	 1  -5
+67	1	 1  -4
+68	1	 1  -3
+69	1	 1  -2
+70	1	 1  -1
+71	1	 1   0
+72	1	 1   1
+73	1	 1   2
+74	1	 1   3
+75	1	 1   4
+76	1	 1   5
+77	1	 2  -5
+78	1	 2  -4
+79	1	 2  -3
+80	1	 2  -2
+81	1	 2  -1
+82	1	 2   0
+83	1	 2   1
+84	1	 2   2
+85	1	 2   3
+86	1	 2   4
+87	1	 2   5
+88	1	 3  -5
+89	1	 3  -4
+90	1	 3  -3
+91	1	 3  -2
+92	1	 3  -1
+93	1	 3   0
+94	1	 3   1
+95	1	 3   2
+96	1	 3   3
+97	1	 3   4
+98	1	 3   5
+99	1	 4  -5
+100	1	 4  -4
+101	1	 4  -3
+102	1	 4  -2
+103	1	 4  -1
+104	1	 4   0
+105	1	 4   1
+106	1	 4   2
+107	1	 4   3
+108	1	 4   4
+109	1	 4   5
+110	1	 5  -5
+111	1	 5  -4
+112	1	 5  -3
+113	1	 5  -2
+114	1	 5  -1
+115	1	 5   0
+116	1	 5   1
+117	1	 5   2
+118	1	 5   3
+119	1	 5   4
+120	1	 5   5
Index: applgrid/tags/applgrid-01-06-36/share/photonLO.config
===================================================================
--- applgrid/tags/applgrid-01-06-36/share/photonLO.config	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/share/photonLO.config	(revision 2010)
@@ -0,0 +1,7 @@
+0
+0 6   -5 0  -3 0  -1 0  1 0  3 0  5 0
+1 6   -5 5  -3 3  -1 1  1 -1  3 -3  5 -5
+2 4   -4 0  -2 0  2 0  4 0
+3 4   -4 4  -2 2  2 -2  4 -4
+4 6   0 -5  0 -3  0 -1  0 1  0 3  0 5
+5 4   0 -4  0 -2  0 2  0 4
Index: applgrid/tags/applgrid-01-06-36/share/top.config
===================================================================
--- applgrid/tags/applgrid-01-06-36/share/top.config	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/share/top.config	(revision 2010)
@@ -0,0 +1,30 @@
+0
+0  1   0  0  
+1  2   0  2   0  4
+2  2   2  0   4  0
+3  2   0 -2   0 -4
+4  2  -2  0  -4  0
+5  3   0  1   0  3   0  5
+6  3   1  0   3  0   5  0
+7  3   0 -1   0 -3   0 -5
+8  3  -1  0  -3  0  -5  0
+9  4  -2  1  -2  3  -4  1  -4  3
+10 4   1 -2   1 -4   3 -2   3 -4
+11 2   2  2   4  4 
+12 5  -2 -1  -2 -3  -4 -1  -4 -3  -2 -5
+13 5  -1 -2  -1 -4  -3 -2  -3 -4  -5 -2
+14 2   2 -2   4 -4
+15 2  -2  2  -4  4
+16 3   1 -1   3 -3   5 -5
+17 3  -1  1  -3  3  -5  5
+18 3  -1 -1  -3 -3  -5 -5
+19 2  -2 -2  -4 -4
+20 3   1  1   3  3   5  5 
+21 5   2  1   2  3   4  1   4  3   2  5
+22 5   1  2   1  4   3  2   3  4   5  2
+23 5  -1  2  -1  4  -3  2  -3  4  -5  2
+24 5   2 -1   2 -3   4 -1   4 -3   5 -2
+
+
+
+
Index: applgrid/tags/applgrid-01-06-36/share/mcfmwm.config
===================================================================
--- applgrid/tags/applgrid-01-06-36/share/mcfmwm.config	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/share/mcfmwm.config	(revision 2010)
@@ -0,0 +1,9 @@
+-1
+0 4    1 -4    1 -2    3 -4   3 -2   5 -4    5 -2  
+1 4   -4  1   -2  1   -4  3  -2  3  -4  5   -2  5  
+2 5    1  0    2  0    3  0   4  0   5  0 
+3 5   -1  0   -2  0   -3  0  -4  0  -5  0   
+4 5    0  1    0  2    0  3   0  4   0  5
+5 5    0 -1    0 -2    0 -3   0 -4   0 -5  
+
+
Index: applgrid/tags/applgrid-01-06-36/share/mcfmwp.config
===================================================================
--- applgrid/tags/applgrid-01-06-36/share/mcfmwp.config	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/share/mcfmwp.config	(revision 2010)
@@ -0,0 +1,8 @@
+1
+0 4   -3  2   -3  4   -1  2   -1  4   -5  2   -5  4  
+1 4    2 -3    4 -3    2 -1    4 -1    2 -5    4 -5  
+2 5   -1  0   -2  0   -3  0   -4  0   -5  0   
+3 5    1  0    2  0    3  0    4  0    5  0 
+4 5    0 -1    0 -2    0 -3    0 -4    0 -5  
+5 5    0  1    0  2    0  3    0  4    0  5
+
Index: applgrid/tags/applgrid-01-06-36/configure.ac
===================================================================
--- applgrid/tags/applgrid-01-06-36/configure.ac	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/configure.ac	(revision 2010)
@@ -0,0 +1,208 @@
+AC_INIT(APPLgrid, 1.6.36, sutt@cern.ch,applgrid,https://applgrid.hepforge.org)
+AM_INIT_AUTOMAKE
+AM_CONFIG_HEADER(amconfig.h:amconfig.in)
+#AC_PROG_FC
+#AC_CHECK_PROGS(FC, [gfortran g77 f77 fort77 f90], no)
+#AC_PROG_FC
+#AC_LANG_PUSH(Fortran)
+#AC_FC_SRCEXT(f90)
+#AC_FC_LIBRARY_LDFLAGS
+#AC_FC_WRAPPERS
+#AC_FC_FREEFORM
+#AC_FC_DUMMY_MAIN
+#AC_LANG_POP(Fortran)
+AC_PROG_CC
+AC_PROG_CXX 
+AM_PROG_LIBTOOL
+AM_SILENT_RULES([no])
+AC_HEADER_STDC
+AC_FUNC_UTIME_NULL
+AC_CONFIG_MACRO_DIR([m4])
+## Set Fortran compiler behaviour
+#if test "x$FCFLAGS" == "x"; then 
+#  FCFLAGS="-O2"
+#fi
+## Try to respect users' Fortran compiler variables
+#if test "x$FC" == "x"; then 
+#  if test "x$F77" == "x"; then 
+#FC="$GFORTRAN"
+#FC=gfortran
+#  else
+#    FC="$F77"
+#  fi
+#fi
+
+
+CFLAGS=" -I/opt/local/include $CFLAGS " 
+CXXFLAGS=" -I/opt/local/include $CXXFLAGS " 
+LDFLAGS=" -L/opt/local/lib $LDFLAGS -lpthread "
+
+# check for zlib
+AC_SEARCH_LIBS( gzopen, z )
+
+# AX_CHECK_ZLIB([ZLIBTEST=true], ZLIBTEST="")
+# AX_CHECK_ZLIB()
+# AC_MSG_NOTICE([ZLIBTEST is set to $ZLIBTEST])
+# AM_CONDITIONAL(HAVE_LIBZ, test "x$ZLIBTEST" != "x")
+
+
+AC_ARG_WITH([root],
+            [AS_HELP_STRING([--without-root],[disable support for root])],
+            [],
+            [with_root=yes])
+          
+USEROOT=
+AS_IF([test "x$with_root" != xno],
+      [AC_SUBST([USEROOT], [1])],
+      [AC_SUBST([USEROOT], [0])])
+
+AC_MSG_CHECKING([compile with root]) 
+AC_MSG_RESULT([$with_root]) 
+
+			        
+ROOT_CXXFLAGS=
+ROOT_LDFLAGS=
+
+if test "$USEROOT" == "1"; then 
+AH_TEMPLATE([HAVE_CERNROOT],[Define 1 if the cern root package is installed])
+AC_PATH_PROG(ROOTPATH,root,no)
+if test "$ROOTPATH" = "no"; then
+    AC_MSG_WARN([**************************])
+    AC_MSG_WARN([])
+    AC_MSG_WARN([root was not found ])
+    AC_MSG_WARN([])
+    AC_MSG_WARN([**************************])
+    AC_SUBST([USEROOT], [0])
+    echo "" > appl_grid/appl_root.h
+else
+    echo "#define USEROOT" > appl_grid/appl_root.h
+    ROOT_CXXFLAGS=$(root-config --cflags)
+    ROOT_LDFLAGS=$(root-config --ldflags)
+    ROOT_LDFLAGS+=" " 
+    ROOT_LDFLAGS+=$(root-config --libs)
+    AC_DEFINE_UNQUOTED(HAVE_CERNROOT,1,[Define 1 if root is installed]) 
+fi
+else
+    echo "" > appl_grid/appl_root.h
+fi 
+
+if test "$USEROOT" != "1"; then 
+    echo "" > appl_grid/appl_root.h
+    AH_TEMPLATE([HAVE_CERNROOT],[Define 0 if the cern root package is installed])
+    AC_MSG_WARN([****************************************************************])
+    AC_MSG_WARN([ compiling without root])
+    if ( echo $CXXFLAGS | grep -q "std=c++" ); then 
+       AC_MSG_WARN([ using $CXXFLAGS ])
+    else
+       touch /tmp/t.cxx 
+       ( g++ -std=c++11 -c /tmp/t.cxx ) && COMP=" -std=c++11 "
+       ( g++ -std=c++1y -c /tmp/t.cxx ) && COMP=" -std=c++1y "
+       ( g++ -std=c++1z -c /tmp/t.cxx ) && COMP=" -std=c++1z "
+       CXXFLAGS+=$COMP
+       AC_MSG_WARN([ defaulting to $COMP : for a different c++ version ])
+       AC_MSG_WARN([ should set CXXFLAGS+=" -std=<c++_version> " and run ])
+       AC_MSG_WARN([ configure again] )
+    fi
+    AC_MSG_WARN([****************************************************************])
+fi
+
+( echo $CXXFLAGS | grep -q "m64" ) || CXXFLAGS+=" -m64 " 
+
+
+AC_SUBST(ROOT_LDFLAGS)
+AC_SUBST(ROOT_CXXFLAGS)
+
+
+AM_CONDITIONAL(USE_ROOT, [test "$USEROOT" == "1"] )
+
+
+# lhapdf isn't technically needed 
+# AH_TEMPLATE([HAVE_LHAPDF],[Define 1 if lhapdf is installed])
+# AC_PATH_PROG(LHAPDFPATH,lhapdf-config,no)
+
+HOPPET_LDFLAGS=
+HOPPET_CXXFLAGS=
+AH_TEMPLATE([HAVE_HOPPET],[Define 1 if the hoppet library is installed])
+AC_PATH_PROG(HOPPETPATH,hoppet-config,no)
+if test "$HOPPETPATH" = "no"; then
+    AC_MSG_WARN([********************************************************************])
+    AC_MSG_WARN([])
+    AC_MSG_WARN([   HOPPET was not found - you will not be able to arbirarily])
+    AC_MSG_WARN([   change the factorisation scale in the convolution])
+    AC_MSG_WARN([])
+    AC_MSG_WARN([********************************************************************])
+else 
+     HOPPET_LDFLAGS=$(hoppet-config --libs)
+     HOPPET_CXXFLAGS=$(hoppet-config --cxxflags) 
+     AC_DEFINE_UNQUOTED(HAVE_HOPPET,1,[Define 1 if hoppet is installed]) 
+fi
+
+AC_SUBST(HOPPET_LDFLAGS)
+AC_SUBST(HOPPET_CXXFLAGS)
+AM_CONDITIONAL(USE_HOPPET, [test "$HOPPETPATH" != "no"] )
+
+
+# check for the location of the libgfortran.{so,a} libraries
+# would not be needed but some installations have only libgfortran.a
+# and some only libgfortran.so, so we can'r simply use -print-file-name 
+# to get the path.
+
+AC_MSG_NOTICE([Checking for gfortran library ])
+
+FRTLIB=$(gfortran --print-file-name=libgfortran.so ) 
+FRTLPATH=$( echo $FRTLIB | sed 's|libgfortran.so||')
+
+if test "$FRTLPATH" != ""; then
+    AC_MSG_NOTICE([found libgfortran.so])
+    AM_CONDITIONAL(USE_FRTLSO, [test "$FRTLPATH" != ""])
+    AC_SUBST([FRTLLIB],[" -L$(\cd $FRTLPATH && \pwd) -lgfortran "])
+else
+    AC_MSG_WARN([libgfortran.so not found - checking for libgfotran.a])
+    FRTLIB=$(gfortran --print-file-name=libgfortran.a)
+    FRTLPATH=$(echo $FRTLIB | sed 's|libgfortran.a||')
+    if test "$FRTLPATH" != ""; then
+        AM_CONDITIONAL(USE_FRTLSO, [! test "$FRTLPATH" != ""])
+        AC_MSG_NOTICE([found libgfortran.a])
+        AC_SUBST([FRTLLIB],[" -L$(\cd $FRTLPATH && \pwd) -lgfortran "])
+    else
+        AM_CONDITIONAL(USE_FRTLSO, [test "x$FRTLPATH" != "x"])
+        AC_SUBST([FRTLLIB],[""])
+        AC_MSG_WARN([not found gfortran library - may not be able to link against fortran code or hoppet])
+    fi
+fi
+
+
+
+
+
+LHAPDF_MAJOR_VERSION=0
+LHAPDF_CXXFLAGS=
+AH_TEMPLATE([HAVE_LHAPDF],[Define 1 if the LHAPDF library is installed])
+AC_PATH_PROG(LHAPDFPATH,lhapdf-config,no)
+if test "$LHAPDFPATH" != "no"; then
+
+    # get LHAPDF major version - do nothing is version 6 not installed	
+    LHAPDF_MAJOR_VERSION=$(lhapdf-config --version | sed 's/\..*//')
+
+    if test $LHAPDF_MAJOR_VERSION -gt 5; then
+
+       # find include path
+       LHAPDF_CXXFLAGS=$(lhapdf-config --libs)
+
+       AC_DEFINE_UNQUOTED(HAVE_LHAPDF,1,[Define 1 if LHAPDF is installed]) 
+    
+       LHAPDF_VERSION=$(lhapdf-config --version)
+
+       AH_TEMPLATE([LHAPDF_VERSION],[LHAPDF version])
+       AC_DEFINE_UNQUOTED(LHAPDF_VERSION,"$LHAPDF_VERSION",[LHAPDF version]) 
+
+    fi
+fi
+
+AC_SUBST(LHAPDF_CXXFLAGS)
+AM_CONDITIONAL(USE_LHAPDF, [test $LHAPDF_MAJOR_VERSION -gt 5 ] )
+
+
+AC_CONFIG_FILES([bin/Makefile bin/applgrid-config])
+
+AC_OUTPUT(Makefile src/Makefile)
Index: applgrid/tags/applgrid-01-06-36/configure.scan
===================================================================
--- applgrid/tags/applgrid-01-06-36/configure.scan	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/configure.scan	(revision 2010)
@@ -0,0 +1,34 @@
+#                                               -*- Autoconf -*-
+# Process this file with autoconf to produce a configure script.
+
+AC_PREREQ(2.61)
+AC_INIT(FULL-PACKAGE-NAME, VERSION, BUG-REPORT-ADDRESS)
+AC_CONFIG_SRCDIR([config.h.in])
+AC_CONFIG_HEADER([config.h])
+
+# Checks for programs.
+AC_PROG_CXX
+AC_PROG_CC
+AC_PROG_INSTALL
+AC_PROG_MAKE_SET
+
+# Checks for libraries.
+
+# Checks for header files.
+AC_HEADER_STDC
+AC_CHECK_HEADERS([stdlib.h sys/time.h])
+
+# Checks for typedefs, structures, and compiler characteristics.
+AC_HEADER_STDBOOL
+AC_C_CONST
+AC_C_INLINE
+AC_HEADER_TIME
+
+# Checks for library functions.
+AC_FUNC_MALLOC
+AC_FUNC_STAT
+AC_CHECK_FUNCS([gettimeofday mkdir pow sqrt])
+
+AC_CONFIG_FILES([Makefile
+                 src/Makefile])
+AC_OUTPUT
Index: applgrid/tags/applgrid-01-06-36/libtool
===================================================================
--- applgrid/tags/applgrid-01-06-36/libtool	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/libtool	(revision 2010)
@@ -0,0 +1,11805 @@
+#! /bin/sh
+# Generated automatically by config.status (applgrid) 1.6.36
+# Libtool was configured on host babybadger:
+# NOTE: Changes made to this file will be lost: look at ltmain.sh.
+
+# Provide generalized library-building support services.
+# Written by Gordon Matzigkeit, 1996
+
+# Copyright (C) 2014 Free Software Foundation, Inc.
+# This is free software; see the source for copying conditions.  There is NO
+# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+# GNU Libtool is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of of the License, or
+# (at your option) any later version.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program or library that is built
+# using GNU Libtool, you may include this file under the  same
+# distribution terms that you use for the rest of that program.
+#
+# GNU Libtool is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+
+# The names of the tagged configurations supported by this script.
+available_tags='CXX '
+
+# Configured defaults for sys_lib_dlsearch_path munging.
+: ${LT_SYS_LIBRARY_PATH=""}
+
+# ### BEGIN LIBTOOL CONFIG
+
+# Which release of libtool.m4 was used?
+macro_version=2.4.6
+macro_revision=2.4.6
+
+# Whether or not to build shared libraries.
+build_libtool_libs=yes
+
+# Whether or not to build static libraries.
+build_old_libs=yes
+
+# What type of objects to build.
+pic_mode=default
+
+# Whether or not to optimize for fast installation.
+fast_install=needless
+
+# Shared archive member basename,for filename based shared library versioning on AIX.
+shared_archive_member_spec=
+
+# Shell to use when invoking shell scripts.
+SHELL="/bin/sh"
+
+# An echo program that protects backslashes.
+ECHO="printf %s\\n"
+
+# The PATH separator for the build system.
+PATH_SEPARATOR=":"
+
+# The host system.
+host_alias=
+host=x86_64-apple-darwin18.7.0
+host_os=darwin18.7.0
+
+# The build system.
+build_alias=
+build=x86_64-apple-darwin18.7.0
+build_os=darwin18.7.0
+
+# A sed program that does not truncate output.
+SED="/bin/sed"
+
+# Sed that helps us avoid accidentally triggering echo(1) options like -n.
+Xsed="$SED -e 1s/^X//"
+
+# A grep program that handles long lines.
+GREP="/usr/bin/grep"
+
+# An ERE matcher.
+EGREP="/usr/bin/grep -E"
+
+# A literal string matcher.
+FGREP="/usr/bin/grep -F"
+
+# A BSD- or MS-compatible name lister.
+NM="/usr/bin/nm -B"
+
+# Whether we need soft or hard links.
+LN_S="ln -s"
+
+# What is the maximum length of a command?
+max_cmd_len=196608
+
+# Object file suffix (normally "o").
+objext=o
+
+# Executable file suffix (normally "").
+exeext=
+
+# whether the shell understands "unset".
+lt_unset=unset
+
+# turn spaces into newlines.
+SP2NL="tr \\040 \\012"
+
+# turn newlines into spaces.
+NL2SP="tr \\015\\012 \\040\\040"
+
+# convert $build file names to $host format.
+to_host_file_cmd=func_convert_file_noop
+
+# convert $build files to toolchain format.
+to_tool_file_cmd=func_convert_file_noop
+
+# An object symbol dumper.
+OBJDUMP="objdump"
+
+# Method to check whether dependent libraries are shared objects.
+deplibs_check_method="pass_all"
+
+# Command to use when deplibs_check_method = "file_magic".
+file_magic_cmd="\$MAGIC_CMD"
+
+# How to find potential files when deplibs_check_method = "file_magic".
+file_magic_glob=""
+
+# Find potential files using nocaseglob when deplibs_check_method = "file_magic".
+want_nocaseglob="no"
+
+# DLL creation program.
+DLLTOOL="false"
+
+# Command to associate shared and link libraries.
+sharedlib_from_linklib_cmd="printf %s\\n"
+
+# The archiver.
+AR="ar"
+
+# Flags to create an archive.
+AR_FLAGS="cru"
+
+# How to feed a file listing to the archiver.
+archiver_list_spec=""
+
+# A symbol stripping program.
+STRIP="strip"
+
+# Commands used to install an old-style archive.
+RANLIB="ranlib"
+old_postinstall_cmds="chmod 644 \$oldlib~\$RANLIB \$tool_oldlib"
+old_postuninstall_cmds=""
+
+# Whether to use a lock for old archive extraction.
+lock_old_archive_extraction=yes
+
+# A C compiler.
+LTCC="gcc"
+
+# LTCC compiler flags.
+LTCFLAGS=" -I/opt/local/include -g -O2 "
+
+# Take the output of nm and produce a listing of raw symbols and C names.
+global_symbol_pipe="sed -n -e 's/^.*[	 ]\\([BCDEGRST][BCDEGRST]*\\)[	 ][	 ]*_\\([_A-Za-z][_A-Za-z0-9]*\\)\$/\\1 _\\2 \\2/p' | sed '/ __gnu_lto/d'"
+
+# Transform the output of nm in a proper C declaration.
+global_symbol_to_cdecl="sed -n -e 's/^T .* \\(.*\\)\$/extern int \\1();/p' -e 's/^[BCDEGRST][BCDEGRST]* .* \\(.*\\)\$/extern char \\1;/p'"
+
+# Transform the output of nm into a list of symbols to manually relocate.
+global_symbol_to_import=""
+
+# Transform the output of nm in a C name address pair.
+global_symbol_to_c_name_address="sed -n -e 's/^: \\(.*\\) .*\$/  {\"\\1\", (void *) 0},/p' -e 's/^[BCDEGRST][BCDEGRST]* .* \\(.*\\)\$/  {\"\\1\", (void *) \\&\\1},/p'"
+
+# Transform the output of nm in a C name address pair when lib prefix is needed.
+global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \\(.*\\) .*\$/  {\"\\1\", (void *) 0},/p' -e 's/^[BCDEGRST][BCDEGRST]* .* \\(lib.*\\)\$/  {\"\\1\", (void *) \\&\\1},/p' -e 's/^[BCDEGRST][BCDEGRST]* .* \\(.*\\)\$/  {\"lib\\1\", (void *) \\&\\1},/p'"
+
+# The name lister interface.
+nm_interface="BSD nm"
+
+# Specify filename containing input files for $NM.
+nm_file_list_spec=""
+
+# The root where to search for dependent libraries,and where our libraries should be installed.
+lt_sysroot=
+
+# Command to truncate a binary pipe.
+lt_truncate_bin="/bin/dd bs=4096 count=1"
+
+# The name of the directory that contains temporary libtool files.
+objdir=.libs
+
+# Used to examine libraries when file_magic_cmd begins with "file".
+MAGIC_CMD=file
+
+# Must we lock files when doing compilation?
+need_locks="no"
+
+# Manifest tool.
+MANIFEST_TOOL=":"
+
+# Tool to manipulate archived DWARF debug symbol files on Mac OS X.
+DSYMUTIL="dsymutil"
+
+# Tool to change global to local symbols on Mac OS X.
+NMEDIT="nmedit"
+
+# Tool to manipulate fat objects and archives on Mac OS X.
+LIPO="lipo"
+
+# ldd/readelf like tool for Mach-O binaries on Mac OS X.
+OTOOL="otool"
+
+# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4.
+OTOOL64=":"
+
+# Old archive suffix (normally "a").
+libext=a
+
+# Shared library suffix (normally ".so").
+shrext_cmds="\`test .\$module = .yes && echo .so || echo .dylib\`"
+
+# The commands to extract the exported symbol list from a shared archive.
+extract_expsyms_cmds=""
+
+# Variables whose values should be saved in libtool wrapper scripts and
+# restored at link time.
+variables_saved_for_relink="PATH DYLD_LIBRARY_PATH  GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+
+# Do we need the "lib" prefix for modules?
+need_lib_prefix=no
+
+# Do we need a version for libraries?
+need_version=no
+
+# Library versioning type.
+version_type=darwin
+
+# Shared library runtime path variable.
+runpath_var=
+
+# Shared library path variable.
+shlibpath_var=DYLD_LIBRARY_PATH
+
+# Is shlibpath searched before the hard-coded library search path?
+shlibpath_overrides_runpath=yes
+
+# Format of library name prefix.
+libname_spec="lib\$name"
+
+# List of archive names.  First name is the real one, the rest are links.
+# The last name is the one that the linker finds with -lNAME
+library_names_spec="\$libname\$release\$major\$shared_ext \$libname\$shared_ext"
+
+# The coded name of the library, if different from the real name.
+soname_spec="\$libname\$release\$major\$shared_ext"
+
+# Permission mode override for installation of shared libraries.
+install_override_mode=""
+
+# Command to use after installation of a shared archive.
+postinstall_cmds=""
+
+# Command to use after uninstallation of a shared archive.
+postuninstall_cmds=""
+
+# Commands used to finish a libtool library installation in a directory.
+finish_cmds=""
+
+# As "finish_cmds", except a single script fragment to be evaled but
+# not shown.
+finish_eval=""
+
+# Whether we should hardcode library paths into libraries.
+hardcode_into_libs=no
+
+# Compile-time system search path for libraries.
+sys_lib_search_path_spec="/Library/Developer/CommandLineTools/usr/lib/clang/10.0.1  /usr/local/lib"
+
+# Detected run-time system search path for libraries.
+sys_lib_dlsearch_path_spec="/usr/local/lib /lib /usr/lib"
+
+# Explicit LT_SYS_LIBRARY_PATH set during ./configure time.
+configure_time_lt_sys_library_path=""
+
+# Whether dlopen is supported.
+dlopen_support=unknown
+
+# Whether dlopen of programs is supported.
+dlopen_self=unknown
+
+# Whether dlopen of statically linked programs is supported.
+dlopen_self_static=unknown
+
+# Commands to strip libraries.
+old_striplib="strip -S"
+striplib="strip -x"
+
+
+# The linker used to build libraries.
+LD="/Library/Developer/CommandLineTools/usr/bin/ld"
+
+# How to create reloadable object files.
+reload_flag=" -r"
+reload_cmds="\$LTCC \$LTCFLAGS -nostdlib \$wl-r -o \$output\$reload_objs"
+
+# Commands used to build an old-style archive.
+old_archive_cmds="\$AR \$AR_FLAGS \$oldlib\$oldobjs~\$RANLIB \$tool_oldlib"
+
+# A language specific compiler.
+CC="gcc"
+
+# Is the compiler the GNU compiler?
+with_gcc=yes
+
+# Compiler flag to turn off builtin functions.
+no_builtin_flag=" -fno-builtin -fno-rtti -fno-exceptions"
+
+# Additional compiler flags for building library objects.
+pic_flag=" -fno-common -DPIC"
+
+# How to pass a linker flag through the compiler.
+wl="-Wl,"
+
+# Compiler flag to prevent dynamic linking.
+link_static_flag=""
+
+# Does compiler simultaneously support -c and -o options?
+compiler_c_o="yes"
+
+# Whether or not to add -lc for building shared libraries.
+build_libtool_need_lc=no
+
+# Whether or not to disallow shared libs when runtime libs are static.
+allow_libtool_libs_with_static_runtimes=no
+
+# Compiler flag to allow reflexive dlopens.
+export_dynamic_flag_spec=""
+
+# Compiler flag to generate shared objects directly from archives.
+whole_archive_flag_spec="\`for conv in \$convenience\\\"\\\"; do test  -n \\\"\$conv\\\" && new_convenience=\\\"\$new_convenience \$wl-force_load,\$conv\\\"; done; func_echo_all \\\"\$new_convenience\\\"\`"
+
+# Whether the compiler copes with passing no objects directly.
+compiler_needs_object="no"
+
+# Create an old-style archive from a shared archive.
+old_archive_from_new_cmds=""
+
+# Create a temporary old-style archive to link instead of a shared archive.
+old_archive_from_expsyms_cmds=""
+
+# Commands used to build a shared archive.
+archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module"
+archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym"
+
+# Commands used to build a loadable module if different from building
+# a shared archive.
+module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags"
+module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym"
+
+# Whether we are building with GNU ld or not.
+with_gnu_ld="no"
+
+# Flag that allows shared libraries with undefined symbols to be built.
+allow_undefined_flag="\$wl-undefined \${wl}dynamic_lookup"
+
+# Flag that enforces no undefined symbols.
+no_undefined_flag=""
+
+# Flag to hardcode $libdir into a binary during linking.
+# This must work even if $libdir does not exist
+hardcode_libdir_flag_spec=""
+
+# Whether we need a single "-rpath" flag with a separated argument.
+hardcode_libdir_separator=""
+
+# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes
+# DIR into the resulting binary.
+hardcode_direct=no
+
+# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes
+# DIR into the resulting binary and the resulting library dependency is
+# "absolute",i.e impossible to change by setting $shlibpath_var if the
+# library is relocated.
+hardcode_direct_absolute=no
+
+# Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+# into the resulting binary.
+hardcode_minus_L=no
+
+# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+# into the resulting binary.
+hardcode_shlibpath_var=unsupported
+
+# Set to "yes" if building a shared library automatically hardcodes DIR
+# into the library and all subsequent libraries and executables linked
+# against it.
+hardcode_automatic=yes
+
+# Set to yes if linker adds runtime paths of dependent libraries
+# to runtime path list.
+inherit_rpath=no
+
+# Whether libtool must link a program against all its dependency libraries.
+link_all_deplibs=yes
+
+# Set to "yes" if exported symbols are required.
+always_export_symbols=no
+
+# The commands to list exported symbols.
+export_symbols_cmds="\$NM \$libobjs \$convenience | \$global_symbol_pipe | \$SED 's/.* //' | sort | uniq > \$export_symbols"
+
+# Symbols that should not be listed in the preloaded symbols.
+exclude_expsyms="_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*"
+
+# Symbols that must always be exported.
+include_expsyms=""
+
+# Commands necessary for linking programs (against libraries) with templates.
+prelink_cmds=""
+
+# Commands necessary for finishing linking programs.
+postlink_cmds=""
+
+# Specify filename containing input files.
+file_list_spec=""
+
+# How to hardcode a shared library path into an executable.
+hardcode_action=immediate
+
+# The directories searched by this compiler when creating a shared library.
+compiler_lib_search_dirs=""
+
+# Dependencies to place before and after the objects being linked to
+# create a shared library.
+predep_objects=""
+postdep_objects=""
+predeps=""
+postdeps=""
+
+# The library search path used internally by the compiler when linking
+# a shared library.
+compiler_lib_search_path=""
+
+# ### END LIBTOOL CONFIG
+
+
+# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE
+
+# func_munge_path_list VARIABLE PATH
+# -----------------------------------
+# VARIABLE is name of variable containing _space_ separated list of
+# directories to be munged by the contents of PATH, which is string
+# having a format:
+# "DIR[:DIR]:"
+#       string "DIR[ DIR]" will be prepended to VARIABLE
+# ":DIR[:DIR]"
+#       string "DIR[ DIR]" will be appended to VARIABLE
+# "DIRP[:DIRP]::[DIRA:]DIRA"
+#       string "DIRP[ DIRP]" will be prepended to VARIABLE and string
+#       "DIRA[ DIRA]" will be appended to VARIABLE
+# "DIR[:DIR]"
+#       VARIABLE will be replaced by "DIR[ DIR]"
+func_munge_path_list ()
+{
+    case x$2 in
+    x)
+        ;;
+    *:)
+        eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\"
+        ;;
+    x:*)
+        eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\"
+        ;;
+    *::*)
+        eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\"
+        eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\"
+        ;;
+    *)
+        eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\"
+        ;;
+    esac
+}
+
+
+# Calculate cc_basename.  Skip known compiler wrappers and cross-prefix.
+func_cc_basename ()
+{
+    for cc_temp in $*""; do
+      case $cc_temp in
+        compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+        distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+        \-*) ;;
+        *) break;;
+      esac
+    done
+    func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+}
+
+
+# ### END FUNCTIONS SHARED WITH CONFIGURE
+
+#! /bin/sh
+## DO NOT EDIT - This file generated from ./build-aux/ltmain.in
+##               by inline-source v2014-01-03.01
+
+# libtool (GNU libtool) 2.4.6
+# Provide generalized library-building support services.
+# Written by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+
+# Copyright (C) 1996-2015 Free Software Foundation, Inc.
+# This is free software; see the source for copying conditions.  There is NO
+# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+# GNU Libtool is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# As a special exception to the GNU General Public License,
+# if you distribute this file as part of a program or library that
+# is built using GNU Libtool, you may include this file under the
+# same distribution terms that you use for the rest of that program.
+#
+# GNU Libtool is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+
+PROGRAM=libtool
+PACKAGE=libtool
+VERSION=2.4.6
+package_revision=2.4.6
+
+
+## ------ ##
+## Usage. ##
+## ------ ##
+
+# Run './libtool --help' for help with using this script from the
+# command line.
+
+
+## ------------------------------- ##
+## User overridable command paths. ##
+## ------------------------------- ##
+
+# After configure completes, it has a better idea of some of the
+# shell tools we need than the defaults used by the functions shared
+# with bootstrap, so set those here where they can still be over-
+# ridden by the user, but otherwise take precedence.
+
+: ${AUTOCONF="autoconf"}
+: ${AUTOMAKE="automake"}
+
+
+## -------------------------- ##
+## Source external libraries. ##
+## -------------------------- ##
+
+# Much of our low-level functionality needs to be sourced from external
+# libraries, which are installed to $pkgauxdir.
+
+# Set a version string for this script.
+scriptversion=2015-01-20.17; # UTC
+
+# General shell script boiler plate, and helper functions.
+# Written by Gary V. Vaughan, 2004
+
+# Copyright (C) 2004-2015 Free Software Foundation, Inc.
+# This is free software; see the source for copying conditions.  There is NO
+# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+
+# As a special exception to the GNU General Public License, if you distribute
+# this file as part of a program or library that is built using GNU Libtool,
+# you may include this file under the same distribution terms that you use
+# for the rest of that program.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNES FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Please report bugs or propose patches to gary@gnu.org.
+
+
+## ------ ##
+## Usage. ##
+## ------ ##
+
+# Evaluate this file near the top of your script to gain access to
+# the functions and variables defined here:
+#
+#   . `echo "$0" | ${SED-sed} 's|[^/]*$||'`/build-aux/funclib.sh
+#
+# If you need to override any of the default environment variable
+# settings, do that before evaluating this file.
+
+
+## -------------------- ##
+## Shell normalisation. ##
+## -------------------- ##
+
+# Some shells need a little help to be as Bourne compatible as possible.
+# Before doing anything else, make sure all that help has been provided!
+
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+  emulate sh
+  NULLCMD=:
+  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+  # is contrary to our usage.  Disable this feature.
+  alias -g '${1+"$@"}'='"$@"'
+  setopt NO_GLOB_SUBST
+else
+  case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac
+fi
+
+# NLS nuisances: We save the old values in case they are required later.
+_G_user_locale=
+_G_safe_locale=
+for _G_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES
+do
+  eval "if test set = \"\${$_G_var+set}\"; then
+          save_$_G_var=\$$_G_var
+          $_G_var=C
+	  export $_G_var
+	  _G_user_locale=\"$_G_var=\\\$save_\$_G_var; \$_G_user_locale\"
+	  _G_safe_locale=\"$_G_var=C; \$_G_safe_locale\"
+	fi"
+done
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+# Make sure IFS has a sensible default
+sp=' '
+nl='
+'
+IFS="$sp	$nl"
+
+# There are apparently some retarded systems that use ';' as a PATH separator!
+if test "${PATH_SEPARATOR+set}" != set; then
+  PATH_SEPARATOR=:
+  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+      PATH_SEPARATOR=';'
+  }
+fi
+
+
+
+## ------------------------- ##
+## Locate command utilities. ##
+## ------------------------- ##
+
+
+# func_executable_p FILE
+# ----------------------
+# Check that FILE is an executable regular file.
+func_executable_p ()
+{
+    test -f "$1" && test -x "$1"
+}
+
+
+# func_path_progs PROGS_LIST CHECK_FUNC [PATH]
+# --------------------------------------------
+# Search for either a program that responds to --version with output
+# containing "GNU", or else returned by CHECK_FUNC otherwise, by
+# trying all the directories in PATH with each of the elements of
+# PROGS_LIST.
+#
+# CHECK_FUNC should accept the path to a candidate program, and
+# set $func_check_prog_result if it truncates its output less than
+# $_G_path_prog_max characters.
+func_path_progs ()
+{
+    _G_progs_list=$1
+    _G_check_func=$2
+    _G_PATH=${3-"$PATH"}
+
+    _G_path_prog_max=0
+    _G_path_prog_found=false
+    _G_save_IFS=$IFS; IFS=${PATH_SEPARATOR-:}
+    for _G_dir in $_G_PATH; do
+      IFS=$_G_save_IFS
+      test -z "$_G_dir" && _G_dir=.
+      for _G_prog_name in $_G_progs_list; do
+        for _exeext in '' .EXE; do
+          _G_path_prog=$_G_dir/$_G_prog_name$_exeext
+          func_executable_p "$_G_path_prog" || continue
+          case `"$_G_path_prog" --version 2>&1` in
+            *GNU*) func_path_progs_result=$_G_path_prog _G_path_prog_found=: ;;
+            *)     $_G_check_func $_G_path_prog
+		   func_path_progs_result=$func_check_prog_result
+		   ;;
+          esac
+          $_G_path_prog_found && break 3
+        done
+      done
+    done
+    IFS=$_G_save_IFS
+    test -z "$func_path_progs_result" && {
+      echo "no acceptable sed could be found in \$PATH" >&2
+      exit 1
+    }
+}
+
+
+# We want to be able to use the functions in this file before configure
+# has figured out where the best binaries are kept, which means we have
+# to search for them ourselves - except when the results are already set
+# where we skip the searches.
+
+# Unless the user overrides by setting SED, search the path for either GNU
+# sed, or the sed that truncates its output the least.
+test -z "$SED" && {
+  _G_sed_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/
+  for _G_i in 1 2 3 4 5 6 7; do
+    _G_sed_script=$_G_sed_script$nl$_G_sed_script
+  done
+  echo "$_G_sed_script" 2>/dev/null | sed 99q >conftest.sed
+  _G_sed_script=
+
+  func_check_prog_sed ()
+  {
+    _G_path_prog=$1
+
+    _G_count=0
+    printf 0123456789 >conftest.in
+    while :
+    do
+      cat conftest.in conftest.in >conftest.tmp
+      mv conftest.tmp conftest.in
+      cp conftest.in conftest.nl
+      echo '' >> conftest.nl
+      "$_G_path_prog" -f conftest.sed <conftest.nl >conftest.out 2>/dev/null || break
+      diff conftest.out conftest.nl >/dev/null 2>&1 || break
+      _G_count=`expr $_G_count + 1`
+      if test "$_G_count" -gt "$_G_path_prog_max"; then
+        # Best one so far, save it but keep looking for a better one
+        func_check_prog_result=$_G_path_prog
+        _G_path_prog_max=$_G_count
+      fi
+      # 10*(2^10) chars as input seems more than enough
+      test 10 -lt "$_G_count" && break
+    done
+    rm -f conftest.in conftest.tmp conftest.nl conftest.out
+  }
+
+  func_path_progs "sed gsed" func_check_prog_sed $PATH:/usr/xpg4/bin
+  rm -f conftest.sed
+  SED=$func_path_progs_result
+}
+
+
+# Unless the user overrides by setting GREP, search the path for either GNU
+# grep, or the grep that truncates its output the least.
+test -z "$GREP" && {
+  func_check_prog_grep ()
+  {
+    _G_path_prog=$1
+
+    _G_count=0
+    _G_path_prog_max=0
+    printf 0123456789 >conftest.in
+    while :
+    do
+      cat conftest.in conftest.in >conftest.tmp
+      mv conftest.tmp conftest.in
+      cp conftest.in conftest.nl
+      echo 'GREP' >> conftest.nl
+      "$_G_path_prog" -e 'GREP$' -e '-(cannot match)-' <conftest.nl >conftest.out 2>/dev/null || break
+      diff conftest.out conftest.nl >/dev/null 2>&1 || break
+      _G_count=`expr $_G_count + 1`
+      if test "$_G_count" -gt "$_G_path_prog_max"; then
+        # Best one so far, save it but keep looking for a better one
+        func_check_prog_result=$_G_path_prog
+        _G_path_prog_max=$_G_count
+      fi
+      # 10*(2^10) chars as input seems more than enough
+      test 10 -lt "$_G_count" && break
+    done
+    rm -f conftest.in conftest.tmp conftest.nl conftest.out
+  }
+
+  func_path_progs "grep ggrep" func_check_prog_grep $PATH:/usr/xpg4/bin
+  GREP=$func_path_progs_result
+}
+
+
+## ------------------------------- ##
+## User overridable command paths. ##
+## ------------------------------- ##
+
+# All uppercase variable names are used for environment variables.  These
+# variables can be overridden by the user before calling a script that
+# uses them if a suitable command of that name is not already available
+# in the command search PATH.
+
+: ${CP="cp -f"}
+: ${ECHO="printf %s\n"}
+: ${EGREP="$GREP -E"}
+: ${FGREP="$GREP -F"}
+: ${LN_S="ln -s"}
+: ${MAKE="make"}
+: ${MKDIR="mkdir"}
+: ${MV="mv -f"}
+: ${RM="rm -f"}
+: ${SHELL="${CONFIG_SHELL-/bin/sh}"}
+
+
+## -------------------- ##
+## Useful sed snippets. ##
+## -------------------- ##
+
+sed_dirname='s|/[^/]*$||'
+sed_basename='s|^.*/||'
+
+# Sed substitution that helps us do robust quoting.  It backslashifies
+# metacharacters that are still active within double-quoted strings.
+sed_quote_subst='s|\([`"$\\]\)|\\\1|g'
+
+# Same as above, but do not quote variable references.
+sed_double_quote_subst='s/\(["`\\]\)/\\\1/g'
+
+# Sed substitution that turns a string into a regex matching for the
+# string literally.
+sed_make_literal_regex='s|[].[^$\\*\/]|\\&|g'
+
+# Sed substitution that converts a w32 file name or path
+# that contains forward slashes, into one that contains
+# (escaped) backslashes.  A very naive implementation.
+sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g'
+
+# Re-'\' parameter expansions in output of sed_double_quote_subst that
+# were '\'-ed in input to the same.  If an odd number of '\' preceded a
+# '$' in input to sed_double_quote_subst, that '$' was protected from
+# expansion.  Since each input '\' is now two '\'s, look for any number
+# of runs of four '\'s followed by two '\'s and then a '$'.  '\' that '$'.
+_G_bs='\\'
+_G_bs2='\\\\'
+_G_bs4='\\\\\\\\'
+_G_dollar='\$'
+sed_double_backslash="\
+  s/$_G_bs4/&\\
+/g
+  s/^$_G_bs2$_G_dollar/$_G_bs&/
+  s/\\([^$_G_bs]\\)$_G_bs2$_G_dollar/\\1$_G_bs2$_G_bs$_G_dollar/g
+  s/\n//g"
+
+
+## ----------------- ##
+## Global variables. ##
+## ----------------- ##
+
+# Except for the global variables explicitly listed below, the following
+# functions in the '^func_' namespace, and the '^require_' namespace
+# variables initialised in the 'Resource management' section, sourcing
+# this file will not pollute your global namespace with anything
+# else. There's no portable way to scope variables in Bourne shell
+# though, so actually running these functions will sometimes place
+# results into a variable named after the function, and often use
+# temporary variables in the '^_G_' namespace. If you are careful to
+# avoid using those namespaces casually in your sourcing script, things
+# should continue to work as you expect. And, of course, you can freely
+# overwrite any of the functions or variables defined here before
+# calling anything to customize them.
+
+EXIT_SUCCESS=0
+EXIT_FAILURE=1
+EXIT_MISMATCH=63  # $? = 63 is used to indicate version mismatch to missing.
+EXIT_SKIP=77	  # $? = 77 is used to indicate a skipped test to automake.
+
+# Allow overriding, eg assuming that you follow the convention of
+# putting '$debug_cmd' at the start of all your functions, you can get
+# bash to show function call trace with:
+#
+#    debug_cmd='eval echo "${FUNCNAME[0]} $*" >&2' bash your-script-name
+debug_cmd=${debug_cmd-":"}
+exit_cmd=:
+
+# By convention, finish your script with:
+#
+#    exit $exit_status
+#
+# so that you can set exit_status to non-zero if you want to indicate
+# something went wrong during execution without actually bailing out at
+# the point of failure.
+exit_status=$EXIT_SUCCESS
+
+# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh
+# is ksh but when the shell is invoked as "sh" and the current value of
+# the _XPG environment variable is not equal to 1 (one), the special
+# positional parameter $0, within a function call, is the name of the
+# function.
+progpath=$0
+
+# The name of this program.
+progname=`$ECHO "$progpath" |$SED "$sed_basename"`
+
+# Make sure we have an absolute progpath for reexecution:
+case $progpath in
+  [\\/]*|[A-Za-z]:\\*) ;;
+  *[\\/]*)
+     progdir=`$ECHO "$progpath" |$SED "$sed_dirname"`
+     progdir=`cd "$progdir" && pwd`
+     progpath=$progdir/$progname
+     ;;
+  *)
+     _G_IFS=$IFS
+     IFS=${PATH_SEPARATOR-:}
+     for progdir in $PATH; do
+       IFS=$_G_IFS
+       test -x "$progdir/$progname" && break
+     done
+     IFS=$_G_IFS
+     test -n "$progdir" || progdir=`pwd`
+     progpath=$progdir/$progname
+     ;;
+esac
+
+
+## ----------------- ##
+## Standard options. ##
+## ----------------- ##
+
+# The following options affect the operation of the functions defined
+# below, and should be set appropriately depending on run-time
+# parameters passed on the command line.
+
+opt_dry_run=false
+opt_quiet=false
+opt_verbose=false
+
+# Categories 'all' and 'none' are always available.  Append any others
+# you will pass as the first argument to func_warning from your own
+# code.
+warning_categories=
+
+# By default, display warnings according to 'opt_warning_types'.  Set
+# 'warning_func'  to ':' to elide all warnings, or func_fatal_error to
+# treat the next displayed warning as a fatal error.
+warning_func=func_warn_and_continue
+
+# Set to 'all' to display all warnings, 'none' to suppress all
+# warnings, or a space delimited list of some subset of
+# 'warning_categories' to display only the listed warnings.
+opt_warning_types=all
+
+
+## -------------------- ##
+## Resource management. ##
+## -------------------- ##
+
+# This section contains definitions for functions that each ensure a
+# particular resource (a file, or a non-empty configuration variable for
+# example) is available, and if appropriate to extract default values
+# from pertinent package files. Call them using their associated
+# 'require_*' variable to ensure that they are executed, at most, once.
+#
+# It's entirely deliberate that calling these functions can set
+# variables that don't obey the namespace limitations obeyed by the rest
+# of this file, in order that they be as useful as possible to
+# callers.
+
+
+# require_term_colors
+# -------------------
+# Allow display of bold text on terminals that support it.
+require_term_colors=func_require_term_colors
+func_require_term_colors ()
+{
+    $debug_cmd
+
+    test -t 1 && {
+      # COLORTERM and USE_ANSI_COLORS environment variables take
+      # precedence, because most terminfo databases neglect to describe
+      # whether color sequences are supported.
+      test -n "${COLORTERM+set}" && : ${USE_ANSI_COLORS="1"}
+
+      if test 1 = "$USE_ANSI_COLORS"; then
+        # Standard ANSI escape sequences
+        tc_reset=''
+        tc_bold='';   tc_standout=''
+        tc_red='';   tc_green=''
+        tc_blue='';  tc_cyan=''
+      else
+        # Otherwise trust the terminfo database after all.
+        test -n "`tput sgr0 2>/dev/null`" && {
+          tc_reset=`tput sgr0`
+          test -n "`tput bold 2>/dev/null`" && tc_bold=`tput bold`
+          tc_standout=$tc_bold
+          test -n "`tput smso 2>/dev/null`" && tc_standout=`tput smso`
+          test -n "`tput setaf 1 2>/dev/null`" && tc_red=`tput setaf 1`
+          test -n "`tput setaf 2 2>/dev/null`" && tc_green=`tput setaf 2`
+          test -n "`tput setaf 4 2>/dev/null`" && tc_blue=`tput setaf 4`
+          test -n "`tput setaf 5 2>/dev/null`" && tc_cyan=`tput setaf 5`
+        }
+      fi
+    }
+
+    require_term_colors=:
+}
+
+
+## ----------------- ##
+## Function library. ##
+## ----------------- ##
+
+# This section contains a variety of useful functions to call in your
+# scripts. Take note of the portable wrappers for features provided by
+# some modern shells, which will fall back to slower equivalents on
+# less featureful shells.
+
+
+# func_append VAR VALUE
+# ---------------------
+# Append VALUE onto the existing contents of VAR.
+
+  # We should try to minimise forks, especially on Windows where they are
+  # unreasonably slow, so skip the feature probes when bash or zsh are
+  # being used:
+  if test set = "${BASH_VERSION+set}${ZSH_VERSION+set}"; then
+    : ${_G_HAVE_ARITH_OP="yes"}
+    : ${_G_HAVE_XSI_OPS="yes"}
+    # The += operator was introduced in bash 3.1
+    case $BASH_VERSION in
+      [12].* | 3.0 | 3.0*) ;;
+      *)
+        : ${_G_HAVE_PLUSEQ_OP="yes"}
+        ;;
+    esac
+  fi
+
+  # _G_HAVE_PLUSEQ_OP
+  # Can be empty, in which case the shell is probed, "yes" if += is
+  # usable, or anything else if it does not work.
+  test -z "$_G_HAVE_PLUSEQ_OP" \
+    && (eval 'x=a; x+=" b"; test "a b" = "$x"') 2>/dev/null \
+    && _G_HAVE_PLUSEQ_OP=yes
+
+if test yes = "$_G_HAVE_PLUSEQ_OP"
+then
+  # This is an XSI compatible shell, allowing a faster implementation...
+  eval 'func_append ()
+  {
+    $debug_cmd
+
+    eval "$1+=\$2"
+  }'
+else
+  # ...otherwise fall back to using expr, which is often a shell builtin.
+  func_append ()
+  {
+    $debug_cmd
+
+    eval "$1=\$$1\$2"
+  }
+fi
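+
+# For example, a sketch of typical use (the variable name is purely
+# illustrative):
+#
+#   my_flags=-g
+#   func_append my_flags " -O2"
+#
+# leaves '-g -O2' in $my_flags, whichever implementation was selected.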
+
+
+# func_append_quoted VAR VALUE
+# ----------------------------
+# Quote VALUE and append to the end of shell variable VAR, separated
+# by a space.
+if test yes = "$_G_HAVE_PLUSEQ_OP"; then
+  eval 'func_append_quoted ()
+  {
+    $debug_cmd
+
+    func_quote_for_eval "$2"
+    eval "$1+=\\ \$func_quote_for_eval_result"
+  }'
+else
+  func_append_quoted ()
+  {
+    $debug_cmd
+
+    func_quote_for_eval "$2"
+    eval "$1=\$$1\\ \$func_quote_for_eval_result"
+  }
+fi
+
+
+# func_append_uniq VAR VALUE
+# --------------------------
+# Append unique VALUE onto the existing contents of VAR, assuming
+# entries are delimited by the first character of VALUE.  For example:
+#
+#   func_append_uniq options " --another-option option-argument"
+#
+# will only append to $options if " --another-option option-argument "
+# is not already present somewhere in $options already (note spaces at
+# each end implied by leading space in second argument).
+func_append_uniq ()
+{
+    $debug_cmd
+
+    eval _G_current_value='`$ECHO $'$1'`'
+    _G_delim=`expr "$2" : '\(.\)'`
+
+    case $_G_delim$_G_current_value$_G_delim in
+      *"$2$_G_delim"*) ;;
+      *) func_append "$@" ;;
+    esac
+}
+
+
+# func_arith TERM...
+# ------------------
+# Set func_arith_result to the result of evaluating TERMs.
+  test -z "$_G_HAVE_ARITH_OP" \
+    && (eval 'test 2 = $(( 1 + 1 ))') 2>/dev/null \
+    && _G_HAVE_ARITH_OP=yes
+
+if test yes = "$_G_HAVE_ARITH_OP"; then
+  eval 'func_arith ()
+  {
+    $debug_cmd
+
+    func_arith_result=$(( $* ))
+  }'
+else
+  func_arith ()
+  {
+    $debug_cmd
+
+    func_arith_result=`expr "$@"`
+  }
+fi
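+
+# For example (both implementations should agree):
+#
+#   func_arith 2 + 3 \* 4
+#
+# sets $func_arith_result to 14; note that '*' must be escaped to
+# protect it from filename expansion.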
+
+
+# func_basename FILE
+# ------------------
+# Set func_basename_result to FILE with everything up to and including
+# the last / stripped.
+if test yes = "$_G_HAVE_XSI_OPS"; then
+  # If this shell supports suffix pattern removal, then use it to avoid
+  # forking. Hide the definitions in single quotes in case the shell chokes
+  # on unsupported syntax...
+  _b='func_basename_result=${1##*/}'
+  _d='case $1 in
+        */*) func_dirname_result=${1%/*}$2 ;;
+        *  ) func_dirname_result=$3        ;;
+      esac'
+
+else
+  # ...otherwise fall back to using sed.
+  _b='func_basename_result=`$ECHO "$1" |$SED "$sed_basename"`'
+  _d='func_dirname_result=`$ECHO "$1"  |$SED "$sed_dirname"`
+      if test "X$func_dirname_result" = "X$1"; then
+        func_dirname_result=$3
+      else
+        func_append func_dirname_result "$2"
+      fi'
+fi
+
+eval 'func_basename ()
+{
+    $debug_cmd
+
+    '"$_b"'
+}'
+
+
+# func_dirname FILE APPEND NONDIR_REPLACEMENT
+# -------------------------------------------
+# Compute the dirname of FILE.  If nonempty, add APPEND to the result,
+# otherwise set result to NONDIR_REPLACEMENT.
+eval 'func_dirname ()
+{
+    $debug_cmd
+
+    '"$_d"'
+}'
+
+
+# func_dirname_and_basename FILE APPEND NONDIR_REPLACEMENT
+# --------------------------------------------------------
+# Perform func_basename and func_dirname in a single function
+# call:
+#   dirname:  Compute the dirname of FILE.  If nonempty,
+#             add APPEND to the result, otherwise set result
+#             to NONDIR_REPLACEMENT.
+#             value returned in "$func_dirname_result"
+#   basename: Compute filename of FILE.
+#             value returned in "$func_basename_result"
+# For efficiency, we do not delegate to the functions above but instead
+# duplicate the functionality here.
+eval 'func_dirname_and_basename ()
+{
+    $debug_cmd
+
+    '"$_b"'
+    '"$_d"'
+}'
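+
+# For example, an illustrative call such as:
+#
+#   func_dirname_and_basename "src/lib/foo.c" "/" "."
+#
+# leaves 'src/lib/' in $func_dirname_result and 'foo.c' in
+# $func_basename_result.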
+
+
+# func_echo ARG...
+# ----------------
+# Echo program name prefixed message.
+func_echo ()
+{
+    $debug_cmd
+
+    _G_message=$*
+
+    func_echo_IFS=$IFS
+    IFS=$nl
+    for _G_line in $_G_message; do
+      IFS=$func_echo_IFS
+      $ECHO "$progname: $_G_line"
+    done
+    IFS=$func_echo_IFS
+}
+
+
+# func_echo_all ARG...
+# --------------------
+# Invoke $ECHO with all args, space-separated.
+func_echo_all ()
+{
+    $ECHO "$*"
+}
+
+
+# func_echo_infix_1 INFIX ARG...
+# ------------------------------
+# Echo program name, followed by INFIX on the first line, with any
+# additional lines not showing INFIX.
+func_echo_infix_1 ()
+{
+    $debug_cmd
+
+    $require_term_colors
+
+    _G_infix=$1; shift
+    _G_indent=$_G_infix
+    _G_prefix="$progname: $_G_infix: "
+    _G_message=$*
+
+    # Strip color escape sequences before counting printable length
+    for _G_tc in "$tc_reset" "$tc_bold" "$tc_standout" "$tc_red" "$tc_green" "$tc_blue" "$tc_cyan"
+    do
+      test -n "$_G_tc" && {
+        _G_esc_tc=`$ECHO "$_G_tc" | $SED "$sed_make_literal_regex"`
+        _G_indent=`$ECHO "$_G_indent" | $SED "s|$_G_esc_tc||g"`
+      }
+    done
+    _G_indent="$progname: "`echo "$_G_indent" | $SED 's|.| |g'`"  " ## exclude from sc_prohibit_nested_quotes
+
+    func_echo_infix_1_IFS=$IFS
+    IFS=$nl
+    for _G_line in $_G_message; do
+      IFS=$func_echo_infix_1_IFS
+      $ECHO "$_G_prefix$tc_bold$_G_line$tc_reset" >&2
+      _G_prefix=$_G_indent
+    done
+    IFS=$func_echo_infix_1_IFS
+}
+
+
+# func_error ARG...
+# -----------------
+# Echo program name prefixed message to standard error.
+func_error ()
+{
+    $debug_cmd
+
+    $require_term_colors
+
+    func_echo_infix_1 "  $tc_standout${tc_red}error$tc_reset" "$*" >&2
+}
+
+
+# func_fatal_error ARG...
+# -----------------------
+# Echo program name prefixed message to standard error, and exit.
+func_fatal_error ()
+{
+    $debug_cmd
+
+    func_error "$*"
+    exit $EXIT_FAILURE
+}
+
+
+# func_grep EXPRESSION FILENAME
+# -----------------------------
+# Check whether EXPRESSION matches any line of FILENAME, without output.
+func_grep ()
+{
+    $debug_cmd
+
+    $GREP "$1" "$2" >/dev/null 2>&1
+}
+
+
+# func_len STRING
+# ---------------
+# Set func_len_result to the length of STRING. STRING may not
+# start with a hyphen.
+  test -z "$_G_HAVE_XSI_OPS" \
+    && (eval 'x=a/b/c;
+      test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \
+    && _G_HAVE_XSI_OPS=yes
+
+if test yes = "$_G_HAVE_XSI_OPS"; then
+  eval 'func_len ()
+  {
+    $debug_cmd
+
+    func_len_result=${#1}
+  }'
+else
+  func_len ()
+  {
+    $debug_cmd
+
+    func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len`
+  }
+fi
+
+
+# func_mkdir_p DIRECTORY-PATH
+# ---------------------------
+# Make sure the entire path to DIRECTORY-PATH is available.
+func_mkdir_p ()
+{
+    $debug_cmd
+
+    _G_directory_path=$1
+    _G_dir_list=
+
+    if test -n "$_G_directory_path" && test : != "$opt_dry_run"; then
+
+      # Protect directory names starting with '-'
+      case $_G_directory_path in
+        -*) _G_directory_path=./$_G_directory_path ;;
+      esac
+
+      # While some portion of DIR does not yet exist...
+      while test ! -d "$_G_directory_path"; do
+        # ...make a list in topmost first order.  Use a colon delimited
+        # list in case some portion of path contains whitespace.
+        _G_dir_list=$_G_directory_path:$_G_dir_list
+
+        # If the last portion added has no slash in it, the list is done
+        case $_G_directory_path in */*) ;; *) break ;; esac
+
+        # ...otherwise throw away the child directory and loop
+        _G_directory_path=`$ECHO "$_G_directory_path" | $SED -e "$sed_dirname"`
+      done
+      _G_dir_list=`$ECHO "$_G_dir_list" | $SED 's|:*$||'`
+
+      func_mkdir_p_IFS=$IFS; IFS=:
+      for _G_dir in $_G_dir_list; do
+	IFS=$func_mkdir_p_IFS
+        # mkdir can fail with a 'File exists' error if two processes
+        # try to create one of the directories concurrently.  Don't
+        # stop in that case!
+        $MKDIR "$_G_dir" 2>/dev/null || :
+      done
+      IFS=$func_mkdir_p_IFS
+
+      # Bail out if we (or some other process) failed to create a directory.
+      test -d "$_G_directory_path" || \
+        func_fatal_error "Failed to create '$1'"
+    fi
+}
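+
+# For example (the path shown is purely illustrative):
+#
+#   func_mkdir_p "$destdir/share/doc"
+#
+# creates any missing components of the path, and calls
+# func_fatal_error if the directory still cannot be created.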
+
+
+# func_mktempdir [BASENAME]
+# -------------------------
+# Make a temporary directory that won't clash with other running
+# libtool processes, and that avoids race conditions if possible.  If
+# given, BASENAME is the basename for that directory.
+func_mktempdir ()
+{
+    $debug_cmd
+
+    _G_template=${TMPDIR-/tmp}/${1-$progname}
+
+    if test : = "$opt_dry_run"; then
+      # Return a directory name, but don't create it in dry-run mode
+      _G_tmpdir=$_G_template-$$
+    else
+
+      # If mktemp works, use that first and foremost
+      _G_tmpdir=`mktemp -d "$_G_template-XXXXXXXX" 2>/dev/null`
+
+      if test ! -d "$_G_tmpdir"; then
+        # Failing that, at least try and use $RANDOM to avoid a race
+        _G_tmpdir=$_G_template-${RANDOM-0}$$
+
+        func_mktempdir_umask=`umask`
+        umask 0077
+        $MKDIR "$_G_tmpdir"
+        umask $func_mktempdir_umask
+      fi
+
+      # If we're not in dry-run mode, bomb out on failure
+      test -d "$_G_tmpdir" || \
+        func_fatal_error "cannot create temporary directory '$_G_tmpdir'"
+    fi
+
+    $ECHO "$_G_tmpdir"
+}
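+
+# A possible usage sketch; the variable name and cleanup policy are the
+# caller's choice:
+#
+#   my_tmpdir=`func_mktempdir`
+#   trap '$RM -r "$my_tmpdir"' 0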
+
+
+# func_normal_abspath PATH
+# ------------------------
+# Remove doubled-up and trailing slashes, "." path components,
+# and cancel out any ".." path components in PATH after making
+# it an absolute path.
+func_normal_abspath ()
+{
+    $debug_cmd
+
+    # These SED scripts presuppose an absolute path with a trailing slash.
+    _G_pathcar='s|^/\([^/]*\).*$|\1|'
+    _G_pathcdr='s|^/[^/]*||'
+    _G_removedotparts=':dotsl
+		s|/\./|/|g
+		t dotsl
+		s|/\.$|/|'
+    _G_collapseslashes='s|/\{1,\}|/|g'
+    _G_finalslash='s|/*$|/|'
+
+    # Start from root dir and reassemble the path.
+    func_normal_abspath_result=
+    func_normal_abspath_tpath=$1
+    func_normal_abspath_altnamespace=
+    case $func_normal_abspath_tpath in
+      "")
+        # Empty path, that just means $cwd.
+        func_stripname '' '/' "`pwd`"
+        func_normal_abspath_result=$func_stripname_result
+        return
+        ;;
+      # The next three entries are used to spot a run of precisely
+      # two leading slashes without using negated character classes;
+      # we take advantage of case's first-match behaviour.
+      ///*)
+        # Unusual form of absolute path, do nothing.
+        ;;
+      //*)
+        # Not necessarily an ordinary path; POSIX reserves leading '//'
+        # and for example Cygwin uses it to access remote file shares
+        # over CIFS/SMB, so we conserve a leading double slash if found.
+        func_normal_abspath_altnamespace=/
+        ;;
+      /*)
+        # Absolute path, do nothing.
+        ;;
+      *)
+        # Relative path, prepend $cwd.
+        func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath
+        ;;
+    esac
+
+    # Cancel out all the simple stuff to save iterations.  We also want
+    # the path to end with a slash for ease of parsing, so make sure
+    # there is one (and only one) here.
+    func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \
+          -e "$_G_removedotparts" -e "$_G_collapseslashes" -e "$_G_finalslash"`
+    while :; do
+      # Processed it all yet?
+      if test / = "$func_normal_abspath_tpath"; then
+        # If we ascended to the root using ".." the result may be empty now.
+        if test -z "$func_normal_abspath_result"; then
+          func_normal_abspath_result=/
+        fi
+        break
+      fi
+      func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \
+          -e "$_G_pathcar"`
+      func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \
+          -e "$_G_pathcdr"`
+      # Figure out what to do with it
+      case $func_normal_abspath_tcomponent in
+        "")
+          # Trailing empty path component, ignore it.
+          ;;
+        ..)
+          # Parent dir; strip last assembled component from result.
+          func_dirname "$func_normal_abspath_result"
+          func_normal_abspath_result=$func_dirname_result
+          ;;
+        *)
+          # Actual path component, append it.
+          func_append func_normal_abspath_result "/$func_normal_abspath_tcomponent"
+          ;;
+      esac
+    done
+    # Restore leading double-slash if one was found on entry.
+    func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result
+}
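+
+# For example:
+#
+#   func_normal_abspath "/tmp//foo/./../bar/"
+#
+# sets $func_normal_abspath_result to '/tmp/bar'.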
+
+
+# func_notquiet ARG...
+# --------------------
+# Echo program name prefixed message only when not in quiet mode.
+func_notquiet ()
+{
+    $debug_cmd
+
+    $opt_quiet || func_echo ${1+"$@"}
+
+    # A bug in bash halts the script if the last line of a function
+    # fails when set -e is in force, so we need another command to
+    # work around that:
+    :
+}
+
+
+# func_relative_path SRCDIR DSTDIR
+# --------------------------------
+# Set func_relative_path_result to the relative path from SRCDIR to DSTDIR.
+func_relative_path ()
+{
+    $debug_cmd
+
+    func_relative_path_result=
+    func_normal_abspath "$1"
+    func_relative_path_tlibdir=$func_normal_abspath_result
+    func_normal_abspath "$2"
+    func_relative_path_tbindir=$func_normal_abspath_result
+
+    # Ascend the tree starting from libdir
+    while :; do
+      # check if we have found a prefix of bindir
+      case $func_relative_path_tbindir in
+        $func_relative_path_tlibdir)
+          # found an exact match
+          func_relative_path_tcancelled=
+          break
+          ;;
+        $func_relative_path_tlibdir*)
+          # found a matching prefix
+          func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir"
+          func_relative_path_tcancelled=$func_stripname_result
+          if test -z "$func_relative_path_result"; then
+            func_relative_path_result=.
+          fi
+          break
+          ;;
+        *)
+          func_dirname $func_relative_path_tlibdir
+          func_relative_path_tlibdir=$func_dirname_result
+          if test -z "$func_relative_path_tlibdir"; then
+            # Have to descend all the way to the root!
+            func_relative_path_result=../$func_relative_path_result
+            func_relative_path_tcancelled=$func_relative_path_tbindir
+            break
+          fi
+          func_relative_path_result=../$func_relative_path_result
+          ;;
+      esac
+    done
+
+    # Now calculate path; take care to avoid doubling-up slashes.
+    func_stripname '' '/' "$func_relative_path_result"
+    func_relative_path_result=$func_stripname_result
+    func_stripname '/' '/' "$func_relative_path_tcancelled"
+    if test -n "$func_stripname_result"; then
+      func_append func_relative_path_result "/$func_stripname_result"
+    fi
+
+    # Normalisation. If bindir is libdir, return '.' else relative path.
+    if test -n "$func_relative_path_result"; then
+      func_stripname './' '' "$func_relative_path_result"
+      func_relative_path_result=$func_stripname_result
+    fi
+
+    test -n "$func_relative_path_result" || func_relative_path_result=.
+
+    :
+}
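+
+# For example (directories shown are purely illustrative):
+#
+#   func_relative_path /usr/local/lib /usr/local/bin
+#
+# sets $func_relative_path_result to '../bin'.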
+
+
+# func_quote_for_eval ARG...
+# --------------------------
+# Aesthetically quote ARGs to be evaled later.
+# This function returns two values:
+#   i) func_quote_for_eval_result
+#      double-quoted, suitable for a subsequent eval
+#  ii) func_quote_for_eval_unquoted_result
+#      has all characters that are still active within double
+#      quotes backslashified.
+func_quote_for_eval ()
+{
+    $debug_cmd
+
+    func_quote_for_eval_unquoted_result=
+    func_quote_for_eval_result=
+    while test 0 -lt $#; do
+      case $1 in
+        *[\\\`\"\$]*)
+	  _G_unquoted_arg=`printf '%s\n' "$1" |$SED "$sed_quote_subst"` ;;
+        *)
+          _G_unquoted_arg=$1 ;;
+      esac
+      if test -n "$func_quote_for_eval_unquoted_result"; then
+	func_append func_quote_for_eval_unquoted_result " $_G_unquoted_arg"
+      else
+        func_append func_quote_for_eval_unquoted_result "$_G_unquoted_arg"
+      fi
+
+      case $_G_unquoted_arg in
+        # Double-quote args containing shell metacharacters to delay
+        # word splitting, command substitution and variable expansion
+        # for a subsequent eval.
+        # Many Bourne shells cannot handle close brackets correctly
+        # in scan sets, so we specify it separately.
+        *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*|"")
+          _G_quoted_arg=\"$_G_unquoted_arg\"
+          ;;
+        *)
+          _G_quoted_arg=$_G_unquoted_arg
+	  ;;
+      esac
+
+      if test -n "$func_quote_for_eval_result"; then
+	func_append func_quote_for_eval_result " $_G_quoted_arg"
+      else
+        func_append func_quote_for_eval_result "$_G_quoted_arg"
+      fi
+      shift
+    done
+}
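+
+# A typical use is protecting arguments that contain whitespace or
+# shell metacharacters before a later eval, for example:
+#
+#   func_quote_for_eval "file with spaces"
+#   eval "ls -l $func_quote_for_eval_result"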
+
+
+# func_quote_for_expand ARG
+# -------------------------
+# Aesthetically quote ARG to be evaled later; same as above,
+# but do not quote variable references.
+func_quote_for_expand ()
+{
+    $debug_cmd
+
+    case $1 in
+      *[\\\`\"]*)
+	_G_arg=`$ECHO "$1" | $SED \
+	    -e "$sed_double_quote_subst" -e "$sed_double_backslash"` ;;
+      *)
+        _G_arg=$1 ;;
+    esac
+
+    case $_G_arg in
+      # Double-quote args containing shell metacharacters to delay
+      # word splitting and command substitution for a subsequent eval.
+      # Many Bourne shells cannot handle close brackets correctly
+      # in scan sets, so we specify it separately.
+      *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*|"")
+        _G_arg=\"$_G_arg\"
+        ;;
+    esac
+
+    func_quote_for_expand_result=$_G_arg
+}
+
+
+# func_stripname PREFIX SUFFIX NAME
+# ---------------------------------
+# strip PREFIX and SUFFIX from NAME, and store in func_stripname_result.
+# PREFIX and SUFFIX must not contain globbing or regex special
+# characters, hashes, percent signs, but SUFFIX may contain a leading
+# dot (in which case that matches only a dot).
+if test yes = "$_G_HAVE_XSI_OPS"; then
+  eval 'func_stripname ()
+  {
+    $debug_cmd
+
+    # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are
+    # positional parameters, so assign one to ordinary variable first.
+    func_stripname_result=$3
+    func_stripname_result=${func_stripname_result#"$1"}
+    func_stripname_result=${func_stripname_result%"$2"}
+  }'
+else
+  func_stripname ()
+  {
+    $debug_cmd
+
+    case $2 in
+      .*) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%\\\\$2\$%%"`;;
+      *)  func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%$2\$%%"`;;
+    esac
+  }
+fi
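+
+# For example:
+#
+#   func_stripname 'lib' '.la' 'libfoo.la'
+#
+# sets $func_stripname_result to 'foo'.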
+
+
+# func_show_eval CMD [FAIL_EXP]
+# -----------------------------
+# Unless opt_quiet is true, output CMD.  Then, unless opt_dry_run is
+# true, evaluate CMD.  If the evaluation of CMD fails, and FAIL_EXP
+# is given, then evaluate it.
+func_show_eval ()
+{
+    $debug_cmd
+
+    _G_cmd=$1
+    _G_fail_exp=${2-':'}
+
+    func_quote_for_expand "$_G_cmd"
+    eval "func_notquiet $func_quote_for_expand_result"
+
+    $opt_dry_run || {
+      eval "$_G_cmd"
+      _G_status=$?
+      if test 0 -ne "$_G_status"; then
+	eval "(exit $_G_status); $_G_fail_exp"
+      fi
+    }
+}
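+
+# For example (command and fail expression are purely illustrative):
+#
+#   func_show_eval '$RM -r conftest.dir' 'exit $EXIT_FAILURE'
+#
+# prints the command unless in quiet mode, runs it unless in dry-run
+# mode, and evaluates the second argument only if the command fails.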
+
+
+# func_show_eval_locale CMD [FAIL_EXP]
+# ------------------------------------
+# Unless opt_quiet is true, output CMD.  Then, unless opt_dry_run is
+# true, evaluate CMD.  If the evaluation of CMD fails, and FAIL_EXP
+# is given, then evaluate it.  Use the saved locale for evaluation.
+func_show_eval_locale ()
+{
+    $debug_cmd
+
+    _G_cmd=$1
+    _G_fail_exp=${2-':'}
+
+    $opt_quiet || {
+      func_quote_for_expand "$_G_cmd"
+      eval "func_echo $func_quote_for_expand_result"
+    }
+
+    $opt_dry_run || {
+      eval "$_G_user_locale
+	    $_G_cmd"
+      _G_status=$?
+      eval "$_G_safe_locale"
+      if test 0 -ne "$_G_status"; then
+	eval "(exit $_G_status); $_G_fail_exp"
+      fi
+    }
+}
+
+
+# func_tr_sh
+# ----------
+# Turn $1 into a string suitable for a shell variable name.
+# Result is stored in $func_tr_sh_result.  All characters
+# not in the set a-zA-Z0-9_ are replaced with '_'. Further,
+# if $1 begins with a digit, a '_' is prepended as well.
+func_tr_sh ()
+{
+    $debug_cmd
+
+    case $1 in
+    [0-9]* | *[!a-zA-Z0-9_]*)
+      func_tr_sh_result=`$ECHO "$1" | $SED -e 's/^\([0-9]\)/_\1/' -e 's/[^a-zA-Z0-9_]/_/g'`
+      ;;
+    * )
+      func_tr_sh_result=$1
+      ;;
+    esac
+}
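+
+# For example:
+#
+#   func_tr_sh "2.4-beta"
+#
+# sets $func_tr_sh_result to '_2_4_beta'.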
+
+
+# func_verbose ARG...
+# -------------------
+# Echo program name prefixed message in verbose mode only.
+func_verbose ()
+{
+    $debug_cmd
+
+    $opt_verbose && func_echo "$*"
+
+    :
+}
+
+
+# func_warn_and_continue ARG...
+# -----------------------------
+# Echo program name prefixed warning message to standard error.
+func_warn_and_continue ()
+{
+    $debug_cmd
+
+    $require_term_colors
+
+    func_echo_infix_1 "${tc_red}warning$tc_reset" "$*" >&2
+}
+
+
+# func_warning CATEGORY ARG...
+# ----------------------------
+# Echo program name prefixed warning message to standard error. Warning
+# messages can be filtered according to CATEGORY; this function elides
+# messages whose CATEGORY is not listed in the global variable
+# 'opt_warning_types'.
+func_warning ()
+{
+    $debug_cmd
+
+    # CATEGORY must be in the warning_categories list!
+    case " $warning_categories " in
+      *" $1 "*) ;;
+      *) func_internal_error "invalid warning category '$1'" ;;
+    esac
+
+    _G_category=$1
+    shift
+
+    case " $opt_warning_types " in
+      *" $_G_category "*) $warning_func ${1+"$@"} ;;
+    esac
+}
+
+
+# func_sort_ver VER1 VER2
+# -----------------------
+# 'sort -V' is not generally available.
+# Note this deviates from the version comparison in automake
+# in that it treats 1.5 < 1.5.0, and treats 1.4.4a < 1.4-p3a
+# but this should suffice as we won't be specifying old
+# version formats or redundant trailing .0 in bootstrap.conf.
+# If we did want full compatibility then we should probably
+# use m4_version_compare from autoconf.
+func_sort_ver ()
+{
+    $debug_cmd
+
+    printf '%s\n%s\n' "$1" "$2" \
+      | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n -k 5,5n -k 6,6n -k 7,7n -k 8,8n -k 9,9n
+}
+
+# func_lt_ver PREV CURR
+# ---------------------
+# Return true if PREV and CURR are in the correct order according to
+# func_sort_ver, otherwise false.  Use it like this:
+#
+#  func_lt_ver "$prev_ver" "$proposed_ver" || func_fatal_error "..."
+func_lt_ver ()
+{
+    $debug_cmd
+
+    test "x$1" = x`func_sort_ver "$1" "$2" | $SED 1q`
+}
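+
+# For example:
+#
+#   func_lt_ver 2.4.2 2.4.6 && echo "2.4.2 is not newer than 2.4.6"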
+
+
+# Local variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'before-save-hook 'time-stamp)
+# time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC"
+# time-stamp-time-zone: "UTC"
+# End:
+#! /bin/sh
+
+# Set a version string for this script.
+scriptversion=2014-01-07.03; # UTC
+
+# A portable, pluggable option parser for Bourne shell.
+# Written by Gary V. Vaughan, 2010
+
+# Copyright (C) 2010-2015 Free Software Foundation, Inc.
+# This is free software; see the source for copying conditions.  There is NO
+# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# Please report bugs or propose patches to gary@gnu.org.
+
+
+## ------ ##
+## Usage. ##
+## ------ ##
+
+# This file is a library for parsing options in your shell scripts along
+# with assorted other useful supporting features that you can make use
+# of too.
+#
+# For the simplest scripts you might need only:
+#
+#   #!/bin/sh
+#   . relative/path/to/funclib.sh
+#   . relative/path/to/options-parser
+#   scriptversion=1.0
+#   func_options ${1+"$@"}
+#   eval set dummy "$func_options_result"; shift
+#   ...rest of your script...
+#
+# In order for the '--version' option to work, you will need to have a
+# suitably formatted comment like the one at the top of this file
+# starting with '# Written by ' and ending with '# warranty; '.
+#
+# For '-h' and '--help' to work, you will also need a one line
+# description of your script's purpose in a comment directly above the
+# '# Written by ' line, like the one at the top of this file.
+#
+# The default options also support '--debug', which will turn on shell
+# execution tracing (see the comment above debug_cmd below for another
+# use), and '--verbose' and the func_verbose function to allow your script
+# to display verbose messages only when your user has specified
+# '--verbose'.
+#
+# After sourcing this file, you can plug processing for additional
+# options by amending the variables from the 'Configuration' section
+# below, and following the instructions in the 'Option parsing'
+# section further down.
+
+## -------------- ##
+## Configuration. ##
+## -------------- ##
+
+# You should override these variables in your script after sourcing this
+# file so that they reflect the customisations you have added to the
+# option parser.
+
+# The usage line for option parsing errors and the start of '-h' and
+# '--help' output messages. You can embed shell variables for delayed
+# expansion at the time the message is displayed, but you will need to
+# quote other shell meta-characters carefully to prevent them being
+# expanded when the contents are evaled.
+usage='$progpath [OPTION]...'
+
+# Short help message in response to '-h' and '--help'.  Add to this or
+# override it after sourcing this library to reflect the full set of
+# options your script accepts.
+usage_message="\
+       --debug        enable verbose shell tracing
+   -W, --warnings=CATEGORY
+                      report the warnings falling in CATEGORY [all]
+   -v, --verbose      verbosely report processing
+       --version      print version information and exit
+   -h, --help         print short or long help message and exit
+"
+
+# Additional text appended to 'usage_message' in response to '--help'.
+long_help_message="
+Warning categories include:
+       'all'          show all warnings
+       'none'         turn off all the warnings
+       'error'        warnings are treated as fatal errors"
+
+# Help message printed before fatal option parsing errors.
+fatal_help="Try '\$progname --help' for more information."
+
+
+
+## ------------------------- ##
+## Hook function management. ##
+## ------------------------- ##
+
+# This section contains functions for adding, removing, and running hooks
+# to the main code.  A hook is just a named list of functions that can
+# be run in order later on.
+
+# func_hookable FUNC_NAME
+# -----------------------
+# Declare that FUNC_NAME will run hooks added with
+# 'func_add_hook FUNC_NAME ...'.
+func_hookable ()
+{
+    $debug_cmd
+
+    func_append hookable_fns " $1"
+}
+
+
+# func_add_hook FUNC_NAME HOOK_FUNC
+# ---------------------------------
+# Request that FUNC_NAME call HOOK_FUNC before it returns.  FUNC_NAME must
+# first have been declared "hookable" by a call to 'func_hookable'.
+func_add_hook ()
+{
+    $debug_cmd
+
+    case " $hookable_fns " in
+      *" $1 "*) ;;
+      *) func_fatal_error "'$1' does not accept hook functions." ;;
+    esac
+
+    eval func_append ${1}_hooks '" $2"'
+}
+
+
+# func_remove_hook FUNC_NAME HOOK_FUNC
+# ------------------------------------
+# Remove HOOK_FUNC from the list of functions called by FUNC_NAME.
+func_remove_hook ()
+{
+    $debug_cmd
+
+    eval ${1}_hooks='`$ECHO "\$'$1'_hooks" |$SED "s| '$2'||"`'
+}
+
+
+# func_run_hooks FUNC_NAME [ARG]...
+# ---------------------------------
+# Run all hook functions registered to FUNC_NAME.
+# It is assumed that the list of hook functions contains nothing more
+# than a whitespace-delimited list of legal shell function names, and
+# no effort is wasted trying to catch shell meta-characters or preserve
+# whitespace.
+func_run_hooks ()
+{
+    $debug_cmd
+
+    case " $hookable_fns " in
+      *" $1 "*) ;;
+      *) func_fatal_error "'$1' does not support hook functions." ;;
+    esac
+
+    eval _G_hook_fns=\$$1_hooks; shift
+
+    for _G_hook in $_G_hook_fns; do
+      eval $_G_hook '"$@"'
+
+      # store returned options list back into positional
+      # parameters for next 'cmd' execution.
+      eval _G_hook_result=\$${_G_hook}_result
+      eval set dummy "$_G_hook_result"; shift
+    done
+
+    func_quote_for_eval ${1+"$@"}
+    func_run_hooks_result=$func_quote_for_eval_result
+}
+
+
+
+## --------------- ##
+## Option parsing. ##
+## --------------- ##
+
+# In order to add your own option parsing hooks, you must accept the
+# full positional parameter list in your hook function, remove any
+# options that you action, and then pass back the remaining unprocessed
+# options in '<hooked_function_name>_result', escaped suitably for
+# 'eval'.  Like this:
+#
+#    my_options_prep ()
+#    {
+#        $debug_cmd
+#
+#        # Extend the existing usage message.
+#        usage_message=$usage_message'
+#      -s, --silent       don'\''t print informational messages
+#    '
+#
+#        func_quote_for_eval ${1+"$@"}
+#        my_options_prep_result=$func_quote_for_eval_result
+#    }
+#    func_add_hook func_options_prep my_options_prep
+#
+#
+#    my_silent_option ()
+#    {
+#        $debug_cmd
+#
+#        # Note that for efficiency, we parse as many options as we can
+#        # recognise in a loop before passing the remainder back to the
+#        # caller on the first unrecognised argument we encounter.
+#        while test $# -gt 0; do
+#          _G_opt=$1; shift
+#          case $_G_opt in
+#            --silent|-s) opt_silent=: ;;
+#            # Separate non-argument short options:
+#            -s*)         func_split_short_opt "$_G_opt"
+#                         set dummy "$func_split_short_opt_name" \
+#                             "-$func_split_short_opt_arg" ${1+"$@"}
+#                         shift
+#                         ;;
+#            *)            set dummy "$_G_opt" "$*"; shift; break ;;
+#          esac
+#        done
+#
+#        func_quote_for_eval ${1+"$@"}
+#        my_silent_option_result=$func_quote_for_eval_result
+#    }
+#    func_add_hook func_parse_options my_silent_option
+#
+#
+#    my_option_validation ()
+#    {
+#        $debug_cmd
+#
+#        $opt_silent && $opt_verbose && func_fatal_help "\
+#    '--silent' and '--verbose' options are mutually exclusive."
+#
+#        func_quote_for_eval ${1+"$@"}
+#        my_option_validation_result=$func_quote_for_eval_result
+#    }
+#    func_add_hook func_validate_options my_option_validation
+#
+# You'll also need to manually amend $usage_message to reflect the extra
+# options you parse.  It's preferable to append if you can, so that
+# multiple option parsing hooks can be added safely.
+
+
+# func_options [ARG]...
+# ---------------------
+# All the functions called inside func_options are hookable. See the
+# individual implementations for details.
+func_hookable func_options
+func_options ()
+{
+    $debug_cmd
+
+    func_options_prep ${1+"$@"}
+    eval func_parse_options \
+        ${func_options_prep_result+"$func_options_prep_result"}
+    eval func_validate_options \
+        ${func_parse_options_result+"$func_parse_options_result"}
+
+    eval func_run_hooks func_options \
+        ${func_validate_options_result+"$func_validate_options_result"}
+
+    # save modified positional parameters for caller
+    func_options_result=$func_run_hooks_result
+}
+
+
+# func_options_prep [ARG]...
+# --------------------------
+# All initialisations required before starting the option parse loop.
+# Note that when calling hook functions, we pass through the list of
+# positional parameters.  If a hook function modifies that list, and
+# needs to propagate that back to the rest of this script, then the complete
+# modified list must be put in 'func_run_hooks_result' before
+# returning.
+func_hookable func_options_prep
+func_options_prep ()
+{
+    $debug_cmd
+
+    # Option defaults:
+    opt_verbose=false
+    opt_warning_types=
+
+    func_run_hooks func_options_prep ${1+"$@"}
+
+    # save modified positional parameters for caller
+    func_options_prep_result=$func_run_hooks_result
+}
+
+
+# func_parse_options [ARG]...
+# ---------------------------
+# The main option parsing loop.
+func_hookable func_parse_options
+func_parse_options ()
+{
+    $debug_cmd
+
+    func_parse_options_result=
+
+    # this just eases exit handling
+    while test $# -gt 0; do
+      # Defer to hook functions for initial option parsing, so they
+      # get priority in the event of reusing an option name.
+      func_run_hooks func_parse_options ${1+"$@"}
+
+      # Adjust func_parse_options positional parameters to match
+      eval set dummy "$func_run_hooks_result"; shift
+
+      # Break out of the loop if we already parsed every option.
+      test $# -gt 0 || break
+
+      _G_opt=$1
+      shift
+      case $_G_opt in
+        --debug|-x)   debug_cmd='set -x'
+                      func_echo "enabling shell trace mode"
+                      $debug_cmd
+                      ;;
+
+        --no-warnings|--no-warning|--no-warn)
+                      set dummy --warnings none ${1+"$@"}
+                      shift
+		      ;;
+
+        --warnings|--warning|-W)
+                      test $# = 0 && func_missing_arg $_G_opt && break
+                      case " $warning_categories $1" in
+                        *" $1 "*)
+                          # trailing space prevents matching last $1 above
+                          func_append_uniq opt_warning_types " $1"
+                          ;;
+                        *all)
+                          opt_warning_types=$warning_categories
+                          ;;
+                        *none)
+                          opt_warning_types=none
+                          warning_func=:
+                          ;;
+                        *error)
+                          opt_warning_types=$warning_categories
+                          warning_func=func_fatal_error
+                          ;;
+                        *)
+                          func_fatal_error \
+                             "unsupported warning category: '$1'"
+                          ;;
+                      esac
+                      shift
+                      ;;
+
+        --verbose|-v) opt_verbose=: ;;
+        --version)    func_version ;;
+        -\?|-h)       func_usage ;;
+        --help)       func_help ;;
+
+	# Separate optargs to long options (plugins may need this):
+	--*=*)        func_split_equals "$_G_opt"
+	              set dummy "$func_split_equals_lhs" \
+                          "$func_split_equals_rhs" ${1+"$@"}
+                      shift
+                      ;;
+
+       # Separate optargs to short options:
+        -W*)
+                      func_split_short_opt "$_G_opt"
+                      set dummy "$func_split_short_opt_name" \
+                          "$func_split_short_opt_arg" ${1+"$@"}
+                      shift
+                      ;;
+
+        # Separate non-argument short options:
+        -\?*|-h*|-v*|-x*)
+                      func_split_short_opt "$_G_opt"
+                      set dummy "$func_split_short_opt_name" \
+                          "-$func_split_short_opt_arg" ${1+"$@"}
+                      shift
+                      ;;
+
+        --)           break ;;
+        -*)           func_fatal_help "unrecognised option: '$_G_opt'" ;;
+        *)            set dummy "$_G_opt" ${1+"$@"}; shift; break ;;
+      esac
+    done
+
+    # save modified positional parameters for caller
+    func_quote_for_eval ${1+"$@"}
+    func_parse_options_result=$func_quote_for_eval_result
+}
+
+
+# func_validate_options [ARG]...
+# ------------------------------
+# Perform any sanity checks on option settings and/or unconsumed
+# arguments.
+func_hookable func_validate_options
+func_validate_options ()
+{
+    $debug_cmd
+
+    # Display all warnings if -W was not given.
+    test -n "$opt_warning_types" || opt_warning_types=" $warning_categories"
+
+    func_run_hooks func_validate_options ${1+"$@"}
+
+    # Bail if the options were screwed!
+    $exit_cmd $EXIT_FAILURE
+
+    # save modified positional parameters for caller
+    func_validate_options_result=$func_run_hooks_result
+}
+
+
+
+## ----------------- ##
+## Helper functions. ##
+## ----------------- ##
+
+# This section contains the helper functions used by the rest of the
+# hookable option parser framework in ascii-betical order.
+
+
+# func_fatal_help ARG...
+# ----------------------
+# Echo program name prefixed message to standard error, followed by
+# a help hint, and exit.
+func_fatal_help ()
+{
+    $debug_cmd
+
+    eval \$ECHO \""Usage: $usage"\"
+    eval \$ECHO \""$fatal_help"\"
+    func_error ${1+"$@"}
+    exit $EXIT_FAILURE
+}
+
+
+# func_help
+# ---------
+# Echo long help message to standard output and exit.
+func_help ()
+{
+    $debug_cmd
+
+    func_usage_message
+    $ECHO "$long_help_message"
+    exit 0
+}
+
+
+# func_missing_arg ARGNAME
+# ------------------------
+# Echo program name prefixed message to standard error and set global
+# exit_cmd.
+func_missing_arg ()
+{
+    $debug_cmd
+
+    func_error "Missing argument for '$1'."
+    exit_cmd=exit
+}
+
+
+# func_split_equals STRING
+# ------------------------
+# Set func_split_equals_lhs and func_split_equals_rhs shell variables after
+# splitting STRING at the '=' sign.
+test -z "$_G_HAVE_XSI_OPS" \
+    && (eval 'x=a/b/c;
+      test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \
+    && _G_HAVE_XSI_OPS=yes
+
+if test yes = "$_G_HAVE_XSI_OPS"
+then
+  # This is an XSI compatible shell, allowing a faster implementation...
+  eval 'func_split_equals ()
+  {
+      $debug_cmd
+
+      func_split_equals_lhs=${1%%=*}
+      func_split_equals_rhs=${1#*=}
+      test "x$func_split_equals_lhs" = "x$1" \
+        && func_split_equals_rhs=
+  }'
+else
+  # ...otherwise fall back to using expr, which is often a shell builtin.
+  func_split_equals ()
+  {
+      $debug_cmd
+
+      func_split_equals_lhs=`expr "x$1" : 'x\([^=]*\)'`
+      func_split_equals_rhs=
+      test "x$func_split_equals_lhs" = "x$1" \
+        || func_split_equals_rhs=`expr "x$1" : 'x[^=]*=\(.*\)$'`
+  }
+fi #func_split_equals
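+
+# For example:
+#
+#   func_split_equals --mode=compile
+#
+# sets $func_split_equals_lhs to '--mode' and $func_split_equals_rhs
+# to 'compile'.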
+
+
+# func_split_short_opt SHORTOPT
+# -----------------------------
+# Set func_split_short_opt_name and func_split_short_opt_arg shell
+# variables after splitting SHORTOPT after the 2nd character.
+if test yes = "$_G_HAVE_XSI_OPS"
+then
+  # This is an XSI compatible shell, allowing a faster implementation...
+  eval 'func_split_short_opt ()
+  {
+      $debug_cmd
+
+      func_split_short_opt_arg=${1#??}
+      func_split_short_opt_name=${1%"$func_split_short_opt_arg"}
+  }'
+else
+  # ...otherwise fall back to using expr, which is often a shell builtin.
+  func_split_short_opt ()
+  {
+      $debug_cmd
+
+      func_split_short_opt_name=`expr "x$1" : 'x\(-.\)'`
+      func_split_short_opt_arg=`expr "x$1" : 'x-.\(.*\)$'`
+  }
+fi #func_split_short_opt
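+
+# For example:
+#
+#   func_split_short_opt -Wnone
+#
+# on an XSI-capable shell sets $func_split_short_opt_name to '-W' and
+# $func_split_short_opt_arg to 'none'.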
+
+
+# func_usage
+# ----------
+# Echo short help message to standard output and exit.
+func_usage ()
+{
+    $debug_cmd
+
+    func_usage_message
+    $ECHO "Run '$progname --help |${PAGER-more}' for full usage"
+    exit 0
+}
+
+
+# func_usage_message
+# ------------------
+# Echo short help message to standard output.
+func_usage_message ()
+{
+    $debug_cmd
+
+    eval \$ECHO \""Usage: $usage"\"
+    echo
+    $SED -n 's|^# ||
+        /^Written by/{
+          x;p;x
+        }
+	h
+	/^Written by/q' < "$progpath"
+    echo
+    eval \$ECHO \""$usage_message"\"
+}
+
+
+# func_version
+# ------------
+# Echo version message to standard output and exit.
+func_version ()
+{
+    $debug_cmd
+
+    printf '%s\n' "$progname $scriptversion"
+    $SED -n '
+        /(C)/!b go
+        :more
+        /\./!{
+          N
+          s|\n# | |
+          b more
+        }
+        :go
+        /^# Written by /,/# warranty; / {
+          s|^# ||
+          s|^# *$||
+          s|\((C)\)[ 0-9,-]*[ ,-]\([1-9][0-9]* \)|\1 \2|
+          p
+        }
+        /^# Written by / {
+          s|^# ||
+          p
+        }
+        /^warranty; /q' < "$progpath"
+
+    exit $?
+}
+
+
+# Local variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'before-save-hook 'time-stamp)
+# time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC"
+# time-stamp-time-zone: "UTC"
+# End:
+
+# Set a version string.
+scriptversion='(GNU libtool) 2.4.6'
+
+
+# func_echo ARG...
+# ----------------
+# Libtool also displays the current mode in messages, so override
+# funclib.sh func_echo with this custom definition.
+func_echo ()
+{
+    $debug_cmd
+
+    _G_message=$*
+
+    func_echo_IFS=$IFS
+    IFS=$nl
+    for _G_line in $_G_message; do
+      IFS=$func_echo_IFS
+      $ECHO "$progname${opt_mode+: $opt_mode}: $_G_line"
+    done
+    IFS=$func_echo_IFS
+}
+
+
+# func_warning ARG...
+# -------------------
+# Libtool warnings are not categorized, so override funclib.sh
+# func_warning with this simpler definition.
+func_warning ()
+{
+    $debug_cmd
+
+    $warning_func ${1+"$@"}
+}
+
+
+## ---------------- ##
+## Options parsing. ##
+## ---------------- ##
+
+# Hook in the functions to make sure our own options are parsed during
+# the option parsing loop.
+
+usage='$progpath [OPTION]... [MODE-ARG]...'
+
+# Short help message in response to '-h'.
+usage_message="Options:
+       --config             show all configuration variables
+       --debug              enable verbose shell tracing
+   -n, --dry-run            display commands without modifying any files
+       --features           display basic configuration information and exit
+       --mode=MODE          use operation mode MODE
+       --no-warnings        equivalent to '-Wnone'
+       --preserve-dup-deps  don't remove duplicate dependency libraries
+       --quiet, --silent    don't print informational messages
+       --tag=TAG            use configuration variables from tag TAG
+   -v, --verbose            print more informational messages than default
+       --version            print version information
+   -W, --warnings=CATEGORY  report the warnings falling in CATEGORY [all]
+   -h, --help, --help-all   print short, long, or detailed help message
+"
+
+# Additional text appended to 'usage_message' in response to '--help'.
+func_help ()
+{
+    $debug_cmd
+
+    func_usage_message
+    $ECHO "$long_help_message
+
+MODE must be one of the following:
+
+       clean           remove files from the build directory
+       compile         compile a source file into a libtool object
+       execute         automatically set library path, then run a program
+       finish          complete the installation of libtool libraries
+       install         install libraries or executables
+       link            create a library or an executable
+       uninstall       remove libraries from an installed directory
+
+MODE-ARGS vary depending on the MODE.  When passed as first option,
+'--mode=MODE' may be abbreviated as 'MODE' or a unique abbreviation of that.
+Try '$progname --help --mode=MODE' for a more detailed description of MODE.
+
+When reporting a bug, please describe a test case to reproduce it and
+include the following information:
+
+       host-triplet:   $host
+       shell:          $SHELL
+       compiler:       $LTCC
+       compiler flags: $LTCFLAGS
+       linker:         $LD (gnu? $with_gnu_ld)
+       version:        $progname (GNU libtool) 2.4.6
+       automake:       `($AUTOMAKE --version) 2>/dev/null |$SED 1q`
+       autoconf:       `($AUTOCONF --version) 2>/dev/null |$SED 1q`
+
+Report bugs to <bug-libtool@gnu.org>.
+GNU libtool home page: <http://www.gnu.org/software/libtool/>.
+General help using GNU software: <http://www.gnu.org/gethelp/>."
+    exit 0
+}
+
+
+# func_lo2o OBJECT-NAME
+# ---------------------
+# Transform OBJECT-NAME from a '.lo' suffix to the platform specific
+# object suffix.
+
+lo2o=s/\\.lo\$/.$objext/
+o2lo=s/\\.$objext\$/.lo/
+
+if test yes = "$_G_HAVE_XSI_OPS"; then
+  eval 'func_lo2o ()
+  {
+    case $1 in
+      *.lo) func_lo2o_result=${1%.lo}.$objext ;;
+      *   ) func_lo2o_result=$1               ;;
+    esac
+  }'
+
+  # func_xform LIBOBJ-OR-SOURCE
+  # ---------------------------
+  # Transform LIBOBJ-OR-SOURCE from a '.o' or '.c' (or otherwise)
+  # suffix to a '.lo' libtool-object suffix.
+  eval 'func_xform ()
+  {
+    func_xform_result=${1%.*}.lo
+  }'
+else
+  # ...otherwise fall back to using sed.
+  func_lo2o ()
+  {
+    func_lo2o_result=`$ECHO "$1" | $SED "$lo2o"`
+  }
+
+  func_xform ()
+  {
+    func_xform_result=`$ECHO "$1" | $SED 's|\.[^.]*$|.lo|'`
+  }
+fi
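+
+# For example, assuming the configured $objext is 'o':
+#
+#   func_lo2o libfoo.lo      # $func_lo2o_result is 'libfoo.o'
+#   func_xform libfoo.c      # $func_xform_result is 'libfoo.lo'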
+
+
+# func_fatal_configuration ARG...
+# -------------------------------
+# Echo program name prefixed message to standard error, followed by
+# a configuration failure hint, and exit.
+func_fatal_configuration ()
+{
+    func_fatal_error ${1+"$@"} \
+      "See the $PACKAGE documentation for more information." \
+      "Fatal configuration error."
+}
+
+
+# func_config
+# -----------
+# Display the configuration for all the tags in this script.
+func_config ()
+{
+    re_begincf='^# ### BEGIN LIBTOOL'
+    re_endcf='^# ### END LIBTOOL'
+
+    # Default configuration.
+    $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath"
+
+    # Now print the configurations for the tags.
+    for tagname in $taglist; do
+      $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath"
+    done
+
+    exit $?
+}
+
+
+# func_features
+# -------------
+# Display the features supported by this script.
+func_features ()
+{
+    echo "host: $host"
+    if test yes = "$build_libtool_libs"; then
+      echo "enable shared libraries"
+    else
+      echo "disable shared libraries"
+    fi
+    if test yes = "$build_old_libs"; then
+      echo "enable static libraries"
+    else
+      echo "disable static libraries"
+    fi
+
+    exit $?
+}
+
+
+# func_enable_tag TAGNAME
+# -----------------------
+# Verify that TAGNAME is valid, and either flag an error and exit, or
+# enable the TAGNAME tag.  We also add TAGNAME to the global $taglist
+# variable here.
+func_enable_tag ()
+{
+    # Global variable:
+    tagname=$1
+
+    re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$"
+    re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$"
+    sed_extractcf=/$re_begincf/,/$re_endcf/p
+
+    # Validate tagname.
+    case $tagname in
+      *[!-_A-Za-z0-9,/]*)
+        func_fatal_error "invalid tag name: $tagname"
+        ;;
+    esac
+
+    # Don't test for the "default" C tag, as we know it's
+    # there but not specially marked.
+    case $tagname in
+        CC) ;;
+    *)
+        if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then
+	  taglist="$taglist $tagname"
+
+	  # Evaluate the configuration.  Be careful to quote the path
+	  # and the sed script, to avoid splitting on whitespace, but
+	  # also don't use non-portable quotes within backquotes within
+	  # quotes, so we have to do it in 2 steps:
+	  extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"`
+	  eval "$extractedcf"
+        else
+	  func_error "ignoring unknown tag $tagname"
+        fi
+        ;;
+    esac
+}
+
+
+# func_check_version_match
+# ------------------------
+# Ensure that we are using m4 macros, and libtool script from the same
+# release of libtool.
+func_check_version_match ()
+{
+    if test "$package_revision" != "$macro_revision"; then
+      if test "$VERSION" != "$macro_version"; then
+        if test -z "$macro_version"; then
+          cat >&2 <<_LT_EOF
+$progname: Version mismatch error.  This is $PACKAGE $VERSION, but the
+$progname: definition of this LT_INIT comes from an older release.
+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
+$progname: and run autoconf again.
+_LT_EOF
+        else
+          cat >&2 <<_LT_EOF
+$progname: Version mismatch error.  This is $PACKAGE $VERSION, but the
+$progname: definition of this LT_INIT comes from $PACKAGE $macro_version.
+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
+$progname: and run autoconf again.
+_LT_EOF
+        fi
+      else
+        cat >&2 <<_LT_EOF
+$progname: Version mismatch error.  This is $PACKAGE $VERSION, revision $package_revision,
+$progname: but the definition of this LT_INIT comes from revision $macro_revision.
+$progname: You should recreate aclocal.m4 with macros from revision $package_revision
+$progname: of $PACKAGE $VERSION and run autoconf again.
+_LT_EOF
+      fi
+
+      exit $EXIT_MISMATCH
+    fi
+}
+
+
+# libtool_options_prep [ARG]...
+# -----------------------------
+# Preparation for options parsed by libtool.
+libtool_options_prep ()
+{
+    $debug_cmd
+
+    # Option defaults:
+    opt_config=false
+    opt_dlopen=
+    opt_dry_run=false
+    opt_help=false
+    opt_mode=
+    opt_preserve_dup_deps=false
+    opt_quiet=false
+
+    nonopt=
+    preserve_args=
+
+    # Shorthand for --mode=foo, only valid as the first argument
+    case $1 in
+    clean|clea|cle|cl)
+      shift; set dummy --mode clean ${1+"$@"}; shift
+      ;;
+    compile|compil|compi|comp|com|co|c)
+      shift; set dummy --mode compile ${1+"$@"}; shift
+      ;;
+    execute|execut|execu|exec|exe|ex|e)
+      shift; set dummy --mode execute ${1+"$@"}; shift
+      ;;
+    finish|finis|fini|fin|fi|f)
+      shift; set dummy --mode finish ${1+"$@"}; shift
+      ;;
+    install|instal|insta|inst|ins|in|i)
+      shift; set dummy --mode install ${1+"$@"}; shift
+      ;;
+    link|lin|li|l)
+      shift; set dummy --mode link ${1+"$@"}; shift
+      ;;
+    uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u)
+      shift; set dummy --mode uninstall ${1+"$@"}; shift
+      ;;
+    esac
+
+    # Pass back the list of options.
+    func_quote_for_eval ${1+"$@"}
+    libtool_options_prep_result=$func_quote_for_eval_result
+}
+func_add_hook func_options_prep libtool_options_prep
+
+
+# libtool_parse_options [ARG]...
+# ---------------------------------
+# Provide handling for libtool specific options.
+libtool_parse_options ()
+{
+    $debug_cmd
+
+    # Perform our own loop to consume as many options as possible in
+    # each iteration.
+    while test $# -gt 0; do
+      _G_opt=$1
+      shift
+      case $_G_opt in
+        --dry-run|--dryrun|-n)
+                        opt_dry_run=:
+                        ;;
+
+        --config)       func_config ;;
+
+        --dlopen|-dlopen)
+                        opt_dlopen="${opt_dlopen+$opt_dlopen
+}$1"
+                        shift
+                        ;;
+
+        --preserve-dup-deps)
+                        opt_preserve_dup_deps=: ;;
+
+        --features)     func_features ;;
+
+        --finish)       set dummy --mode finish ${1+"$@"}; shift ;;
+
+        --help)         opt_help=: ;;
+
+        --help-all)     opt_help=': help-all' ;;
+
+        --mode)         test $# = 0 && func_missing_arg $_G_opt && break
+                        opt_mode=$1
+                        case $1 in
+                          # Valid mode arguments:
+                          clean|compile|execute|finish|install|link|relink|uninstall) ;;
+
+                          # Catch anything else as an error
+                          *) func_error "invalid argument for $_G_opt"
+                             exit_cmd=exit
+                             break
+                             ;;
+                        esac
+                        shift
+                        ;;
+
+        --no-silent|--no-quiet)
+                        opt_quiet=false
+                        func_append preserve_args " $_G_opt"
+                        ;;
+
+        --no-warnings|--no-warning|--no-warn)
+                        opt_warning=false
+                        func_append preserve_args " $_G_opt"
+                        ;;
+
+        --no-verbose)
+                        opt_verbose=false
+                        func_append preserve_args " $_G_opt"
+                        ;;
+
+        --silent|--quiet)
+                        opt_quiet=:
+                        opt_verbose=false
+                        func_append preserve_args " $_G_opt"
+                        ;;
+
+        --tag)          test $# = 0 && func_missing_arg $_G_opt && break
+                        opt_tag=$1
+                        func_append preserve_args " $_G_opt $1"
+                        func_enable_tag "$1"
+                        shift
+                        ;;
+
+        --verbose|-v)   opt_quiet=false
+                        opt_verbose=:
+                        func_append preserve_args " $_G_opt"
+                        ;;
+
+	# An option not handled by this hook function:
+        *)		set dummy "$_G_opt" ${1+"$@"};	shift; break  ;;
+      esac
+    done
+
+
+    # save modified positional parameters for caller
+    func_quote_for_eval ${1+"$@"}
+    libtool_parse_options_result=$func_quote_for_eval_result
+}
+func_add_hook func_parse_options libtool_parse_options
+
+
+
+# libtool_validate_options [ARG]...
+# ---------------------------------
+# Perform any sanity checks on option settings and/or unconsumed
+# arguments.
+libtool_validate_options ()
+{
+    # save first non-option argument
+    if test 0 -lt $#; then
+      nonopt=$1
+      shift
+    fi
+
+    # preserve --debug
+    test : = "$debug_cmd" || func_append preserve_args " --debug"
+
+    case $host in
+      # Solaris2 added to fix http://debbugs.gnu.org/cgi/bugreport.cgi?bug=16452
+      # see also: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59788
+      *cygwin* | *mingw* | *pw32* | *cegcc* | *solaris2* | *os2*)
+        # don't eliminate duplications in $postdeps and $predeps
+        opt_duplicate_compiler_generated_deps=:
+        ;;
+      *)
+        opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps
+        ;;
+    esac
+
+    $opt_help || {
+      # Sanity checks first:
+      func_check_version_match
+
+      test yes != "$build_libtool_libs" \
+        && test yes != "$build_old_libs" \
+        && func_fatal_configuration "not configured to build any kind of library"
+
+      # Darwin sucks
+      eval std_shrext=\"$shrext_cmds\"
+
+      # Only execute mode is allowed to have -dlopen flags.
+      if test -n "$opt_dlopen" && test execute != "$opt_mode"; then
+        func_error "unrecognized option '-dlopen'"
+        $ECHO "$help" 1>&2
+        exit $EXIT_FAILURE
+      fi
+
+      # Change the help message to a mode-specific one.
+      generic_help=$help
+      help="Try '$progname --help --mode=$opt_mode' for more information."
+    }
+
+    # Pass back the unparsed argument list
+    func_quote_for_eval ${1+"$@"}
+    libtool_validate_options_result=$func_quote_for_eval_result
+}
+func_add_hook func_validate_options libtool_validate_options
+
+
+# Process options as early as possible so that --help and --version
+# can return quickly.
+func_options ${1+"$@"}
+eval set dummy "$func_options_result"; shift
+
+
+
+## ----------- ##
+##    Main.    ##
+## ----------- ##
+
+magic='%%%MAGIC variable%%%'
+magic_exe='%%%MAGIC EXE variable%%%'
+
+# Global variables.
+extracted_archives=
+extracted_serial=0
+
+# If this variable is set in any of the actions, the command in it
+# will be execed at the end.  This prevents here-documents from being
+# left over by shells.
+exec_cmd=
+
+
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+  eval 'cat <<_LTECHO_EOF
+$1
+_LTECHO_EOF'
+}
+
+# func_generated_by_libtool_p
+# True iff stdin has been generated by Libtool. This function is only
+# a basic sanity check; it will hardly flush out determined imposters.
+func_generated_by_libtool_p ()
+{
+  $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1
+}
+
+# func_lalib_p file
+# True iff FILE is a libtool '.la' library or '.lo' object file.
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_lalib_p ()
+{
+    test -f "$1" &&
+      $SED -e 4q "$1" 2>/dev/null | func_generated_by_libtool_p
+}
+
+# func_lalib_unsafe_p file
+# True iff FILE is a libtool '.la' library or '.lo' object file.
+# This function implements the same check as func_lalib_p without
+# resorting to external programs.  To this end, it redirects stdin and
+# closes it afterwards, without saving the original file descriptor.
+# As a safety measure, use it only where a negative result would be
+# fatal anyway.  Works if 'file' does not exist.
+func_lalib_unsafe_p ()
+{
+    lalib_p=no
+    if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then
+	for lalib_p_l in 1 2 3 4
+	do
+	    read lalib_p_line
+	    case $lalib_p_line in
+		\#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;;
+	    esac
+	done
+	exec 0<&5 5<&-
+    fi
+    test yes = "$lalib_p"
+}
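+# Illustrative example (hypothetical file contents): a '.la' archive normally
+# begins with lines such as
+#   # libfoo.la - a libtool library file
+#   # Generated by libtool (GNU libtool) ...
+# so scanning the first four lines for '# Generated by ... $PACKAGE' is a
+# sufficient, if weak, sanity check.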
+
+# func_ltwrapper_script_p file
+# True iff FILE is a libtool wrapper script
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_ltwrapper_script_p ()
+{
+    test -f "$1" &&
+      $lt_truncate_bin < "$1" 2>/dev/null | func_generated_by_libtool_p
+}
+
+# func_ltwrapper_executable_p file
+# True iff FILE is a libtool wrapper executable
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_ltwrapper_executable_p ()
+{
+    func_ltwrapper_exec_suffix=
+    case $1 in
+    *.exe) ;;
+    *) func_ltwrapper_exec_suffix=.exe ;;
+    esac
+    $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1
+}
+
+# func_ltwrapper_scriptname file
+# Assumes file is an ltwrapper_executable
+# uses $file to determine the appropriate filename for a
+# temporary ltwrapper_script.
+func_ltwrapper_scriptname ()
+{
+    func_dirname_and_basename "$1" "" "."
+    func_stripname '' '.exe' "$func_basename_result"
+    func_ltwrapper_scriptname_result=$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper
+}
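+# Illustrative example (hypothetical names): with objdir=.libs, calling
+#   func_ltwrapper_scriptname src/foo.exe
+# leaves 'src/.libs/foo_ltshwrapper' in func_ltwrapper_scriptname_result.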
+
+# func_ltwrapper_p file
+# True iff FILE is a libtool wrapper script or wrapper executable
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_ltwrapper_p ()
+{
+    func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1"
+}
+
+
+# func_execute_cmds commands fail_cmd
+# Execute tilde-delimited COMMANDS.
+# If FAIL_CMD is given, eval that upon failure.
+# FAIL_CMD may read-access the current command in variable CMD!
+func_execute_cmds ()
+{
+    $debug_cmd
+
+    save_ifs=$IFS; IFS='~'
+    for cmd in $1; do
+      IFS=$sp$nl
+      eval cmd=\"$cmd\"
+      IFS=$save_ifs
+      func_show_eval "$cmd" "${2-:}"
+    done
+    IFS=$save_ifs
+}
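+# Usage sketch (hypothetical commands): a call such as
+#   func_execute_cmds 'chmod 644 $oldlib~$RANLIB $oldlib' 'exit $?'
+# runs each tilde-separated command in turn; if one fails, the second
+# argument is eval'ed with the failing command still available in $cmd.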
+
+
+# func_source file
+# Source FILE, adding directory component if necessary.
+# Note that it is not necessary on cygwin/mingw to append a dot to
+# FILE even if both FILE and FILE.exe exist: automatic-append-.exe
+# behavior happens only for exec(3), not for open(2)!  Also, sourcing
+# 'FILE.' does not work on cygwin managed mounts.
+func_source ()
+{
+    $debug_cmd
+
+    case $1 in
+    */* | *\\*)	. "$1" ;;
+    *)		. "./$1" ;;
+    esac
+}
+
+
+# func_resolve_sysroot PATH
+# Replace a leading = in PATH with a sysroot.  Store the result into
+# func_resolve_sysroot_result
+func_resolve_sysroot ()
+{
+  func_resolve_sysroot_result=$1
+  case $func_resolve_sysroot_result in
+  =*)
+    func_stripname '=' '' "$func_resolve_sysroot_result"
+    func_resolve_sysroot_result=$lt_sysroot$func_stripname_result
+    ;;
+  esac
+}
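+# Illustrative example (hypothetical sysroot): with lt_sysroot=/opt/sysroot,
+#   func_resolve_sysroot =/usr/lib/libfoo.la
+# sets func_resolve_sysroot_result to /opt/sysroot/usr/lib/libfoo.la, while
+# arguments without a leading '=' are passed through unchanged.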
+
+# func_replace_sysroot PATH
+# If PATH begins with the sysroot, replace it with = and
+# store the result into func_replace_sysroot_result.
+func_replace_sysroot ()
+{
+  case $lt_sysroot:$1 in
+  ?*:"$lt_sysroot"*)
+    func_stripname "$lt_sysroot" '' "$1"
+    func_replace_sysroot_result='='$func_stripname_result
+    ;;
+  *)
+    # Including no sysroot.
+    func_replace_sysroot_result=$1
+    ;;
+  esac
+}
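+# Illustrative example (hypothetical sysroot): the inverse of the above; with
+# lt_sysroot=/opt/sysroot,
+#   func_replace_sysroot /opt/sysroot/usr/lib/libfoo.la
+# sets func_replace_sysroot_result to '=/usr/lib/libfoo.la'.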
+
+# func_infer_tag arg
+# Infer tagged configuration to use if any are available and
+# if one wasn't chosen via the "--tag" command line option.
+# Only attempt this if the compiler in the base compile
+# command doesn't match the default compiler.
+# arg is usually of the form 'gcc ...'
+func_infer_tag ()
+{
+    $debug_cmd
+
+    if test -n "$available_tags" && test -z "$tagname"; then
+      CC_quoted=
+      for arg in $CC; do
+	func_append_quoted CC_quoted "$arg"
+      done
+      CC_expanded=`func_echo_all $CC`
+      CC_quoted_expanded=`func_echo_all $CC_quoted`
+      case $@ in
+      # Blanks in the command may have been stripped by the calling shell,
+      # but not from the CC environment variable when configure was run.
+      " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \
+      " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;;
+      # Blanks at the start of $base_compile will cause this to fail
+      # if we don't check for them as well.
+      *)
+	for z in $available_tags; do
+	  if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then
+	    # Evaluate the configuration.
+	    eval "`$SED -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`"
+	    CC_quoted=
+	    for arg in $CC; do
+	      # Double-quote args containing other shell metacharacters.
+	      func_append_quoted CC_quoted "$arg"
+	    done
+	    CC_expanded=`func_echo_all $CC`
+	    CC_quoted_expanded=`func_echo_all $CC_quoted`
+	    case "$@ " in
+	    " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \
+	    " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*)
+	      # The compiler in the base compile command matches
+	      # the one in the tagged configuration.
+	      # Assume this is the tagged configuration we want.
+	      tagname=$z
+	      break
+	      ;;
+	    esac
+	  fi
+	done
+	# If $tagname still isn't set, then no tagged configuration
+	# was found; let the user know that the "--tag" command
+	# line option must be used.
+	if test -z "$tagname"; then
+	  func_echo "unable to infer tagged configuration"
+	  func_fatal_error "specify a tag with '--tag'"
+#	else
+#	  func_verbose "using $tagname tagged configuration"
+	fi
+	;;
+      esac
+    fi
+}
+
+
+
+# func_write_libtool_object output_name pic_name nonpic_name
+# Create a libtool object file (analogous to a ".la" file),
+# but don't create it if we're doing a dry run.
+func_write_libtool_object ()
+{
+    write_libobj=$1
+    if test yes = "$build_libtool_libs"; then
+      write_lobj=\'$2\'
+    else
+      write_lobj=none
+    fi
+
+    if test yes = "$build_old_libs"; then
+      write_oldobj=\'$3\'
+    else
+      write_oldobj=none
+    fi
+
+    $opt_dry_run || {
+      cat >${write_libobj}T <<EOF
+# $write_libobj - a libtool object file
+# Generated by $PROGRAM (GNU $PACKAGE) $VERSION
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object=$write_lobj
+
+# Name of the non-PIC object
+non_pic_object=$write_oldobj
+
+EOF
+      $MV "${write_libobj}T" "$write_libobj"
+    }
+}
+
+
+##################################################
+# FILE NAME AND PATH CONVERSION HELPER FUNCTIONS #
+##################################################
+
+# func_convert_core_file_wine_to_w32 ARG
+# Helper function used by file name conversion functions when $build is *nix,
+# and $host is mingw, cygwin, or some other w32 environment. Relies on a
+# correctly configured wine environment available, with the winepath program
+# in $build's $PATH.
+#
+# ARG is the $build file name to be converted to w32 format.
+# Result is available in $func_convert_core_file_wine_to_w32_result, and will
+# be empty on error (or when ARG is empty)
+func_convert_core_file_wine_to_w32 ()
+{
+  $debug_cmd
+
+  func_convert_core_file_wine_to_w32_result=$1
+  if test -n "$1"; then
+    # Unfortunately, winepath does not exit with a non-zero error code, so we
+    # are forced to check the contents of stdout. On the other hand, if the
+    # command is not found, the shell will set an exit code of 127 and print
+    # *an error message* to stdout. So we must check for both error code of
+    # zero AND non-empty stdout, which explains the odd construction:
+    func_convert_core_file_wine_to_w32_tmp=`winepath -w "$1" 2>/dev/null`
+    if test "$?" -eq 0 && test -n "$func_convert_core_file_wine_to_w32_tmp"; then
+      func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" |
+        $SED -e "$sed_naive_backslashify"`
+    else
+      func_convert_core_file_wine_to_w32_result=
+    fi
+  fi
+}
+# end: func_convert_core_file_wine_to_w32
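+# Illustrative example (assumes a working wine/winepath setup): converting a
+# hypothetical $build file name with
+#   func_convert_core_file_wine_to_w32 /home/user/foo.c
+# would typically leave something like 'Z:\home\user\foo.c' in
+# func_convert_core_file_wine_to_w32_result, or an empty string on failure.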
+
+
+# func_convert_core_path_wine_to_w32 ARG
+# Helper function used by path conversion functions when $build is *nix, and
+# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly
+# configured wine environment available, with the winepath program in $build's
+# $PATH. Assumes ARG has no leading or trailing path separator characters.
+#
+# ARG is path to be converted from $build format to win32.
+# Result is available in $func_convert_core_path_wine_to_w32_result.
+# Unconvertible file (directory) names in ARG are skipped; if no directory names
+# are convertible, then the result may be empty.
+func_convert_core_path_wine_to_w32 ()
+{
+  $debug_cmd
+
+  # unfortunately, winepath doesn't convert paths, only file names
+  func_convert_core_path_wine_to_w32_result=
+  if test -n "$1"; then
+    oldIFS=$IFS
+    IFS=:
+    for func_convert_core_path_wine_to_w32_f in $1; do
+      IFS=$oldIFS
+      func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f"
+      if test -n "$func_convert_core_file_wine_to_w32_result"; then
+        if test -z "$func_convert_core_path_wine_to_w32_result"; then
+          func_convert_core_path_wine_to_w32_result=$func_convert_core_file_wine_to_w32_result
+        else
+          func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result"
+        fi
+      fi
+    done
+    IFS=$oldIFS
+  fi
+}
+# end: func_convert_core_path_wine_to_w32
+
+
+# func_cygpath ARGS...
+# Wrapper around calling the cygpath program via LT_CYGPATH. This is used
+# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2)
+# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or
+# (2), returns the Cygwin file name or path in func_cygpath_result (input
+# file name or path is assumed to be in w32 format, as previously converted
+# from $build's *nix or MSYS format). In case (3), returns the w32 file name
+# or path in func_cygpath_result (input file name or path is assumed to be in
+# Cygwin format). Returns an empty string on error.
+#
+# ARGS are passed to cygpath, with the last one being the file name or path to
+# be converted.
+#
+# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH
+# environment variable; do not put it in $PATH.
+func_cygpath ()
+{
+  $debug_cmd
+
+  if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then
+    func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null`
+    if test "$?" -ne 0; then
+      # on failure, ensure result is empty
+      func_cygpath_result=
+    fi
+  else
+    func_cygpath_result=
+    func_error "LT_CYGPATH is empty or specifies non-existent file: '$LT_CYGPATH'"
+  fi
+}
+#end: func_cygpath
+
+
+# func_convert_core_msys_to_w32 ARG
+# Convert file name or path ARG from MSYS format to w32 format.  Return
+# result in func_convert_core_msys_to_w32_result.
+func_convert_core_msys_to_w32 ()
+{
+  $debug_cmd
+
+  # awkward: cmd appends spaces to result
+  func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null |
+    $SED -e 's/[ ]*$//' -e "$sed_naive_backslashify"`
+}
+#end: func_convert_core_msys_to_w32
+
+
+# func_convert_file_check ARG1 ARG2
+# Verify that ARG1 (a file name in $build format) was converted to $host
+# format in ARG2. Otherwise, emit an error message, but continue (resetting
+# func_to_host_file_result to ARG1).
+func_convert_file_check ()
+{
+  $debug_cmd
+
+  if test -z "$2" && test -n "$1"; then
+    func_error "Could not determine host file name corresponding to"
+    func_error "  '$1'"
+    func_error "Continuing, but uninstalled executables may not work."
+    # Fallback:
+    func_to_host_file_result=$1
+  fi
+}
+# end func_convert_file_check
+
+
+# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH
+# Verify that FROM_PATH (a path in $build format) was converted to $host
+# format in TO_PATH. Otherwise, emit an error message, but continue, resetting
+# func_to_host_path_result to a simplistic fallback value (see below).
+func_convert_path_check ()
+{
+  $debug_cmd
+
+  if test -z "$4" && test -n "$3"; then
+    func_error "Could not determine the host path corresponding to"
+    func_error "  '$3'"
+    func_error "Continuing, but uninstalled executables may not work."
+    # Fallback.  This is a deliberately simplistic "conversion" and
+    # should not be "improved".  See libtool.info.
+    if test "x$1" != "x$2"; then
+      lt_replace_pathsep_chars="s|$1|$2|g"
+      func_to_host_path_result=`echo "$3" |
+        $SED -e "$lt_replace_pathsep_chars"`
+    else
+      func_to_host_path_result=$3
+    fi
+  fi
+}
+# end func_convert_path_check
+
+
+# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG
+# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT
+# and appending REPL if ORIG matches BACKPAT.
+func_convert_path_front_back_pathsep ()
+{
+  $debug_cmd
+
+  case $4 in
+  $1 ) func_to_host_path_result=$3$func_to_host_path_result
+    ;;
+  esac
+  case $4 in
+  $2 ) func_append func_to_host_path_result "$3"
+    ;;
+  esac
+}
+# end func_convert_path_front_back_pathsep
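+# Illustrative example (hypothetical path): with FRONTPAT=':*', BACKPAT='*:',
+# REPL=';' and ORIG=':/a:/b:', this prepends and appends ';' to
+# func_to_host_path_result, because ORIG both starts and ends with ':'.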
+
+
+##################################################
+# $build to $host FILE NAME CONVERSION FUNCTIONS #
+##################################################
+# invoked via '$to_host_file_cmd ARG'
+#
+# In each case, ARG is the path to be converted from $build to $host format.
+# Result will be available in $func_to_host_file_result.
+
+
+# func_to_host_file ARG
+# Converts the file name ARG from $build format to $host format. Return result
+# in func_to_host_file_result.
+func_to_host_file ()
+{
+  $debug_cmd
+
+  $to_host_file_cmd "$1"
+}
+# end func_to_host_file
+
+
+# func_to_tool_file ARG LAZY
+# converts the file name ARG from $build format to toolchain format. Return
+# result in func_to_tool_file_result.  If the conversion in use is listed
+# in (the comma separated) LAZY, no conversion takes place.
+func_to_tool_file ()
+{
+  $debug_cmd
+
+  case ,$2, in
+    *,"$to_tool_file_cmd",*)
+      func_to_tool_file_result=$1
+      ;;
+    *)
+      $to_tool_file_cmd "$1"
+      func_to_tool_file_result=$func_to_host_file_result
+      ;;
+  esac
+}
+# end func_to_tool_file
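+# Usage note: compile mode below calls
+#   func_to_tool_file "$srcfile" func_convert_file_msys_to_w32
+# so that when the conversion in use is func_convert_file_msys_to_w32 (i.e.
+# listed in LAZY), the file name is passed through without conversion.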
+
+
+# func_convert_file_noop ARG
+# Copy ARG to func_to_host_file_result.
+func_convert_file_noop ()
+{
+  func_to_host_file_result=$1
+}
+# end func_convert_file_noop
+
+
+# func_convert_file_msys_to_w32 ARG
+# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic
+# conversion to w32 is not available inside the cwrapper.  Returns result in
+# func_to_host_file_result.
+func_convert_file_msys_to_w32 ()
+{
+  $debug_cmd
+
+  func_to_host_file_result=$1
+  if test -n "$1"; then
+    func_convert_core_msys_to_w32 "$1"
+    func_to_host_file_result=$func_convert_core_msys_to_w32_result
+  fi
+  func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_msys_to_w32
+
+
+# func_convert_file_cygwin_to_w32 ARG
+# Convert file name ARG from Cygwin to w32 format.  Returns result in
+# func_to_host_file_result.
+func_convert_file_cygwin_to_w32 ()
+{
+  $debug_cmd
+
+  func_to_host_file_result=$1
+  if test -n "$1"; then
+    # because $build is cygwin, we call "the" cygpath in $PATH; no need to use
+    # LT_CYGPATH in this case.
+    func_to_host_file_result=`cygpath -m "$1"`
+  fi
+  func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_cygwin_to_w32
+
+
+# func_convert_file_nix_to_w32 ARG
+# Convert file name ARG from *nix to w32 format.  Requires a wine environment
+# and a working winepath. Returns result in func_to_host_file_result.
+func_convert_file_nix_to_w32 ()
+{
+  $debug_cmd
+
+  func_to_host_file_result=$1
+  if test -n "$1"; then
+    func_convert_core_file_wine_to_w32 "$1"
+    func_to_host_file_result=$func_convert_core_file_wine_to_w32_result
+  fi
+  func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_nix_to_w32
+
+
+# func_convert_file_msys_to_cygwin ARG
+# Convert file name ARG from MSYS to Cygwin format.  Requires LT_CYGPATH set.
+# Returns result in func_to_host_file_result.
+func_convert_file_msys_to_cygwin ()
+{
+  $debug_cmd
+
+  func_to_host_file_result=$1
+  if test -n "$1"; then
+    func_convert_core_msys_to_w32 "$1"
+    func_cygpath -u "$func_convert_core_msys_to_w32_result"
+    func_to_host_file_result=$func_cygpath_result
+  fi
+  func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_msys_to_cygwin
+
+
+# func_convert_file_nix_to_cygwin ARG
+# Convert file name ARG from *nix to Cygwin format.  Requires Cygwin installed
+# in a wine environment, working winepath, and LT_CYGPATH set.  Returns result
+# in func_to_host_file_result.
+func_convert_file_nix_to_cygwin ()
+{
+  $debug_cmd
+
+  func_to_host_file_result=$1
+  if test -n "$1"; then
+    # convert from *nix to w32, then use cygpath to convert from w32 to cygwin.
+    func_convert_core_file_wine_to_w32 "$1"
+    func_cygpath -u "$func_convert_core_file_wine_to_w32_result"
+    func_to_host_file_result=$func_cygpath_result
+  fi
+  func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_nix_to_cygwin
+
+
+#############################################
+# $build to $host PATH CONVERSION FUNCTIONS #
+#############################################
+# invoked via '$to_host_path_cmd ARG'
+#
+# In each case, ARG is the path to be converted from $build to $host format.
+# The result will be available in $func_to_host_path_result.
+#
+# Path separators are also converted from $build format to $host format.  If
+# ARG begins or ends with a path separator character, it is preserved (but
+# converted to $host format) on output.
+#
+# All path conversion functions are named using the following convention:
+#   file name conversion function    : func_convert_file_X_to_Y ()
+#   path conversion function         : func_convert_path_X_to_Y ()
+# where, for any given $build/$host combination the 'X_to_Y' value is the
+# same.  If conversion functions are added for new $build/$host combinations,
+# the two new functions must follow this pattern, or func_init_to_host_path_cmd
+# will break.
+
+
+# func_init_to_host_path_cmd
+# Ensures that function "pointer" variable $to_host_path_cmd is set to the
+# appropriate value, based on the value of $to_host_file_cmd.
+to_host_path_cmd=
+func_init_to_host_path_cmd ()
+{
+  $debug_cmd
+
+  if test -z "$to_host_path_cmd"; then
+    func_stripname 'func_convert_file_' '' "$to_host_file_cmd"
+    to_host_path_cmd=func_convert_path_$func_stripname_result
+  fi
+}
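+# Illustrative example (hypothetical configuration): if to_host_file_cmd is
+# func_convert_file_cygwin_to_w32, the first call to func_init_to_host_path_cmd
+# derives to_host_path_cmd=func_convert_path_cygwin_to_w32, following the
+# X_to_Y naming convention described above.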
+
+
+# func_to_host_path ARG
+# Converts the path ARG from $build format to $host format. Return result
+# in func_to_host_path_result.
+func_to_host_path ()
+{
+  $debug_cmd
+
+  func_init_to_host_path_cmd
+  $to_host_path_cmd "$1"
+}
+# end func_to_host_path
+
+
+# func_convert_path_noop ARG
+# Copy ARG to func_to_host_path_result.
+func_convert_path_noop ()
+{
+  func_to_host_path_result=$1
+}
+# end func_convert_path_noop
+
+
+# func_convert_path_msys_to_w32 ARG
+# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic
+# conversion to w32 is not available inside the cwrapper.  Returns result in
+# func_to_host_path_result.
+func_convert_path_msys_to_w32 ()
+{
+  $debug_cmd
+
+  func_to_host_path_result=$1
+  if test -n "$1"; then
+    # Remove leading and trailing path separator characters from ARG.  MSYS
+    # behavior is inconsistent here; cygpath turns them into '.;' and ';.';
+    # and winepath ignores them completely.
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_convert_core_msys_to_w32 "$func_to_host_path_tmp1"
+    func_to_host_path_result=$func_convert_core_msys_to_w32_result
+    func_convert_path_check : ";" \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+  fi
+}
+# end func_convert_path_msys_to_w32
+
+
+# func_convert_path_cygwin_to_w32 ARG
+# Convert path ARG from Cygwin to w32 format.  Returns result in
+# func_to_host_path_result.
+func_convert_path_cygwin_to_w32 ()
+{
+  $debug_cmd
+
+  func_to_host_path_result=$1
+  if test -n "$1"; then
+    # See func_convert_path_msys_to_w32:
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"`
+    func_convert_path_check : ";" \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+  fi
+}
+# end func_convert_path_cygwin_to_w32
+
+
+# func_convert_path_nix_to_w32 ARG
+# Convert path ARG from *nix to w32 format.  Requires a wine environment and
+# a working winepath.  Returns result in func_to_host_path_result.
+func_convert_path_nix_to_w32 ()
+{
+  $debug_cmd
+
+  func_to_host_path_result=$1
+  if test -n "$1"; then
+    # See func_convert_path_msys_to_w32:
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1"
+    func_to_host_path_result=$func_convert_core_path_wine_to_w32_result
+    func_convert_path_check : ";" \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+  fi
+}
+# end func_convert_path_nix_to_w32
+
+
+# func_convert_path_msys_to_cygwin ARG
+# Convert path ARG from MSYS to Cygwin format.  Requires LT_CYGPATH set.
+# Returns result in func_to_host_path_result.
+func_convert_path_msys_to_cygwin ()
+{
+  $debug_cmd
+
+  func_to_host_path_result=$1
+  if test -n "$1"; then
+    # See func_convert_path_msys_to_w32:
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_convert_core_msys_to_w32 "$func_to_host_path_tmp1"
+    func_cygpath -u -p "$func_convert_core_msys_to_w32_result"
+    func_to_host_path_result=$func_cygpath_result
+    func_convert_path_check : : \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" : "$1"
+  fi
+}
+# end func_convert_path_msys_to_cygwin
+
+
+# func_convert_path_nix_to_cygwin ARG
+# Convert path ARG from *nix to Cygwin format.  Requires Cygwin installed in
+# a wine environment, working winepath, and LT_CYGPATH set.  Returns result in
+# func_to_host_path_result.
+func_convert_path_nix_to_cygwin ()
+{
+  $debug_cmd
+
+  func_to_host_path_result=$1
+  if test -n "$1"; then
+    # Remove leading and trailing path separator characters from
+    # ARG. msys behavior is inconsistent here, cygpath turns them
+    # into '.;' and ';.', and winepath ignores them completely.
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1"
+    func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result"
+    func_to_host_path_result=$func_cygpath_result
+    func_convert_path_check : : \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" : "$1"
+  fi
+}
+# end func_convert_path_nix_to_cygwin
+
+
+# func_dll_def_p FILE
+# True iff FILE is a Windows DLL '.def' file.
+# Keep in sync with _LT_DLL_DEF_P in libtool.m4
+func_dll_def_p ()
+{
+  $debug_cmd
+
+  func_dll_def_p_tmp=`$SED -n \
+    -e 's/^[	 ]*//' \
+    -e '/^\(;.*\)*$/d' \
+    -e 's/^\(EXPORTS\|LIBRARY\)\([	 ].*\)*$/DEF/p' \
+    -e q \
+    "$1"`
+  test DEF = "$func_dll_def_p_tmp"
+}
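+# Illustrative example (hypothetical .def file): a file whose first non-blank,
+# non-';' comment line starts with LIBRARY or EXPORTS, e.g.
+#   LIBRARY "foo.dll"
+# is reduced to 'DEF' by the sed script above, so func_dll_def_p succeeds.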
+
+
+# func_mode_compile arg...
+func_mode_compile ()
+{
+    $debug_cmd
+
+    # Get the compilation command and the source file.
+    base_compile=
+    srcfile=$nonopt  #  always keep a non-empty value in "srcfile"
+    suppress_opt=yes
+    suppress_output=
+    arg_mode=normal
+    libobj=
+    later=
+    pie_flag=
+
+    for arg
+    do
+      case $arg_mode in
+      arg  )
+	# do not "continue".  Instead, add this to base_compile
+	lastarg=$arg
+	arg_mode=normal
+	;;
+
+      target )
+	libobj=$arg
+	arg_mode=normal
+	continue
+	;;
+
+      normal )
+	# Accept any command-line options.
+	case $arg in
+	-o)
+	  test -n "$libobj" && \
+	    func_fatal_error "you cannot specify '-o' more than once"
+	  arg_mode=target
+	  continue
+	  ;;
+
+	-pie | -fpie | -fPIE)
+          func_append pie_flag " $arg"
+	  continue
+	  ;;
+
+	-shared | -static | -prefer-pic | -prefer-non-pic)
+	  func_append later " $arg"
+	  continue
+	  ;;
+
+	-no-suppress)
+	  suppress_opt=no
+	  continue
+	  ;;
+
+	-Xcompiler)
+	  arg_mode=arg  #  the next one goes into the "base_compile" arg list
+	  continue      #  The current "srcfile" will either be retained or
+	  ;;            #  replaced later.  I would guess that would be a bug.
+
+	-Wc,*)
+	  func_stripname '-Wc,' '' "$arg"
+	  args=$func_stripname_result
+	  lastarg=
+	  save_ifs=$IFS; IFS=,
+	  for arg in $args; do
+	    IFS=$save_ifs
+	    func_append_quoted lastarg "$arg"
+	  done
+	  IFS=$save_ifs
+	  func_stripname ' ' '' "$lastarg"
+	  lastarg=$func_stripname_result
+
+	  # Add the arguments to base_compile.
+	  func_append base_compile " $lastarg"
+	  continue
+	  ;;
+
+	*)
+	  # Accept the current argument as the source file.
+	  # The previous "srcfile" becomes the current argument.
+	  #
+	  lastarg=$srcfile
+	  srcfile=$arg
+	  ;;
+	esac  #  case $arg
+	;;
+      esac    #  case $arg_mode
+
+      # Aesthetically quote the previous argument.
+      func_append_quoted base_compile "$lastarg"
+    done # for arg
+
+    case $arg_mode in
+    arg)
+      func_fatal_error "you must specify an argument for -Xcompiler"
+      ;;
+    target)
+      func_fatal_error "you must specify a target with '-o'"
+      ;;
+    *)
+      # Get the name of the library object.
+      test -z "$libobj" && {
+	func_basename "$srcfile"
+	libobj=$func_basename_result
+      }
+      ;;
+    esac
+
+    # Recognize several different file suffixes.
+    # If the user specifies -o file.o, it is replaced with file.lo
+    case $libobj in
+    *.[cCFSifmso] | \
+    *.ada | *.adb | *.ads | *.asm | \
+    *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \
+    *.[fF][09]? | *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup)
+      func_xform "$libobj"
+      libobj=$func_xform_result
+      ;;
+    esac
+
+    case $libobj in
+    *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;;
+    *)
+      func_fatal_error "cannot determine name of library object from '$libobj'"
+      ;;
+    esac
+
+    func_infer_tag $base_compile
+
+    for arg in $later; do
+      case $arg in
+      -shared)
+	test yes = "$build_libtool_libs" \
+	  || func_fatal_configuration "cannot build a shared library"
+	build_old_libs=no
+	continue
+	;;
+
+      -static)
+	build_libtool_libs=no
+	build_old_libs=yes
+	continue
+	;;
+
+      -prefer-pic)
+	pic_mode=yes
+	continue
+	;;
+
+      -prefer-non-pic)
+	pic_mode=no
+	continue
+	;;
+      esac
+    done
+
+    func_quote_for_eval "$libobj"
+    test "X$libobj" != "X$func_quote_for_eval_result" \
+      && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"'	 &()|`$[]' \
+      && func_warning "libobj name '$libobj' may not contain shell special characters."
+    func_dirname_and_basename "$obj" "/" ""
+    objname=$func_basename_result
+    xdir=$func_dirname_result
+    lobj=$xdir$objdir/$objname
+
+    test -z "$base_compile" && \
+      func_fatal_help "you must specify a compilation command"
+
+    # Delete any leftover library objects.
+    if test yes = "$build_old_libs"; then
+      removelist="$obj $lobj $libobj ${libobj}T"
+    else
+      removelist="$lobj $libobj ${libobj}T"
+    fi
+
+    # On Cygwin there's no "real" PIC flag so we must build both object types
+    case $host_os in
+    cygwin* | mingw* | pw32* | os2* | cegcc*)
+      pic_mode=default
+      ;;
+    esac
+    if test no = "$pic_mode" && test pass_all != "$deplibs_check_method"; then
+      # non-PIC code in shared libraries is not supported
+      pic_mode=default
+    fi
+
+    # Calculate the filename of the output object if compiler does
+    # not support -o with -c
+    if test no = "$compiler_c_o"; then
+      output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.$objext
+      lockfile=$output_obj.lock
+    else
+      output_obj=
+      need_locks=no
+      lockfile=
+    fi
+
+    # Lock this critical section if it is needed
+    # We use this script file to make the link, it avoids creating a new file
+    if test yes = "$need_locks"; then
+      until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do
+	func_echo "Waiting for $lockfile to be removed"
+	sleep 2
+      done
+    elif test warn = "$need_locks"; then
+      if test -f "$lockfile"; then
+	$ECHO "\
+*** ERROR, $lockfile exists and contains:
+`cat $lockfile 2>/dev/null`
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support '-c' and '-o' together.  If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+	$opt_dry_run || $RM $removelist
+	exit $EXIT_FAILURE
+      fi
+      func_append removelist " $output_obj"
+      $ECHO "$srcfile" > "$lockfile"
+    fi
+
+    $opt_dry_run || $RM $removelist
+    func_append removelist " $lockfile"
+    trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15
+
+    func_to_tool_file "$srcfile" func_convert_file_msys_to_w32
+    srcfile=$func_to_tool_file_result
+    func_quote_for_eval "$srcfile"
+    qsrcfile=$func_quote_for_eval_result
+
+    # Only build a PIC object if we are building libtool libraries.
+    if test yes = "$build_libtool_libs"; then
+      # Without this assignment, base_compile gets emptied.
+      fbsd_hideous_sh_bug=$base_compile
+
+      if test no != "$pic_mode"; then
+	command="$base_compile $qsrcfile $pic_flag"
+      else
+	# Don't build PIC code
+	command="$base_compile $qsrcfile"
+      fi
+
+      func_mkdir_p "$xdir$objdir"
+
+      if test -z "$output_obj"; then
+	# Place PIC objects in $objdir
+	func_append command " -o $lobj"
+      fi
+
+      func_show_eval_locale "$command"	\
+          'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE'
+
+      if test warn = "$need_locks" &&
+	 test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
+	$ECHO "\
+*** ERROR, $lockfile contains:
+`cat $lockfile 2>/dev/null`
+
+but it should contain:
+$srcfile
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support '-c' and '-o' together.  If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+	$opt_dry_run || $RM $removelist
+	exit $EXIT_FAILURE
+      fi
+
+      # Just move the object if needed, then go on to compile the next one
+      if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then
+	func_show_eval '$MV "$output_obj" "$lobj"' \
+	  'error=$?; $opt_dry_run || $RM $removelist; exit $error'
+      fi
+
+      # Allow error messages only from the first compilation.
+      if test yes = "$suppress_opt"; then
+	suppress_output=' >/dev/null 2>&1'
+      fi
+    fi
+
+    # Only build a position-dependent object if we build old libraries.
+    if test yes = "$build_old_libs"; then
+      if test yes != "$pic_mode"; then
+	# Don't build PIC code
+	command="$base_compile $qsrcfile$pie_flag"
+      else
+	command="$base_compile $qsrcfile $pic_flag"
+      fi
+      if test yes = "$compiler_c_o"; then
+	func_append command " -o $obj"
+      fi
+
+      # Suppress compiler output if we already did a PIC compilation.
+      func_append command "$suppress_output"
+      func_show_eval_locale "$command" \
+        '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE'
+
+      if test warn = "$need_locks" &&
+	 test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
+	$ECHO "\
+*** ERROR, $lockfile contains:
+`cat $lockfile 2>/dev/null`
+
+but it should contain:
+$srcfile
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support '-c' and '-o' together.  If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+	$opt_dry_run || $RM $removelist
+	exit $EXIT_FAILURE
+      fi
+
+      # Just move the object if needed
+      if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then
+	func_show_eval '$MV "$output_obj" "$obj"' \
+	  'error=$?; $opt_dry_run || $RM $removelist; exit $error'
+      fi
+    fi
+
+    $opt_dry_run || {
+      func_write_libtool_object "$libobj" "$objdir/$objname" "$objname"
+
+      # Unlock the critical section if it was locked
+      if test no != "$need_locks"; then
+	removelist=$lockfile
+        $RM "$lockfile"
+      fi
+    }
+
+    exit $EXIT_SUCCESS
+}
+
+$opt_help || {
+  test compile = "$opt_mode" && func_mode_compile ${1+"$@"}
+}
+
+func_mode_help ()
+{
+    # We need to display help for each of the modes.
+    case $opt_mode in
+      "")
+        # Generic help is extracted from the usage comments
+        # at the start of this file.
+        func_help
+        ;;
+
+      clean)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE...
+
+Remove files from the build directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically '/bin/rm').  RM-OPTIONS are options (such as '-f') to be passed
+to RM.
+
+If FILE is a libtool library, object or program, all the files associated
+with it are deleted. Otherwise, only FILE itself is deleted using RM."
+        ;;
+
+      compile)
+      $ECHO \
+"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE
+
+Compile a source file into a libtool library object.
+
+This mode accepts the following additional options:
+
+  -o OUTPUT-FILE    set the output file name to OUTPUT-FILE
+  -no-suppress      do not suppress compiler output for multiple passes
+  -prefer-pic       try to build PIC objects only
+  -prefer-non-pic   try to build non-PIC objects only
+  -shared           do not build a '.o' file suitable for static linking
+  -static           only build a '.o' file suitable for static linking
+  -Wc,FLAG          pass FLAG directly to the compiler
+
+COMPILE-COMMAND is a command to be used in creating a 'standard' object file
+from the given SOURCEFILE.
+
+The output file name is determined by removing the directory component from
+SOURCEFILE, then substituting the C source code suffix '.c' with the
+library object suffix, '.lo'."
+        ;;
+
+      execute)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]...
+
+Automatically set library path, then run a program.
+
+This mode accepts the following additional options:
+
+  -dlopen FILE      add the directory containing FILE to the library path
+
+This mode sets the library path environment variable according to '-dlopen'
+flags.
+
+If any of the ARGS are libtool executable wrappers, then they are translated
+into their corresponding uninstalled binary, and any of their required library
+directories are added to the library path.
+
+Then, COMMAND is executed, with ARGS as arguments."
+        ;;
+
+      finish)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=finish [LIBDIR]...
+
+Complete the installation of libtool libraries.
+
+Each LIBDIR is a directory that contains libtool libraries.
+
+The commands that this mode executes may require superuser privileges.  Use
+the '--dry-run' option if you just want to see what would be executed."
+        ;;
+
+      install)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND...
+
+Install executables or libraries.
+
+INSTALL-COMMAND is the installation command.  The first component should be
+either the 'install' or 'cp' program.
+
+The following components of INSTALL-COMMAND are treated specially:
+
+  -inst-prefix-dir PREFIX-DIR  Use PREFIX-DIR as a staging area for installation
+
+The rest of the components are interpreted as arguments to that command (only
+BSD-compatible install options are recognized)."
+        ;;
+
+      link)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=link LINK-COMMAND...
+
+Link object files or libraries together to form another library, or to
+create an executable program.
+
+LINK-COMMAND is a command using the C compiler that you would use to create
+a program from several object files.
+
+The following components of LINK-COMMAND are treated specially:
+
+  -all-static       do not do any dynamic linking at all
+  -avoid-version    do not add a version suffix if possible
+  -bindir BINDIR    specify path to binaries directory (for systems where
+                    libraries must be found in the PATH setting at runtime)
+  -dlopen FILE      '-dlpreopen' FILE if it cannot be dlopened at runtime
+  -dlpreopen FILE   link in FILE and add its symbols to lt_preloaded_symbols
+  -export-dynamic   allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
+  -export-symbols SYMFILE
+                    try to export only the symbols listed in SYMFILE
+  -export-symbols-regex REGEX
+                    try to export only the symbols matching REGEX
+  -LLIBDIR          search LIBDIR for required installed libraries
+  -lNAME            OUTPUT-FILE requires the installed library libNAME
+  -module           build a library that can be dlopened
+  -no-fast-install  disable the fast-install mode
+  -no-install       link a not-installable executable
+  -no-undefined     declare that a library does not refer to external symbols
+  -o OUTPUT-FILE    create OUTPUT-FILE from the specified objects
+  -objectlist FILE  use a list of object files found in FILE to specify objects
+  -os2dllname NAME  force a short DLL name on OS/2 (no effect on other OSes)
+  -precious-files-regex REGEX
+                    don't remove output files matching REGEX
+  -release RELEASE  specify package release information
+  -rpath LIBDIR     the created library will eventually be installed in LIBDIR
+  -R[ ]LIBDIR       add LIBDIR to the runtime path of programs and libraries
+  -shared           only do dynamic linking of libtool libraries
+  -shrext SUFFIX    override the standard shared library file extension
+  -static           do not do any dynamic linking of uninstalled libtool libraries
+  -static-libtool-libs
+                    do not do any dynamic linking of libtool libraries
+  -version-info CURRENT[:REVISION[:AGE]]
+                    specify library version info [each variable defaults to 0]
+  -weak LIBNAME     declare that the target provides the LIBNAME interface
+  -Wc,FLAG
+  -Xcompiler FLAG   pass linker-specific FLAG directly to the compiler
+  -Wl,FLAG
+  -Xlinker FLAG     pass linker-specific FLAG directly to the linker
+  -XCClinker FLAG   pass link-specific FLAG to the compiler driver (CC)
+
+All other options (arguments beginning with '-') are ignored.
+
+Every other argument is treated as a filename.  Files ending in '.la' are
+treated as uninstalled libtool libraries, other files are standard or library
+object files.
+
+If the OUTPUT-FILE ends in '.la', then a libtool library is created,
+only library objects ('.lo' files) may be specified, and '-rpath' is
+required, except when creating a convenience library.
+
+If OUTPUT-FILE ends in '.a' or '.lib', then a standard library is created
+using 'ar' and 'ranlib', or on Windows using 'lib'.
+
+If OUTPUT-FILE ends in '.lo' or '.$objext', then a reloadable object file
+is created, otherwise an executable program is created."
+        ;;
+
+      uninstall)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...
+
+Remove libraries from an installation directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically '/bin/rm').  RM-OPTIONS are options (such as '-f') to be passed
+to RM.
+
+If FILE is a libtool library, all the files associated with it are deleted.
+Otherwise, only FILE itself is deleted using RM."
+        ;;
+
+      *)
+        func_fatal_help "invalid operation mode '$opt_mode'"
+        ;;
+    esac
+
+    echo
+    $ECHO "Try '$progname --help' for more information about other modes."
+}
+
+# Now that we've collected a possible --mode arg, show help if necessary
+if $opt_help; then
+  if test : = "$opt_help"; then
+    func_mode_help
+  else
+    {
+      func_help noexit
+      for opt_mode in compile link execute install finish uninstall clean; do
+	func_mode_help
+      done
+    } | $SED -n '1p; 2,$s/^Usage:/  or: /p'
+    {
+      func_help noexit
+      for opt_mode in compile link execute install finish uninstall clean; do
+	echo
+	func_mode_help
+      done
+    } |
+    $SED '1d
+      /^When reporting/,/^Report/{
+	H
+	d
+      }
+      $x
+      /information about other modes/d
+      /more detailed .*MODE/d
+      s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/'
+  fi
+  exit $?
+fi
+
+
+# func_mode_execute arg...
+func_mode_execute ()
+{
+    $debug_cmd
+
+    # The first argument is the command name.
+    cmd=$nonopt
+    test -z "$cmd" && \
+      func_fatal_help "you must specify a COMMAND"
+
+    # Handle -dlopen flags immediately.
+    for file in $opt_dlopen; do
+      test -f "$file" \
+	|| func_fatal_help "'$file' is not a file"
+
+      dir=
+      case $file in
+      *.la)
+	func_resolve_sysroot "$file"
+	file=$func_resolve_sysroot_result
+
+	# Check to see that this really is a libtool archive.
+	func_lalib_unsafe_p "$file" \
+	  || func_fatal_help "'$lib' is not a valid libtool archive"
+
+	# Read the libtool library.
+	dlname=
+	library_names=
+	func_source "$file"
+
+	# Skip this library if it cannot be dlopened.
+	if test -z "$dlname"; then
+	  # Warn if it was a shared library.
+	  test -n "$library_names" && \
+	    func_warning "'$file' was not linked with '-export-dynamic'"
+	  continue
+	fi
+
+	func_dirname "$file" "" "."
+	dir=$func_dirname_result
+
+	if test -f "$dir/$objdir/$dlname"; then
+	  func_append dir "/$objdir"
+	else
+	  if test ! -f "$dir/$dlname"; then
+	    func_fatal_error "cannot find '$dlname' in '$dir' or '$dir/$objdir'"
+	  fi
+	fi
+	;;
+
+      *.lo)
+	# Just add the directory containing the .lo file.
+	func_dirname "$file" "" "."
+	dir=$func_dirname_result
+	;;
+
+      *)
+	func_warning "'-dlopen' is ignored for non-libtool libraries and objects"
+	continue
+	;;
+      esac
+
+      # Get the absolute pathname.
+      absdir=`cd "$dir" && pwd`
+      test -n "$absdir" && dir=$absdir
+
+      # Now add the directory to shlibpath_var.
+      if eval "test -z \"\$$shlibpath_var\""; then
+	eval "$shlibpath_var=\"\$dir\""
+      else
+	eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\""
+      fi
+    done
+
+    # This variable tells wrapper scripts just to set shlibpath_var
+    # rather than running their programs.
+    libtool_execute_magic=$magic
+
+    # Check if any of the arguments is a wrapper script.
+    args=
+    for file
+    do
+      case $file in
+      -* | *.la | *.lo ) ;;
+      *)
+	# Do a test to see if this is really a libtool program.
+	if func_ltwrapper_script_p "$file"; then
+	  func_source "$file"
+	  # Transform arg to wrapped name.
+	  file=$progdir/$program
+	elif func_ltwrapper_executable_p "$file"; then
+	  func_ltwrapper_scriptname "$file"
+	  func_source "$func_ltwrapper_scriptname_result"
+	  # Transform arg to wrapped name.
+	  file=$progdir/$program
+	fi
+	;;
+      esac
+      # Quote arguments (to preserve shell metacharacters).
+      func_append_quoted args "$file"
+    done
+
+    if $opt_dry_run; then
+      # Display what would be done.
+      if test -n "$shlibpath_var"; then
+	eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\""
+	echo "export $shlibpath_var"
+      fi
+      $ECHO "$cmd$args"
+      exit $EXIT_SUCCESS
+    else
+      if test -n "$shlibpath_var"; then
+	# Export the shlibpath_var.
+	eval "export $shlibpath_var"
+      fi
+
+      # Restore saved environment variables
+      for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES
+      do
+	eval "if test \"\${save_$lt_var+set}\" = set; then
+                $lt_var=\$save_$lt_var; export $lt_var
+	      else
+		$lt_unset $lt_var
+	      fi"
+      done
+
+      # Now prepare to actually exec the command.
+      exec_cmd=\$cmd$args
+    fi
+}
+
+test execute = "$opt_mode" && func_mode_execute ${1+"$@"}
+
+
+# func_mode_finish arg...
+func_mode_finish ()
+{
+    $debug_cmd
+
+    libs=
+    libdirs=
+    admincmds=
+
+    for opt in "$nonopt" ${1+"$@"}
+    do
+      if test -d "$opt"; then
+	func_append libdirs " $opt"
+
+      elif test -f "$opt"; then
+	if func_lalib_unsafe_p "$opt"; then
+	  func_append libs " $opt"
+	else
+	  func_warning "'$opt' is not a valid libtool archive"
+	fi
+
+      else
+	func_fatal_error "invalid argument '$opt'"
+      fi
+    done
+
+    if test -n "$libs"; then
+      if test -n "$lt_sysroot"; then
+        sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"`
+        sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;"
+      else
+        sysroot_cmd=
+      fi
+
+      # Remove sysroot references
+      if $opt_dry_run; then
+        for lib in $libs; do
+          echo "removing references to $lt_sysroot and '=' prefixes from $lib"
+        done
+      else
+        tmpdir=`func_mktempdir`
+        for lib in $libs; do
+	  $SED -e "$sysroot_cmd s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \
+	    > $tmpdir/tmp-la
+	  mv -f $tmpdir/tmp-la $lib
+	done
+        ${RM}r "$tmpdir"
+      fi
+    fi
+
+    if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
+      for libdir in $libdirs; do
+	if test -n "$finish_cmds"; then
+	  # Do each command in the finish commands.
+	  func_execute_cmds "$finish_cmds" 'admincmds="$admincmds
+'"$cmd"'"'
+	fi
+	if test -n "$finish_eval"; then
+	  # Do the single finish_eval.
+	  eval cmds=\"$finish_eval\"
+	  $opt_dry_run || eval "$cmds" || func_append admincmds "
+       $cmds"
+	fi
+      done
+    fi
+
+    # Exit here if they wanted silent mode.
+    $opt_quiet && exit $EXIT_SUCCESS
+
+    if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
+      echo "----------------------------------------------------------------------"
+      echo "Libraries have been installed in:"
+      for libdir in $libdirs; do
+	$ECHO "   $libdir"
+      done
+      echo
+      echo "If you ever happen to want to link against installed libraries"
+      echo "in a given directory, LIBDIR, you must either use libtool, and"
+      echo "specify the full pathname of the library, or use the '-LLIBDIR'"
+      echo "flag during linking and do at least one of the following:"
+      if test -n "$shlibpath_var"; then
+	echo "   - add LIBDIR to the '$shlibpath_var' environment variable"
+	echo "     during execution"
+      fi
+      if test -n "$runpath_var"; then
+	echo "   - add LIBDIR to the '$runpath_var' environment variable"
+	echo "     during linking"
+      fi
+      if test -n "$hardcode_libdir_flag_spec"; then
+	libdir=LIBDIR
+	eval flag=\"$hardcode_libdir_flag_spec\"
+
+	$ECHO "   - use the '$flag' linker flag"
+      fi
+      if test -n "$admincmds"; then
+	$ECHO "   - have your system administrator run these commands:$admincmds"
+      fi
+      if test -f /etc/ld.so.conf; then
+	echo "   - have your system administrator add LIBDIR to '/etc/ld.so.conf'"
+      fi
+      echo
+
+      echo "See any operating system documentation about shared libraries for"
+      case $host in
+	solaris2.[6789]|solaris2.1[0-9])
+	  echo "more information, such as the ld(1), crle(1) and ld.so(8) manual"
+	  echo "pages."
+	  ;;
+	*)
+	  echo "more information, such as the ld(1) and ld.so(8) manual pages."
+	  ;;
+      esac
+      echo "----------------------------------------------------------------------"
+    fi
+    exit $EXIT_SUCCESS
+}
+
+test finish = "$opt_mode" && func_mode_finish ${1+"$@"}
+
+
+# func_mode_install arg...
+func_mode_install ()
+{
+    $debug_cmd
+
+    # There may be an optional sh(1) argument at the beginning of
+    # install_prog (especially on Windows NT).
+    if test "$SHELL" = "$nonopt" || test /bin/sh = "$nonopt" ||
+       # Allow the use of GNU shtool's install command.
+       case $nonopt in *shtool*) :;; *) false;; esac
+    then
+      # Aesthetically quote it.
+      func_quote_for_eval "$nonopt"
+      install_prog="$func_quote_for_eval_result "
+      arg=$1
+      shift
+    else
+      install_prog=
+      arg=$nonopt
+    fi
+
+    # The real first argument should be the name of the installation program.
+    # Aesthetically quote it.
+    func_quote_for_eval "$arg"
+    func_append install_prog "$func_quote_for_eval_result"
+    install_shared_prog=$install_prog
+    case " $install_prog " in
+      *[\\\ /]cp\ *) install_cp=: ;;
+      *) install_cp=false ;;
+    esac
+
+    # We need to accept at least all the BSD install flags.
+    dest=
+    files=
+    opts=
+    prev=
+    install_type=
+    isdir=false
+    stripme=
+    no_mode=:
+    for arg
+    do
+      arg2=
+      if test -n "$dest"; then
+	func_append files " $dest"
+	dest=$arg
+	continue
+      fi
+
+      case $arg in
+      -d) isdir=: ;;
+      -f)
+	if $install_cp; then :; else
+	  prev=$arg
+	fi
+	;;
+      -g | -m | -o)
+	prev=$arg
+	;;
+      -s)
+	stripme=" -s"
+	continue
+	;;
+      -*)
+	;;
+      *)
+	# If the previous option needed an argument, then skip it.
+	if test -n "$prev"; then
+	  if test X-m = "X$prev" && test -n "$install_override_mode"; then
+	    arg2=$install_override_mode
+	    no_mode=false
+	  fi
+	  prev=
+	else
+	  dest=$arg
+	  continue
+	fi
+	;;
+      esac
+
+      # Aesthetically quote the argument.
+      func_quote_for_eval "$arg"
+      func_append install_prog " $func_quote_for_eval_result"
+      if test -n "$arg2"; then
+	func_quote_for_eval "$arg2"
+      fi
+      func_append install_shared_prog " $func_quote_for_eval_result"
+    done
+
+    test -z "$install_prog" && \
+      func_fatal_help "you must specify an install program"
+
+    test -n "$prev" && \
+      func_fatal_help "the '$prev' option requires an argument"
+
+    if test -n "$install_override_mode" && $no_mode; then
+      if $install_cp; then :; else
+	func_quote_for_eval "$install_override_mode"
+	func_append install_shared_prog " -m $func_quote_for_eval_result"
+      fi
+    fi
+
+    if test -z "$files"; then
+      if test -z "$dest"; then
+	func_fatal_help "no file or destination specified"
+      else
+	func_fatal_help "you must specify a destination"
+      fi
+    fi
+
+    # Strip any trailing slash from the destination.
+    func_stripname '' '/' "$dest"
+    dest=$func_stripname_result
+
+    # Check to see that the destination is a directory.
+    test -d "$dest" && isdir=:
+    if $isdir; then
+      destdir=$dest
+      destname=
+    else
+      func_dirname_and_basename "$dest" "" "."
+      destdir=$func_dirname_result
+      destname=$func_basename_result
+
+      # Not a directory, so check to see that there is only one file specified.
+      set dummy $files; shift
+      test "$#" -gt 1 && \
+	func_fatal_help "'$dest' is not a directory"
+    fi
+    case $destdir in
+    [\\/]* | [A-Za-z]:[\\/]*) ;;
+    *)
+      for file in $files; do
+	case $file in
+	*.lo) ;;
+	*)
+	  func_fatal_help "'$destdir' must be an absolute directory name"
+	  ;;
+	esac
+      done
+      ;;
+    esac
+
+    # This variable tells wrapper scripts just to set variables rather
+    # than running their programs.
+    libtool_install_magic=$magic
+
+    staticlibs=
+    future_libdirs=
+    current_libdirs=
+    for file in $files; do
+
+      # Do each installation.
+      case $file in
+      *.$libext)
+	# Do the static libraries later.
+	func_append staticlibs " $file"
+	;;
+
+      *.la)
+	func_resolve_sysroot "$file"
+	file=$func_resolve_sysroot_result
+
+	# Check to see that this really is a libtool archive.
+	func_lalib_unsafe_p "$file" \
+	  || func_fatal_help "'$file' is not a valid libtool archive"
+
+	library_names=
+	old_library=
+	relink_command=
+	func_source "$file"
+
+	# Add the libdir to current_libdirs if it is the destination.
+	if test "X$destdir" = "X$libdir"; then
+	  case "$current_libdirs " in
+	  *" $libdir "*) ;;
+	  *) func_append current_libdirs " $libdir" ;;
+	  esac
+	else
+	  # Note the libdir as a future libdir.
+	  case "$future_libdirs " in
+	  *" $libdir "*) ;;
+	  *) func_append future_libdirs " $libdir" ;;
+	  esac
+	fi
+
+	func_dirname "$file" "/" ""
+	dir=$func_dirname_result
+	func_append dir "$objdir"
+
+	if test -n "$relink_command"; then
+	  # Determine the prefix the user has applied to our future dir.
+	  inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"`
+
+	  # Don't allow the user to place us outside of our expected
+	  # location because this prevents finding dependent libraries that
+	  # are installed to the same prefix.
+	  # At present, this check doesn't affect windows .dll's that
+	  # are installed into $libdir/../bin (currently, that works fine)
+	  # but it's something to keep an eye on.
+	  test "$inst_prefix_dir" = "$destdir" && \
+	    func_fatal_error "error: cannot install '$file' to a directory not ending in $libdir"
+
+	  if test -n "$inst_prefix_dir"; then
+	    # Stick the inst_prefix_dir data into the link command.
+	    relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"`
+	  else
+	    relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"`
+	  fi
+
+	  func_warning "relinking '$file'"
+	  func_show_eval "$relink_command" \
+	    'func_fatal_error "error: relink '\''$file'\'' with the above command before installing it"'
+	fi
+
+	# See the names of the shared library.
+	set dummy $library_names; shift
+	if test -n "$1"; then
+	  realname=$1
+	  shift
+
+	  srcname=$realname
+	  test -n "$relink_command" && srcname=${realname}T
+
+	  # Install the shared library and build the symlinks.
+	  func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \
+	      'exit $?'
+	  tstripme=$stripme
+	  case $host_os in
+	  cygwin* | mingw* | pw32* | cegcc*)
+	    case $realname in
+	    *.dll.a)
+	      tstripme=
+	      ;;
+	    esac
+	    ;;
+	  os2*)
+	    case $realname in
+	    *_dll.a)
+	      tstripme=
+	      ;;
+	    esac
+	    ;;
+	  esac
+	  if test -n "$tstripme" && test -n "$striplib"; then
+	    func_show_eval "$striplib $destdir/$realname" 'exit $?'
+	  fi
+
+	  if test "$#" -gt 0; then
+	    # Delete the old symlinks, and create new ones.
+	    # Try 'ln -sf' first, because the 'ln' binary might depend on
+	    # the symlink we replace!  Solaris /bin/ln does not understand -f,
+	    # so we also need to try rm && ln -s.
+	    for linkname
+	    do
+	      test "$linkname" != "$realname" \
+		&& func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })"
+	    done
+	  fi
+
+	  # Do each command in the postinstall commands.
+	  lib=$destdir/$realname
+	  func_execute_cmds "$postinstall_cmds" 'exit $?'
+	fi
+
+	# Install the pseudo-library for information purposes.
+	func_basename "$file"
+	name=$func_basename_result
+	instname=$dir/${name}i
+	func_show_eval "$install_prog $instname $destdir/$name" 'exit $?'
+
+	# Maybe install the static library, too.
+	test -n "$old_library" && func_append staticlibs " $dir/$old_library"
+	;;
+
+      *.lo)
+	# Install (i.e. copy) a libtool object.
+
+	# Figure out destination file name, if it wasn't already specified.
+	if test -n "$destname"; then
+	  destfile=$destdir/$destname
+	else
+	  func_basename "$file"
+	  destfile=$func_basename_result
+	  destfile=$destdir/$destfile
+	fi
+
+	# Deduce the name of the destination old-style object file.
+	case $destfile in
+	*.lo)
+	  func_lo2o "$destfile"
+	  staticdest=$func_lo2o_result
+	  ;;
+	*.$objext)
+	  staticdest=$destfile
+	  destfile=
+	  ;;
+	*)
+	  func_fatal_help "cannot copy a libtool object to '$destfile'"
+	  ;;
+	esac
+
+	# Install the libtool object if requested.
+	test -n "$destfile" && \
+	  func_show_eval "$install_prog $file $destfile" 'exit $?'
+
+	# Install the old object if enabled.
+	if test yes = "$build_old_libs"; then
+	  # Deduce the name of the old-style object file.
+	  func_lo2o "$file"
+	  staticobj=$func_lo2o_result
+	  func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?'
+	fi
+	exit $EXIT_SUCCESS
+	;;
+
+      *)
+	# Figure out destination file name, if it wasn't already specified.
+	if test -n "$destname"; then
+	  destfile=$destdir/$destname
+	else
+	  func_basename "$file"
+	  destfile=$func_basename_result
+	  destfile=$destdir/$destfile
+	fi
+
+	# If the file is missing, and there is a .exe on the end, strip it
+	# because it is most likely a libtool script we actually want to
+	# install
+	stripped_ext=
+	case $file in
+	  *.exe)
+	    if test ! -f "$file"; then
+	      func_stripname '' '.exe' "$file"
+	      file=$func_stripname_result
+	      stripped_ext=.exe
+	    fi
+	    ;;
+	esac
+
+	# Do a test to see if this is really a libtool program.
+	case $host in
+	*cygwin* | *mingw*)
+	    if func_ltwrapper_executable_p "$file"; then
+	      func_ltwrapper_scriptname "$file"
+	      wrapper=$func_ltwrapper_scriptname_result
+	    else
+	      func_stripname '' '.exe' "$file"
+	      wrapper=$func_stripname_result
+	    fi
+	    ;;
+	*)
+	    wrapper=$file
+	    ;;
+	esac
+	if func_ltwrapper_script_p "$wrapper"; then
+	  notinst_deplibs=
+	  relink_command=
+
+	  func_source "$wrapper"
+
+	  # Check the variables that should have been set.
+	  test -z "$generated_by_libtool_version" && \
+	    func_fatal_error "invalid libtool wrapper script '$wrapper'"
+
+	  finalize=:
+	  for lib in $notinst_deplibs; do
+	    # Check to see that each library is installed.
+	    libdir=
+	    if test -f "$lib"; then
+	      func_source "$lib"
+	    fi
+	    libfile=$libdir/`$ECHO "$lib" | $SED 's%^.*/%%g'`
+	    if test -n "$libdir" && test ! -f "$libfile"; then
+	      func_warning "'$lib' has not been installed in '$libdir'"
+	      finalize=false
+	    fi
+	  done
+
+	  relink_command=
+	  func_source "$wrapper"
+
+	  outputname=
+	  if test no = "$fast_install" && test -n "$relink_command"; then
+	    $opt_dry_run || {
+	      if $finalize; then
+	        tmpdir=`func_mktempdir`
+		func_basename "$file$stripped_ext"
+		file=$func_basename_result
+	        outputname=$tmpdir/$file
+	        # Replace the output file specification.
+	        relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'`
+
+	        $opt_quiet || {
+	          func_quote_for_expand "$relink_command"
+		  eval "func_echo $func_quote_for_expand_result"
+	        }
+	        if eval "$relink_command"; then :
+	          else
+		  func_error "error: relink '$file' with the above command before installing it"
+		  $opt_dry_run || ${RM}r "$tmpdir"
+		  continue
+	        fi
+	        file=$outputname
+	      else
+	        func_warning "cannot relink '$file'"
+	      fi
+	    }
+	  else
+	    # Install the binary that we compiled earlier.
+	    file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"`
+	  fi
+	fi
+
+	# remove .exe since cygwin /usr/bin/install will append another
+	# one anyway
+	case $install_prog,$host in
+	*/usr/bin/install*,*cygwin*)
+	  case $file:$destfile in
+	  *.exe:*.exe)
+	    # this is ok
+	    ;;
+	  *.exe:*)
+	    destfile=$destfile.exe
+	    ;;
+	  *:*.exe)
+	    func_stripname '' '.exe' "$destfile"
+	    destfile=$func_stripname_result
+	    ;;
+	  esac
+	  ;;
+	esac
+	func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?'
+	$opt_dry_run || if test -n "$outputname"; then
+	  ${RM}r "$tmpdir"
+	fi
+	;;
+      esac
+    done
+
+    for file in $staticlibs; do
+      func_basename "$file"
+      name=$func_basename_result
+
+      # Set up the ranlib parameters.
+      oldlib=$destdir/$name
+      func_to_tool_file "$oldlib" func_convert_file_msys_to_w32
+      tool_oldlib=$func_to_tool_file_result
+
+      func_show_eval "$install_prog \$file \$oldlib" 'exit $?'
+
+      if test -n "$stripme" && test -n "$old_striplib"; then
+	func_show_eval "$old_striplib $tool_oldlib" 'exit $?'
+      fi
+
+      # Do each command in the postinstall commands.
+      func_execute_cmds "$old_postinstall_cmds" 'exit $?'
+    done
+
+    test -n "$future_libdirs" && \
+      func_warning "remember to run '$progname --finish$future_libdirs'"
+
+    if test -n "$current_libdirs"; then
+      # Maybe just do a dry run.
+      $opt_dry_run && current_libdirs=" -n$current_libdirs"
+      exec_cmd='$SHELL "$progpath" $preserve_args --finish$current_libdirs'
+    else
+      exit $EXIT_SUCCESS
+    fi
+}
+
+test install = "$opt_mode" && func_mode_install ${1+"$@"}
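+# Illustrative usage (file names below are examples only): install mode is
+# normally driven from a Makefile rule of the form
+#
+#   libtool --mode=install /usr/bin/install -c libfoo.la /usr/local/lib/libfoo.la
+#
+# func_mode_install then installs the shared library and its symlinks, the
+# .la pseudo-library, and finally any static archives collected in
+# $staticlibs.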
+
+
+# func_generate_dlsyms outputname originator pic_p
+# Extract symbols from dlprefiles and create ${outputname}S.o with
+# a dlpreopen symbol table.
+func_generate_dlsyms ()
+{
+    $debug_cmd
+
+    my_outputname=$1
+    my_originator=$2
+    my_pic_p=${3-false}
+    my_prefix=`$ECHO "$my_originator" | $SED 's%[^a-zA-Z0-9]%_%g'`
+    my_dlsyms=
+
+    if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then
+      if test -n "$NM" && test -n "$global_symbol_pipe"; then
+	my_dlsyms=${my_outputname}S.c
+      else
+	func_error "not configured to extract global symbols from dlpreopened files"
+      fi
+    fi
+
+    if test -n "$my_dlsyms"; then
+      case $my_dlsyms in
+      "") ;;
+      *.c)
+	# Discover the nlist of each of the dlfiles.
+	nlist=$output_objdir/$my_outputname.nm
+
+	func_show_eval "$RM $nlist ${nlist}S ${nlist}T"
+
+	# Parse the name list into a source file.
+	func_verbose "creating $output_objdir/$my_dlsyms"
+
+	$opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\
+/* $my_dlsyms - symbol resolution table for '$my_outputname' dlsym emulation. */
+/* Generated by $PROGRAM (GNU $PACKAGE) $VERSION */
+
+#ifdef __cplusplus
+extern \"C\" {
+#endif
+
+#if defined __GNUC__ && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4))
+#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
+#endif
+
+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests.  */
+#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE
+/* DATA imports from DLLs on WIN32 can't be const, because runtime
+   relocations are performed -- see ld's documentation on pseudo-relocs.  */
+# define LT_DLSYM_CONST
+#elif defined __osf__
+/* This system does not cope well with relocations in const data.  */
+# define LT_DLSYM_CONST
+#else
+# define LT_DLSYM_CONST const
+#endif
+
+#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0)
+
+/* External symbol declarations for the compiler. */\
+"
+
+	if test yes = "$dlself"; then
+	  func_verbose "generating symbol list for '$output'"
+
+	  $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist"
+
+	  # Add our own program objects to the symbol list.
+	  progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP`
+	  for progfile in $progfiles; do
+	    func_to_tool_file "$progfile" func_convert_file_msys_to_w32
+	    func_verbose "extracting global C symbols from '$func_to_tool_file_result'"
+	    $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'"
+	  done
+
+	  if test -n "$exclude_expsyms"; then
+	    $opt_dry_run || {
+	      eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
+	      eval '$MV "$nlist"T "$nlist"'
+	    }
+	  fi
+
+	  if test -n "$export_symbols_regex"; then
+	    $opt_dry_run || {
+	      eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T'
+	      eval '$MV "$nlist"T "$nlist"'
+	    }
+	  fi
+
+	  # Prepare the list of exported symbols
+	  if test -z "$export_symbols"; then
+	    export_symbols=$output_objdir/$outputname.exp
+	    $opt_dry_run || {
+	      $RM $export_symbols
+	      eval "$SED -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
+	      case $host in
+	      *cygwin* | *mingw* | *cegcc* )
+                eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
+                eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"'
+	        ;;
+	      esac
+	    }
+	  else
+	    $opt_dry_run || {
+	      eval "$SED -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"'
+	      eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T'
+	      eval '$MV "$nlist"T "$nlist"'
+	      case $host in
+	        *cygwin* | *mingw* | *cegcc* )
+	          eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
+	          eval 'cat "$nlist" >> "$output_objdir/$outputname.def"'
+	          ;;
+	      esac
+	    }
+	  fi
+	fi
+
+	for dlprefile in $dlprefiles; do
+	  func_verbose "extracting global C symbols from '$dlprefile'"
+	  func_basename "$dlprefile"
+	  name=$func_basename_result
+          case $host in
+	    *cygwin* | *mingw* | *cegcc* )
+	      # if an import library, we need to obtain dlname
+	      if func_win32_import_lib_p "$dlprefile"; then
+	        func_tr_sh "$dlprefile"
+	        eval "curr_lafile=\$libfile_$func_tr_sh_result"
+	        dlprefile_dlbasename=
+	        if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then
+	          # Use subshell, to avoid clobbering current variable values
+	          dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"`
+	          if test -n "$dlprefile_dlname"; then
+	            func_basename "$dlprefile_dlname"
+	            dlprefile_dlbasename=$func_basename_result
+	          else
+	            # no lafile. user explicitly requested -dlpreopen <import library>.
+	            $sharedlib_from_linklib_cmd "$dlprefile"
+	            dlprefile_dlbasename=$sharedlib_from_linklib_result
+	          fi
+	        fi
+	        $opt_dry_run || {
+	          if test -n "$dlprefile_dlbasename"; then
+	            eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"'
+	          else
+	            func_warning "Could not compute DLL name from $name"
+	            eval '$ECHO ": $name " >> "$nlist"'
+	          fi
+	          func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+	          eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe |
+	            $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'"
+	        }
+	      else # not an import lib
+	        $opt_dry_run || {
+	          eval '$ECHO ": $name " >> "$nlist"'
+	          func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+	          eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'"
+	        }
+	      fi
+	    ;;
+	    *)
+	      $opt_dry_run || {
+	        eval '$ECHO ": $name " >> "$nlist"'
+	        func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+	        eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'"
+	      }
+	    ;;
+          esac
+	done
+
+	$opt_dry_run || {
+	  # Make sure we have at least an empty file.
+	  test -f "$nlist" || : > "$nlist"
+
+	  if test -n "$exclude_expsyms"; then
+	    $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T
+	    $MV "$nlist"T "$nlist"
+	  fi
+
+	  # Try sorting and uniquifying the output.
+	  if $GREP -v "^: " < "$nlist" |
+	      if sort -k 3 </dev/null >/dev/null 2>&1; then
+		sort -k 3
+	      else
+		sort +2
+	      fi |
+	      uniq > "$nlist"S; then
+	    :
+	  else
+	    $GREP -v "^: " < "$nlist" > "$nlist"S
+	  fi
+
+	  if test -f "$nlist"S; then
+	    eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"'
+	  else
+	    echo '/* NONE */' >> "$output_objdir/$my_dlsyms"
+	  fi
+
+	  func_show_eval '$RM "${nlist}I"'
+	  if test -n "$global_symbol_to_import"; then
+	    eval "$global_symbol_to_import"' < "$nlist"S > "$nlist"I'
+	  fi
+
+	  echo >> "$output_objdir/$my_dlsyms" "\
+
+/* The mapping between symbol names and symbols.  */
+typedef struct {
+  const char *name;
+  void *address;
+} lt_dlsymlist;
+extern LT_DLSYM_CONST lt_dlsymlist
+lt_${my_prefix}_LTX_preloaded_symbols[];\
+"
+
+	  if test -s "$nlist"I; then
+	    echo >> "$output_objdir/$my_dlsyms" "\
+static void lt_syminit(void)
+{
+  LT_DLSYM_CONST lt_dlsymlist *symbol = lt_${my_prefix}_LTX_preloaded_symbols;
+  for (; symbol->name; ++symbol)
+    {"
+	    $SED 's/.*/      if (STREQ (symbol->name, \"&\")) symbol->address = (void *) \&&;/' < "$nlist"I >> "$output_objdir/$my_dlsyms"
+	    echo >> "$output_objdir/$my_dlsyms" "\
+    }
+}"
+	  fi
+	  echo >> "$output_objdir/$my_dlsyms" "\
+LT_DLSYM_CONST lt_dlsymlist
+lt_${my_prefix}_LTX_preloaded_symbols[] =
+{ {\"$my_originator\", (void *) 0},"
+
+	  if test -s "$nlist"I; then
+	    echo >> "$output_objdir/$my_dlsyms" "\
+  {\"@INIT@\", (void *) &lt_syminit},"
+	  fi
+
+	  case $need_lib_prefix in
+	  no)
+	    eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms"
+	    ;;
+	  *)
+	    eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms"
+	    ;;
+	  esac
+	  echo >> "$output_objdir/$my_dlsyms" "\
+  {0, (void *) 0}
+};
+
+/* This works around a problem in FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+  return lt_${my_prefix}_LTX_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif\
+"
+	} # !$opt_dry_run
+
+	pic_flag_for_symtable=
+	case "$compile_command " in
+	*" -static "*) ;;
+	*)
+	  case $host in
+	  # compiling the symbol table file with pic_flag works around
+	  # a FreeBSD bug that causes programs to crash when -lm is
+	  # linked before any other PIC object.  But we must not use
+	  # pic_flag when linking with -static.  The problem exists in
+	  # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
+	  *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
+	    pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;;
+	  *-*-hpux*)
+	    pic_flag_for_symtable=" $pic_flag"  ;;
+	  *)
+	    $my_pic_p && pic_flag_for_symtable=" $pic_flag"
+	    ;;
+	  esac
+	  ;;
+	esac
+	symtab_cflags=
+	for arg in $LTCFLAGS; do
+	  case $arg in
+	  -pie | -fpie | -fPIE) ;;
+	  *) func_append symtab_cflags " $arg" ;;
+	  esac
+	done
+
+	# Now compile the dynamic symbol file.
+	func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?'
+
+	# Clean up the generated files.
+	func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T" "${nlist}I"'
+
+	# Transform the symbol file into the correct name.
+	symfileobj=$output_objdir/${my_outputname}S.$objext
+	case $host in
+	*cygwin* | *mingw* | *cegcc* )
+	  if test -f "$output_objdir/$my_outputname.def"; then
+	    compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
+	    finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
+	  else
+	    compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+	    finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+	  fi
+	  ;;
+	*)
+	  compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+	  finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+	  ;;
+	esac
+	;;
+      *)
+	func_fatal_error "unknown suffix for '$my_dlsyms'"
+	;;
+      esac
+    else
+      # We keep going just in case the user didn't refer to
+      # lt_preloaded_symbols.  The linker will fail if global_symbol_pipe
+      # really was required.
+
+      # Nullify the symbol file.
+      compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"`
+      finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"`
+    fi
+}
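+# Illustrative call (mirroring how link mode invokes this helper for a
+# program; the exact arguments are an assumption based on the signature
+# documented above):
+#
+#   func_generate_dlsyms "$outputname" "@PROGRAM@" false
+#
+# This writes ${outputname}S.c, compiles it to ${outputname}S.$objext and
+# substitutes that object for the @SYMFILE@ placeholder in $compile_command
+# and $finalize_command.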
+
+# func_cygming_gnu_implib_p ARG
+# This predicate returns with zero status (TRUE) if
+# ARG is a GNU/binutils-style import library. Returns
+# with nonzero status (FALSE) otherwise.
+func_cygming_gnu_implib_p ()
+{
+  $debug_cmd
+
+  func_to_tool_file "$1" func_convert_file_msys_to_w32
+  func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'`
+  test -n "$func_cygming_gnu_implib_tmp"
+}
+
+# func_cygming_ms_implib_p ARG
+# This predicate returns with zero status (TRUE) if
+# ARG is an MS-style import library. Returns
+# with nonzero status (FALSE) otherwise.
+func_cygming_ms_implib_p ()
+{
+  $debug_cmd
+
+  func_to_tool_file "$1" func_convert_file_msys_to_w32
+  func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'`
+  test -n "$func_cygming_ms_implib_tmp"
+}
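+# Both predicates are plain shell conditionals returning success or failure;
+# e.g. (hypothetical file name):
+#
+#   if func_cygming_gnu_implib_p "libfoo.dll.a"; then
+#     echo "GNU binutils-style import library"
+#   elif func_cygming_ms_implib_p "libfoo.dll.a"; then
+#     echo "MS-style import library"
+#   fi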
+
+# func_win32_libid arg
+# return the library type of file 'arg'
+#
+# Need a lot of goo to handle *both* DLLs and import libs
+# Has to be a shell function in order to 'eat' the argument
+# that is supplied when $file_magic_command is called.
+# Despite the name, also deal with 64 bit binaries.
+func_win32_libid ()
+{
+  $debug_cmd
+
+  win32_libid_type=unknown
+  win32_fileres=`file -L $1 2>/dev/null`
+  case $win32_fileres in
+  *ar\ archive\ import\ library*) # definitely import
+    win32_libid_type="x86 archive import"
+    ;;
+  *ar\ archive*) # could be an import, or static
+    # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD.
+    if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null |
+       $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then
+      case $nm_interface in
+      "MS dumpbin")
+	if func_cygming_ms_implib_p "$1" ||
+	   func_cygming_gnu_implib_p "$1"
+	then
+	  win32_nmres=import
+	else
+	  win32_nmres=
+	fi
+	;;
+      *)
+	func_to_tool_file "$1" func_convert_file_msys_to_w32
+	win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" |
+	  $SED -n -e '
+	    1,100{
+		/ I /{
+		    s|.*|import|
+		    p
+		    q
+		}
+	    }'`
+	;;
+      esac
+      case $win32_nmres in
+      import*)  win32_libid_type="x86 archive import";;
+      *)        win32_libid_type="x86 archive static";;
+      esac
+    fi
+    ;;
+  *DLL*)
+    win32_libid_type="x86 DLL"
+    ;;
+  *executable*) # but shell scripts are "executable" too...
+    case $win32_fileres in
+    *MS\ Windows\ PE\ Intel*)
+      win32_libid_type="x86 DLL"
+      ;;
+    esac
+    ;;
+  esac
+  $ECHO "$win32_libid_type"
+}
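+# Example classifications (hypothetical file names; the exact result depends
+# on what file(1), objdump and nm report on the host):
+#
+#   func_win32_libid libfoo.dll.a   # typically "x86 archive import"
+#   func_win32_libid libbar.a       # typically "x86 archive static"
+#   func_win32_libid foo.dll        # typically "x86 DLL"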
+
+# func_cygming_dll_for_implib ARG
+#
+# Platform-specific function to extract the
+# name of the DLL associated with the specified
+# import library ARG.
+# Invoked by eval'ing the libtool variable
+#    $sharedlib_from_linklib_cmd
+# Result is available in the variable
+#    $sharedlib_from_linklib_result
+func_cygming_dll_for_implib ()
+{
+  $debug_cmd
+
+  sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"`
+}
+
+# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs
+#
+# This is the core of a fallback implementation of a
+# platform-specific function to extract the name of the
+# DLL associated with the specified import library LIBNAME.
+#
+# SECTION_NAME is either .idata$6 or .idata$7, depending
+# on the platform and compiler that created the implib.
+#
+# Echoes the name of the DLL associated with the
+# specified import library.
+func_cygming_dll_for_implib_fallback_core ()
+{
+  $debug_cmd
+
+  match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"`
+  $OBJDUMP -s --section "$1" "$2" 2>/dev/null |
+    $SED '/^Contents of section '"$match_literal"':/{
+      # Place marker at beginning of archive member dllname section
+      s/.*/====MARK====/
+      p
+      d
+    }
+    # These lines can sometimes be longer than 43 characters, but
+    # are always uninteresting
+    /:[	 ]*file format pe[i]\{,1\}-/d
+    /^In archive [^:]*:/d
+    # Ensure marker is printed
+    /^====MARK====/p
+    # Remove all lines with less than 43 characters
+    /^.\{43\}/!d
+    # From remaining lines, remove first 43 characters
+    s/^.\{43\}//' |
+    $SED -n '
+      # Join marker and all lines until next marker into a single line
+      /^====MARK====/ b para
+      H
+      $ b para
+      b
+      :para
+      x
+      s/\n//g
+      # Remove the marker
+      s/^====MARK====//
+      # Remove trailing dots and whitespace
+      s/[\. \t]*$//
+      # Print
+      /./p' |
+    # we now have a list, one entry per line, of the stringified
+    # contents of the appropriate section of all members of the
+    # archive that possess that section. Heuristic: eliminate
+    # all those that have a first or second character that is
+    # a '.' (that is, objdump's representation of an unprintable
+    # character.) This should work for all archives with less than
+    # 0x302f exports -- but will fail for DLLs whose name actually
+    # begins with a literal '.' or a single character followed by
+    # a '.'.
+    #
+    # Of those that remain, print the first one.
+    $SED -e '/^\./d;/^.\./d;q'
+}
+
+# func_cygming_dll_for_implib_fallback ARG
+# Platform-specific function to extract the
+# name of the DLL associated with the specified
+# import library ARG.
+#
+# This fallback implementation is for use when $DLLTOOL
+# does not support the --identify-strict option.
+# Invoked by eval'ing the libtool variable
+#    $sharedlib_from_linklib_cmd
+# Result is available in the variable
+#    $sharedlib_from_linklib_result
+func_cygming_dll_for_implib_fallback ()
+{
+  $debug_cmd
+
+  if func_cygming_gnu_implib_p "$1"; then
+    # binutils import library
+    sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"`
+  elif func_cygming_ms_implib_p "$1"; then
+    # ms-generated import library
+    sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"`
+  else
+    # unknown
+    sharedlib_from_linklib_result=
+  fi
+}
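+# Both implementations above publish their answer in
+# $sharedlib_from_linklib_result rather than on stdout; a sketch of how a
+# caller uses them (the file name is hypothetical):
+#
+#   sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback
+#   $sharedlib_from_linklib_cmd "libfoo.dll.a"
+#   test -n "$sharedlib_from_linklib_result" && echo "$sharedlib_from_linklib_result"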
+
+
+# func_extract_an_archive dir oldlib
+func_extract_an_archive ()
+{
+    $debug_cmd
+
+    f_ex_an_ar_dir=$1; shift
+    f_ex_an_ar_oldlib=$1
+    if test yes = "$lock_old_archive_extraction"; then
+      lockfile=$f_ex_an_ar_oldlib.lock
+      until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do
+	func_echo "Waiting for $lockfile to be removed"
+	sleep 2
+      done
+    fi
+    func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \
+		   'stat=$?; rm -f "$lockfile"; exit $stat'
+    if test yes = "$lock_old_archive_extraction"; then
+      $opt_dry_run || rm -f "$lockfile"
+    fi
+    if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then
+     :
+    else
+      func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib"
+    fi
+}
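+# A minimal sketch of a direct call (directory and archive are examples):
+#
+#   func_extract_an_archive "$my_xdir" "/path/to/libfoo.a"
+#
+# With lock_old_archive_extraction=yes the function serializes concurrent
+# extractions via a "$oldlib.lock" hard link to $progpath, as above.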
+
+
+# func_extract_archives gentop oldlib ...
+func_extract_archives ()
+{
+    $debug_cmd
+
+    my_gentop=$1; shift
+    my_oldlibs=${1+"$@"}
+    my_oldobjs=
+    my_xlib=
+    my_xabs=
+    my_xdir=
+
+    for my_xlib in $my_oldlibs; do
+      # Extract the objects.
+      case $my_xlib in
+	[\\/]* | [A-Za-z]:[\\/]*) my_xabs=$my_xlib ;;
+	*) my_xabs=`pwd`"/$my_xlib" ;;
+      esac
+      func_basename "$my_xlib"
+      my_xlib=$func_basename_result
+      my_xlib_u=$my_xlib
+      while :; do
+        case " $extracted_archives " in
+	*" $my_xlib_u "*)
+	  func_arith $extracted_serial + 1
+	  extracted_serial=$func_arith_result
+	  my_xlib_u=lt$extracted_serial-$my_xlib ;;
+	*) break ;;
+	esac
+      done
+      extracted_archives="$extracted_archives $my_xlib_u"
+      my_xdir=$my_gentop/$my_xlib_u
+
+      func_mkdir_p "$my_xdir"
+
+      case $host in
+      *-darwin*)
+	func_verbose "Extracting $my_xabs"
+	# Do not bother doing anything if just a dry run
+	$opt_dry_run || {
+	  darwin_orig_dir=`pwd`
+	  cd $my_xdir || exit $?
+	  darwin_archive=$my_xabs
+	  darwin_curdir=`pwd`
+	  func_basename "$darwin_archive"
+	  darwin_base_archive=$func_basename_result
+	  darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true`
+	  if test -n "$darwin_arches"; then
+	    darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'`
+	    darwin_arch=
+	    func_verbose "$darwin_base_archive has multiple architectures $darwin_arches"
+	    for darwin_arch in  $darwin_arches; do
+	      func_mkdir_p "unfat-$$/$darwin_base_archive-$darwin_arch"
+	      $LIPO -thin $darwin_arch -output "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" "$darwin_archive"
+	      cd "unfat-$$/$darwin_base_archive-$darwin_arch"
+	      func_extract_an_archive "`pwd`" "$darwin_base_archive"
+	      cd "$darwin_curdir"
+	      $RM "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive"
+	    done # $darwin_arches
+            ## Now we have a set of thin per-architecture objects; recombine them into fat objects.
+	    darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$sed_basename" | sort -u`
+	    darwin_file=
+	    darwin_files=
+	    for darwin_file in $darwin_filelist; do
+	      darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP`
+	      $LIPO -create -output "$darwin_file" $darwin_files
+	    done # $darwin_filelist
+	    $RM -rf unfat-$$
+	    cd "$darwin_orig_dir"
+	  else
+	    cd $darwin_orig_dir
+	    func_extract_an_archive "$my_xdir" "$my_xabs"
+	  fi # $darwin_arches
+	} # !$opt_dry_run
+	;;
+      *)
+        func_extract_an_archive "$my_xdir" "$my_xabs"
+	;;
+      esac
+      my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP`
+    done
+
+    func_extract_archives_result=$my_oldobjs
+}
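+# Sketch of typical use (the gentop directory and archive names are
+# illustrative): link mode calls something like
+#
+#   func_extract_archives "$output_objdir/$outputname.gentop" libfoo.a libbar.a
+#   oldobjs="$oldobjs $func_extract_archives_result"
+#
+# On Darwin, fat archives are first split per architecture with $LIPO and the
+# thin objects are re-merged, as implemented above.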
+
+
+# func_emit_wrapper [arg=no]
+#
+# Emit a libtool wrapper script on stdout.
+# Don't directly open a file because we may want to
+# incorporate the script contents within a cygwin/mingw
+# wrapper executable.  Must ONLY be called from within
+# func_mode_link because it depends on a number of variables
+# set therein.
+#
+# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR
+# variable will take.  If 'yes', then the emitted script
+# will assume that the directory where it is stored is
+# the $objdir directory.  This is a cygwin/mingw-specific
+# behavior.
+func_emit_wrapper ()
+{
+	func_emit_wrapper_arg1=${1-no}
+
+	$ECHO "\
+#! $SHELL
+
+# $output - temporary wrapper script for $objdir/$outputname
+# Generated by $PROGRAM (GNU $PACKAGE) $VERSION
+#
+# The $output program cannot be directly executed until all the libtool
+# libraries that it depends on are installed.
+#
+# This wrapper script should never be moved out of the build directory.
+# If it is, it will not operate correctly.
+
+# Sed substitution that helps us do robust quoting.  It backslashifies
+# metacharacters that are still active within double-quoted strings.
+sed_quote_subst='$sed_quote_subst'
+
+# Be Bourne compatible
+if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then
+  emulate sh
+  NULLCMD=:
+  # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which
+  # is contrary to our usage.  Disable this feature.
+  alias -g '\${1+\"\$@\"}'='\"\$@\"'
+  setopt NO_GLOB_SUBST
+else
+  case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac
+fi
+BIN_SH=xpg4; export BIN_SH # for Tru64
+DUALCASE=1; export DUALCASE # for MKS sh
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+relink_command=\"$relink_command\"
+
+# This environment variable determines our operation mode.
+if test \"\$libtool_install_magic\" = \"$magic\"; then
+  # install mode needs the following variables:
+  generated_by_libtool_version='$macro_version'
+  notinst_deplibs='$notinst_deplibs'
+else
+  # When we are sourced in execute mode, \$file and \$ECHO are already set.
+  if test \"\$libtool_execute_magic\" != \"$magic\"; then
+    file=\"\$0\""
+
+    qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"`
+    $ECHO "\
+
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+  eval 'cat <<_LTECHO_EOF
+\$1
+_LTECHO_EOF'
+}
+    ECHO=\"$qECHO\"
+  fi
+
+# Very basic option parsing. These options are (a) specific to
+# the libtool wrapper, (b) are identical between the wrapper
+# /script/ and the wrapper /executable/ that is used only on
+# windows platforms, and (c) all begin with the string "--lt-"
+# (application programs are unlikely to have options that match
+# this pattern).
+#
+# There are only two supported options: --lt-debug and
+# --lt-dump-script. There is, deliberately, no --lt-help.
+#
+# The first argument to this parsing function should be the
+# script's $0 value, followed by "$@".
+lt_option_debug=
+func_parse_lt_options ()
+{
+  lt_script_arg0=\$0
+  shift
+  for lt_opt
+  do
+    case \"\$lt_opt\" in
+    --lt-debug) lt_option_debug=1 ;;
+    --lt-dump-script)
+        lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\`
+        test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=.
+        lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\`
+        cat \"\$lt_dump_D/\$lt_dump_F\"
+        exit 0
+      ;;
+    --lt-*)
+        \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2
+        exit 1
+      ;;
+    esac
+  done
+
+  # Print the debug banner immediately:
+  if test -n \"\$lt_option_debug\"; then
+    echo \"$outputname:$output:\$LINENO: libtool wrapper (GNU $PACKAGE) $VERSION\" 1>&2
+  fi
+}
+
+# Used when --lt-debug. Prints its arguments to stdout
+# (redirection is the responsibility of the caller)
+func_lt_dump_args ()
+{
+  lt_dump_args_N=1;
+  for lt_arg
+  do
+    \$ECHO \"$outputname:$output:\$LINENO: newargv[\$lt_dump_args_N]: \$lt_arg\"
+    lt_dump_args_N=\`expr \$lt_dump_args_N + 1\`
+  done
+}
+
+# Core function for launching the target application
+func_exec_program_core ()
+{
+"
+  case $host in
+  # Backslashes separate directories on plain windows
+  *-*-mingw | *-*-os2* | *-cegcc*)
+    $ECHO "\
+      if test -n \"\$lt_option_debug\"; then
+        \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir\\\\\$program\" 1>&2
+        func_lt_dump_args \${1+\"\$@\"} 1>&2
+      fi
+      exec \"\$progdir\\\\\$program\" \${1+\"\$@\"}
+"
+    ;;
+
+  *)
+    $ECHO "\
+      if test -n \"\$lt_option_debug\"; then
+        \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir/\$program\" 1>&2
+        func_lt_dump_args \${1+\"\$@\"} 1>&2
+      fi
+      exec \"\$progdir/\$program\" \${1+\"\$@\"}
+"
+    ;;
+  esac
+  $ECHO "\
+      \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2
+      exit 1
+}
+
+# A function to encapsulate launching the target application
+# Strips options in the --lt-* namespace from \$@ and
+# launches target application with the remaining arguments.
+func_exec_program ()
+{
+  case \" \$* \" in
+  *\\ --lt-*)
+    for lt_wr_arg
+    do
+      case \$lt_wr_arg in
+      --lt-*) ;;
+      *) set x \"\$@\" \"\$lt_wr_arg\"; shift;;
+      esac
+      shift
+    done ;;
+  esac
+  func_exec_program_core \${1+\"\$@\"}
+}
+
+  # Parse options
+  func_parse_lt_options \"\$0\" \${1+\"\$@\"}
+
+  # Find the directory that this script lives in.
+  thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\`
+  test \"x\$thisdir\" = \"x\$file\" && thisdir=.
+
+  # Follow symbolic links until we get to the real thisdir.
+  file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\`
+  while test -n \"\$file\"; do
+    destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\`
+
+    # If there was a directory component, then change thisdir.
+    if test \"x\$destdir\" != \"x\$file\"; then
+      case \"\$destdir\" in
+      [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;;
+      *) thisdir=\"\$thisdir/\$destdir\" ;;
+      esac
+    fi
+
+    file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\`
+    file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\`
+  done
+
+  # Usually 'no', except on cygwin/mingw when embedded into
+  # the cwrapper.
+  WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1
+  if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then
+    # special case for '.'
+    if test \"\$thisdir\" = \".\"; then
+      thisdir=\`pwd\`
+    fi
+    # remove .libs from thisdir
+    case \"\$thisdir\" in
+    *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;;
+    $objdir )   thisdir=. ;;
+    esac
+  fi
+
+  # Try to get the absolute directory name.
+  absdir=\`cd \"\$thisdir\" && pwd\`
+  test -n \"\$absdir\" && thisdir=\"\$absdir\"
+"
+
+	if test yes = "$fast_install"; then
+	  $ECHO "\
+  program=lt-'$outputname'$exeext
+  progdir=\"\$thisdir/$objdir\"
+
+  if test ! -f \"\$progdir/\$program\" ||
+     { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | $SED 1q\`; \\
+       test \"X\$file\" != \"X\$progdir/\$program\"; }; then
+
+    file=\"\$\$-\$program\"
+
+    if test ! -d \"\$progdir\"; then
+      $MKDIR \"\$progdir\"
+    else
+      $RM \"\$progdir/\$file\"
+    fi"
+
+	  $ECHO "\
+
+    # relink executable if necessary
+    if test -n \"\$relink_command\"; then
+      if relink_command_output=\`eval \$relink_command 2>&1\`; then :
+      else
+	\$ECHO \"\$relink_command_output\" >&2
+	$RM \"\$progdir/\$file\"
+	exit 1
+      fi
+    fi
+
+    $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null ||
+    { $RM \"\$progdir/\$program\";
+      $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; }
+    $RM \"\$progdir/\$file\"
+  fi"
+	else
+	  $ECHO "\
+  program='$outputname'
+  progdir=\"\$thisdir/$objdir\"
+"
+	fi
+
+	$ECHO "\
+
+  if test -f \"\$progdir/\$program\"; then"
+
+	# Fix the DLL searchpath if we need to.  Do this before prepending
+	# to shlibpath, because on Windows, both are PATH and uninstalled
+	# libraries must come first.
+	if test -n "$dllsearchpath"; then
+	  $ECHO "\
+    # Add the dll search path components to the executable PATH
+    PATH=$dllsearchpath:\$PATH
+"
+	fi
+
+	# Export our shlibpath_var if we have one.
+	if test yes = "$shlibpath_overrides_runpath" && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
+	  $ECHO "\
+    # Add our own library path to $shlibpath_var
+    $shlibpath_var=\"$temp_rpath\$$shlibpath_var\"
+
+    # Some systems cannot cope with colon-terminated $shlibpath_var
+    # The second colon is a workaround for a bug in BeOS R4 sed
+    $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\`
+
+    export $shlibpath_var
+"
+	fi
+
+	$ECHO "\
+    if test \"\$libtool_execute_magic\" != \"$magic\"; then
+      # Run the actual program with our arguments.
+      func_exec_program \${1+\"\$@\"}
+    fi
+  else
+    # The program doesn't exist.
+    \$ECHO \"\$0: error: '\$progdir/\$program' does not exist\" 1>&2
+    \$ECHO \"This script is just a wrapper for \$program.\" 1>&2
+    \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2
+    exit 1
+  fi
+fi\
+"
+}
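+# The emitted wrapper script recognizes two libtool-specific options; for a
+# hypothetical program "hello" built in the current directory:
+#
+#   ./hello --lt-debug        # print wrapper diagnostics to stderr, then run
+#   ./hello --lt-dump-script  # print the wrapper script itself and exit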
+
+
+# func_emit_cwrapperexe_src
+# emit the source code for a wrapper executable on stdout
+# Must ONLY be called from within func_mode_link because
+# it depends on a number of variables set therein.
+func_emit_cwrapperexe_src ()
+{
+	cat <<EOF
+
+/* $cwrappersource - temporary wrapper executable for $objdir/$outputname
+   Generated by $PROGRAM (GNU $PACKAGE) $VERSION
+
+   The $output program cannot be directly executed until all the libtool
+   libraries that it depends on are installed.
+
+   This wrapper executable should never be moved out of the build directory.
+   If it is, it will not operate correctly.
+*/
+EOF
+	    cat <<"EOF"
+#ifdef _MSC_VER
+# define _CRT_SECURE_NO_DEPRECATE 1
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef _MSC_VER
+# include <direct.h>
+# include <process.h>
+# include <io.h>
+#else
+# include <unistd.h>
+# include <stdint.h>
+# ifdef __CYGWIN__
+#  include <io.h>
+# endif
+#endif
+#include <malloc.h>
+#include <stdarg.h>
+#include <assert.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+
+#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0)
+
+/* declarations of non-ANSI functions */
+#if defined __MINGW32__
+# ifdef __STRICT_ANSI__
+int _putenv (const char *);
+# endif
+#elif defined __CYGWIN__
+# ifdef __STRICT_ANSI__
+char *realpath (const char *, char *);
+int putenv (char *);
+int setenv (const char *, const char *, int);
+# endif
+/* #elif defined other_platform || defined ... */
+#endif
+
+/* portability defines, excluding path handling macros */
+#if defined _MSC_VER
+# define setmode _setmode
+# define stat    _stat
+# define chmod   _chmod
+# define getcwd  _getcwd
+# define putenv  _putenv
+# define S_IXUSR _S_IEXEC
+#elif defined __MINGW32__
+# define setmode _setmode
+# define stat    _stat
+# define chmod   _chmod
+# define getcwd  _getcwd
+# define putenv  _putenv
+#elif defined __CYGWIN__
+# define HAVE_SETENV
+# define FOPEN_WB "wb"
+/* #elif defined other platforms ... */
+#endif
+
+#if defined PATH_MAX
+# define LT_PATHMAX PATH_MAX
+#elif defined MAXPATHLEN
+# define LT_PATHMAX MAXPATHLEN
+#else
+# define LT_PATHMAX 1024
+#endif
+
+#ifndef S_IXOTH
+# define S_IXOTH 0
+#endif
+#ifndef S_IXGRP
+# define S_IXGRP 0
+#endif
+
+/* path handling portability macros */
+#ifndef DIR_SEPARATOR
+# define DIR_SEPARATOR '/'
+# define PATH_SEPARATOR ':'
+#endif
+
+#if defined _WIN32 || defined __MSDOS__ || defined __DJGPP__ || \
+  defined __OS2__
+# define HAVE_DOS_BASED_FILE_SYSTEM
+# define FOPEN_WB "wb"
+# ifndef DIR_SEPARATOR_2
+#  define DIR_SEPARATOR_2 '\\'
+# endif
+# ifndef PATH_SEPARATOR_2
+#  define PATH_SEPARATOR_2 ';'
+# endif
+#endif
+
+#ifndef DIR_SEPARATOR_2
+# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR)
+#else /* DIR_SEPARATOR_2 */
+# define IS_DIR_SEPARATOR(ch) \
+	(((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2))
+#endif /* DIR_SEPARATOR_2 */
+
+#ifndef PATH_SEPARATOR_2
+# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR)
+#else /* PATH_SEPARATOR_2 */
+# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2)
+#endif /* PATH_SEPARATOR_2 */
+
+#ifndef FOPEN_WB
+# define FOPEN_WB "w"
+#endif
+#ifndef _O_BINARY
+# define _O_BINARY 0
+#endif
+
+#define XMALLOC(type, num)      ((type *) xmalloc ((num) * sizeof(type)))
+#define XFREE(stale) do { \
+  if (stale) { free (stale); stale = 0; } \
+} while (0)
+
+#if defined LT_DEBUGWRAPPER
+static int lt_debug = 1;
+#else
+static int lt_debug = 0;
+#endif
+
+const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */
+
+void *xmalloc (size_t num);
+char *xstrdup (const char *string);
+const char *base_name (const char *name);
+char *find_executable (const char *wrapper);
+char *chase_symlinks (const char *pathspec);
+int make_executable (const char *path);
+int check_executable (const char *path);
+char *strendzap (char *str, const char *pat);
+void lt_debugprintf (const char *file, int line, const char *fmt, ...);
+void lt_fatal (const char *file, int line, const char *message, ...);
+static const char *nonnull (const char *s);
+static const char *nonempty (const char *s);
+void lt_setenv (const char *name, const char *value);
+char *lt_extend_str (const char *orig_value, const char *add, int to_end);
+void lt_update_exe_path (const char *name, const char *value);
+void lt_update_lib_path (const char *name, const char *value);
+char **prepare_spawn (char **argv);
+void lt_dump_script (FILE *f);
+EOF
+
+	    cat <<EOF
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5)
+# define externally_visible volatile
+#else
+# define externally_visible __attribute__((externally_visible)) volatile
+#endif
+externally_visible const char * MAGIC_EXE = "$magic_exe";
+const char * LIB_PATH_VARNAME = "$shlibpath_var";
+EOF
+
+	    if test yes = "$shlibpath_overrides_runpath" && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
+              func_to_host_path "$temp_rpath"
+	      cat <<EOF
+const char * LIB_PATH_VALUE   = "$func_to_host_path_result";
+EOF
+	    else
+	      cat <<"EOF"
+const char * LIB_PATH_VALUE   = "";
+EOF
+	    fi
+
+	    if test -n "$dllsearchpath"; then
+              func_to_host_path "$dllsearchpath:"
+	      cat <<EOF
+const char * EXE_PATH_VARNAME = "PATH";
+const char * EXE_PATH_VALUE   = "$func_to_host_path_result";
+EOF
+	    else
+	      cat <<"EOF"
+const char * EXE_PATH_VARNAME = "";
+const char * EXE_PATH_VALUE   = "";
+EOF
+	    fi
+
+	    if test yes = "$fast_install"; then
+	      cat <<EOF
+const char * TARGET_PROGRAM_NAME = "lt-$outputname"; /* hopefully, no .exe */
+EOF
+	    else
+	      cat <<EOF
+const char * TARGET_PROGRAM_NAME = "$outputname"; /* hopefully, no .exe */
+EOF
+	    fi
+
+
+	    cat <<"EOF"
+
+#define LTWRAPPER_OPTION_PREFIX         "--lt-"
+
+static const char *ltwrapper_option_prefix = LTWRAPPER_OPTION_PREFIX;
+static const char *dumpscript_opt       = LTWRAPPER_OPTION_PREFIX "dump-script";
+static const char *debug_opt            = LTWRAPPER_OPTION_PREFIX "debug";
+
+int
+main (int argc, char *argv[])
+{
+  char **newargz;
+  int  newargc;
+  char *tmp_pathspec;
+  char *actual_cwrapper_path;
+  char *actual_cwrapper_name;
+  char *target_name;
+  char *lt_argv_zero;
+  int rval = 127;
+
+  int i;
+
+  program_name = (char *) xstrdup (base_name (argv[0]));
+  newargz = XMALLOC (char *, (size_t) argc + 1);
+
+  /* Very simple arg parsing; don't want to rely on getopt.
+   * Also, copy all non-cwrapper options to newargz, except
+   * argv[0], which is handled differently.
+   */
+  newargc=0;
+  for (i = 1; i < argc; i++)
+    {
+      if (STREQ (argv[i], dumpscript_opt))
+	{
+EOF
+	    case $host in
+	      *mingw* | *cygwin* )
+		# make stdout use "unix" line endings
+		echo "          setmode(1,_O_BINARY);"
+		;;
+	      esac
+
+	    cat <<"EOF"
+	  lt_dump_script (stdout);
+	  return 0;
+	}
+      if (STREQ (argv[i], debug_opt))
+	{
+          lt_debug = 1;
+          continue;
+	}
+      if (STREQ (argv[i], ltwrapper_option_prefix))
+        {
+          /* however, if there is an option in the LTWRAPPER_OPTION_PREFIX
+             namespace, but it is not one of the ones we know about and
+             have already dealt with, above (including dump-script), then
+             report an error. Otherwise, targets might begin to believe
+             they are allowed to use options in the LTWRAPPER_OPTION_PREFIX
+             namespace. The first time any user complains about this, we'll
+             need to make LTWRAPPER_OPTION_PREFIX a configure-time option
+             or a configure.ac-settable value.
+           */
+          lt_fatal (__FILE__, __LINE__,
+		    "unrecognized %s option: '%s'",
+                    ltwrapper_option_prefix, argv[i]);
+        }
+      /* otherwise ... */
+      newargz[++newargc] = xstrdup (argv[i]);
+    }
+  newargz[++newargc] = NULL;
+
+EOF
+	    cat <<EOF
+  /* The GNU banner must be the first non-error debug message */
+  lt_debugprintf (__FILE__, __LINE__, "libtool wrapper (GNU $PACKAGE) $VERSION\n");
+EOF
+	    cat <<"EOF"
+  lt_debugprintf (__FILE__, __LINE__, "(main) argv[0]: %s\n", argv[0]);
+  lt_debugprintf (__FILE__, __LINE__, "(main) program_name: %s\n", program_name);
+
+  tmp_pathspec = find_executable (argv[0]);
+  if (tmp_pathspec == NULL)
+    lt_fatal (__FILE__, __LINE__, "couldn't find %s", argv[0]);
+  lt_debugprintf (__FILE__, __LINE__,
+                  "(main) found exe (before symlink chase) at: %s\n",
+		  tmp_pathspec);
+
+  actual_cwrapper_path = chase_symlinks (tmp_pathspec);
+  lt_debugprintf (__FILE__, __LINE__,
+                  "(main) found exe (after symlink chase) at: %s\n",
+		  actual_cwrapper_path);
+  XFREE (tmp_pathspec);
+
+  actual_cwrapper_name = xstrdup (base_name (actual_cwrapper_path));
+  strendzap (actual_cwrapper_path, actual_cwrapper_name);
+
+  /* wrapper name transforms */
+  strendzap (actual_cwrapper_name, ".exe");
+  tmp_pathspec = lt_extend_str (actual_cwrapper_name, ".exe", 1);
+  XFREE (actual_cwrapper_name);
+  actual_cwrapper_name = tmp_pathspec;
+  tmp_pathspec = 0;
+
+  /* target_name transforms -- use actual target program name; might have lt- prefix */
+  target_name = xstrdup (base_name (TARGET_PROGRAM_NAME));
+  strendzap (target_name, ".exe");
+  tmp_pathspec = lt_extend_str (target_name, ".exe", 1);
+  XFREE (target_name);
+  target_name = tmp_pathspec;
+  tmp_pathspec = 0;
+
+  lt_debugprintf (__FILE__, __LINE__,
+		  "(main) libtool target name: %s\n",
+		  target_name);
+EOF
+
+	    cat <<EOF
+  newargz[0] =
+    XMALLOC (char, (strlen (actual_cwrapper_path) +
+		    strlen ("$objdir") + 1 + strlen (actual_cwrapper_name) + 1));
+  strcpy (newargz[0], actual_cwrapper_path);
+  strcat (newargz[0], "$objdir");
+  strcat (newargz[0], "/");
+EOF
+
+	    cat <<"EOF"
+  /* stop here, and copy so we don't have to do this twice */
+  tmp_pathspec = xstrdup (newargz[0]);
+
+  /* do NOT want the lt- prefix here, so use actual_cwrapper_name */
+  strcat (newargz[0], actual_cwrapper_name);
+
+  /* DO want the lt- prefix here if it exists, so use target_name */
+  lt_argv_zero = lt_extend_str (tmp_pathspec, target_name, 1);
+  XFREE (tmp_pathspec);
+  tmp_pathspec = NULL;
+EOF
+
+	    case $host_os in
+	      mingw*)
+	    cat <<"EOF"
+  {
+    char* p;
+    while ((p = strchr (newargz[0], '\\')) != NULL)
+      {
+	*p = '/';
+      }
+    while ((p = strchr (lt_argv_zero, '\\')) != NULL)
+      {
+	*p = '/';
+      }
+  }
+EOF
+	    ;;
+	    esac
+
+	    cat <<"EOF"
+  XFREE (target_name);
+  XFREE (actual_cwrapper_path);
+  XFREE (actual_cwrapper_name);
+
+  lt_setenv ("BIN_SH", "xpg4"); /* for Tru64 */
+  lt_setenv ("DUALCASE", "1");  /* for MKS sh */
+  /* Update the DLL searchpath.  EXE_PATH_VALUE ($dllsearchpath) must
+     be prepended before (that is, appear after) LIB_PATH_VALUE ($temp_rpath)
+     because on Windows, both *_VARNAMEs are PATH but uninstalled
+     libraries must come first. */
+  lt_update_exe_path (EXE_PATH_VARNAME, EXE_PATH_VALUE);
+  lt_update_lib_path (LIB_PATH_VARNAME, LIB_PATH_VALUE);
+
+  lt_debugprintf (__FILE__, __LINE__, "(main) lt_argv_zero: %s\n",
+		  nonnull (lt_argv_zero));
+  for (i = 0; i < newargc; i++)
+    {
+      lt_debugprintf (__FILE__, __LINE__, "(main) newargz[%d]: %s\n",
+		      i, nonnull (newargz[i]));
+    }
+
+EOF
+
+	    case $host_os in
+	      mingw*)
+		cat <<"EOF"
+  /* execv doesn't actually work on mingw as expected on unix */
+  newargz = prepare_spawn (newargz);
+  rval = (int) _spawnv (_P_WAIT, lt_argv_zero, (const char * const *) newargz);
+  if (rval == -1)
+    {
+      /* failed to start process */
+      lt_debugprintf (__FILE__, __LINE__,
+		      "(main) failed to launch target \"%s\": %s\n",
+		      lt_argv_zero, nonnull (strerror (errno)));
+      return 127;
+    }
+  return rval;
+EOF
+		;;
+	      *)
+		cat <<"EOF"
+  execv (lt_argv_zero, newargz);
+  return rval; /* =127, but avoids unused variable warning */
+EOF
+		;;
+	    esac
+
+	    cat <<"EOF"
+}
+
+void *
+xmalloc (size_t num)
+{
+  void *p = (void *) malloc (num);
+  if (!p)
+    lt_fatal (__FILE__, __LINE__, "memory exhausted");
+
+  return p;
+}
+
+char *
+xstrdup (const char *string)
+{
+  return string ? strcpy ((char *) xmalloc (strlen (string) + 1),
+			  string) : NULL;
+}
+
+const char *
+base_name (const char *name)
+{
+  const char *base;
+
+#if defined HAVE_DOS_BASED_FILE_SYSTEM
+  /* Skip over the disk name in MSDOS pathnames. */
+  if (isalpha ((unsigned char) name[0]) && name[1] == ':')
+    name += 2;
+#endif
+
+  for (base = name; *name; name++)
+    if (IS_DIR_SEPARATOR (*name))
+      base = name + 1;
+  return base;
+}
+
+int
+check_executable (const char *path)
+{
+  struct stat st;
+
+  lt_debugprintf (__FILE__, __LINE__, "(check_executable): %s\n",
+                  nonempty (path));
+  if ((!path) || (!*path))
+    return 0;
+
+  if ((stat (path, &st) >= 0)
+      && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH)))
+    return 1;
+  else
+    return 0;
+}
+
+int
+make_executable (const char *path)
+{
+  int rval = 0;
+  struct stat st;
+
+  lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n",
+                  nonempty (path));
+  if ((!path) || (!*path))
+    return 0;
+
+  if (stat (path, &st) >= 0)
+    {
+      rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR);
+    }
+  return rval;
+}
+
+/* Searches for the full path of the wrapper.  Returns
+   newly allocated full path name if found, NULL otherwise
+   Does not chase symlinks, even on platforms that support them.
+*/
+char *
+find_executable (const char *wrapper)
+{
+  int has_slash = 0;
+  const char *p;
+  const char *p_next;
+  /* static buffer for getcwd */
+  char tmp[LT_PATHMAX + 1];
+  size_t tmp_len;
+  char *concat_name;
+
+  lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n",
+                  nonempty (wrapper));
+
+  if ((wrapper == NULL) || (*wrapper == '\0'))
+    return NULL;
+
+  /* Absolute path? */
+#if defined HAVE_DOS_BASED_FILE_SYSTEM
+  if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':')
+    {
+      concat_name = xstrdup (wrapper);
+      if (check_executable (concat_name))
+	return concat_name;
+      XFREE (concat_name);
+    }
+  else
+    {
+#endif
+      if (IS_DIR_SEPARATOR (wrapper[0]))
+	{
+	  concat_name = xstrdup (wrapper);
+	  if (check_executable (concat_name))
+	    return concat_name;
+	  XFREE (concat_name);
+	}
+#if defined HAVE_DOS_BASED_FILE_SYSTEM
+    }
+#endif
+
+  for (p = wrapper; *p; p++)
+    if (*p == '/')
+      {
+	has_slash = 1;
+	break;
+      }
+  if (!has_slash)
+    {
+      /* no slashes; search PATH */
+      const char *path = getenv ("PATH");
+      if (path != NULL)
+	{
+	  for (p = path; *p; p = p_next)
+	    {
+	      const char *q;
+	      size_t p_len;
+	      for (q = p; *q; q++)
+		if (IS_PATH_SEPARATOR (*q))
+		  break;
+	      p_len = (size_t) (q - p);
+	      p_next = (*q == '\0' ? q : q + 1);
+	      if (p_len == 0)
+		{
+		  /* empty path: current directory */
+		  if (getcwd (tmp, LT_PATHMAX) == NULL)
+		    lt_fatal (__FILE__, __LINE__, "getcwd failed: %s",
+                              nonnull (strerror (errno)));
+		  tmp_len = strlen (tmp);
+		  concat_name =
+		    XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1);
+		  memcpy (concat_name, tmp, tmp_len);
+		  concat_name[tmp_len] = '/';
+		  strcpy (concat_name + tmp_len + 1, wrapper);
+		}
+	      else
+		{
+		  concat_name =
+		    XMALLOC (char, p_len + 1 + strlen (wrapper) + 1);
+		  memcpy (concat_name, p, p_len);
+		  concat_name[p_len] = '/';
+		  strcpy (concat_name + p_len + 1, wrapper);
+		}
+	      if (check_executable (concat_name))
+		return concat_name;
+	      XFREE (concat_name);
+	    }
+	}
+      /* not found in PATH; assume curdir */
+    }
+  /* Relative path | not found in path: prepend cwd */
+  if (getcwd (tmp, LT_PATHMAX) == NULL)
+    lt_fatal (__FILE__, __LINE__, "getcwd failed: %s",
+              nonnull (strerror (errno)));
+  tmp_len = strlen (tmp);
+  concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1);
+  memcpy (concat_name, tmp, tmp_len);
+  concat_name[tmp_len] = '/';
+  strcpy (concat_name + tmp_len + 1, wrapper);
+
+  if (check_executable (concat_name))
+    return concat_name;
+  XFREE (concat_name);
+  return NULL;
+}
+
+char *
+chase_symlinks (const char *pathspec)
+{
+#ifndef S_ISLNK
+  return xstrdup (pathspec);
+#else
+  char buf[LT_PATHMAX];
+  struct stat s;
+  char *tmp_pathspec = xstrdup (pathspec);
+  char *p;
+  int has_symlinks = 0;
+  while (strlen (tmp_pathspec) && !has_symlinks)
+    {
+      lt_debugprintf (__FILE__, __LINE__,
+		      "checking path component for symlinks: %s\n",
+		      tmp_pathspec);
+      if (lstat (tmp_pathspec, &s) == 0)
+	{
+	  if (S_ISLNK (s.st_mode) != 0)
+	    {
+	      has_symlinks = 1;
+	      break;
+	    }
+
+	  /* search backwards for last DIR_SEPARATOR */
+	  p = tmp_pathspec + strlen (tmp_pathspec) - 1;
+	  while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p)))
+	    p--;
+	  if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p)))
+	    {
+	      /* no more DIR_SEPARATORS left */
+	      break;
+	    }
+	  *p = '\0';
+	}
+      else
+	{
+	  lt_fatal (__FILE__, __LINE__,
+		    "error accessing file \"%s\": %s",
+		    tmp_pathspec, nonnull (strerror (errno)));
+	}
+    }
+  XFREE (tmp_pathspec);
+
+  if (!has_symlinks)
+    {
+      return xstrdup (pathspec);
+    }
+
+  tmp_pathspec = realpath (pathspec, buf);
+  if (tmp_pathspec == 0)
+    {
+      lt_fatal (__FILE__, __LINE__,
+		"could not follow symlinks for %s", pathspec);
+    }
+  return xstrdup (tmp_pathspec);
+#endif
+}
+
+char *
+strendzap (char *str, const char *pat)
+{
+  size_t len, patlen;
+
+  assert (str != NULL);
+  assert (pat != NULL);
+
+  len = strlen (str);
+  patlen = strlen (pat);
+
+  if (patlen <= len)
+    {
+      str += len - patlen;
+      if (STREQ (str, pat))
+	*str = '\0';
+    }
+  return str;
+}
+
+void
+lt_debugprintf (const char *file, int line, const char *fmt, ...)
+{
+  va_list args;
+  if (lt_debug)
+    {
+      (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line);
+      va_start (args, fmt);
+      (void) vfprintf (stderr, fmt, args);
+      va_end (args);
+    }
+}
+
+static void
+lt_error_core (int exit_status, const char *file,
+	       int line, const char *mode,
+	       const char *message, va_list ap)
+{
+  fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode);
+  vfprintf (stderr, message, ap);
+  fprintf (stderr, ".\n");
+
+  if (exit_status >= 0)
+    exit (exit_status);
+}
+
+void
+lt_fatal (const char *file, int line, const char *message, ...)
+{
+  va_list ap;
+  va_start (ap, message);
+  lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap);
+  va_end (ap);
+}
+
+static const char *
+nonnull (const char *s)
+{
+  return s ? s : "(null)";
+}
+
+static const char *
+nonempty (const char *s)
+{
+  return (s && !*s) ? "(empty)" : nonnull (s);
+}
+
+void
+lt_setenv (const char *name, const char *value)
+{
+  lt_debugprintf (__FILE__, __LINE__,
+		  "(lt_setenv) setting '%s' to '%s'\n",
+                  nonnull (name), nonnull (value));
+  {
+#ifdef HAVE_SETENV
+    /* always make a copy, for consistency with !HAVE_SETENV */
+    char *str = xstrdup (value);
+    setenv (name, str, 1);
+#else
+    size_t len = strlen (name) + 1 + strlen (value) + 1;
+    char *str = XMALLOC (char, len);
+    sprintf (str, "%s=%s", name, value);
+    if (putenv (str) != EXIT_SUCCESS)
+      {
+        XFREE (str);
+      }
+#endif
+  }
+}
+
+char *
+lt_extend_str (const char *orig_value, const char *add, int to_end)
+{
+  char *new_value;
+  if (orig_value && *orig_value)
+    {
+      size_t orig_value_len = strlen (orig_value);
+      size_t add_len = strlen (add);
+      new_value = XMALLOC (char, add_len + orig_value_len + 1);
+      if (to_end)
+        {
+          strcpy (new_value, orig_value);
+          strcpy (new_value + orig_value_len, add);
+        }
+      else
+        {
+          strcpy (new_value, add);
+          strcpy (new_value + add_len, orig_value);
+        }
+    }
+  else
+    {
+      new_value = xstrdup (add);
+    }
+  return new_value;
+}
+
+void
+lt_update_exe_path (const char *name, const char *value)
+{
+  lt_debugprintf (__FILE__, __LINE__,
+		  "(lt_update_exe_path) modifying '%s' by prepending '%s'\n",
+                  nonnull (name), nonnull (value));
+
+  if (name && *name && value && *value)
+    {
+      char *new_value = lt_extend_str (getenv (name), value, 0);
+      /* some systems can't cope with a ':'-terminated path #' */
+      size_t len = strlen (new_value);
+      while ((len > 0) && IS_PATH_SEPARATOR (new_value[len-1]))
+        {
+          new_value[--len] = '\0';
+        }
+      lt_setenv (name, new_value);
+      XFREE (new_value);
+    }
+}
+
+void
+lt_update_lib_path (const char *name, const char *value)
+{
+  lt_debugprintf (__FILE__, __LINE__,
+		  "(lt_update_lib_path) modifying '%s' by prepending '%s'\n",
+                  nonnull (name), nonnull (value));
+
+  if (name && *name && value && *value)
+    {
+      char *new_value = lt_extend_str (getenv (name), value, 0);
+      lt_setenv (name, new_value);
+      XFREE (new_value);
+    }
+}
+
+EOF
+	    case $host_os in
+	      mingw*)
+		cat <<"EOF"
+
+/* Prepares an argument vector before calling spawn().
+   Note that spawn() does not by itself call the command interpreter
+     (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") :
+      ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+         GetVersionEx(&v);
+         v.dwPlatformId == VER_PLATFORM_WIN32_NT;
+      }) ? "cmd.exe" : "command.com").
+   Instead it simply concatenates the arguments, separated by ' ', and calls
+   CreateProcess().  We must quote the arguments since Win32 CreateProcess()
+   interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a
+   special way:
+   - Space and tab are interpreted as delimiters. They are not treated as
+     delimiters if they are surrounded by double quotes: "...".
+   - Unescaped double quotes are removed from the input. Their only effect is
+     that within double quotes, space and tab are treated like normal
+     characters.
+   - Backslashes not followed by double quotes are not special.
+   - But 2*n+1 backslashes followed by a double quote become
+     n backslashes followed by a double quote (n >= 0):
+       \" -> "
+       \\\" -> \"
+       \\\\\" -> \\"
+ */
+#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037"
+#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037"
+char **
+prepare_spawn (char **argv)
+{
+  size_t argc;
+  char **new_argv;
+  size_t i;
+
+  /* Count number of arguments.  */
+  for (argc = 0; argv[argc] != NULL; argc++)
+    ;
+
+  /* Allocate new argument vector.  */
+  new_argv = XMALLOC (char *, argc + 1);
+
+  /* Put quoted arguments into the new argument vector.  */
+  for (i = 0; i < argc; i++)
+    {
+      const char *string = argv[i];
+
+      if (string[0] == '\0')
+	new_argv[i] = xstrdup ("\"\"");
+      else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL)
+	{
+	  int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL);
+	  size_t length;
+	  unsigned int backslashes;
+	  const char *s;
+	  char *quoted_string;
+	  char *p;
+
+	  length = 0;
+	  backslashes = 0;
+	  if (quote_around)
+	    length++;
+	  for (s = string; *s != '\0'; s++)
+	    {
+	      char c = *s;
+	      if (c == '"')
+		length += backslashes + 1;
+	      length++;
+	      if (c == '\\')
+		backslashes++;
+	      else
+		backslashes = 0;
+	    }
+	  if (quote_around)
+	    length += backslashes + 1;
+
+	  quoted_string = XMALLOC (char, length + 1);
+
+	  p = quoted_string;
+	  backslashes = 0;
+	  if (quote_around)
+	    *p++ = '"';
+	  for (s = string; *s != '\0'; s++)
+	    {
+	      char c = *s;
+	      if (c == '"')
+		{
+		  unsigned int j;
+		  for (j = backslashes + 1; j > 0; j--)
+		    *p++ = '\\';
+		}
+	      *p++ = c;
+	      if (c == '\\')
+		backslashes++;
+	      else
+		backslashes = 0;
+	    }
+	  if (quote_around)
+	    {
+	      unsigned int j;
+	      for (j = backslashes; j > 0; j--)
+		*p++ = '\\';
+	      *p++ = '"';
+	    }
+	  *p = '\0';
+
+	  new_argv[i] = quoted_string;
+	}
+      else
+	new_argv[i] = (char *) string;
+    }
+  new_argv[argc] = NULL;
+
+  return new_argv;
+}
+EOF
+		;;
+	    esac
+
+            cat <<"EOF"
+void lt_dump_script (FILE* f)
+{
+EOF
+	    func_emit_wrapper yes |
+	      $SED -n -e '
+s/^\(.\{79\}\)\(..*\)/\1\
+\2/
+h
+s/\([\\"]\)/\\\1/g
+s/$/\\n/
+s/\([^\n]*\).*/  fputs ("\1", f);/p
+g
+D'
+            cat <<"EOF"
+}
+EOF
+}
+# end: func_emit_cwrapperexe_src
+
+# func_win32_import_lib_p ARG
+# True if ARG is an import lib, as indicated by $file_magic_cmd
+func_win32_import_lib_p ()
+{
+    $debug_cmd
+
+    case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in
+    *import*) : ;;
+    *) false ;;
+    esac
+}
+
+# func_suncc_cstd_abi
+# !!ONLY CALL THIS FOR SUN CC AFTER $compile_command IS FULLY EXPANDED!!
+# Several compiler flags select an ABI that is incompatible with the
+# Cstd library. Avoid specifying it if any are in CXXFLAGS.
+func_suncc_cstd_abi ()
+{
+    $debug_cmd
+
+    case " $compile_command " in
+    *" -compat=g "*|*\ -std=c++[0-9][0-9]\ *|*" -library=stdcxx4 "*|*" -library=stlport4 "*)
+      suncc_use_cstd_abi=no
+      ;;
+    *)
+      suncc_use_cstd_abi=yes
+      ;;
+    esac
+}
+
+# func_mode_link arg...
+func_mode_link ()
+{
+    $debug_cmd
+
+    case $host in
+    *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*)
+      # It is impossible to link a dll without this setting, and
+      # we shouldn't force the makefile maintainer to figure out
+      # what system we are compiling for in order to pass an extra
+      # flag for every libtool invocation.
+      # allow_undefined=no
+
+      # FIXME: Unfortunately, there are problems with the above when trying
+      # to make a dll that has undefined symbols, in which case not
+      # even a static library is built.  For now, we need to specify
+      # -no-undefined on the libtool link line when we can be certain
+      # that all symbols are satisfied, otherwise we get a static library.
+      allow_undefined=yes
+      ;;
+    *)
+      allow_undefined=yes
+      ;;
+    esac
+    libtool_args=$nonopt
+    base_compile="$nonopt $@"
+    compile_command=$nonopt
+    finalize_command=$nonopt
+
+    compile_rpath=
+    finalize_rpath=
+    compile_shlibpath=
+    finalize_shlibpath=
+    convenience=
+    old_convenience=
+    deplibs=
+    old_deplibs=
+    compiler_flags=
+    linker_flags=
+    dllsearchpath=
+    lib_search_path=`pwd`
+    inst_prefix_dir=
+    new_inherited_linker_flags=
+
+    avoid_version=no
+    bindir=
+    dlfiles=
+    dlprefiles=
+    dlself=no
+    export_dynamic=no
+    export_symbols=
+    export_symbols_regex=
+    generated=
+    libobjs=
+    ltlibs=
+    module=no
+    no_install=no
+    objs=
+    os2dllname=
+    non_pic_objects=
+    precious_files_regex=
+    prefer_static_libs=no
+    preload=false
+    prev=
+    prevarg=
+    release=
+    rpath=
+    xrpath=
+    perm_rpath=
+    temp_rpath=
+    thread_safe=no
+    vinfo=
+    vinfo_number=no
+    weak_libs=
+    single_module=$wl-single_module
+    func_infer_tag $base_compile
+
+    # We need to know -static, to get the right output filenames.
+    for arg
+    do
+      case $arg in
+      -shared)
+	test yes != "$build_libtool_libs" \
+	  && func_fatal_configuration "cannot build a shared library"
+	build_old_libs=no
+	break
+	;;
+      -all-static | -static | -static-libtool-libs)
+	case $arg in
+	-all-static)
+	  if test yes = "$build_libtool_libs" && test -z "$link_static_flag"; then
+	    func_warning "complete static linking is impossible in this configuration"
+	  fi
+	  if test -n "$link_static_flag"; then
+	    dlopen_self=$dlopen_self_static
+	  fi
+	  prefer_static_libs=yes
+	  ;;
+	-static)
+	  if test -z "$pic_flag" && test -n "$link_static_flag"; then
+	    dlopen_self=$dlopen_self_static
+	  fi
+	  prefer_static_libs=built
+	  ;;
+	-static-libtool-libs)
+	  if test -z "$pic_flag" && test -n "$link_static_flag"; then
+	    dlopen_self=$dlopen_self_static
+	  fi
+	  prefer_static_libs=yes
+	  ;;
+	esac
+	build_libtool_libs=no
+	build_old_libs=yes
+	break
+	;;
+      esac
+    done
+
+    # See if our shared archives depend on static archives.
+    test -n "$old_archive_from_new_cmds" && build_old_libs=yes
+
+    # Go through the arguments, transforming them on the way.
+    while test "$#" -gt 0; do
+      arg=$1
+      shift
+      func_quote_for_eval "$arg"
+      qarg=$func_quote_for_eval_unquoted_result
+      func_append libtool_args " $func_quote_for_eval_result"
+
+      # If the previous option needs an argument, assign it.
+      if test -n "$prev"; then
+	case $prev in
+	output)
+	  func_append compile_command " @OUTPUT@"
+	  func_append finalize_command " @OUTPUT@"
+	  ;;
+	esac
+
+	case $prev in
+	bindir)
+	  bindir=$arg
+	  prev=
+	  continue
+	  ;;
+	dlfiles|dlprefiles)
+	  $preload || {
+	    # Add the symbol object into the linking commands.
+	    func_append compile_command " @SYMFILE@"
+	    func_append finalize_command " @SYMFILE@"
+	    preload=:
+	  }
+	  case $arg in
+	  *.la | *.lo) ;;  # We handle these cases below.
+	  force)
+	    if test no = "$dlself"; then
+	      dlself=needless
+	      export_dynamic=yes
+	    fi
+	    prev=
+	    continue
+	    ;;
+	  self)
+	    if test dlprefiles = "$prev"; then
+	      dlself=yes
+	    elif test dlfiles = "$prev" && test yes != "$dlopen_self"; then
+	      dlself=yes
+	    else
+	      dlself=needless
+	      export_dynamic=yes
+	    fi
+	    prev=
+	    continue
+	    ;;
+	  *)
+	    if test dlfiles = "$prev"; then
+	      func_append dlfiles " $arg"
+	    else
+	      func_append dlprefiles " $arg"
+	    fi
+	    prev=
+	    continue
+	    ;;
+	  esac
+	  ;;
+	expsyms)
+	  export_symbols=$arg
+	  test -f "$arg" \
+	    || func_fatal_error "symbol file '$arg' does not exist"
+	  prev=
+	  continue
+	  ;;
+	expsyms_regex)
+	  export_symbols_regex=$arg
+	  prev=
+	  continue
+	  ;;
+	framework)
+	  case $host in
+	    *-*-darwin*)
+	      case "$deplibs " in
+		*" $qarg.ltframework "*) ;;
+		*) func_append deplibs " $qarg.ltframework" # this is fixed later
+		   ;;
+	      esac
+	      ;;
+	  esac
+	  prev=
+	  continue
+	  ;;
+	inst_prefix)
+	  inst_prefix_dir=$arg
+	  prev=
+	  continue
+	  ;;
+	mllvm)
+	  # Clang does not use LLVM to link, so we can simply discard any
+	  # '-mllvm $arg' options when doing the link step.
+	  prev=
+	  continue
+	  ;;
+	objectlist)
+	  if test -f "$arg"; then
+	    save_arg=$arg
+	    moreargs=
+	    for fil in `cat "$save_arg"`
+	    do
+#	      func_append moreargs " $fil"
+	      arg=$fil
+	      # A libtool-controlled object.
+
+	      # Check to see that this really is a libtool object.
+	      if func_lalib_unsafe_p "$arg"; then
+		pic_object=
+		non_pic_object=
+
+		# Read the .lo file
+		func_source "$arg"
+
+		if test -z "$pic_object" ||
+		   test -z "$non_pic_object" ||
+		   test none = "$pic_object" &&
+		   test none = "$non_pic_object"; then
+		  func_fatal_error "cannot find name of object for '$arg'"
+		fi
+
+		# Extract subdirectory from the argument.
+		func_dirname "$arg" "/" ""
+		xdir=$func_dirname_result
+
+		if test none != "$pic_object"; then
+		  # Prepend the subdirectory the object is found in.
+		  pic_object=$xdir$pic_object
+
+		  if test dlfiles = "$prev"; then
+		    if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then
+		      func_append dlfiles " $pic_object"
+		      prev=
+		      continue
+		    else
+		      # If libtool objects are unsupported, then we need to preload.
+		      prev=dlprefiles
+		    fi
+		  fi
+
+		  # CHECK ME:  I think I busted this.  -Ossama
+		  if test dlprefiles = "$prev"; then
+		    # Preload the old-style object.
+		    func_append dlprefiles " $pic_object"
+		    prev=
+		  fi
+
+		  # A PIC object.
+		  func_append libobjs " $pic_object"
+		  arg=$pic_object
+		fi
+
+		# Non-PIC object.
+		if test none != "$non_pic_object"; then
+		  # Prepend the subdirectory the object is found in.
+		  non_pic_object=$xdir$non_pic_object
+
+		  # A standard non-PIC object
+		  func_append non_pic_objects " $non_pic_object"
+		  if test -z "$pic_object" || test none = "$pic_object"; then
+		    arg=$non_pic_object
+		  fi
+		else
+		  # If the PIC object exists, use it instead.
+		  # $xdir was prepended to $pic_object above.
+		  non_pic_object=$pic_object
+		  func_append non_pic_objects " $non_pic_object"
+		fi
+	      else
+		# Only an error if not doing a dry-run.
+		if $opt_dry_run; then
+		  # Extract subdirectory from the argument.
+		  func_dirname "$arg" "/" ""
+		  xdir=$func_dirname_result
+
+		  func_lo2o "$arg"
+		  pic_object=$xdir$objdir/$func_lo2o_result
+		  non_pic_object=$xdir$func_lo2o_result
+		  func_append libobjs " $pic_object"
+		  func_append non_pic_objects " $non_pic_object"
+	        else
+		  func_fatal_error "'$arg' is not a valid libtool object"
+		fi
+	      fi
+	    done
+	  else
+	    func_fatal_error "link input file '$arg' does not exist"
+	  fi
+	  arg=$save_arg
+	  prev=
+	  continue
+	  ;;
+	os2dllname)
+	  os2dllname=$arg
+	  prev=
+	  continue
+	  ;;
+	precious_regex)
+	  precious_files_regex=$arg
+	  prev=
+	  continue
+	  ;;
+	release)
+	  release=-$arg
+	  prev=
+	  continue
+	  ;;
+	rpath | xrpath)
+	  # We need an absolute path.
+	  case $arg in
+	  [\\/]* | [A-Za-z]:[\\/]*) ;;
+	  *)
+	    func_fatal_error "only absolute run-paths are allowed"
+	    ;;
+	  esac
+	  if test rpath = "$prev"; then
+	    case "$rpath " in
+	    *" $arg "*) ;;
+	    *) func_append rpath " $arg" ;;
+	    esac
+	  else
+	    case "$xrpath " in
+	    *" $arg "*) ;;
+	    *) func_append xrpath " $arg" ;;
+	    esac
+	  fi
+	  prev=
+	  continue
+	  ;;
+	shrext)
+	  shrext_cmds=$arg
+	  prev=
+	  continue
+	  ;;
+	weak)
+	  func_append weak_libs " $arg"
+	  prev=
+	  continue
+	  ;;
+	xcclinker)
+	  func_append linker_flags " $qarg"
+	  func_append compiler_flags " $qarg"
+	  prev=
+	  func_append compile_command " $qarg"
+	  func_append finalize_command " $qarg"
+	  continue
+	  ;;
+	xcompiler)
+	  func_append compiler_flags " $qarg"
+	  prev=
+	  func_append compile_command " $qarg"
+	  func_append finalize_command " $qarg"
+	  continue
+	  ;;
+	xlinker)
+	  func_append linker_flags " $qarg"
+	  func_append compiler_flags " $wl$qarg"
+	  prev=
+	  func_append compile_command " $wl$qarg"
+	  func_append finalize_command " $wl$qarg"
+	  continue
+	  ;;
+	*)
+	  eval "$prev=\"\$arg\""
+	  prev=
+	  continue
+	  ;;
+	esac
+      fi # test -n "$prev"
+
+      prevarg=$arg
+
+      case $arg in
+      -all-static)
+	if test -n "$link_static_flag"; then
+	  # See comment for -static flag below, for more details.
+	  func_append compile_command " $link_static_flag"
+	  func_append finalize_command " $link_static_flag"
+	fi
+	continue
+	;;
+
+      -allow-undefined)
+	# FIXME: remove this flag sometime in the future.
+	func_fatal_error "'-allow-undefined' must not be used because it is the default"
+	;;
+
+      -avoid-version)
+	avoid_version=yes
+	continue
+	;;
+
+      -bindir)
+	prev=bindir
+	continue
+	;;
+
+      -dlopen)
+	prev=dlfiles
+	continue
+	;;
+
+      -dlpreopen)
+	prev=dlprefiles
+	continue
+	;;
+
+      -export-dynamic)
+	export_dynamic=yes
+	continue
+	;;
+
+      -export-symbols | -export-symbols-regex)
+	if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
+	  func_fatal_error "more than one -export-symbols argument is not allowed"
+	fi
+	if test X-export-symbols = "X$arg"; then
+	  prev=expsyms
+	else
+	  prev=expsyms_regex
+	fi
+	continue
+	;;
+
+      -framework)
+	prev=framework
+	continue
+	;;
+
+      -inst-prefix-dir)
+	prev=inst_prefix
+	continue
+	;;
+
+      # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:*
+      # so, if we see these flags, be careful not to treat them like -L
+      -L[A-Z][A-Z]*:*)
+	case $with_gcc/$host in
+	no/*-*-irix* | /*-*-irix*)
+	  func_append compile_command " $arg"
+	  func_append finalize_command " $arg"
+	  ;;
+	esac
+	continue
+	;;
+
+      -L*)
+	func_stripname "-L" '' "$arg"
+	if test -z "$func_stripname_result"; then
+	  if test "$#" -gt 0; then
+	    func_fatal_error "no space allowed between '-L' and '$1'"
+	  else
+	    func_fatal_error "need path for '-L' option"
+	  fi
+	fi
+	func_resolve_sysroot "$func_stripname_result"
+	dir=$func_resolve_sysroot_result
+	# We need an absolute path.
+	case $dir in
+	[\\/]* | [A-Za-z]:[\\/]*) ;;
+	*)
+	  absdir=`cd "$dir" && pwd`
+	  test -z "$absdir" && \
+	    func_fatal_error "cannot determine absolute directory name of '$dir'"
+	  dir=$absdir
+	  ;;
+	esac
+	case "$deplibs " in
+	*" -L$dir "* | *" $arg "*)
+	  # Will only happen for absolute or sysroot arguments
+	  ;;
+	*)
+	  # Preserve sysroot, but never include relative directories
+	  case $dir in
+	    [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;;
+	    *) func_append deplibs " -L$dir" ;;
+	  esac
+	  func_append lib_search_path " $dir"
+	  ;;
+	esac
+	case $host in
+	*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*)
+	  testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'`
+	  case :$dllsearchpath: in
+	  *":$dir:"*) ;;
+	  ::) dllsearchpath=$dir;;
+	  *) func_append dllsearchpath ":$dir";;
+	  esac
+	  case :$dllsearchpath: in
+	  *":$testbindir:"*) ;;
+	  ::) dllsearchpath=$testbindir;;
+	  *) func_append dllsearchpath ":$testbindir";;
+	  esac
+	  ;;
+	esac
+	continue
+	;;
+
+      -l*)
+	if test X-lc = "X$arg" || test X-lm = "X$arg"; then
+	  case $host in
+	  *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*)
+	    # These systems don't actually have a C or math library (as such)
+	    continue
+	    ;;
+	  *-*-os2*)
+	    # These systems don't actually have a C library (as such)
+	    test X-lc = "X$arg" && continue
+	    ;;
+	  *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*)
+	    # Do not include libc due to us having libc/libc_r.
+	    test X-lc = "X$arg" && continue
+	    ;;
+	  *-*-rhapsody* | *-*-darwin1.[012])
+	    # Rhapsody C and math libraries are in the System framework
+	    func_append deplibs " System.ltframework"
+	    continue
+	    ;;
+	  *-*-sco3.2v5* | *-*-sco5v6*)
+	    # Causes problems with __ctype
+	    test X-lc = "X$arg" && continue
+	    ;;
+	  *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*)
+	    # Compiler inserts libc in the correct place for threads to work
+	    test X-lc = "X$arg" && continue
+	    ;;
+	  esac
+	elif test X-lc_r = "X$arg"; then
+	 case $host in
+	 *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*)
+	   # Do not include libc_r directly, use -pthread flag.
+	   continue
+	   ;;
+	 esac
+	fi
+	func_append deplibs " $arg"
+	continue
+	;;
+
+      -mllvm)
+	prev=mllvm
+	continue
+	;;
+
+      -module)
+	module=yes
+	continue
+	;;
+
+      # Tru64 UNIX uses -model [arg] to determine the layout of C++
+      # classes, name mangling, and exception handling.
+      # Darwin uses the -arch flag to determine output architecture.
+      -model|-arch|-isysroot|--sysroot)
+	func_append compiler_flags " $arg"
+	func_append compile_command " $arg"
+	func_append finalize_command " $arg"
+	prev=xcompiler
+	continue
+	;;
+
+      -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \
+      |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*)
+	func_append compiler_flags " $arg"
+	func_append compile_command " $arg"
+	func_append finalize_command " $arg"
+	case "$new_inherited_linker_flags " in
+	    *" $arg "*) ;;
+	    * ) func_append new_inherited_linker_flags " $arg" ;;
+	esac
+	continue
+	;;
+
+      -multi_module)
+	single_module=$wl-multi_module
+	continue
+	;;
+
+      -no-fast-install)
+	fast_install=no
+	continue
+	;;
+
+      -no-install)
+	case $host in
+	*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*)
+	  # The PATH hackery in wrapper scripts is required on Windows
+	  # and Darwin in order for the loader to find any dlls it needs.
+	  func_warning "'-no-install' is ignored for $host"
+	  func_warning "assuming '-no-fast-install' instead"
+	  fast_install=no
+	  ;;
+	*) no_install=yes ;;
+	esac
+	continue
+	;;
+
+      -no-undefined)
+	allow_undefined=no
+	continue
+	;;
+
+      -objectlist)
+	prev=objectlist
+	continue
+	;;
+
+      -os2dllname)
+	prev=os2dllname
+	continue
+	;;
+
+      -o) prev=output ;;
+
+      -precious-files-regex)
+	prev=precious_regex
+	continue
+	;;
+
+      -release)
+	prev=release
+	continue
+	;;
+
+      -rpath)
+	prev=rpath
+	continue
+	;;
+
+      -R)
+	prev=xrpath
+	continue
+	;;
+
+      -R*)
+	func_stripname '-R' '' "$arg"
+	dir=$func_stripname_result
+	# We need an absolute path.
+	case $dir in
+	[\\/]* | [A-Za-z]:[\\/]*) ;;
+	=*)
+	  func_stripname '=' '' "$dir"
+	  dir=$lt_sysroot$func_stripname_result
+	  ;;
+	*)
+	  func_fatal_error "only absolute run-paths are allowed"
+	  ;;
+	esac
+	case "$xrpath " in
+	*" $dir "*) ;;
+	*) func_append xrpath " $dir" ;;
+	esac
+	continue
+	;;
+
+      -shared)
+	# The effects of -shared are defined in a previous loop.
+	continue
+	;;
+
+      -shrext)
+	prev=shrext
+	continue
+	;;
+
+      -static | -static-libtool-libs)
+	# The effects of -static are defined in a previous loop.
+	# We used to do the same as -all-static on platforms that
+	# didn't have a PIC flag, but the assumption that the effects
+	# would be equivalent was wrong.  It would break on at least
+	# Digital Unix and AIX.
+	continue
+	;;
+
+      -thread-safe)
+	thread_safe=yes
+	continue
+	;;
+
+      -version-info)
+	prev=vinfo
+	continue
+	;;
+
+      -version-number)
+	prev=vinfo
+	vinfo_number=yes
+	continue
+	;;
+
+      -weak)
+        prev=weak
+	continue
+	;;
+
+      -Wc,*)
+	func_stripname '-Wc,' '' "$arg"
+	args=$func_stripname_result
+	arg=
+	save_ifs=$IFS; IFS=,
+	for flag in $args; do
+	  IFS=$save_ifs
+          func_quote_for_eval "$flag"
+	  func_append arg " $func_quote_for_eval_result"
+	  func_append compiler_flags " $func_quote_for_eval_result"
+	done
+	IFS=$save_ifs
+	func_stripname ' ' '' "$arg"
+	arg=$func_stripname_result
+	;;
+
+      -Wl,*)
+	func_stripname '-Wl,' '' "$arg"
+	args=$func_stripname_result
+	arg=
+	save_ifs=$IFS; IFS=,
+	for flag in $args; do
+	  IFS=$save_ifs
+          func_quote_for_eval "$flag"
+	  func_append arg " $wl$func_quote_for_eval_result"
+	  func_append compiler_flags " $wl$func_quote_for_eval_result"
+	  func_append linker_flags " $func_quote_for_eval_result"
+	done
+	IFS=$save_ifs
+	func_stripname ' ' '' "$arg"
+	arg=$func_stripname_result
+	;;
+
+      -Xcompiler)
+	prev=xcompiler
+	continue
+	;;
+
+      -Xlinker)
+	prev=xlinker
+	continue
+	;;
+
+      -XCClinker)
+	prev=xcclinker
+	continue
+	;;
+
+      # -msg_* for osf cc
+      -msg_*)
+	func_quote_for_eval "$arg"
+	arg=$func_quote_for_eval_result
+	;;
+
+      # Flags to be passed through unchanged, with rationale:
+      # -64, -mips[0-9]      enable 64-bit mode for the SGI compiler
+      # -r[0-9][0-9]*        specify processor for the SGI compiler
+      # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler
+      # +DA*, +DD*           enable 64-bit mode for the HP compiler
+      # -q*                  compiler args for the IBM compiler
+      # -m*, -t[45]*, -txscale* architecture-specific flags for GCC
+      # -F/path              path to uninstalled frameworks, gcc on darwin
+      # -p, -pg, --coverage, -fprofile-*  profiling flags for GCC
+      # -fstack-protector*   stack protector flags for GCC
+      # @file                GCC response files
+      # -tp=*                Portland pgcc target processor selection
+      # --sysroot=*          for sysroot support
+      # -O*, -g*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization
+      # -stdlib=*            select c++ std lib with clang
+      -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \
+      -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \
+      -O*|-g*|-flto*|-fwhopr*|-fuse-linker-plugin|-fstack-protector*|-stdlib=*)
+        func_quote_for_eval "$arg"
+	arg=$func_quote_for_eval_result
+        func_append compile_command " $arg"
+        func_append finalize_command " $arg"
+        func_append compiler_flags " $arg"
+        continue
+        ;;
+
+      -Z*)
+        if test os2 = "`expr $host : '.*\(os2\)'`"; then
+          # OS/2 uses -Zxxx to specify OS/2-specific options
+	  compiler_flags="$compiler_flags $arg"
+	  func_append compile_command " $arg"
+	  func_append finalize_command " $arg"
+	  case $arg in
+	  -Zlinker | -Zstack)
+	    prev=xcompiler
+	    ;;
+	  esac
+	  continue
+        else
+	  # Otherwise treat like 'Some other compiler flag' below
+	  func_quote_for_eval "$arg"
+	  arg=$func_quote_for_eval_result
+        fi
+	;;
+
+      # Some other compiler flag.
+      -* | +*)
+        func_quote_for_eval "$arg"
+	arg=$func_quote_for_eval_result
+	;;
+
+      *.$objext)
+	# A standard object.
+	func_append objs " $arg"
+	;;
+
+      *.lo)
+	# A libtool-controlled object.
+
+	# Check to see that this really is a libtool object.
+	if func_lalib_unsafe_p "$arg"; then
+	  pic_object=
+	  non_pic_object=
+
+	  # Read the .lo file
+	  func_source "$arg"
+
+	  if test -z "$pic_object" ||
+	     test -z "$non_pic_object" ||
+	     test none = "$pic_object" &&
+	     test none = "$non_pic_object"; then
+	    func_fatal_error "cannot find name of object for '$arg'"
+	  fi
+
+	  # Extract subdirectory from the argument.
+	  func_dirname "$arg" "/" ""
+	  xdir=$func_dirname_result
+
+	  test none = "$pic_object" || {
+	    # Prepend the subdirectory the object is found in.
+	    pic_object=$xdir$pic_object
+
+	    if test dlfiles = "$prev"; then
+	      if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then
+		func_append dlfiles " $pic_object"
+		prev=
+		continue
+	      else
+		# If libtool objects are unsupported, then we need to preload.
+		prev=dlprefiles
+	      fi
+	    fi
+
+	    # CHECK ME:  I think I busted this.  -Ossama
+	    if test dlprefiles = "$prev"; then
+	      # Preload the old-style object.
+	      func_append dlprefiles " $pic_object"
+	      prev=
+	    fi
+
+	    # A PIC object.
+	    func_append libobjs " $pic_object"
+	    arg=$pic_object
+	  }
+
+	  # Non-PIC object.
+	  if test none != "$non_pic_object"; then
+	    # Prepend the subdirectory the object is found in.
+	    non_pic_object=$xdir$non_pic_object
+
+	    # A standard non-PIC object
+	    func_append non_pic_objects " $non_pic_object"
+	    if test -z "$pic_object" || test none = "$pic_object"; then
+	      arg=$non_pic_object
+	    fi
+	  else
+	    # If the PIC object exists, use it instead.
+	    # $xdir was prepended to $pic_object above.
+	    non_pic_object=$pic_object
+	    func_append non_pic_objects " $non_pic_object"
+	  fi
+	else
+	  # Only an error if not doing a dry-run.
+	  if $opt_dry_run; then
+	    # Extract subdirectory from the argument.
+	    func_dirname "$arg" "/" ""
+	    xdir=$func_dirname_result
+
+	    func_lo2o "$arg"
+	    pic_object=$xdir$objdir/$func_lo2o_result
+	    non_pic_object=$xdir$func_lo2o_result
+	    func_append libobjs " $pic_object"
+	    func_append non_pic_objects " $non_pic_object"
+	  else
+	    func_fatal_error "'$arg' is not a valid libtool object"
+	  fi
+	fi
+	;;
+
+      *.$libext)
+	# An archive.
+	func_append deplibs " $arg"
+	func_append old_deplibs " $arg"
+	continue
+	;;
+
+      *.la)
+	# A libtool-controlled library.
+
+	func_resolve_sysroot "$arg"
+	if test dlfiles = "$prev"; then
+	  # This library was specified with -dlopen.
+	  func_append dlfiles " $func_resolve_sysroot_result"
+	  prev=
+	elif test dlprefiles = "$prev"; then
+	  # The library was specified with -dlpreopen.
+	  func_append dlprefiles " $func_resolve_sysroot_result"
+	  prev=
+	else
+	  func_append deplibs " $func_resolve_sysroot_result"
+	fi
+	continue
+	;;
+
+      # Some other compiler argument.
+      *)
+	# Unknown arguments in both finalize_command and compile_command need
+	# to be aesthetically quoted because they are evaled later.
+	func_quote_for_eval "$arg"
+	arg=$func_quote_for_eval_result
+	;;
+      esac # arg
+
+      # Now actually substitute the argument into the commands.
+      if test -n "$arg"; then
+	func_append compile_command " $arg"
+	func_append finalize_command " $arg"
+      fi
+    done # argument parsing loop
+
+    test -n "$prev" && \
+      func_fatal_help "the '$prevarg' option requires an argument"
+
+    if test yes = "$export_dynamic" && test -n "$export_dynamic_flag_spec"; then
+      eval arg=\"$export_dynamic_flag_spec\"
+      func_append compile_command " $arg"
+      func_append finalize_command " $arg"
+    fi
+
+    oldlibs=
+    # calculate the name of the file, without its directory
+    func_basename "$output"
+    outputname=$func_basename_result
+    libobjs_save=$libobjs
+
+    if test -n "$shlibpath_var"; then
+      # get the directories listed in $shlibpath_var
+      eval shlib_search_path=\`\$ECHO \"\$$shlibpath_var\" \| \$SED \'s/:/ /g\'\`
+    else
+      shlib_search_path=
+    fi
+    eval sys_lib_search_path=\"$sys_lib_search_path_spec\"
+    eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"
+
+    # Definition is injected by LT_CONFIG during libtool generation.
+    func_munge_path_list sys_lib_dlsearch_path "$LT_SYS_LIBRARY_PATH"
+
+    func_dirname "$output" "/" ""
+    output_objdir=$func_dirname_result$objdir
+    func_to_tool_file "$output_objdir/"
+    tool_output_objdir=$func_to_tool_file_result
+    # Create the object directory.
+    func_mkdir_p "$output_objdir"
+
+    # Determine the type of output
+    case $output in
+    "")
+      func_fatal_help "you must specify an output file"
+      ;;
+    *.$libext) linkmode=oldlib ;;
+    *.lo | *.$objext) linkmode=obj ;;
+    *.la) linkmode=lib ;;
+    *) linkmode=prog ;; # Anything else should be a program.
+    esac
+
+    specialdeplibs=
+
+    libs=
+    # Find all interdependent deplibs by searching for libraries
+    # that are linked more than once (e.g. -la -lb -la)
+    for deplib in $deplibs; do
+      if $opt_preserve_dup_deps; then
+	case "$libs " in
+	*" $deplib "*) func_append specialdeplibs " $deplib" ;;
+	esac
+      fi
+      func_append libs " $deplib"
+    done
+
+    if test lib = "$linkmode"; then
+      libs="$predeps $libs $compiler_lib_search_path $postdeps"
+
+      # Compute libraries that are listed more than once in $predeps
+      # $postdeps and mark them as special (i.e., whose duplicates are
+      # not to be eliminated).
+      pre_post_deps=
+      if $opt_duplicate_compiler_generated_deps; then
+	for pre_post_dep in $predeps $postdeps; do
+	  case "$pre_post_deps " in
+	  *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;;
+	  esac
+	  func_append pre_post_deps " $pre_post_dep"
+	done
+      fi
+      pre_post_deps=
+    fi
+
+    deplibs=
+    newdependency_libs=
+    newlib_search_path=
+    need_relink=no # whether we're linking any uninstalled libtool libraries
+    notinst_deplibs= # not-installed libtool libraries
+    notinst_path= # paths that contain not-installed libtool libraries
+
+    case $linkmode in
+    lib)
+	passes="conv dlpreopen link"
+	for file in $dlfiles $dlprefiles; do
+	  case $file in
+	  *.la) ;;
+	  *)
+	    func_fatal_help "libraries can '-dlopen' only libtool libraries: $file"
+	    ;;
+	  esac
+	done
+	;;
+    prog)
+	compile_deplibs=
+	finalize_deplibs=
+	alldeplibs=false
+	newdlfiles=
+	newdlprefiles=
+	passes="conv scan dlopen dlpreopen link"
+	;;
+    *)  passes="conv"
+	;;
+    esac
+
+    for pass in $passes; do
+      # The preopen pass in lib mode reverses $deplibs; put it back here
+      # so that -L comes before libs that need it for instance...
+      if test lib,link = "$linkmode,$pass"; then
+	## FIXME: Find the place where the list is rebuilt in the wrong
+	##        order, and fix it there properly
+        tmp_deplibs=
+	for deplib in $deplibs; do
+	  tmp_deplibs="$deplib $tmp_deplibs"
+	done
+	deplibs=$tmp_deplibs
+      fi
+
+      if test lib,link = "$linkmode,$pass" ||
+	 test prog,scan = "$linkmode,$pass"; then
+	libs=$deplibs
+	deplibs=
+      fi
+      if test prog = "$linkmode"; then
+	case $pass in
+	dlopen) libs=$dlfiles ;;
+	dlpreopen) libs=$dlprefiles ;;
+	link) libs="$deplibs %DEPLIBS% $dependency_libs" ;;
+	esac
+      fi
+      if test lib,dlpreopen = "$linkmode,$pass"; then
+	# Collect and forward deplibs of preopened libtool libs
+	for lib in $dlprefiles; do
+	  # Ignore non-libtool-libs
+	  dependency_libs=
+	  func_resolve_sysroot "$lib"
+	  case $lib in
+	  *.la)	func_source "$func_resolve_sysroot_result" ;;
+	  esac
+
+	  # Collect preopened libtool deplibs, except any this library
+	  # has declared as weak libs
+	  for deplib in $dependency_libs; do
+	    func_basename "$deplib"
+            deplib_base=$func_basename_result
+	    case " $weak_libs " in
+	    *" $deplib_base "*) ;;
+	    *) func_append deplibs " $deplib" ;;
+	    esac
+	  done
+	done
+	libs=$dlprefiles
+      fi
+      if test dlopen = "$pass"; then
+	# Collect dlpreopened libraries
+	save_deplibs=$deplibs
+	deplibs=
+      fi
+
+      for deplib in $libs; do
+	lib=
+	found=false
+	case $deplib in
+	-mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \
+        |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*)
+	  if test prog,link = "$linkmode,$pass"; then
+	    compile_deplibs="$deplib $compile_deplibs"
+	    finalize_deplibs="$deplib $finalize_deplibs"
+	  else
+	    func_append compiler_flags " $deplib"
+	    if test lib = "$linkmode"; then
+		case "$new_inherited_linker_flags " in
+		    *" $deplib "*) ;;
+		    * ) func_append new_inherited_linker_flags " $deplib" ;;
+		esac
+	    fi
+	  fi
+	  continue
+	  ;;
+	-l*)
+	  if test lib != "$linkmode" && test prog != "$linkmode"; then
+	    func_warning "'-l' is ignored for archives/objects"
+	    continue
+	  fi
+	  func_stripname '-l' '' "$deplib"
+	  name=$func_stripname_result
+	  if test lib = "$linkmode"; then
+	    searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path"
+	  else
+	    searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path"
+	  fi
+	  for searchdir in $searchdirs; do
+	    for search_ext in .la $std_shrext .so .a; do
+	      # Search the libtool library
+	      lib=$searchdir/lib$name$search_ext
+	      if test -f "$lib"; then
+		if test .la = "$search_ext"; then
+		  found=:
+		else
+		  found=false
+		fi
+		break 2
+	      fi
+	    done
+	  done
+	  if $found; then
+	    # deplib is a libtool library
+	    # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib,
+	    # We need to do some special things here, and not later.
+	    if test yes = "$allow_libtool_libs_with_static_runtimes"; then
+	      case " $predeps $postdeps " in
+	      *" $deplib "*)
+		if func_lalib_p "$lib"; then
+		  library_names=
+		  old_library=
+		  func_source "$lib"
+		  for l in $old_library $library_names; do
+		    ll=$l
+		  done
+		  if test "X$ll" = "X$old_library"; then # only static version available
+		    found=false
+		    func_dirname "$lib" "" "."
+		    ladir=$func_dirname_result
+		    lib=$ladir/$old_library
+		    if test prog,link = "$linkmode,$pass"; then
+		      compile_deplibs="$deplib $compile_deplibs"
+		      finalize_deplibs="$deplib $finalize_deplibs"
+		    else
+		      deplibs="$deplib $deplibs"
+		      test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs"
+		    fi
+		    continue
+		  fi
+		fi
+		;;
+	      *) ;;
+	      esac
+	    fi
+	  else
+	    # deplib doesn't seem to be a libtool library
+	    if test prog,link = "$linkmode,$pass"; then
+	      compile_deplibs="$deplib $compile_deplibs"
+	      finalize_deplibs="$deplib $finalize_deplibs"
+	    else
+	      deplibs="$deplib $deplibs"
+	      test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs"
+	    fi
+	    continue
+	  fi
+	  ;; # -l
+	*.ltframework)
+	  if test prog,link = "$linkmode,$pass"; then
+	    compile_deplibs="$deplib $compile_deplibs"
+	    finalize_deplibs="$deplib $finalize_deplibs"
+	  else
+	    deplibs="$deplib $deplibs"
+	    if test lib = "$linkmode"; then
+		case "$new_inherited_linker_flags " in
+		    *" $deplib "*) ;;
+		    * ) func_append new_inherited_linker_flags " $deplib" ;;
+		esac
+	    fi
+	  fi
+	  continue
+	  ;;
+	-L*)
+	  case $linkmode in
+	  lib)
+	    deplibs="$deplib $deplibs"
+	    test conv = "$pass" && continue
+	    newdependency_libs="$deplib $newdependency_libs"
+	    func_stripname '-L' '' "$deplib"
+	    func_resolve_sysroot "$func_stripname_result"
+	    func_append newlib_search_path " $func_resolve_sysroot_result"
+	    ;;
+	  prog)
+	    if test conv = "$pass"; then
+	      deplibs="$deplib $deplibs"
+	      continue
+	    fi
+	    if test scan = "$pass"; then
+	      deplibs="$deplib $deplibs"
+	    else
+	      compile_deplibs="$deplib $compile_deplibs"
+	      finalize_deplibs="$deplib $finalize_deplibs"
+	    fi
+	    func_stripname '-L' '' "$deplib"
+	    func_resolve_sysroot "$func_stripname_result"
+	    func_append newlib_search_path " $func_resolve_sysroot_result"
+	    ;;
+	  *)
+	    func_warning "'-L' is ignored for archives/objects"
+	    ;;
+	  esac # linkmode
+	  continue
+	  ;; # -L
+	-R*)
+	  if test link = "$pass"; then
+	    func_stripname '-R' '' "$deplib"
+	    func_resolve_sysroot "$func_stripname_result"
+	    dir=$func_resolve_sysroot_result
+	    # Make sure the xrpath contains only unique directories.
+	    case "$xrpath " in
+	    *" $dir "*) ;;
+	    *) func_append xrpath " $dir" ;;
+	    esac
+	  fi
+	  deplibs="$deplib $deplibs"
+	  continue
+	  ;;
+	*.la)
+	  func_resolve_sysroot "$deplib"
+	  lib=$func_resolve_sysroot_result
+	  ;;
+	*.$libext)
+	  if test conv = "$pass"; then
+	    deplibs="$deplib $deplibs"
+	    continue
+	  fi
+	  case $linkmode in
+	  lib)
+	    # Linking convenience modules into shared libraries is allowed,
+	    # but linking other static libraries is non-portable.
+	    case " $dlpreconveniencelibs " in
+	    *" $deplib "*) ;;
+	    *)
+	      valid_a_lib=false
+	      case $deplibs_check_method in
+		match_pattern*)
+		  set dummy $deplibs_check_method; shift
+		  match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
+		  if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \
+		    | $EGREP "$match_pattern_regex" > /dev/null; then
+		    valid_a_lib=:
+		  fi
+		;;
+		pass_all)
+		  valid_a_lib=:
+		;;
+	      esac
+	      if $valid_a_lib; then
+		echo
+		$ECHO "*** Warning: Linking the shared library $output against the"
+		$ECHO "*** static library $deplib is not portable!"
+		deplibs="$deplib $deplibs"
+	      else
+		echo
+		$ECHO "*** Warning: Trying to link with static lib archive $deplib."
+		echo "*** I have the capability to make that library automatically link in when"
+		echo "*** you link to this library.  But I can only do this if you have a"
+		echo "*** shared version of the library, which you do not appear to have"
+		echo "*** because the file extension .$libext of this argument makes me believe"
+		echo "*** that it is just a static archive that I should not use here."
+	      fi
+	      ;;
+	    esac
+	    continue
+	    ;;
+	  prog)
+	    if test link != "$pass"; then
+	      deplibs="$deplib $deplibs"
+	    else
+	      compile_deplibs="$deplib $compile_deplibs"
+	      finalize_deplibs="$deplib $finalize_deplibs"
+	    fi
+	    continue
+	    ;;
+	  esac # linkmode
+	  ;; # *.$libext
+	*.lo | *.$objext)
+	  if test conv = "$pass"; then
+	    deplibs="$deplib $deplibs"
+	  elif test prog = "$linkmode"; then
+	    if test dlpreopen = "$pass" || test yes != "$dlopen_support" || test no = "$build_libtool_libs"; then
+	      # If there is no dlopen support or we're linking statically,
+	      # we need to preload.
+	      func_append newdlprefiles " $deplib"
+	      compile_deplibs="$deplib $compile_deplibs"
+	      finalize_deplibs="$deplib $finalize_deplibs"
+	    else
+	      func_append newdlfiles " $deplib"
+	    fi
+	  fi
+	  continue
+	  ;;
+	%DEPLIBS%)
+	  alldeplibs=:
+	  continue
+	  ;;
+	esac # case $deplib
+
+	$found || test -f "$lib" \
+	  || func_fatal_error "cannot find the library '$lib' or unhandled argument '$deplib'"
+
+	# Check to see that this really is a libtool archive.
+	func_lalib_unsafe_p "$lib" \
+	  || func_fatal_error "'$lib' is not a valid libtool archive"
+
+	func_dirname "$lib" "" "."
+	ladir=$func_dirname_result
+
+	dlname=
+	dlopen=
+	dlpreopen=
+	libdir=
+	library_names=
+	old_library=
+	inherited_linker_flags=
+	# If the library was installed with an old release of libtool,
+	# it will not redefine the variables 'installed' or 'shouldnotlink'.
+	installed=yes
+	shouldnotlink=no
+	avoidtemprpath=
+
+
+	# Read the .la file
+	func_source "$lib"
+
+	# Convert "-framework foo" to "foo.ltframework"
+	if test -n "$inherited_linker_flags"; then
+	  tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'`
+	  for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do
+	    case " $new_inherited_linker_flags " in
+	      *" $tmp_inherited_linker_flag "*) ;;
+	      *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";;
+	    esac
+	  done
+	fi
+	dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	if test lib,link = "$linkmode,$pass" ||
+	   test prog,scan = "$linkmode,$pass" ||
+	   { test prog != "$linkmode" && test lib != "$linkmode"; }; then
+	  test -n "$dlopen" && func_append dlfiles " $dlopen"
+	  test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen"
+	fi
+
+	if test conv = "$pass"; then
+	  # Only check for convenience libraries
+	  deplibs="$lib $deplibs"
+	  if test -z "$libdir"; then
+	    if test -z "$old_library"; then
+	      func_fatal_error "cannot find name of link library for '$lib'"
+	    fi
+	    # It is a libtool convenience library, so add in its objects.
+	    func_append convenience " $ladir/$objdir/$old_library"
+	    func_append old_convenience " $ladir/$objdir/$old_library"
+	  elif test prog != "$linkmode" && test lib != "$linkmode"; then
+	    func_fatal_error "'$lib' is not a convenience library"
+	  fi
+	  tmp_libs=
+	  for deplib in $dependency_libs; do
+	    deplibs="$deplib $deplibs"
+	    if $opt_preserve_dup_deps; then
+	      case "$tmp_libs " in
+	      *" $deplib "*) func_append specialdeplibs " $deplib" ;;
+	      esac
+	    fi
+	    func_append tmp_libs " $deplib"
+	  done
+	  continue
+	fi # $pass = conv
+
+
+	# Get the name of the library we link against.
+	linklib=
+	if test -n "$old_library" &&
+	   { test yes = "$prefer_static_libs" ||
+	     test built,no = "$prefer_static_libs,$installed"; }; then
+	  linklib=$old_library
+	else
+	  for l in $old_library $library_names; do
+	    linklib=$l
+	  done
+	fi
+	if test -z "$linklib"; then
+	  func_fatal_error "cannot find name of link library for '$lib'"
+	fi
+
+	# This library was specified with -dlopen.
+	if test dlopen = "$pass"; then
+	  test -z "$libdir" \
+	    && func_fatal_error "cannot -dlopen a convenience library: '$lib'"
+	  if test -z "$dlname" ||
+	     test yes != "$dlopen_support" ||
+	     test no = "$build_libtool_libs"
+	  then
+	    # If there is no dlname, no dlopen support or we're linking
+	    # statically, we need to preload.  We also need to preload any
+	    # dependent libraries so libltdl's deplib preloader doesn't
+	    # bomb out in the load deplibs phase.
+	    func_append dlprefiles " $lib $dependency_libs"
+	  else
+	    func_append newdlfiles " $lib"
+	  fi
+	  continue
+	fi # $pass = dlopen
+
+	# We need an absolute path.
+	case $ladir in
+	[\\/]* | [A-Za-z]:[\\/]*) abs_ladir=$ladir ;;
+	*)
+	  abs_ladir=`cd "$ladir" && pwd`
+	  if test -z "$abs_ladir"; then
+	    func_warning "cannot determine absolute directory name of '$ladir'"
+	    func_warning "passing it literally to the linker, although it might fail"
+	    abs_ladir=$ladir
+	  fi
+	  ;;
+	esac
+	func_basename "$lib"
+	laname=$func_basename_result
+
+	# Find the relevant object directory and library name.
+	if test yes = "$installed"; then
+	  if test ! -f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then
+	    func_warning "library '$lib' was moved."
+	    dir=$ladir
+	    absdir=$abs_ladir
+	    libdir=$abs_ladir
+	  else
+	    dir=$lt_sysroot$libdir
+	    absdir=$lt_sysroot$libdir
+	  fi
+	  test yes = "$hardcode_automatic" && avoidtemprpath=yes
+	else
+	  if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then
+	    dir=$ladir
+	    absdir=$abs_ladir
+	    # Remove this search path later
+	    func_append notinst_path " $abs_ladir"
+	  else
+	    dir=$ladir/$objdir
+	    absdir=$abs_ladir/$objdir
+	    # Remove this search path later
+	    func_append notinst_path " $abs_ladir"
+	  fi
+	fi # $installed = yes
+	func_stripname 'lib' '.la' "$laname"
+	name=$func_stripname_result
+
+	# This library was specified with -dlpreopen.
+	if test dlpreopen = "$pass"; then
+	  if test -z "$libdir" && test prog = "$linkmode"; then
+	    func_fatal_error "only libraries may -dlpreopen a convenience library: '$lib'"
+	  fi
+	  case $host in
+	    # special handling for platforms with PE-DLLs.
+	    *cygwin* | *mingw* | *cegcc* )
+	      # Linker will automatically link against shared library if both
+	      # static and shared are present.  Therefore, ensure we extract
+	      # symbols from the import library if a shared library is present
+	      # (otherwise, the dlopen module name will be incorrect).  We do
+	      # this by putting the import library name into $newdlprefiles.
+	      # We recover the dlopen module name by 'saving' the la file
+	      # name in a special purpose variable, and (later) extracting the
+	      # dlname from the la file.
+	      if test -n "$dlname"; then
+	        func_tr_sh "$dir/$linklib"
+	        eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname"
+	        func_append newdlprefiles " $dir/$linklib"
+	      else
+	        func_append newdlprefiles " $dir/$old_library"
+	        # Keep a list of preopened convenience libraries to check
+	        # that they are being used correctly in the link pass.
+	        test -z "$libdir" && \
+	          func_append dlpreconveniencelibs " $dir/$old_library"
+	      fi
+	    ;;
+	    * )
+	      # Prefer using a static library (so that no silly _DYNAMIC symbols
+	      # are required to link).
+	      if test -n "$old_library"; then
+	        func_append newdlprefiles " $dir/$old_library"
+	        # Keep a list of preopened convenience libraries to check
+	        # that they are being used correctly in the link pass.
+	        test -z "$libdir" && \
+	          func_append dlpreconveniencelibs " $dir/$old_library"
+	      # Otherwise, use the dlname, so that lt_dlopen finds it.
+	      elif test -n "$dlname"; then
+	        func_append newdlprefiles " $dir/$dlname"
+	      else
+	        func_append newdlprefiles " $dir/$linklib"
+	      fi
+	    ;;
+	  esac
+	fi # $pass = dlpreopen
+
+	if test -z "$libdir"; then
+	  # Link the convenience library
+	  if test lib = "$linkmode"; then
+	    deplibs="$dir/$old_library $deplibs"
+	  elif test prog,link = "$linkmode,$pass"; then
+	    compile_deplibs="$dir/$old_library $compile_deplibs"
+	    finalize_deplibs="$dir/$old_library $finalize_deplibs"
+	  else
+	    deplibs="$lib $deplibs" # used for prog,scan pass
+	  fi
+	  continue
+	fi
+
+
+	if test prog = "$linkmode" && test link != "$pass"; then
+	  func_append newlib_search_path " $ladir"
+	  deplibs="$lib $deplibs"
+
+	  linkalldeplibs=false
+	  if test no != "$link_all_deplibs" || test -z "$library_names" ||
+	     test no = "$build_libtool_libs"; then
+	    linkalldeplibs=:
+	  fi
+
+	  tmp_libs=
+	  for deplib in $dependency_libs; do
+	    case $deplib in
+	    -L*) func_stripname '-L' '' "$deplib"
+	         func_resolve_sysroot "$func_stripname_result"
+	         func_append newlib_search_path " $func_resolve_sysroot_result"
+		 ;;
+	    esac
+	    # Need to link against all dependency_libs?
+	    if $linkalldeplibs; then
+	      deplibs="$deplib $deplibs"
+	    else
+	      # Need to hardcode shared library paths
+	      # or/and link against static libraries
+	      newdependency_libs="$deplib $newdependency_libs"
+	    fi
+	    if $opt_preserve_dup_deps; then
+	      case "$tmp_libs " in
+	      *" $deplib "*) func_append specialdeplibs " $deplib" ;;
+	      esac
+	    fi
+	    func_append tmp_libs " $deplib"
+	  done # for deplib
+	  continue
+	fi # $linkmode = prog...
+
+	if test prog,link = "$linkmode,$pass"; then
+	  if test -n "$library_names" &&
+	     { { test no = "$prefer_static_libs" ||
+	         test built,yes = "$prefer_static_libs,$installed"; } ||
+	       test -z "$old_library"; }; then
+	    # We need to hardcode the library path
+	    if test -n "$shlibpath_var" && test -z "$avoidtemprpath"; then
+	      # Make sure the rpath contains only unique directories.
+	      case $temp_rpath: in
+	      *"$absdir:"*) ;;
+	      *) func_append temp_rpath "$absdir:" ;;
+	      esac
+	    fi
+
+	    # Hardcode the library path.
+	    # Skip directories that are in the system default run-time
+	    # search path.
+	    case " $sys_lib_dlsearch_path " in
+	    *" $absdir "*) ;;
+	    *)
+	      case "$compile_rpath " in
+	      *" $absdir "*) ;;
+	      *) func_append compile_rpath " $absdir" ;;
+	      esac
+	      ;;
+	    esac
+	    case " $sys_lib_dlsearch_path " in
+	    *" $libdir "*) ;;
+	    *)
+	      case "$finalize_rpath " in
+	      *" $libdir "*) ;;
+	      *) func_append finalize_rpath " $libdir" ;;
+	      esac
+	      ;;
+	    esac
+	  fi # $linkmode,$pass = prog,link...
+
+	  if $alldeplibs &&
+	     { test pass_all = "$deplibs_check_method" ||
+	       { test yes = "$build_libtool_libs" &&
+		 test -n "$library_names"; }; }; then
+	    # We only need to search for static libraries
+	    continue
+	  fi
+	fi
+
+	link_static=no # Whether the deplib will be linked statically
+	use_static_libs=$prefer_static_libs
+	if test built = "$use_static_libs" && test yes = "$installed"; then
+	  use_static_libs=no
+	fi
+	if test -n "$library_names" &&
+	   { test no = "$use_static_libs" || test -z "$old_library"; }; then
+	  case $host in
+	  *cygwin* | *mingw* | *cegcc* | *os2*)
+	      # No point in relinking DLLs because paths are not encoded
+	      func_append notinst_deplibs " $lib"
+	      need_relink=no
+	    ;;
+	  *)
+	    if test no = "$installed"; then
+	      func_append notinst_deplibs " $lib"
+	      need_relink=yes
+	    fi
+	    ;;
+	  esac
+	  # This is a shared library
+
+	  # Warn about portability, can't link against -module's on some
+	  # systems (darwin).  Don't bleat about dlopened modules though!
+	  dlopenmodule=
+	  for dlpremoduletest in $dlprefiles; do
+	    if test "X$dlpremoduletest" = "X$lib"; then
+	      dlopenmodule=$dlpremoduletest
+	      break
+	    fi
+	  done
+	  if test -z "$dlopenmodule" && test yes = "$shouldnotlink" && test link = "$pass"; then
+	    echo
+	    if test prog = "$linkmode"; then
+	      $ECHO "*** Warning: Linking the executable $output against the loadable module"
+	    else
+	      $ECHO "*** Warning: Linking the shared library $output against the loadable module"
+	    fi
+	    $ECHO "*** $linklib is not portable!"
+	  fi
+	  if test lib = "$linkmode" &&
+	     test yes = "$hardcode_into_libs"; then
+	    # Hardcode the library path.
+	    # Skip directories that are in the system default run-time
+	    # search path.
+	    case " $sys_lib_dlsearch_path " in
+	    *" $absdir "*) ;;
+	    *)
+	      case "$compile_rpath " in
+	      *" $absdir "*) ;;
+	      *) func_append compile_rpath " $absdir" ;;
+	      esac
+	      ;;
+	    esac
+	    case " $sys_lib_dlsearch_path " in
+	    *" $libdir "*) ;;
+	    *)
+	      case "$finalize_rpath " in
+	      *" $libdir "*) ;;
+	      *) func_append finalize_rpath " $libdir" ;;
+	      esac
+	      ;;
+	    esac
+	  fi
+
+	  if test -n "$old_archive_from_expsyms_cmds"; then
+	    # figure out the soname
+	    set dummy $library_names
+	    shift
+	    realname=$1
+	    shift
+	    libname=`eval "\\$ECHO \"$libname_spec\""`
+	    # use dlname if we got it. it's perfectly good, no?
+	    if test -n "$dlname"; then
+	      soname=$dlname
+	    elif test -n "$soname_spec"; then
+	      # bleh windows
+	      case $host in
+	      *cygwin* | mingw* | *cegcc* | *os2*)
+	        func_arith $current - $age
+		major=$func_arith_result
+		versuffix=-$major
+		;;
+	      esac
+	      eval soname=\"$soname_spec\"
+	    else
+	      soname=$realname
+	    fi
+
+	    # Make a new name for the extract_expsyms_cmds to use
+	    soroot=$soname
+	    func_basename "$soroot"
+	    soname=$func_basename_result
+	    func_stripname 'lib' '.dll' "$soname"
+	    newlib=libimp-$func_stripname_result.a
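+	    # e.g. a soname of 'libfoo-1.dll' gives an import library name
+	    # 'libimp-foo-1.a', created under $output_objdir below.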
+
+	    # If the library has no export list, then create one now
+	    if test -f "$output_objdir/$soname-def"; then :
+	    else
+	      func_verbose "extracting exported symbol list from '$soname'"
+	      func_execute_cmds "$extract_expsyms_cmds" 'exit $?'
+	    fi
+
+	    # Create $newlib
+	    if test -f "$output_objdir/$newlib"; then :; else
+	      func_verbose "generating import library for '$soname'"
+	      func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?'
+	    fi
+	    # make sure the library variables are pointing to the new library
+	    dir=$output_objdir
+	    linklib=$newlib
+	  fi # test -n "$old_archive_from_expsyms_cmds"
+
+	  if test prog = "$linkmode" || test relink != "$opt_mode"; then
+	    add_shlibpath=
+	    add_dir=
+	    add=
+	    lib_linked=yes
+	    case $hardcode_action in
+	    immediate | unsupported)
+	      if test no = "$hardcode_direct"; then
+		add=$dir/$linklib
+		case $host in
+		  *-*-sco3.2v5.0.[024]*) add_dir=-L$dir ;;
+		  *-*-sysv4*uw2*) add_dir=-L$dir ;;
+		  *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \
+		    *-*-unixware7*) add_dir=-L$dir ;;
+		  *-*-darwin* )
+		    # if the lib is a (non-dlopened) module then we cannot
+		    # link against it, someone is ignoring the earlier warnings
+		    if /usr/bin/file -L $add 2> /dev/null |
+			 $GREP ": [^:]* bundle" >/dev/null; then
+		      if test "X$dlopenmodule" != "X$lib"; then
+			$ECHO "*** Warning: lib $linklib is a module, not a shared library"
+			if test -z "$old_library"; then
+			  echo
+			  echo "*** And there doesn't seem to be a static archive available"
+			  echo "*** The link will probably fail, sorry"
+			else
+			  add=$dir/$old_library
+			fi
+		      elif test -n "$old_library"; then
+			add=$dir/$old_library
+		      fi
+		    fi
+		esac
+	      elif test no = "$hardcode_minus_L"; then
+		case $host in
+		*-*-sunos*) add_shlibpath=$dir ;;
+		esac
+		add_dir=-L$dir
+		add=-l$name
+	      elif test no = "$hardcode_shlibpath_var"; then
+		add_shlibpath=$dir
+		add=-l$name
+	      else
+		lib_linked=no
+	      fi
+	      ;;
+	    relink)
+	      if test yes = "$hardcode_direct" &&
+	         test no = "$hardcode_direct_absolute"; then
+		add=$dir/$linklib
+	      elif test yes = "$hardcode_minus_L"; then
+		add_dir=-L$absdir
+		# Try looking first in the location we're being installed to.
+		if test -n "$inst_prefix_dir"; then
+		  case $libdir in
+		    [\\/]*)
+		      func_append add_dir " -L$inst_prefix_dir$libdir"
+		      ;;
+		  esac
+		fi
+		add=-l$name
+	      elif test yes = "$hardcode_shlibpath_var"; then
+		add_shlibpath=$dir
+		add=-l$name
+	      else
+		lib_linked=no
+	      fi
+	      ;;
+	    *) lib_linked=no ;;
+	    esac
+
+	    if test yes != "$lib_linked"; then
+	      func_fatal_configuration "unsupported hardcode properties"
+	    fi
+
+	    if test -n "$add_shlibpath"; then
+	      case :$compile_shlibpath: in
+	      *":$add_shlibpath:"*) ;;
+	      *) func_append compile_shlibpath "$add_shlibpath:" ;;
+	      esac
+	    fi
+	    if test prog = "$linkmode"; then
+	      test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs"
+	      test -n "$add" && compile_deplibs="$add $compile_deplibs"
+	    else
+	      test -n "$add_dir" && deplibs="$add_dir $deplibs"
+	      test -n "$add" && deplibs="$add $deplibs"
+	      if test yes != "$hardcode_direct" &&
+		 test yes != "$hardcode_minus_L" &&
+		 test yes = "$hardcode_shlibpath_var"; then
+		case :$finalize_shlibpath: in
+		*":$libdir:"*) ;;
+		*) func_append finalize_shlibpath "$libdir:" ;;
+		esac
+	      fi
+	    fi
+	  fi
+
+	  if test prog = "$linkmode" || test relink = "$opt_mode"; then
+	    add_shlibpath=
+	    add_dir=
+	    add=
+	    # Finalize command for both is simple: just hardcode it.
+	    if test yes = "$hardcode_direct" &&
+	       test no = "$hardcode_direct_absolute"; then
+	      add=$libdir/$linklib
+	    elif test yes = "$hardcode_minus_L"; then
+	      add_dir=-L$libdir
+	      add=-l$name
+	    elif test yes = "$hardcode_shlibpath_var"; then
+	      case :$finalize_shlibpath: in
+	      *":$libdir:"*) ;;
+	      *) func_append finalize_shlibpath "$libdir:" ;;
+	      esac
+	      add=-l$name
+	    elif test yes = "$hardcode_automatic"; then
+	      if test -n "$inst_prefix_dir" &&
+		 test -f "$inst_prefix_dir$libdir/$linklib"; then
+		add=$inst_prefix_dir$libdir/$linklib
+	      else
+		add=$libdir/$linklib
+	      fi
+	    else
+	      # We cannot seem to hardcode it, guess we'll fake it.
+	      add_dir=-L$libdir
+	      # Try looking first in the location we're being installed to.
+	      if test -n "$inst_prefix_dir"; then
+		case $libdir in
+		  [\\/]*)
+		    func_append add_dir " -L$inst_prefix_dir$libdir"
+		    ;;
+		esac
+	      fi
+	      add=-l$name
+	    fi
+
+	    if test prog = "$linkmode"; then
+	      test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs"
+	      test -n "$add" && finalize_deplibs="$add $finalize_deplibs"
+	    else
+	      test -n "$add_dir" && deplibs="$add_dir $deplibs"
+	      test -n "$add" && deplibs="$add $deplibs"
+	    fi
+	  fi
+	elif test prog = "$linkmode"; then
+	  # Here we assume that one of hardcode_direct or hardcode_minus_L
+	  # is not unsupported.  This is valid on all known static and
+	  # shared platforms.
+	  if test unsupported != "$hardcode_direct"; then
+	    test -n "$old_library" && linklib=$old_library
+	    compile_deplibs="$dir/$linklib $compile_deplibs"
+	    finalize_deplibs="$dir/$linklib $finalize_deplibs"
+	  else
+	    compile_deplibs="-l$name -L$dir $compile_deplibs"
+	    finalize_deplibs="-l$name -L$dir $finalize_deplibs"
+	  fi
+	elif test yes = "$build_libtool_libs"; then
+	  # Not a shared library
+	  if test pass_all != "$deplibs_check_method"; then
+	    # We're trying to link a shared library against a static one
+	    # but the system doesn't support it.
+
+	    # Just print a warning and add the library to dependency_libs so
+	    # that the program can be linked against the static library.
+	    echo
+	    $ECHO "*** Warning: This system cannot link to static lib archive $lib."
+	    echo "*** I have the capability to make that library automatically link in when"
+	    echo "*** you link to this library.  But I can only do this if you have a"
+	    echo "*** shared version of the library, which you do not appear to have."
+	    if test yes = "$module"; then
+	      echo "*** But as you try to build a module library, libtool will still create "
+	      echo "*** a static module, that should work as long as the dlopening application"
+	      echo "*** is linked with the -dlopen flag to resolve symbols at runtime."
+	      if test -z "$global_symbol_pipe"; then
+		echo
+		echo "*** However, this would only work if libtool was able to extract symbol"
+		echo "*** lists from a program, using 'nm' or equivalent, but libtool could"
+		echo "*** not find such a program.  So, this module is probably useless."
+		echo "*** 'nm' from GNU binutils and a full rebuild may help."
+	      fi
+	      if test no = "$build_old_libs"; then
+		build_libtool_libs=module
+		build_old_libs=yes
+	      else
+		build_libtool_libs=no
+	      fi
+	    fi
+	  else
+	    deplibs="$dir/$old_library $deplibs"
+	    link_static=yes
+	  fi
+	fi # link shared/static library?
+
+	if test lib = "$linkmode"; then
+	  if test -n "$dependency_libs" &&
+	     { test yes != "$hardcode_into_libs" ||
+	       test yes = "$build_old_libs" ||
+	       test yes = "$link_static"; }; then
+	    # Extract -R from dependency_libs
+	    temp_deplibs=
+	    for libdir in $dependency_libs; do
+	      case $libdir in
+	      -R*) func_stripname '-R' '' "$libdir"
+	           temp_xrpath=$func_stripname_result
+		   case " $xrpath " in
+		   *" $temp_xrpath "*) ;;
+		   *) func_append xrpath " $temp_xrpath";;
+		   esac;;
+	      *) func_append temp_deplibs " $libdir";;
+	      esac
+	    done
+	    dependency_libs=$temp_deplibs
+	  fi
+
+	  func_append newlib_search_path " $absdir"
+	  # Link against this library
+	  test no = "$link_static" && newdependency_libs="$abs_ladir/$laname $newdependency_libs"
+	  # ... and its dependency_libs
+	  tmp_libs=
+	  for deplib in $dependency_libs; do
+	    newdependency_libs="$deplib $newdependency_libs"
+	    case $deplib in
+              -L*) func_stripname '-L' '' "$deplib"
+                   func_resolve_sysroot "$func_stripname_result";;
+              *) func_resolve_sysroot "$deplib" ;;
+            esac
+	    if $opt_preserve_dup_deps; then
+	      case "$tmp_libs " in
+	      *" $func_resolve_sysroot_result "*)
+                func_append specialdeplibs " $func_resolve_sysroot_result" ;;
+	      esac
+	    fi
+	    func_append tmp_libs " $func_resolve_sysroot_result"
+	  done
+
+	  if test no != "$link_all_deplibs"; then
+	    # Add the search paths of all dependency libraries
+	    for deplib in $dependency_libs; do
+	      path=
+	      case $deplib in
+	      -L*) path=$deplib ;;
+	      *.la)
+	        func_resolve_sysroot "$deplib"
+	        deplib=$func_resolve_sysroot_result
+	        func_dirname "$deplib" "" "."
+		dir=$func_dirname_result
+		# We need an absolute path.
+		case $dir in
+		[\\/]* | [A-Za-z]:[\\/]*) absdir=$dir ;;
+		*)
+		  absdir=`cd "$dir" && pwd`
+		  if test -z "$absdir"; then
+		    func_warning "cannot determine absolute directory name of '$dir'"
+		    absdir=$dir
+		  fi
+		  ;;
+		esac
+		if $GREP "^installed=no" $deplib > /dev/null; then
+		case $host in
+		*-*-darwin*)
+		  depdepl=
+		  eval deplibrary_names=`$SED -n -e 's/^library_names=\(.*\)$/\1/p' $deplib`
+		  if test -n "$deplibrary_names"; then
+		    for tmp in $deplibrary_names; do
+		      depdepl=$tmp
+		    done
+		    if test -f "$absdir/$objdir/$depdepl"; then
+		      depdepl=$absdir/$objdir/$depdepl
+		      darwin_install_name=`$OTOOL -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'`
+                      if test -z "$darwin_install_name"; then
+                          darwin_install_name=`$OTOOL64 -L $depdepl  | awk '{if (NR == 2) {print $1;exit}}'`
+                      fi
+		      func_append compiler_flags " $wl-dylib_file $wl$darwin_install_name:$depdepl"
+		      func_append linker_flags " -dylib_file $darwin_install_name:$depdepl"
+		      path=
+		    fi
+		  fi
+		  ;;
+		*)
+		  path=-L$absdir/$objdir
+		  ;;
+		esac
+		else
+		  eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
+		  test -z "$libdir" && \
+		    func_fatal_error "'$deplib' is not a valid libtool archive"
+		  test "$absdir" != "$libdir" && \
+		    func_warning "'$deplib' seems to be moved"
+
+		  path=-L$absdir
+		fi
+		;;
+	      esac
+	      case " $deplibs " in
+	      *" $path "*) ;;
+	      *) deplibs="$path $deplibs" ;;
+	      esac
+	    done
+	  fi # link_all_deplibs != no
+	fi # linkmode = lib
+      done # for deplib in $libs
+      if test link = "$pass"; then
+	if test prog = "$linkmode"; then
+	  compile_deplibs="$new_inherited_linker_flags $compile_deplibs"
+	  finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs"
+	else
+	  compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	fi
+      fi
+      dependency_libs=$newdependency_libs
+      if test dlpreopen = "$pass"; then
+	# Link the dlpreopened libraries before other libraries
+	for deplib in $save_deplibs; do
+	  deplibs="$deplib $deplibs"
+	done
+      fi
+      if test dlopen != "$pass"; then
+	test conv = "$pass" || {
+	  # Make sure lib_search_path contains only unique directories.
+	  lib_search_path=
+	  for dir in $newlib_search_path; do
+	    case "$lib_search_path " in
+	    *" $dir "*) ;;
+	    *) func_append lib_search_path " $dir" ;;
+	    esac
+	  done
+	  newlib_search_path=
+	}
+
+	if test prog,link = "$linkmode,$pass"; then
+	  vars="compile_deplibs finalize_deplibs"
+	else
+	  vars=deplibs
+	fi
+	for var in $vars dependency_libs; do
+	  # Add libraries to $var in reverse order
+	  eval tmp_libs=\"\$$var\"
+	  new_libs=
+	  for deplib in $tmp_libs; do
+	    # FIXME: Pedantically, this is the right thing to do, so
+	    #        that some nasty dependency loop isn't accidentally
+	    #        broken:
+	    #new_libs="$deplib $new_libs"
+	    # Pragmatically, this seems to cause very few problems in
+	    # practice:
+	    case $deplib in
+	    -L*) new_libs="$deplib $new_libs" ;;
+	    -R*) ;;
+	    *)
+	      # And here is the reason: when a library appears more
+	      # than once as an explicit dependence of a library, or
+	      # is implicitly linked in more than once by the
+	      # compiler, it is considered special, and multiple
+	      # occurrences thereof are not removed.  Compare this
+	      # with having the same library being listed as a
+	      # dependency of multiple other libraries: in this case,
+	      # we know (pedantically, we assume) the library does not
+	      # need to be listed more than once, so we keep only the
+	      # last copy.  This is not always right, but it is rare
+	      # enough that we require users that really mean to play
+	      # such unportable linking tricks to link the library
+	      # using -Wl,-lname, so that libtool does not consider it
+	      # for duplicate removal.
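+	      # Example: a '-lbar' that appears twice in $tmp_libs is kept
+	      # only once below, unless it was recorded in $specialdeplibs,
+	      # in which case every occurrence survives.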
+	      case " $specialdeplibs " in
+	      *" $deplib "*) new_libs="$deplib $new_libs" ;;
+	      *)
+		case " $new_libs " in
+		*" $deplib "*) ;;
+		*) new_libs="$deplib $new_libs" ;;
+		esac
+		;;
+	      esac
+	      ;;
+	    esac
+	  done
+	  tmp_libs=
+	  for deplib in $new_libs; do
+	    case $deplib in
+	    -L*)
+	      case " $tmp_libs " in
+	      *" $deplib "*) ;;
+	      *) func_append tmp_libs " $deplib" ;;
+	      esac
+	      ;;
+	    *) func_append tmp_libs " $deplib" ;;
+	    esac
+	  done
+	  eval $var=\"$tmp_libs\"
+	done # for var
+      fi
+
+      # Add Sun CC postdeps if required:
+      test CXX = "$tagname" && {
+        case $host_os in
+        linux*)
+          case `$CC -V 2>&1 | sed 5q` in
+          *Sun\ C*) # Sun C++ 5.9
+            func_suncc_cstd_abi
+
+            if test no != "$suncc_use_cstd_abi"; then
+              func_append postdeps ' -library=Cstd -library=Crun'
+            fi
+            ;;
+          esac
+          ;;
+
+        solaris*)
+          func_cc_basename "$CC"
+          case $func_cc_basename_result in
+          CC* | sunCC*)
+            func_suncc_cstd_abi
+
+            if test no != "$suncc_use_cstd_abi"; then
+              func_append postdeps ' -library=Cstd -library=Crun'
+            fi
+            ;;
+          esac
+          ;;
+        esac
+      }
+
+      # Last step: remove runtime libs from dependency_libs
+      # (they stay in deplibs)
+      tmp_libs=
+      for i in $dependency_libs; do
+	case " $predeps $postdeps $compiler_lib_search_path " in
+	*" $i "*)
+	  i=
+	  ;;
+	esac
+	if test -n "$i"; then
+	  func_append tmp_libs " $i"
+	fi
+      done
+      dependency_libs=$tmp_libs
+    done # for pass
+    if test prog = "$linkmode"; then
+      dlfiles=$newdlfiles
+    fi
+    if test prog = "$linkmode" || test lib = "$linkmode"; then
+      dlprefiles=$newdlprefiles
+    fi
+
+    case $linkmode in
+    oldlib)
+      if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then
+	func_warning "'-dlopen' is ignored for archives"
+      fi
+
+      case " $deplibs" in
+      *\ -l* | *\ -L*)
+	func_warning "'-l' and '-L' are ignored for archives" ;;
+      esac
+
+      test -n "$rpath" && \
+	func_warning "'-rpath' is ignored for archives"
+
+      test -n "$xrpath" && \
+	func_warning "'-R' is ignored for archives"
+
+      test -n "$vinfo" && \
+	func_warning "'-version-info/-version-number' is ignored for archives"
+
+      test -n "$release" && \
+	func_warning "'-release' is ignored for archives"
+
+      test -n "$export_symbols$export_symbols_regex" && \
+	func_warning "'-export-symbols' is ignored for archives"
+
+      # Now set the variables for building old libraries.
+      build_libtool_libs=no
+      oldlibs=$output
+      func_append objs "$old_deplibs"
+      ;;
+
+    lib)
+      # Make sure we only generate libraries of the form 'libNAME.la'.
+      case $outputname in
+      lib*)
+	func_stripname 'lib' '.la' "$outputname"
+	name=$func_stripname_result
+	eval shared_ext=\"$shrext_cmds\"
+	eval libname=\"$libname_spec\"
+	;;
+      *)
+	test no = "$module" \
+	  && func_fatal_help "libtool library '$output' must begin with 'lib'"
+
+	if test no != "$need_lib_prefix"; then
+	  # Add the "lib" prefix for modules if required
+	  func_stripname '' '.la' "$outputname"
+	  name=$func_stripname_result
+	  eval shared_ext=\"$shrext_cmds\"
+	  eval libname=\"$libname_spec\"
+	else
+	  func_stripname '' '.la' "$outputname"
+	  libname=$func_stripname_result
+	fi
+	;;
+      esac
+
+      if test -n "$objs"; then
+	if test pass_all != "$deplibs_check_method"; then
+	  func_fatal_error "cannot build libtool library '$output' from non-libtool objects on this host:$objs"
+	else
+	  echo
+	  $ECHO "*** Warning: Linking the shared library $output against the non-libtool"
+	  $ECHO "*** objects $objs is not portable!"
+	  func_append libobjs " $objs"
+	fi
+      fi
+
+      test no = "$dlself" \
+	|| func_warning "'-dlopen self' is ignored for libtool libraries"
+
+      set dummy $rpath
+      shift
+      test 1 -lt "$#" \
+	&& func_warning "ignoring multiple '-rpath's for a libtool library"
+
+      install_libdir=$1
+
+      oldlibs=
+      if test -z "$rpath"; then
+	if test yes = "$build_libtool_libs"; then
+	  # Building a libtool convenience library.
+	  # Some compilers have problems with a '.al' extension so
+	  # convenience libraries should have the same extension an
+	  # archive normally would.
+	  oldlibs="$output_objdir/$libname.$libext $oldlibs"
+	  build_libtool_libs=convenience
+	  build_old_libs=yes
+	fi
+
+	test -n "$vinfo" && \
+	  func_warning "'-version-info/-version-number' is ignored for convenience libraries"
+
+	test -n "$release" && \
+	  func_warning "'-release' is ignored for convenience libraries"
+      else
+
+	# Parse the version information argument.
+	save_ifs=$IFS; IFS=:
+	set dummy $vinfo 0 0 0
+	shift
+	IFS=$save_ifs
+
+	test -n "$7" && \
+	  func_fatal_help "too many parameters to '-version-info'"
+
+	# convert absolute version numbers to libtool ages
+	# this retains compatibility with .la files and attempts
+	# to make the code below a bit more comprehensible
+
+	case $vinfo_number in
+	yes)
+	  number_major=$1
+	  number_minor=$2
+	  number_revision=$3
+	  #
+	  # There are really only two kinds -- those that
+	  # use the current revision as the major version
+	  # and those that subtract age and use age as
+	  # a minor version.  But, then there is irix
+	  # that has an extra 1 added just for fun
+	  #
+	  case $version_type in
+	  # correct linux to gnu/linux during the next big refactor
+	  darwin|freebsd-elf|linux|osf|windows|none)
+	    func_arith $number_major + $number_minor
+	    current=$func_arith_result
+	    age=$number_minor
+	    revision=$number_revision
+	    ;;
+	  freebsd-aout|qnx|sunos)
+	    current=$number_major
+	    revision=$number_minor
+	    age=0
+	    ;;
+	  irix|nonstopux)
+	    func_arith $number_major + $number_minor
+	    current=$func_arith_result
+	    age=$number_minor
+	    revision=$number_minor
+	    lt_irix_increment=no
+	    ;;
+	  esac
+	  ;;
+	no)
+	  current=$1
+	  revision=$2
+	  age=$3
+	  ;;
+	esac
+
+	# Check that each of the things are valid numbers.
+	case $current in
+	0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
+	*)
+	  func_error "CURRENT '$current' must be a nonnegative integer"
+	  func_fatal_error "'$vinfo' is not valid version information"
+	  ;;
+	esac
+
+	case $revision in
+	0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
+	*)
+	  func_error "REVISION '$revision' must be a nonnegative integer"
+	  func_fatal_error "'$vinfo' is not valid version information"
+	  ;;
+	esac
+
+	case $age in
+	0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
+	*)
+	  func_error "AGE '$age' must be a nonnegative integer"
+	  func_fatal_error "'$vinfo' is not valid version information"
+	  ;;
+	esac
+
+	if test "$age" -gt "$current"; then
+	  func_error "AGE '$age' is greater than the current interface number '$current'"
+	  func_fatal_error "'$vinfo' is not valid version information"
+	fi
+
+	# Calculate the version variables.
+	major=
+	versuffix=
+	verstring=
+	case $version_type in
+	none) ;;
+
+	darwin)
+	  # Like Linux, but with the current version available in
+	  # verstring for coding it into the library header
+	  func_arith $current - $age
+	  major=.$func_arith_result
+	  versuffix=$major.$age.$revision
+	  # Darwin ld doesn't like 0 for these options...
+	  func_arith $current + 1
+	  minor_current=$func_arith_result
+	  xlcverstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision"
+	  verstring="-compatibility_version $minor_current -current_version $minor_current.$revision"
+          # On Darwin, some compilers (e.g. nagfor) need the version flags
+          # prefixed with $wl so they are passed through to the linker.
+          case $CC in
+              nagfor*)
+                  verstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision"
+                  ;;
+              *)
+                  verstring="-compatibility_version $minor_current -current_version $minor_current.$revision"
+                  ;;
+          esac
+	  ;;
+
+	freebsd-aout)
+	  major=.$current
+	  versuffix=.$current.$revision
+	  ;;
+
+	freebsd-elf)
+	  func_arith $current - $age
+	  major=.$func_arith_result
+	  versuffix=$major.$age.$revision
+	  ;;
+
+	irix | nonstopux)
+	  if test no = "$lt_irix_increment"; then
+	    func_arith $current - $age
+	  else
+	    func_arith $current - $age + 1
+	  fi
+	  major=$func_arith_result
+
+	  case $version_type in
+	    nonstopux) verstring_prefix=nonstopux ;;
+	    *)         verstring_prefix=sgi ;;
+	  esac
+	  verstring=$verstring_prefix$major.$revision
+
+	  # Add in all the interfaces that we are compatible with.
+	  loop=$revision
+	  while test 0 -ne "$loop"; do
+	    func_arith $revision - $loop
+	    iface=$func_arith_result
+	    func_arith $loop - 1
+	    loop=$func_arith_result
+	    verstring=$verstring_prefix$major.$iface:$verstring
+	  done
+
+	  # Before this point, $major must not contain '.'.
+	  major=.$major
+	  versuffix=$major.$revision
+	  ;;
+
+	linux) # correct to gnu/linux during the next big refactor
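+	  # e.g. '-version-info 3:2:1' (current:revision:age) gives major '.2'
+	  # and versuffix '.2.1.2', i.e. typically libNAME.so.2.1.2.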
+	  func_arith $current - $age
+	  major=.$func_arith_result
+	  versuffix=$major.$age.$revision
+	  ;;
+
+	osf)
+	  func_arith $current - $age
+	  major=.$func_arith_result
+	  versuffix=.$current.$age.$revision
+	  verstring=$current.$age.$revision
+
+	  # Add in all the interfaces that we are compatible with.
+	  loop=$age
+	  while test 0 -ne "$loop"; do
+	    func_arith $current - $loop
+	    iface=$func_arith_result
+	    func_arith $loop - 1
+	    loop=$func_arith_result
+	    verstring=$verstring:$iface.0
+	  done
+
+	  # Make executables depend on our current version.
+	  func_append verstring ":$current.0"
+	  ;;
+
+	qnx)
+	  major=.$current
+	  versuffix=.$current
+	  ;;
+
+	sco)
+	  major=.$current
+	  versuffix=.$current
+	  ;;
+
+	sunos)
+	  major=.$current
+	  versuffix=.$current.$revision
+	  ;;
+
+	windows)
+	  # Use '-' rather than '.', since we only want one
+	  # extension on DOS 8.3 file systems.
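+	  # e.g. '-version-info 3:2:1' gives major 2 and versuffix '-2',
+	  # producing a DLL named like NAME-2.dll.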
+	  func_arith $current - $age
+	  major=$func_arith_result
+	  versuffix=-$major
+	  ;;
+
+	*)
+	  func_fatal_configuration "unknown library version type '$version_type'"
+	  ;;
+	esac
+
+	# Clear the version info if we defaulted, and they specified a release.
+	if test -z "$vinfo" && test -n "$release"; then
+	  major=
+	  case $version_type in
+	  darwin)
+	    # we can't check for "0.0" in archive_cmds due to quoting
+	    # problems, so we reset it completely
+	    verstring=
+	    ;;
+	  *)
+	    verstring=0.0
+	    ;;
+	  esac
+	  if test no = "$need_version"; then
+	    versuffix=
+	  else
+	    versuffix=.0.0
+	  fi
+	fi
+
+	# Remove version info from name if versioning should be avoided
+	if test yes,no = "$avoid_version,$need_version"; then
+	  major=
+	  versuffix=
+	  verstring=
+	fi
+
+	# Check to see if the archive will have undefined symbols.
+	if test yes = "$allow_undefined"; then
+	  if test unsupported = "$allow_undefined_flag"; then
+	    if test yes = "$build_old_libs"; then
+	      func_warning "undefined symbols not allowed in $host shared libraries; building static only"
+	      build_libtool_libs=no
+	    else
+	      func_fatal_error "can't build $host shared library unless -no-undefined is specified"
+	    fi
+	  fi
+	else
+	  # Don't allow undefined symbols.
+	  allow_undefined_flag=$no_undefined_flag
+	fi
+
+      fi
+
+      func_generate_dlsyms "$libname" "$libname" :
+      func_append libobjs " $symfileobj"
+      test " " = "$libobjs" && libobjs=
+
+      if test relink != "$opt_mode"; then
+	# Remove our outputs, but don't remove object files since they
+	# may have been created when compiling PIC objects.
+	removelist=
+	tempremovelist=`$ECHO "$output_objdir/*"`
+	for p in $tempremovelist; do
+	  case $p in
+	    *.$objext | *.gcno)
+	       ;;
+	    $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/$libname$release.*)
+	       if test -n "$precious_files_regex"; then
+		 if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1
+		 then
+		   continue
+		 fi
+	       fi
+	       func_append removelist " $p"
+	       ;;
+	    *) ;;
+	  esac
+	done
+	test -n "$removelist" && \
+	  func_show_eval "${RM}r \$removelist"
+      fi
+
+      # Now set the variables for building old libraries.
+      if test yes = "$build_old_libs" && test convenience != "$build_libtool_libs"; then
+	func_append oldlibs " $output_objdir/$libname.$libext"
+
+	# Transform .lo files to .o files.
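+	# (e.g. 'a.lo b.lo' becomes 'a.o b.o'; entries already ending in .$libext are dropped)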
+	oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; $lo2o" | $NL2SP`
+      fi
+
+      # Eliminate all temporary directories.
+      #for path in $notinst_path; do
+      #	lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"`
+      #	deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"`
+      #	dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"`
+      #done
+
+      if test -n "$xrpath"; then
+	# If the user specified any rpath flags, then add them.
+	temp_xrpath=
+	for libdir in $xrpath; do
+	  func_replace_sysroot "$libdir"
+	  func_append temp_xrpath " -R$func_replace_sysroot_result"
+	  case "$finalize_rpath " in
+	  *" $libdir "*) ;;
+	  *) func_append finalize_rpath " $libdir" ;;
+	  esac
+	done
+	if test yes != "$hardcode_into_libs" || test yes = "$build_old_libs"; then
+	  dependency_libs="$temp_xrpath $dependency_libs"
+	fi
+      fi
+
+      # Make sure dlfiles contains only unique files that won't be dlpreopened
+      old_dlfiles=$dlfiles
+      dlfiles=
+      for lib in $old_dlfiles; do
+	case " $dlprefiles $dlfiles " in
+	*" $lib "*) ;;
+	*) func_append dlfiles " $lib" ;;
+	esac
+      done
+
+      # Make sure dlprefiles contains only unique files
+      old_dlprefiles=$dlprefiles
+      dlprefiles=
+      for lib in $old_dlprefiles; do
+	case "$dlprefiles " in
+	*" $lib "*) ;;
+	*) func_append dlprefiles " $lib" ;;
+	esac
+      done
+
+      if test yes = "$build_libtool_libs"; then
+	if test -n "$rpath"; then
+	  case $host in
+	  *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*)
+	    # these systems don't actually have a c library (as such)!
+	    ;;
+	  *-*-rhapsody* | *-*-darwin1.[012])
+	    # Rhapsody C library is in the System framework
+	    func_append deplibs " System.ltframework"
+	    ;;
+	  *-*-netbsd*)
+	    # Don't link with libc until the a.out ld.so is fixed.
+	    ;;
+	  *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
+	    # Do not include libc due to us having libc/libc_r.
+	    ;;
+	  *-*-sco3.2v5* | *-*-sco5v6*)
+	    # Causes problems with __ctype
+	    ;;
+	  *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*)
+	    # Compiler inserts libc in the correct place for threads to work
+	    ;;
+	  *)
+	    # Add libc to deplibs on all other systems if necessary.
+	    if test yes = "$build_libtool_need_lc"; then
+	      func_append deplibs " -lc"
+	    fi
+	    ;;
+	  esac
+	fi
+
+	# Transform deplibs into only deplibs that can be linked in shared.
+	name_save=$name
+	libname_save=$libname
+	release_save=$release
+	versuffix_save=$versuffix
+	major_save=$major
+	# I'm not sure if I'm treating the release correctly.  I think
+	# release should show up in the -l (i.e. -lgmp5) so we don't want to
+	# add it in twice.  Is that correct?
+	release=
+	versuffix=
+	major=
+	newdeplibs=
+	droppeddeps=no
+	case $deplibs_check_method in
+	pass_all)
+	  # Don't check for shared/static.  Everything works.
+	  # This might be a little naive.  We might want to check
+	  # whether the library exists or not.  But this is on
+	  # osf3 & osf4 and I'm not really sure... Just
+	  # implementing what was already the behavior.
+	  newdeplibs=$deplibs
+	  ;;
+	test_compile)
+	  # This code stresses the "libraries are programs" paradigm to its
+	  # limits. Maybe even breaks it.  We compile a program, linking it
+	  # against the deplibs as a proxy for the library.  Then we can check
+	  # whether they linked in statically or dynamically with ldd.
+	  $opt_dry_run || $RM conftest.c
+	  cat > conftest.c <<EOF
+	  int main() { return 0; }
+EOF
+	  $opt_dry_run || $RM conftest
+	  if $LTCC $LTCFLAGS -o conftest conftest.c $deplibs; then
+	    ldd_output=`ldd conftest`
+	    for i in $deplibs; do
+	      case $i in
+	      -l*)
+		func_stripname -l '' "$i"
+		name=$func_stripname_result
+		if test yes = "$allow_libtool_libs_with_static_runtimes"; then
+		  case " $predeps $postdeps " in
+		  *" $i "*)
+		    func_append newdeplibs " $i"
+		    i=
+		    ;;
+		  esac
+		fi
+		if test -n "$i"; then
+		  libname=`eval "\\$ECHO \"$libname_spec\""`
+		  deplib_matches=`eval "\\$ECHO \"$library_names_spec\""`
+		  set dummy $deplib_matches; shift
+		  deplib_match=$1
+		  if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0; then
+		    func_append newdeplibs " $i"
+		  else
+		    droppeddeps=yes
+		    echo
+		    $ECHO "*** Warning: dynamic linker does not accept needed library $i."
+		    echo "*** I have the capability to make that library automatically link in when"
+		    echo "*** you link to this library.  But I can only do this if you have a"
+		    echo "*** shared version of the library, which I believe you do not have"
+		    echo "*** because a test_compile did reveal that the linker did not use it for"
+		    echo "*** its dynamic dependency list that programs get resolved with at runtime."
+		  fi
+		fi
+		;;
+	      *)
+		func_append newdeplibs " $i"
+		;;
+	      esac
+	    done
+	  else
+	    # Error occurred in the first compile.  Let's try to salvage
+	    # the situation: Compile a separate program for each library.
+	    for i in $deplibs; do
+	      case $i in
+	      -l*)
+		func_stripname -l '' "$i"
+		name=$func_stripname_result
+		$opt_dry_run || $RM conftest
+		if $LTCC $LTCFLAGS -o conftest conftest.c $i; then
+		  ldd_output=`ldd conftest`
+		  if test yes = "$allow_libtool_libs_with_static_runtimes"; then
+		    case " $predeps $postdeps " in
+		    *" $i "*)
+		      func_append newdeplibs " $i"
+		      i=
+		      ;;
+		    esac
+		  fi
+		  if test -n "$i"; then
+		    libname=`eval "\\$ECHO \"$libname_spec\""`
+		    deplib_matches=`eval "\\$ECHO \"$library_names_spec\""`
+		    set dummy $deplib_matches; shift
+		    deplib_match=$1
+		    if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0; then
+		      func_append newdeplibs " $i"
+		    else
+		      droppeddeps=yes
+		      echo
+		      $ECHO "*** Warning: dynamic linker does not accept needed library $i."
+		      echo "*** I have the capability to make that library automatically link in when"
+		      echo "*** you link to this library.  But I can only do this if you have a"
+		      echo "*** shared version of the library, which you do not appear to have"
+		      echo "*** because a test_compile did reveal that the linker did not use this one"
+		      echo "*** as a dynamic dependency that programs can get resolved with at runtime."
+		    fi
+		  fi
+		else
+		  droppeddeps=yes
+		  echo
+		  $ECHO "*** Warning!  Library $i is needed by this library but I was not able to"
+		  echo "*** make it link in!  You will probably need to install it or some"
+		  echo "*** library that it depends on before this library will be fully"
+		  echo "*** functional.  Installing it before continuing would be even better."
+		fi
+		;;
+	      *)
+		func_append newdeplibs " $i"
+		;;
+	      esac
+	    done
+	  fi
+	  ;;
+	file_magic*)
+	  set dummy $deplibs_check_method; shift
+	  file_magic_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
+	  for a_deplib in $deplibs; do
+	    case $a_deplib in
+	    -l*)
+	      func_stripname -l '' "$a_deplib"
+	      name=$func_stripname_result
+	      if test yes = "$allow_libtool_libs_with_static_runtimes"; then
+		case " $predeps $postdeps " in
+		*" $a_deplib "*)
+		  func_append newdeplibs " $a_deplib"
+		  a_deplib=
+		  ;;
+		esac
+	      fi
+	      if test -n "$a_deplib"; then
+		libname=`eval "\\$ECHO \"$libname_spec\""`
+		if test -n "$file_magic_glob"; then
+		  libnameglob=`func_echo_all "$libname" | $SED -e $file_magic_glob`
+		else
+		  libnameglob=$libname
+		fi
+		test yes = "$want_nocaseglob" && nocaseglob=`shopt -p nocaseglob`
+		for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
+		  if test yes = "$want_nocaseglob"; then
+		    shopt -s nocaseglob
+		    potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null`
+		    $nocaseglob
+		  else
+		    potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null`
+		  fi
+		  for potent_lib in $potential_libs; do
+		      # Follow soft links.
+		      if ls -lLd "$potent_lib" 2>/dev/null |
+			 $GREP " -> " >/dev/null; then
+			continue
+		      fi
+		      # The statement above tries to avoid entering an
+		      # endless loop below, in case of cyclic links.
+		      # We might still enter an endless loop, since a link
+		      # loop can be closed while we follow links,
+		      # but so what?
+		      potlib=$potent_lib
+		      while test -h "$potlib" 2>/dev/null; do
+			potliblink=`ls -ld $potlib | $SED 's/.* -> //'`
+			case $potliblink in
+			[\\/]* | [A-Za-z]:[\\/]*) potlib=$potliblink;;
+			*) potlib=`$ECHO "$potlib" | $SED 's|[^/]*$||'`"$potliblink";;
+			esac
+		      done
+		      if eval $file_magic_cmd \"\$potlib\" 2>/dev/null |
+			 $SED -e 10q |
+			 $EGREP "$file_magic_regex" > /dev/null; then
+			func_append newdeplibs " $a_deplib"
+			a_deplib=
+			break 2
+		      fi
+		  done
+		done
+	      fi
+	      if test -n "$a_deplib"; then
+		droppeddeps=yes
+		echo
+		$ECHO "*** Warning: linker path does not have real file for library $a_deplib."
+		echo "*** I have the capability to make that library automatically link in when"
+		echo "*** you link to this library.  But I can only do this if you have a"
+		echo "*** shared version of the library, which you do not appear to have"
+		echo "*** because I did check the linker path looking for a file starting"
+		if test -z "$potlib"; then
+		  $ECHO "*** with $libname but no candidates were found. (...for file magic test)"
+		else
+		  $ECHO "*** with $libname and none of the candidates passed a file format test"
+		  $ECHO "*** using a file magic. Last file checked: $potlib"
+		fi
+	      fi
+	      ;;
+	    *)
+	      # Add a -L argument.
+	      func_append newdeplibs " $a_deplib"
+	      ;;
+	    esac
+	  done # Gone through all deplibs.
+	  ;;
+	match_pattern*)
+	  set dummy $deplibs_check_method; shift
+	  match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
+	  for a_deplib in $deplibs; do
+	    case $a_deplib in
+	    -l*)
+	      func_stripname -l '' "$a_deplib"
+	      name=$func_stripname_result
+	      if test yes = "$allow_libtool_libs_with_static_runtimes"; then
+		case " $predeps $postdeps " in
+		*" $a_deplib "*)
+		  func_append newdeplibs " $a_deplib"
+		  a_deplib=
+		  ;;
+		esac
+	      fi
+	      if test -n "$a_deplib"; then
+		libname=`eval "\\$ECHO \"$libname_spec\""`
+		for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
+		  potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
+		  for potent_lib in $potential_libs; do
+		    potlib=$potent_lib # see symlink-check above in file_magic test
+		    if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \
+		       $EGREP "$match_pattern_regex" > /dev/null; then
+		      func_append newdeplibs " $a_deplib"
+		      a_deplib=
+		      break 2
+		    fi
+		  done
+		done
+	      fi
+	      if test -n "$a_deplib"; then
+		droppeddeps=yes
+		echo
+		$ECHO "*** Warning: linker path does not have real file for library $a_deplib."
+		echo "*** I have the capability to make that library automatically link in when"
+		echo "*** you link to this library.  But I can only do this if you have a"
+		echo "*** shared version of the library, which you do not appear to have"
+		echo "*** because I did check the linker path looking for a file starting"
+		if test -z "$potlib"; then
+		  $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)"
+		else
+		  $ECHO "*** with $libname and none of the candidates passed a file format test"
+		  $ECHO "*** using a regex pattern. Last file checked: $potlib"
+		fi
+	      fi
+	      ;;
+	    *)
+	      # Add a -L argument.
+	      func_append newdeplibs " $a_deplib"
+	      ;;
+	    esac
+	  done # Gone through all deplibs.
+	  ;;
+	none | unknown | *)
+	  newdeplibs=
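+	  # Strip a trailing -lc and any -L/-R flags; whatever remains is an
+	  # inter-library dependency we cannot verify here, so it is dropped
+	  # with the warning below.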
+	  tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'`
+	  if test yes = "$allow_libtool_libs_with_static_runtimes"; then
+	    for i in $predeps $postdeps; do
+	      # can't use Xsed below, because $i might contain '/'
+	      tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s|$i||"`
+	    done
+	  fi
+	  case $tmp_deplibs in
+	  *[!\	\ ]*)
+	    echo
+	    if test none = "$deplibs_check_method"; then
+	      echo "*** Warning: inter-library dependencies are not supported on this platform."
+	    else
+	      echo "*** Warning: inter-library dependencies are not known to be supported."
+	    fi
+	    echo "*** All declared inter-library dependencies are being dropped."
+	    droppeddeps=yes
+	    ;;
+	  esac
+	  ;;
+	esac
+	versuffix=$versuffix_save
+	major=$major_save
+	release=$release_save
+	libname=$libname_save
+	name=$name_save
+
+	case $host in
+	*-*-rhapsody* | *-*-darwin1.[012])
+	  # On Rhapsody replace the C library with the System framework
+	  newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'`
+	  ;;
+	esac
+
+	if test yes = "$droppeddeps"; then
+	  if test yes = "$module"; then
+	    echo
+	    echo "*** Warning: libtool could not satisfy all declared inter-library"
+	    $ECHO "*** dependencies of module $libname.  Therefore, libtool will create"
+	    echo "*** a static module, that should work as long as the dlopening"
+	    echo "*** application is linked with the -dlopen flag."
+	    if test -z "$global_symbol_pipe"; then
+	      echo
+	      echo "*** However, this would only work if libtool was able to extract symbol"
+	      echo "*** lists from a program, using 'nm' or equivalent, but libtool could"
+	      echo "*** not find such a program.  So, this module is probably useless."
+	      echo "*** 'nm' from GNU binutils and a full rebuild may help."
+	    fi
+	    if test no = "$build_old_libs"; then
+	      oldlibs=$output_objdir/$libname.$libext
+	      build_libtool_libs=module
+	      build_old_libs=yes
+	    else
+	      build_libtool_libs=no
+	    fi
+	  else
+	    echo "*** The inter-library dependencies that have been dropped here will be"
+	    echo "*** automatically added whenever a program is linked with this library"
+	    echo "*** or is declared to -dlopen it."
+
+	    if test no = "$allow_undefined"; then
+	      echo
+	      echo "*** Since this library must not contain undefined symbols,"
+	      echo "*** because either the platform does not support them or"
+	      echo "*** it was explicitly requested with -no-undefined,"
+	      echo "*** libtool will only create a static version of it."
+	      if test no = "$build_old_libs"; then
+		oldlibs=$output_objdir/$libname.$libext
+		build_libtool_libs=module
+		build_old_libs=yes
+	      else
+		build_libtool_libs=no
+	      fi
+	    fi
+	  fi
+	fi
+	# Done checking deplibs!
+	deplibs=$newdeplibs
+      fi
+      # Time to change all our "foo.ltframework" stuff back to "-framework foo"
+      case $host in
+	*-*-darwin*)
+	  newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	  new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	  deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	  ;;
+      esac
+
+      # move library search paths that coincide with paths to not yet
+      # installed libraries to the beginning of the library search list
+      new_libs=
+      for path in $notinst_path; do
+	case " $new_libs " in
+	*" -L$path/$objdir "*) ;;
+	*)
+	  case " $deplibs " in
+	  *" -L$path/$objdir "*)
+	    func_append new_libs " -L$path/$objdir" ;;
+	  esac
+	  ;;
+	esac
+      done
+      for deplib in $deplibs; do
+	case $deplib in
+	-L*)
+	  case " $new_libs " in
+	  *" $deplib "*) ;;
+	  *) func_append new_libs " $deplib" ;;
+	  esac
+	  ;;
+	*) func_append new_libs " $deplib" ;;
+	esac
+      done
+      deplibs=$new_libs
+
+      # All the library-specific variables (install_libdir is set above).
+      library_names=
+      old_library=
+      dlname=
+
+      # Test again, we may have decided not to build it any more
+      if test yes = "$build_libtool_libs"; then
+	# Remove $wl instances when linking with ld.
+	# FIXME: should test the right _cmds variable.
+	case $archive_cmds in
+	  *\$LD\ *) wl= ;;
+        esac
+	if test yes = "$hardcode_into_libs"; then
+	  # Hardcode the library paths
+	  hardcode_libdirs=
+	  dep_rpath=
+	  rpath=$finalize_rpath
+	  test relink = "$opt_mode" || rpath=$compile_rpath$rpath
+	  for libdir in $rpath; do
+	    if test -n "$hardcode_libdir_flag_spec"; then
+	      if test -n "$hardcode_libdir_separator"; then
+		func_replace_sysroot "$libdir"
+		libdir=$func_replace_sysroot_result
+		if test -z "$hardcode_libdirs"; then
+		  hardcode_libdirs=$libdir
+		else
+		  # Just accumulate the unique libdirs.
+		  case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+		  *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+		    ;;
+		  *)
+		    func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
+		    ;;
+		  esac
+		fi
+	      else
+		eval flag=\"$hardcode_libdir_flag_spec\"
+		func_append dep_rpath " $flag"
+	      fi
+	    elif test -n "$runpath_var"; then
+	      case "$perm_rpath " in
+	      *" $libdir "*) ;;
+	      *) func_append perm_rpath " $libdir" ;;
+	      esac
+	    fi
+	  done
+	  # Substitute the hardcoded libdirs into the rpath.
+	  if test -n "$hardcode_libdir_separator" &&
+	     test -n "$hardcode_libdirs"; then
+	    libdir=$hardcode_libdirs
+	    eval "dep_rpath=\"$hardcode_libdir_flag_spec\""
+	  fi
+	  if test -n "$runpath_var" && test -n "$perm_rpath"; then
+	    # We should set the runpath_var.
+	    rpath=
+	    for dir in $perm_rpath; do
+	      func_append rpath "$dir:"
+	    done
+	    eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var"
+	  fi
+	  test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs"
+	fi
+
+	shlibpath=$finalize_shlibpath
+	test relink = "$opt_mode" || shlibpath=$compile_shlibpath$shlibpath
+	if test -n "$shlibpath"; then
+	  eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var"
+	fi
+
+	# Get the real and link names of the library.
+	eval shared_ext=\"$shrext_cmds\"
+	eval library_names=\"$library_names_spec\"
+	set dummy $library_names
+	shift
+	realname=$1
+	shift
+
+	if test -n "$soname_spec"; then
+	  eval soname=\"$soname_spec\"
+	else
+	  soname=$realname
+	fi
+	if test -z "$dlname"; then
+	  dlname=$soname
+	fi
+
+	lib=$output_objdir/$realname
+	linknames=
+	for link
+	do
+	  func_append linknames " $link"
+	done
+
+	# Use standard objects if they are pic
+	test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP`
+	test "X$libobjs" = "X " && libobjs=
+
+	delfiles=
+	if test -n "$export_symbols" && test -n "$include_expsyms"; then
+	  $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp"
+	  export_symbols=$output_objdir/$libname.uexp
+	  func_append delfiles " $export_symbols"
+	fi
+
+	orig_export_symbols=
+	case $host_os in
+	cygwin* | mingw* | cegcc*)
+	  if test -n "$export_symbols" && test -z "$export_symbols_regex"; then
+	    # exporting using user supplied symfile
+	    func_dll_def_p "$export_symbols" || {
+	      # and it's NOT already a .def file. Must figure out
+	      # which of the given symbols are data symbols and tag
+	      # them as such. So, trigger use of export_symbols_cmds.
+	      # export_symbols gets reassigned inside the "prepare
+	      # the list of exported symbols" if statement, so the
+	      # include_expsyms logic still works.
+	      orig_export_symbols=$export_symbols
+	      export_symbols=
+	      always_export_symbols=yes
+	    }
+	  fi
+	  ;;
+	esac
+
+	# Prepare the list of exported symbols
+	if test -z "$export_symbols"; then
+	  if test yes = "$always_export_symbols" || test -n "$export_symbols_regex"; then
+	    func_verbose "generating symbol list for '$libname.la'"
+	    export_symbols=$output_objdir/$libname.exp
+	    $opt_dry_run || $RM $export_symbols
+	    cmds=$export_symbols_cmds
+	    save_ifs=$IFS; IFS='~'
+	    for cmd1 in $cmds; do
+	      IFS=$save_ifs
+	      # Take the normal branch if the nm_file_list_spec branch
+	      # doesn't work or if tool conversion is not needed.
+	      case $nm_file_list_spec~$to_tool_file_cmd in
+		*~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*)
+		  try_normal_branch=yes
+		  eval cmd=\"$cmd1\"
+		  func_len " $cmd"
+		  len=$func_len_result
+		  ;;
+		*)
+		  try_normal_branch=no
+		  ;;
+	      esac
+	      if test yes = "$try_normal_branch" \
+		 && { test "$len" -lt "$max_cmd_len" \
+		      || test "$max_cmd_len" -le -1; }
+	      then
+		func_show_eval "$cmd" 'exit $?'
+		skipped_export=false
+	      elif test -n "$nm_file_list_spec"; then
+		func_basename "$output"
+		output_la=$func_basename_result
+		save_libobjs=$libobjs
+		save_output=$output
+		output=$output_objdir/$output_la.nm
+		func_to_tool_file "$output"
+		libobjs=$nm_file_list_spec$func_to_tool_file_result
+		func_append delfiles " $output"
+		func_verbose "creating $NM input file list: $output"
+		for obj in $save_libobjs; do
+		  func_to_tool_file "$obj"
+		  $ECHO "$func_to_tool_file_result"
+		done > "$output"
+		eval cmd=\"$cmd1\"
+		func_show_eval "$cmd" 'exit $?'
+		output=$save_output
+		libobjs=$save_libobjs
+		skipped_export=false
+	      else
+		# The command line is too long to execute in one step.
+		func_verbose "using reloadable object file for export list..."
+		skipped_export=:
+		# Break out early, otherwise skipped_export may be
+		# set to false by a later but shorter cmd.
+		break
+	      fi
+	    done
+	    IFS=$save_ifs
+	    if test -n "$export_symbols_regex" && test : != "$skipped_export"; then
+	      func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
+	      func_show_eval '$MV "${export_symbols}T" "$export_symbols"'
+	    fi
+	  fi
+	fi
+
+	if test -n "$export_symbols" && test -n "$include_expsyms"; then
+	  tmp_export_symbols=$export_symbols
+	  test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols
+	  $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"'
+	fi
+
+	if test : != "$skipped_export" && test -n "$orig_export_symbols"; then
+	  # The given export_symbols file has to be filtered, so filter it.
+	  func_verbose "filter symbol list for '$libname.la' to tag DATA exports"
+	  # FIXME: $output_objdir/$libname.filter potentially contains lots of
+	  # 's' commands, which not all seds can handle. GNU sed should be fine
+	  # though. Also, the filter scales superlinearly with the number of
+	  # global variables. join(1) would be nice here, but unfortunately
+	  # isn't a blessed tool.
+	  $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter
+	  func_append delfiles " $export_symbols $output_objdir/$libname.filter"
+	  export_symbols=$output_objdir/$libname.def
+	  $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols
+	fi
+
+	tmp_deplibs=
+	for test_deplib in $deplibs; do
+	  case " $convenience " in
+	  *" $test_deplib "*) ;;
+	  *)
+	    func_append tmp_deplibs " $test_deplib"
+	    ;;
+	  esac
+	done
+	deplibs=$tmp_deplibs
+
+	if test -n "$convenience"; then
+	  if test -n "$whole_archive_flag_spec" &&
+	    test yes = "$compiler_needs_object" &&
+	    test -z "$libobjs"; then
+	    # extract the archives, so we have objects to list.
+	    # TODO: could optimize this to just extract one archive.
+	    whole_archive_flag_spec=
+	  fi
+	  if test -n "$whole_archive_flag_spec"; then
+	    save_libobjs=$libobjs
+	    eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
+	    test "X$libobjs" = "X " && libobjs=
+	  else
+	    gentop=$output_objdir/${outputname}x
+	    func_append generated " $gentop"
+
+	    func_extract_archives $gentop $convenience
+	    func_append libobjs " $func_extract_archives_result"
+	    test "X$libobjs" = "X " && libobjs=
+	  fi
+	fi
+
+	if test yes = "$thread_safe" && test -n "$thread_safe_flag_spec"; then
+	  eval flag=\"$thread_safe_flag_spec\"
+	  func_append linker_flags " $flag"
+	fi
+
+	# Make a backup of the uninstalled library when relinking
+	if test relink = "$opt_mode"; then
+	  $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $?
+	fi
+
+	# Do each of the archive commands.
+	if test yes = "$module" && test -n "$module_cmds"; then
+	  if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
+	    eval test_cmds=\"$module_expsym_cmds\"
+	    cmds=$module_expsym_cmds
+	  else
+	    eval test_cmds=\"$module_cmds\"
+	    cmds=$module_cmds
+	  fi
+	else
+	  if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
+	    eval test_cmds=\"$archive_expsym_cmds\"
+	    cmds=$archive_expsym_cmds
+	  else
+	    eval test_cmds=\"$archive_cmds\"
+	    cmds=$archive_cmds
+	  fi
+	fi
+
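+	# If the full command fits within $max_cmd_len (a value of -1 or less
+	# means no limit), skip the piecewise-linking fallback below.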
+	if test : != "$skipped_export" &&
+	   func_len " $test_cmds" &&
+	   len=$func_len_result &&
+	   test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then
+	  :
+	else
+	  # The command line is too long to link in one step, link piecewise
+	  # or, if using GNU ld and skipped_export is not :, use a linker
+	  # script.
+
+	  # Save the value of $output and $libobjs because we want to
+	  # use them later.  If we have whole_archive_flag_spec, we
+	  # want to use save_libobjs as it was before
+	  # whole_archive_flag_spec was expanded, because we can't
+	  # assume the linker understands whole_archive_flag_spec.
+	  # This may have to be revisited, in case too many
+	  # convenience libraries get linked in and end up exceeding
+	  # the spec.
+	  if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then
+	    save_libobjs=$libobjs
+	  fi
+	  save_output=$output
+	  func_basename "$output"
+	  output_la=$func_basename_result
+
+	  # Clear the reloadable object creation command queue and
+	  # initialize k to one.
+	  test_cmds=
+	  concat_cmds=
+	  objlist=
+	  last_robj=
+	  k=1
+
+	  if test -n "$save_libobjs" && test : != "$skipped_export" && test yes = "$with_gnu_ld"; then
+	    output=$output_objdir/$output_la.lnkscript
+	    func_verbose "creating GNU ld script: $output"
+	    echo 'INPUT (' > $output
+	    for obj in $save_libobjs
+	    do
+	      func_to_tool_file "$obj"
+	      $ECHO "$func_to_tool_file_result" >> $output
+	    done
+	    echo ')' >> $output
+	    func_append delfiles " $output"
+	    func_to_tool_file "$output"
+	    output=$func_to_tool_file_result
+	  elif test -n "$save_libobjs" && test : != "$skipped_export" && test -n "$file_list_spec"; then
+	    output=$output_objdir/$output_la.lnk
+	    func_verbose "creating linker input file list: $output"
+	    : > $output
+	    set x $save_libobjs
+	    shift
+	    firstobj=
+	    if test yes = "$compiler_needs_object"; then
+	      firstobj="$1 "
+	      shift
+	    fi
+	    for obj
+	    do
+	      func_to_tool_file "$obj"
+	      $ECHO "$func_to_tool_file_result" >> $output
+	    done
+	    func_append delfiles " $output"
+	    func_to_tool_file "$output"
+	    output=$firstobj\"$file_list_spec$func_to_tool_file_result\"
+	  else
+	    if test -n "$save_libobjs"; then
+	      func_verbose "creating reloadable object files..."
+	      output=$output_objdir/$output_la-$k.$objext
+	      eval test_cmds=\"$reload_cmds\"
+	      func_len " $test_cmds"
+	      len0=$func_len_result
+	      len=$len0
+
+	      # Loop over the list of objects to be linked.
+	      for obj in $save_libobjs
+	      do
+		func_len " $obj"
+		func_arith $len + $func_len_result
+		len=$func_arith_result
+		if test -z "$objlist" ||
+		   test "$len" -lt "$max_cmd_len"; then
+		  func_append objlist " $obj"
+		else
+		  # The command $test_cmds is almost too long, add a
+		  # command to the queue.
+		  if test 1 -eq "$k"; then
+		    # The first file doesn't have a previous command to add.
+		    reload_objs=$objlist
+		    eval concat_cmds=\"$reload_cmds\"
+		  else
+		    # All subsequent reloadable object files will link in
+		    # the last one created.
+		    reload_objs="$objlist $last_robj"
+		    eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\"
+		  fi
+		  last_robj=$output_objdir/$output_la-$k.$objext
+		  func_arith $k + 1
+		  k=$func_arith_result
+		  output=$output_objdir/$output_la-$k.$objext
+		  objlist=" $obj"
+		  func_len " $last_robj"
+		  func_arith $len0 + $func_len_result
+		  len=$func_arith_result
+		fi
+	      done
+	      # Handle the remaining objects by creating one last
+	      # reloadable object file.  All subsequent reloadable object
+	      # files will link in the last one created.
+	      test -z "$concat_cmds" || concat_cmds=$concat_cmds~
+	      reload_objs="$objlist $last_robj"
+	      eval concat_cmds=\"\$concat_cmds$reload_cmds\"
+	      if test -n "$last_robj"; then
+	        eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\"
+	      fi
+	      func_append delfiles " $output"
+
+	    else
+	      output=
+	    fi
+
+	    ${skipped_export-false} && {
+	      func_verbose "generating symbol list for '$libname.la'"
+	      export_symbols=$output_objdir/$libname.exp
+	      $opt_dry_run || $RM $export_symbols
+	      libobjs=$output
+	      # Append the command to create the export file.
+	      test -z "$concat_cmds" || concat_cmds=$concat_cmds~
+	      eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\"
+	      if test -n "$last_robj"; then
+		eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\"
+	      fi
+	    }
+
+	    test -n "$save_libobjs" &&
+	      func_verbose "creating a temporary reloadable object file: $output"
+
+	    # Loop through the commands generated above and execute them.
+	    save_ifs=$IFS; IFS='~'
+	    for cmd in $concat_cmds; do
+	      IFS=$save_ifs
+	      $opt_quiet || {
+		  func_quote_for_expand "$cmd"
+		  eval "func_echo $func_quote_for_expand_result"
+	      }
+	      $opt_dry_run || eval "$cmd" || {
+		lt_exit=$?
+
+		# Restore the uninstalled library and exit
+		if test relink = "$opt_mode"; then
+		  ( cd "$output_objdir" && \
+		    $RM "${realname}T" && \
+		    $MV "${realname}U" "$realname" )
+		fi
+
+		exit $lt_exit
+	      }
+	    done
+	    IFS=$save_ifs
+
+	    if test -n "$export_symbols_regex" && ${skipped_export-false}; then
+	      func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
+	      func_show_eval '$MV "${export_symbols}T" "$export_symbols"'
+	    fi
+	  fi
+
+          ${skipped_export-false} && {
+	    if test -n "$export_symbols" && test -n "$include_expsyms"; then
+	      tmp_export_symbols=$export_symbols
+	      test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols
+	      $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"'
+	    fi
+
+	    if test -n "$orig_export_symbols"; then
+	      # The given exports_symbols file has to be filtered, so filter it.
+	      func_verbose "filter symbol list for '$libname.la' to tag DATA exports"
+	      # FIXME: $output_objdir/$libname.filter potentially contains lots of
+	      # 's' commands, which not all seds can handle. GNU sed should be fine
+	      # though. Also, the filter scales superlinearly with the number of
+	      # global variables. join(1) would be nice here, but unfortunately
+	      # isn't a blessed tool.
+	      $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter
+	      func_append delfiles " $export_symbols $output_objdir/$libname.filter"
+	      export_symbols=$output_objdir/$libname.def
+	      $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols
+	    fi
+	  }
+
+	  libobjs=$output
+	  # Restore the value of output.
+	  output=$save_output
+
+	  if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then
+	    eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
+	    test "X$libobjs" = "X " && libobjs=
+	  fi
+	  # Expand the library linking commands again to reset the
+	  # value of $libobjs for piecewise linking.
+
+	  # Do each of the archive commands.
+	  if test yes = "$module" && test -n "$module_cmds"; then
+	    if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
+	      cmds=$module_expsym_cmds
+	    else
+	      cmds=$module_cmds
+	    fi
+	  else
+	    if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
+	      cmds=$archive_expsym_cmds
+	    else
+	      cmds=$archive_cmds
+	    fi
+	  fi
+	fi
+
+	if test -n "$delfiles"; then
+	  # Append the command to remove temporary files to $cmds.
+	  eval cmds=\"\$cmds~\$RM $delfiles\"
+	fi
+
+	# Add any objects from preloaded convenience libraries
+	if test -n "$dlprefiles"; then
+	  gentop=$output_objdir/${outputname}x
+	  func_append generated " $gentop"
+
+	  func_extract_archives $gentop $dlprefiles
+	  func_append libobjs " $func_extract_archives_result"
+	  test "X$libobjs" = "X " && libobjs=
+	fi
+
+	save_ifs=$IFS; IFS='~'
+	for cmd in $cmds; do
+	  IFS=$sp$nl
+	  eval cmd=\"$cmd\"
+	  IFS=$save_ifs
+	  $opt_quiet || {
+	    func_quote_for_expand "$cmd"
+	    eval "func_echo $func_quote_for_expand_result"
+	  }
+	  $opt_dry_run || eval "$cmd" || {
+	    lt_exit=$?
+
+	    # Restore the uninstalled library and exit
+	    if test relink = "$opt_mode"; then
+	      ( cd "$output_objdir" && \
+	        $RM "${realname}T" && \
+		$MV "${realname}U" "$realname" )
+	    fi
+
+	    exit $lt_exit
+	  }
+	done
+	IFS=$save_ifs
+
+	# Restore the uninstalled library and exit
+	if test relink = "$opt_mode"; then
+	  $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $?
+
+	  if test -n "$convenience"; then
+	    if test -z "$whole_archive_flag_spec"; then
+	      func_show_eval '${RM}r "$gentop"'
+	    fi
+	  fi
+
+	  exit $EXIT_SUCCESS
+	fi
+
+	# Create links to the real library.
+	for linkname in $linknames; do
+	  if test "$realname" != "$linkname"; then
+	    func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?'
+	  fi
+	done
+
+	# If -module or -export-dynamic was specified, set the dlname.
+	if test yes = "$module" || test yes = "$export_dynamic"; then
+	  # On all known operating systems, these are identical.
+	  dlname=$soname
+	fi
+      fi
+      ;;
+
+    obj)
+      if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then
+	func_warning "'-dlopen' is ignored for objects"
+      fi
+
+      case " $deplibs" in
+      *\ -l* | *\ -L*)
+	func_warning "'-l' and '-L' are ignored for objects" ;;
+      esac
+
+      test -n "$rpath" && \
+	func_warning "'-rpath' is ignored for objects"
+
+      test -n "$xrpath" && \
+	func_warning "'-R' is ignored for objects"
+
+      test -n "$vinfo" && \
+	func_warning "'-version-info' is ignored for objects"
+
+      test -n "$release" && \
+	func_warning "'-release' is ignored for objects"
+
+      case $output in
+      *.lo)
+	test -n "$objs$old_deplibs" && \
+	  func_fatal_error "cannot build library object '$output' from non-libtool objects"
+
+	libobj=$output
+	func_lo2o "$libobj"
+	obj=$func_lo2o_result
+	;;
+      *)
+	libobj=
+	obj=$output
+	;;
+      esac
+
+      # Delete the old objects.
+      $opt_dry_run || $RM $obj $libobj
+
+      # Objects from convenience libraries.  This assumes
+      # single-version convenience libraries.  Whenever we create
+      # different ones for PIC/non-PIC, this we'll have to duplicate
+      # the extraction.
+      reload_conv_objs=
+      gentop=
+      # if reload_cmds runs $LD directly, get rid of -Wl from
+      # whole_archive_flag_spec and hope we can get by with turning comma
+      # into space.
+      case $reload_cmds in
+        *\$LD[\ \$]*) wl= ;;
+      esac
+      if test -n "$convenience"; then
+	if test -n "$whole_archive_flag_spec"; then
+	  eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\"
+	  test -n "$wl" || tmp_whole_archive_flags=`$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'`
+	  reload_conv_objs=$reload_objs\ $tmp_whole_archive_flags
+	else
+	  gentop=$output_objdir/${obj}x
+	  func_append generated " $gentop"
+
+	  func_extract_archives $gentop $convenience
+	  reload_conv_objs="$reload_objs $func_extract_archives_result"
+	fi
+      fi
+
+      # If we're not building shared, we need to use non_pic_objs
+      test yes = "$build_libtool_libs" || libobjs=$non_pic_objects
+
+      # Create the old-style object.
+      reload_objs=$objs$old_deplibs' '`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; /\.lib$/d; $lo2o" | $NL2SP`' '$reload_conv_objs
+
+      output=$obj
+      func_execute_cmds "$reload_cmds" 'exit $?'
+
+      # Exit if we aren't doing a library object file.
+      if test -z "$libobj"; then
+	if test -n "$gentop"; then
+	  func_show_eval '${RM}r "$gentop"'
+	fi
+
+	exit $EXIT_SUCCESS
+      fi
+
+      test yes = "$build_libtool_libs" || {
+	if test -n "$gentop"; then
+	  func_show_eval '${RM}r "$gentop"'
+	fi
+
+	# Create an invalid libtool object if no PIC, so that we don't
+	# accidentally link it into a program.
+	# $show "echo timestamp > $libobj"
+	# $opt_dry_run || eval "echo timestamp > $libobj" || exit $?
+	exit $EXIT_SUCCESS
+      }
+
+      if test -n "$pic_flag" || test default != "$pic_mode"; then
+	# Only do commands if we really have different PIC objects.
+	reload_objs="$libobjs $reload_conv_objs"
+	output=$libobj
+	func_execute_cmds "$reload_cmds" 'exit $?'
+      fi
+
+      if test -n "$gentop"; then
+	func_show_eval '${RM}r "$gentop"'
+      fi
+
+      exit $EXIT_SUCCESS
+      ;;
+
+    prog)
+      case $host in
+	*cygwin*) func_stripname '' '.exe' "$output"
+	          output=$func_stripname_result.exe;;
+      esac
+      test -n "$vinfo" && \
+	func_warning "'-version-info' is ignored for programs"
+
+      test -n "$release" && \
+	func_warning "'-release' is ignored for programs"
+
+      $preload \
+	&& test unknown,unknown,unknown = "$dlopen_support,$dlopen_self,$dlopen_self_static" \
+	&& func_warning "'LT_INIT([dlopen])' not used. Assuming no dlopen support."
+
+      case $host in
+      *-*-rhapsody* | *-*-darwin1.[012])
+	# On Rhapsody replace the C library is the System framework
+	compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'`
+	finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'`
+	;;
+      esac
+
+      case $host in
+      *-*-darwin*)
+	# Don't allow lazy linking, it breaks C++ global constructors
+	# But is supposedly fixed on 10.4 or later (yay!).
+	if test CXX = "$tagname"; then
+	  case ${MACOSX_DEPLOYMENT_TARGET-10.0} in
+	    10.[0123])
+	      func_append compile_command " $wl-bind_at_load"
+	      func_append finalize_command " $wl-bind_at_load"
+	    ;;
+	  esac
+	fi
+	# Time to change all our "foo.ltframework" stuff back to "-framework foo"
+	compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	;;
+      esac
+
+
+      # move library search paths that coincide with paths to not yet
+      # installed libraries to the beginning of the library search list
+      new_libs=
+      for path in $notinst_path; do
+	case " $new_libs " in
+	*" -L$path/$objdir "*) ;;
+	*)
+	  case " $compile_deplibs " in
+	  *" -L$path/$objdir "*)
+	    func_append new_libs " -L$path/$objdir" ;;
+	  esac
+	  ;;
+	esac
+      done
+      for deplib in $compile_deplibs; do
+	case $deplib in
+	-L*)
+	  case " $new_libs " in
+	  *" $deplib "*) ;;
+	  *) func_append new_libs " $deplib" ;;
+	  esac
+	  ;;
+	*) func_append new_libs " $deplib" ;;
+	esac
+      done
+      compile_deplibs=$new_libs
+
+
+      func_append compile_command " $compile_deplibs"
+      func_append finalize_command " $finalize_deplibs"
+
+      if test -n "$rpath$xrpath"; then
+	# If the user specified any rpath flags, then add them.
+	for libdir in $rpath $xrpath; do
+	  # This is the magic to use -rpath.
+	  case "$finalize_rpath " in
+	  *" $libdir "*) ;;
+	  *) func_append finalize_rpath " $libdir" ;;
+	  esac
+	done
+      fi
+
+      # Now hardcode the library paths
+      rpath=
+      hardcode_libdirs=
+      for libdir in $compile_rpath $finalize_rpath; do
+	if test -n "$hardcode_libdir_flag_spec"; then
+	  if test -n "$hardcode_libdir_separator"; then
+	    if test -z "$hardcode_libdirs"; then
+	      hardcode_libdirs=$libdir
+	    else
+	      # Just accumulate the unique libdirs.
+	      case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+	      *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+		;;
+	      *)
+		func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
+		;;
+	      esac
+	    fi
+	  else
+	    eval flag=\"$hardcode_libdir_flag_spec\"
+	    func_append rpath " $flag"
+	  fi
+	elif test -n "$runpath_var"; then
+	  case "$perm_rpath " in
+	  *" $libdir "*) ;;
+	  *) func_append perm_rpath " $libdir" ;;
+	  esac
+	fi
+	case $host in
+	*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*)
+	  testbindir=`$ECHO "$libdir" | $SED -e 's*/lib$*/bin*'`
+	  case :$dllsearchpath: in
+	  *":$libdir:"*) ;;
+	  ::) dllsearchpath=$libdir;;
+	  *) func_append dllsearchpath ":$libdir";;
+	  esac
+	  case :$dllsearchpath: in
+	  *":$testbindir:"*) ;;
+	  ::) dllsearchpath=$testbindir;;
+	  *) func_append dllsearchpath ":$testbindir";;
+	  esac
+	  ;;
+	esac
+      done
+      # Substitute the hardcoded libdirs into the rpath.
+      if test -n "$hardcode_libdir_separator" &&
+	 test -n "$hardcode_libdirs"; then
+	libdir=$hardcode_libdirs
+	eval rpath=\" $hardcode_libdir_flag_spec\"
+      fi
+      compile_rpath=$rpath
+
+      rpath=
+      hardcode_libdirs=
+      for libdir in $finalize_rpath; do
+	if test -n "$hardcode_libdir_flag_spec"; then
+	  if test -n "$hardcode_libdir_separator"; then
+	    if test -z "$hardcode_libdirs"; then
+	      hardcode_libdirs=$libdir
+	    else
+	      # Just accumulate the unique libdirs.
+	      case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+	      *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+		;;
+	      *)
+		func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
+		;;
+	      esac
+	    fi
+	  else
+	    eval flag=\"$hardcode_libdir_flag_spec\"
+	    func_append rpath " $flag"
+	  fi
+	elif test -n "$runpath_var"; then
+	  case "$finalize_perm_rpath " in
+	  *" $libdir "*) ;;
+	  *) func_append finalize_perm_rpath " $libdir" ;;
+	  esac
+	fi
+      done
+      # Substitute the hardcoded libdirs into the rpath.
+      if test -n "$hardcode_libdir_separator" &&
+	 test -n "$hardcode_libdirs"; then
+	libdir=$hardcode_libdirs
+	eval rpath=\" $hardcode_libdir_flag_spec\"
+      fi
+      finalize_rpath=$rpath
+
+      if test -n "$libobjs" && test yes = "$build_old_libs"; then
+	# Transform all the library objects into standard objects.
+	compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP`
+	finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP`
+      fi
+
+      func_generate_dlsyms "$outputname" "@PROGRAM@" false
+
+      # template prelinking step
+      if test -n "$prelink_cmds"; then
+	func_execute_cmds "$prelink_cmds" 'exit $?'
+      fi
+
+      wrappers_required=:
+      case $host in
+      *cegcc* | *mingw32ce*)
+        # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway.
+        wrappers_required=false
+        ;;
+      *cygwin* | *mingw* )
+        test yes = "$build_libtool_libs" || wrappers_required=false
+        ;;
+      *)
+        if test no = "$need_relink" || test yes != "$build_libtool_libs"; then
+          wrappers_required=false
+        fi
+        ;;
+      esac
+      $wrappers_required || {
+	# Replace the output file specification.
+	compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'`
+	link_command=$compile_command$compile_rpath
+
+	# We have no uninstalled library dependencies, so finalize right now.
+	exit_status=0
+	func_show_eval "$link_command" 'exit_status=$?'
+
+	if test -n "$postlink_cmds"; then
+	  func_to_tool_file "$output"
+	  postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'`
+	  func_execute_cmds "$postlink_cmds" 'exit $?'
+	fi
+
+	# Delete the generated files.
+	if test -f "$output_objdir/${outputname}S.$objext"; then
+	  func_show_eval '$RM "$output_objdir/${outputname}S.$objext"'
+	fi
+
+	exit $exit_status
+      }
+
+      if test -n "$compile_shlibpath$finalize_shlibpath"; then
+	compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command"
+      fi
+      if test -n "$finalize_shlibpath"; then
+	finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command"
+      fi
+
+      compile_var=
+      finalize_var=
+      if test -n "$runpath_var"; then
+	if test -n "$perm_rpath"; then
+	  # We should set the runpath_var.
+	  rpath=
+	  for dir in $perm_rpath; do
+	    func_append rpath "$dir:"
+	  done
+	  compile_var="$runpath_var=\"$rpath\$$runpath_var\" "
+	fi
+	if test -n "$finalize_perm_rpath"; then
+	  # We should set the runpath_var.
+	  rpath=
+	  for dir in $finalize_perm_rpath; do
+	    func_append rpath "$dir:"
+	  done
+	  finalize_var="$runpath_var=\"$rpath\$$runpath_var\" "
+	fi
+      fi
+
+      if test yes = "$no_install"; then
+	# We don't need to create a wrapper script.
+	link_command=$compile_var$compile_command$compile_rpath
+	# Replace the output file specification.
+	link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'`
+	# Delete the old output file.
+	$opt_dry_run || $RM $output
+	# Link the executable and exit
+	func_show_eval "$link_command" 'exit $?'
+
+	if test -n "$postlink_cmds"; then
+	  func_to_tool_file "$output"
+	  postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'`
+	  func_execute_cmds "$postlink_cmds" 'exit $?'
+	fi
+
+	exit $EXIT_SUCCESS
+      fi
+
+      case $hardcode_action,$fast_install in
+        relink,*)
+	  # Fast installation is not supported
+	  link_command=$compile_var$compile_command$compile_rpath
+	  relink_command=$finalize_var$finalize_command$finalize_rpath
+
+	  func_warning "this platform does not like uninstalled shared libraries"
+	  func_warning "'$output' will be relinked during installation"
+	  ;;
+        *,yes)
+	  link_command=$finalize_var$compile_command$finalize_rpath
+	  relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'`
+          ;;
+	*,no)
+	  link_command=$compile_var$compile_command$compile_rpath
+	  relink_command=$finalize_var$finalize_command$finalize_rpath
+          ;;
+	*,needless)
+	  link_command=$finalize_var$compile_command$finalize_rpath
+	  relink_command=
+          ;;
+      esac
+
+      # Replace the output file specification.
+      link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
+
+      # Delete the old output files.
+      $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname
+
+      func_show_eval "$link_command" 'exit $?'
+
+      if test -n "$postlink_cmds"; then
+	func_to_tool_file "$output_objdir/$outputname"
+	postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'`
+	func_execute_cmds "$postlink_cmds" 'exit $?'
+      fi
+
+      # Now create the wrapper script.
+      func_verbose "creating $output"
+
+      # Quote the relink command for shipping.
+      if test -n "$relink_command"; then
+	# Preserve any variables that may affect compiler behavior
+	for var in $variables_saved_for_relink; do
+	  if eval test -z \"\${$var+set}\"; then
+	    relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command"
+	  elif eval var_value=\$$var; test -z "$var_value"; then
+	    relink_command="$var=; export $var; $relink_command"
+	  else
+	    func_quote_for_eval "$var_value"
+	    relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command"
+	  fi
+	done
+	relink_command="(cd `pwd`; $relink_command)"
+	relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"`
+      fi
+
+      # Only actually do things if not in dry run mode.
+      $opt_dry_run || {
+	# win32 will think the script is a binary if it has
+	# a .exe suffix, so we strip it off here.
+	case $output in
+	  *.exe) func_stripname '' '.exe' "$output"
+	         output=$func_stripname_result ;;
+	esac
+	# test for cygwin because mv fails w/o .exe extensions
+	case $host in
+	  *cygwin*)
+	    exeext=.exe
+	    func_stripname '' '.exe' "$outputname"
+	    outputname=$func_stripname_result ;;
+	  *) exeext= ;;
+	esac
+	case $host in
+	  *cygwin* | *mingw* )
+	    func_dirname_and_basename "$output" "" "."
+	    output_name=$func_basename_result
+	    output_path=$func_dirname_result
+	    cwrappersource=$output_path/$objdir/lt-$output_name.c
+	    cwrapper=$output_path/$output_name.exe
+	    $RM $cwrappersource $cwrapper
+	    trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15
+
+	    func_emit_cwrapperexe_src > $cwrappersource
+
+	    # The wrapper executable is built using the $host compiler,
+	    # because it contains $host paths and files. If cross-
+	    # compiling, it, like the target executable, must be
+	    # executed on the $host or under an emulation environment.
+	    $opt_dry_run || {
+	      $LTCC $LTCFLAGS -o $cwrapper $cwrappersource
+	      $STRIP $cwrapper
+	    }
+
+	    # Now, create the wrapper script for func_source use:
+	    func_ltwrapper_scriptname $cwrapper
+	    $RM $func_ltwrapper_scriptname_result
+	    trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15
+	    $opt_dry_run || {
+	      # note: this script will not be executed, so do not chmod.
+	      if test "x$build" = "x$host"; then
+		$cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result
+	      else
+		func_emit_wrapper no > $func_ltwrapper_scriptname_result
+	      fi
+	    }
+	  ;;
+	  * )
+	    $RM $output
+	    trap "$RM $output; exit $EXIT_FAILURE" 1 2 15
+
+	    func_emit_wrapper no > $output
+	    chmod +x $output
+	  ;;
+	esac
+      }
+      exit $EXIT_SUCCESS
+      ;;
+    esac
+
+    # See if we need to build an old-fashioned archive.
+    for oldlib in $oldlibs; do
+
+      case $build_libtool_libs in
+        convenience)
+	  oldobjs="$libobjs_save $symfileobj"
+	  addlibs=$convenience
+	  build_libtool_libs=no
+	  ;;
+	module)
+	  oldobjs=$libobjs_save
+	  addlibs=$old_convenience
+	  build_libtool_libs=no
+          ;;
+	*)
+	  oldobjs="$old_deplibs $non_pic_objects"
+	  $preload && test -f "$symfileobj" \
+	    && func_append oldobjs " $symfileobj"
+	  addlibs=$old_convenience
+	  ;;
+      esac
+
+      if test -n "$addlibs"; then
+	gentop=$output_objdir/${outputname}x
+	func_append generated " $gentop"
+
+	func_extract_archives $gentop $addlibs
+	func_append oldobjs " $func_extract_archives_result"
+      fi
+
+      # Do each command in the archive commands.
+      if test -n "$old_archive_from_new_cmds" && test yes = "$build_libtool_libs"; then
+	cmds=$old_archive_from_new_cmds
+      else
+
+	# Add any objects from preloaded convenience libraries
+	if test -n "$dlprefiles"; then
+	  gentop=$output_objdir/${outputname}x
+	  func_append generated " $gentop"
+
+	  func_extract_archives $gentop $dlprefiles
+	  func_append oldobjs " $func_extract_archives_result"
+	fi
+
+	# POSIX demands no paths to be encoded in archives.  We have
+	# to avoid creating archives with duplicate basenames if we
+	# might have to extract them afterwards, e.g., when creating a
+	# static archive out of a convenience library, or when linking
+	# the entirety of a libtool archive into another (currently
+	# not supported by libtool).
+	if (for obj in $oldobjs
+	    do
+	      func_basename "$obj"
+	      $ECHO "$func_basename_result"
+	    done | sort | sort -uc >/dev/null 2>&1); then
+	  :
+	else
+	  echo "copying selected object files to avoid basename conflicts..."
+	  gentop=$output_objdir/${outputname}x
+	  func_append generated " $gentop"
+	  func_mkdir_p "$gentop"
+	  save_oldobjs=$oldobjs
+	  oldobjs=
+	  counter=1
+	  for obj in $save_oldobjs
+	  do
+	    func_basename "$obj"
+	    objbase=$func_basename_result
+	    case " $oldobjs " in
+	    " ") oldobjs=$obj ;;
+	    *[\ /]"$objbase "*)
+	      while :; do
+		# Make sure we don't pick an alternate name that also
+		# overlaps.
+		newobj=lt$counter-$objbase
+		func_arith $counter + 1
+		counter=$func_arith_result
+		case " $oldobjs " in
+		*[\ /]"$newobj "*) ;;
+		*) if test ! -f "$gentop/$newobj"; then break; fi ;;
+		esac
+	      done
+	      func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj"
+	      func_append oldobjs " $gentop/$newobj"
+	      ;;
+	    *) func_append oldobjs " $obj" ;;
+	    esac
+	  done
+	fi
+	func_to_tool_file "$oldlib" func_convert_file_msys_to_w32
+	tool_oldlib=$func_to_tool_file_result
+	eval cmds=\"$old_archive_cmds\"
+
+	func_len " $cmds"
+	len=$func_len_result
+	if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then
+	  cmds=$old_archive_cmds
+	elif test -n "$archiver_list_spec"; then
+	  func_verbose "using command file archive linking..."
+	  for obj in $oldobjs
+	  do
+	    func_to_tool_file "$obj"
+	    $ECHO "$func_to_tool_file_result"
+	  done > $output_objdir/$libname.libcmd
+	  func_to_tool_file "$output_objdir/$libname.libcmd"
+	  oldobjs=" $archiver_list_spec$func_to_tool_file_result"
+	  cmds=$old_archive_cmds
+	else
+	  # the command line is too long to link in one step, link in parts
+	  func_verbose "using piecewise archive linking..."
+	  save_RANLIB=$RANLIB
+	  RANLIB=:
+	  objlist=
+	  concat_cmds=
+	  save_oldobjs=$oldobjs
+	  oldobjs=
+	  # Is there a better way of finding the last object in the list?
+	  for obj in $save_oldobjs
+	  do
+	    last_oldobj=$obj
+	  done
+	  eval test_cmds=\"$old_archive_cmds\"
+	  func_len " $test_cmds"
+	  len0=$func_len_result
+	  len=$len0
+	  for obj in $save_oldobjs
+	  do
+	    func_len " $obj"
+	    func_arith $len + $func_len_result
+	    len=$func_arith_result
+	    func_append objlist " $obj"
+	    if test "$len" -lt "$max_cmd_len"; then
+	      :
+	    else
+	      # the above command should be used before it gets too long
+	      oldobjs=$objlist
+	      if test "$obj" = "$last_oldobj"; then
+		RANLIB=$save_RANLIB
+	      fi
+	      test -z "$concat_cmds" || concat_cmds=$concat_cmds~
+	      eval concat_cmds=\"\$concat_cmds$old_archive_cmds\"
+	      objlist=
+	      len=$len0
+	    fi
+	  done
+	  RANLIB=$save_RANLIB
+	  oldobjs=$objlist
+	  if test -z "$oldobjs"; then
+	    eval cmds=\"\$concat_cmds\"
+	  else
+	    eval cmds=\"\$concat_cmds~\$old_archive_cmds\"
+	  fi
+	fi
+      fi
+      func_execute_cmds "$cmds" 'exit $?'
+    done
+
+    test -n "$generated" && \
+      func_show_eval "${RM}r$generated"
+
+    # Now create the libtool archive.
+    case $output in
+    *.la)
+      old_library=
+      test yes = "$build_old_libs" && old_library=$libname.$libext
+      func_verbose "creating $output"
+
+      # Preserve any variables that may affect compiler behavior
+      for var in $variables_saved_for_relink; do
+	if eval test -z \"\${$var+set}\"; then
+	  relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command"
+	elif eval var_value=\$$var; test -z "$var_value"; then
+	  relink_command="$var=; export $var; $relink_command"
+	else
+	  func_quote_for_eval "$var_value"
+	  relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command"
+	fi
+      done
+      # Quote the link command for shipping.
+      relink_command="(cd `pwd`; $SHELL \"$progpath\" $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)"
+      relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"`
+      if test yes = "$hardcode_automatic"; then
+	relink_command=
+      fi
+
+      # Only create the output if not a dry run.
+      $opt_dry_run || {
+	for installed in no yes; do
+	  if test yes = "$installed"; then
+	    if test -z "$install_libdir"; then
+	      break
+	    fi
+	    output=$output_objdir/${outputname}i
+	    # Replace all uninstalled libtool libraries with the installed ones
+	    newdependency_libs=
+	    for deplib in $dependency_libs; do
+	      case $deplib in
+	      *.la)
+		func_basename "$deplib"
+		name=$func_basename_result
+		func_resolve_sysroot "$deplib"
+		eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result`
+		test -z "$libdir" && \
+		  func_fatal_error "'$deplib' is not a valid libtool archive"
+		func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name"
+		;;
+	      -L*)
+		func_stripname -L '' "$deplib"
+		func_replace_sysroot "$func_stripname_result"
+		func_append newdependency_libs " -L$func_replace_sysroot_result"
+		;;
+	      -R*)
+		func_stripname -R '' "$deplib"
+		func_replace_sysroot "$func_stripname_result"
+		func_append newdependency_libs " -R$func_replace_sysroot_result"
+		;;
+	      *) func_append newdependency_libs " $deplib" ;;
+	      esac
+	    done
+	    dependency_libs=$newdependency_libs
+	    newdlfiles=
+
+	    for lib in $dlfiles; do
+	      case $lib in
+	      *.la)
+	        func_basename "$lib"
+		name=$func_basename_result
+		eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
+		test -z "$libdir" && \
+		  func_fatal_error "'$lib' is not a valid libtool archive"
+		func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name"
+		;;
+	      *) func_append newdlfiles " $lib" ;;
+	      esac
+	    done
+	    dlfiles=$newdlfiles
+	    newdlprefiles=
+	    for lib in $dlprefiles; do
+	      case $lib in
+	      *.la)
+		# Only pass preopened files to the pseudo-archive (for
+		# eventual linking with the app. that links it) if we
+		# didn't already link the preopened objects directly into
+		# the library:
+		func_basename "$lib"
+		name=$func_basename_result
+		eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
+		test -z "$libdir" && \
+		  func_fatal_error "'$lib' is not a valid libtool archive"
+		func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name"
+		;;
+	      esac
+	    done
+	    dlprefiles=$newdlprefiles
+	  else
+	    newdlfiles=
+	    for lib in $dlfiles; do
+	      case $lib in
+		[\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;;
+		*) abs=`pwd`"/$lib" ;;
+	      esac
+	      func_append newdlfiles " $abs"
+	    done
+	    dlfiles=$newdlfiles
+	    newdlprefiles=
+	    for lib in $dlprefiles; do
+	      case $lib in
+		[\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;;
+		*) abs=`pwd`"/$lib" ;;
+	      esac
+	      func_append newdlprefiles " $abs"
+	    done
+	    dlprefiles=$newdlprefiles
+	  fi
+	  $RM $output
+	  # place dlname in correct position for cygwin
+	  # In fact, it would be nice if we could use this code for all target
+	  # systems that can't hard-code library paths into their executables
+	  # and that have no shared library path variable independent of PATH,
+	  # but it turns out we can't easily determine that from inspecting
+	  # libtool variables, so we have to hard-code the OSs to which it
+	  # applies here; at the moment, that means platforms that use the PE
+	  # object format with DLL files.  See the long comment at the top of
+	  # tests/bindir.at for full details.
+	  tdlname=$dlname
+	  case $host,$output,$installed,$module,$dlname in
+	    *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll)
+	      # If a -bindir argument was supplied, place the dll there.
+	      if test -n "$bindir"; then
+		func_relative_path "$install_libdir" "$bindir"
+		tdlname=$func_relative_path_result/$dlname
+	      else
+		# Otherwise fall back on heuristic.
+		tdlname=../bin/$dlname
+	      fi
+	      ;;
+	  esac
+	  $ECHO > $output "\
+# $outputname - a libtool library file
+# Generated by $PROGRAM (GNU $PACKAGE) $VERSION
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname='$tdlname'
+
+# Names of this library.
+library_names='$library_names'
+
+# The name of the static archive.
+old_library='$old_library'
+
+# Linker flags that cannot go in dependency_libs.
+inherited_linker_flags='$new_inherited_linker_flags'
+
+# Libraries that this one depends upon.
+dependency_libs='$dependency_libs'
+
+# Names of additional weak libraries provided by this library
+weak_library_names='$weak_libs'
+
+# Version information for $libname.
+current=$current
+age=$age
+revision=$revision
+
+# Is this an already installed library?
+installed=$installed
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=$module
+
+# Files to dlopen/dlpreopen
+dlopen='$dlfiles'
+dlpreopen='$dlprefiles'
+
+# Directory that this library needs to be installed in:
+libdir='$install_libdir'"
+	  if test no,yes = "$installed,$need_relink"; then
+	    $ECHO >> $output "\
+relink_command=\"$relink_command\""
+	  fi
+	done
+      }
+
+      # Do a symbolic link so that the libtool archive can be found in
+      # LD_LIBRARY_PATH before the program is installed.
+      func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?'
+      ;;
+    esac
+    exit $EXIT_SUCCESS
+}
+
+if test link = "$opt_mode" || test relink = "$opt_mode"; then
+  func_mode_link ${1+"$@"}
+fi
+
+
+# func_mode_uninstall arg...
+func_mode_uninstall ()
+{
+    $debug_cmd
+
+    RM=$nonopt
+    files=
+    rmforce=false
+    exit_status=0
+
+    # This variable tells wrapper scripts just to set variables rather
+    # than running their programs.
+    libtool_install_magic=$magic
+
+    for arg
+    do
+      case $arg in
+      -f) func_append RM " $arg"; rmforce=: ;;
+      -*) func_append RM " $arg" ;;
+      *) func_append files " $arg" ;;
+      esac
+    done
+
+    test -z "$RM" && \
+      func_fatal_help "you must specify an RM program"
+
+    rmdirs=
+
+    for file in $files; do
+      func_dirname "$file" "" "."
+      dir=$func_dirname_result
+      if test . = "$dir"; then
+	odir=$objdir
+      else
+	odir=$dir/$objdir
+      fi
+      func_basename "$file"
+      name=$func_basename_result
+      test uninstall = "$opt_mode" && odir=$dir
+
+      # Remember odir for removal later, being careful to avoid duplicates
+      if test clean = "$opt_mode"; then
+	case " $rmdirs " in
+	  *" $odir "*) ;;
+	  *) func_append rmdirs " $odir" ;;
+	esac
+      fi
+
+      # Don't error if the file doesn't exist and rm -f was used.
+      if { test -L "$file"; } >/dev/null 2>&1 ||
+	 { test -h "$file"; } >/dev/null 2>&1 ||
+	 test -f "$file"; then
+	:
+      elif test -d "$file"; then
+	exit_status=1
+	continue
+      elif $rmforce; then
+	continue
+      fi
+
+      rmfiles=$file
+
+      case $name in
+      *.la)
+	# Possibly a libtool archive, so verify it.
+	if func_lalib_p "$file"; then
+	  func_source $dir/$name
+
+	  # Delete the libtool libraries and symlinks.
+	  for n in $library_names; do
+	    func_append rmfiles " $odir/$n"
+	  done
+	  test -n "$old_library" && func_append rmfiles " $odir/$old_library"
+
+	  case $opt_mode in
+	  clean)
+	    case " $library_names " in
+	    *" $dlname "*) ;;
+	    *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;;
+	    esac
+	    test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i"
+	    ;;
+	  uninstall)
+	    if test -n "$library_names"; then
+	      # Do each command in the postuninstall commands.
+	      func_execute_cmds "$postuninstall_cmds" '$rmforce || exit_status=1'
+	    fi
+
+	    if test -n "$old_library"; then
+	      # Do each command in the old_postuninstall commands.
+	      func_execute_cmds "$old_postuninstall_cmds" '$rmforce || exit_status=1'
+	    fi
+	    # FIXME: should reinstall the best remaining shared library.
+	    ;;
+	  esac
+	fi
+	;;
+
+      *.lo)
+	# Possibly a libtool object, so verify it.
+	if func_lalib_p "$file"; then
+
+	  # Read the .lo file
+	  func_source $dir/$name
+
+	  # Add PIC object to the list of files to remove.
+	  if test -n "$pic_object" && test none != "$pic_object"; then
+	    func_append rmfiles " $dir/$pic_object"
+	  fi
+
+	  # Add non-PIC object to the list of files to remove.
+	  if test -n "$non_pic_object" && test none != "$non_pic_object"; then
+	    func_append rmfiles " $dir/$non_pic_object"
+	  fi
+	fi
+	;;
+
+      *)
+	if test clean = "$opt_mode"; then
+	  noexename=$name
+	  case $file in
+	  *.exe)
+	    func_stripname '' '.exe' "$file"
+	    file=$func_stripname_result
+	    func_stripname '' '.exe' "$name"
+	    noexename=$func_stripname_result
+	    # $file with .exe has already been added to rmfiles,
+	    # add $file without .exe
+	    func_append rmfiles " $file"
+	    ;;
+	  esac
+	  # Do a test to see if this is a libtool program.
+	  if func_ltwrapper_p "$file"; then
+	    if func_ltwrapper_executable_p "$file"; then
+	      func_ltwrapper_scriptname "$file"
+	      relink_command=
+	      func_source $func_ltwrapper_scriptname_result
+	      func_append rmfiles " $func_ltwrapper_scriptname_result"
+	    else
+	      relink_command=
+	      func_source $dir/$noexename
+	    fi
+
+	    # note $name still contains .exe if it was in $file originally
+	    # as does the version of $file that was added into $rmfiles
+	    func_append rmfiles " $odir/$name $odir/${name}S.$objext"
+	    if test yes = "$fast_install" && test -n "$relink_command"; then
+	      func_append rmfiles " $odir/lt-$name"
+	    fi
+	    if test "X$noexename" != "X$name"; then
+	      func_append rmfiles " $odir/lt-$noexename.c"
+	    fi
+	  fi
+	fi
+	;;
+      esac
+      func_show_eval "$RM $rmfiles" 'exit_status=1'
+    done
+
+    # Try to remove the $objdir's in the directories where we deleted files
+    for dir in $rmdirs; do
+      if test -d "$dir"; then
+	func_show_eval "rmdir $dir >/dev/null 2>&1"
+      fi
+    done
+
+    exit $exit_status
+}
+
+if test uninstall = "$opt_mode" || test clean = "$opt_mode"; then
+  func_mode_uninstall ${1+"$@"}
+fi
+
+test -z "$opt_mode" && {
+  help=$generic_help
+  func_fatal_help "you must specify a MODE"
+}
+
+test -z "$exec_cmd" && \
+  func_fatal_help "invalid operation mode '$opt_mode'"
+
+if test -n "$exec_cmd"; then
+  eval exec "$exec_cmd"
+  exit $EXIT_FAILURE
+fi
+
+exit $exit_status
+
+
+# The TAGs below are defined such that we never get into a situation
+# where we disable both kinds of libraries.  Given conflicting
+# choices, we go for a static library, that is the most portable,
+# since we can't tell whether shared libraries were disabled because
+# the user asked for that or because the platform doesn't support
+# them.  This is particularly important on AIX, because we don't
+# support having both static and shared libraries enabled at the same
+# time on that platform, so we default to a shared-only configuration.
+# If a disable-shared tag is given, we'll fallback to a static-only
+# configuration.  But we'll never go from static-only to shared-only.
+
+# ### BEGIN LIBTOOL TAG CONFIG: disable-shared
+build_libtool_libs=no
+build_old_libs=yes
+# ### END LIBTOOL TAG CONFIG: disable-shared
+
+# ### BEGIN LIBTOOL TAG CONFIG: disable-static
+build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac`
+# ### END LIBTOOL TAG CONFIG: disable-static
+
+# Local Variables:
+# mode:shell-script
+# sh-indentation:2
+# End:
+
+# ### BEGIN LIBTOOL TAG CONFIG: CXX
+
+# The linker used to build libraries.
+LD="/Library/Developer/CommandLineTools/usr/bin/ld"
+
+# How to create reloadable object files.
+reload_flag=" -r"
+reload_cmds="\$LTCC \$LTCFLAGS -nostdlib \$wl-r -o \$output\$reload_objs"
+
+# Commands used to build an old-style archive.
+old_archive_cmds="\$AR \$AR_FLAGS \$oldlib\$oldobjs~\$RANLIB \$tool_oldlib"
+
+# A language specific compiler.
+CC="g++"
+
+# Is the compiler the GNU compiler?
+with_gcc=yes
+
+# Compiler flag to turn off builtin functions.
+no_builtin_flag=" -fno-builtin"
+
+# Additional compiler flags for building library objects.
+pic_flag=" -fno-common -DPIC"
+
+# How to pass a linker flag through the compiler.
+wl="-Wl,"
+
+# Compiler flag to prevent dynamic linking.
+link_static_flag=""
+
+# Does compiler simultaneously support -c and -o options?
+compiler_c_o="yes"
+
+# Whether or not to add -lc for building shared libraries.
+build_libtool_need_lc=no
+
+# Whether or not to disallow shared libs when runtime libs are static.
+allow_libtool_libs_with_static_runtimes=no
+
+# Compiler flag to allow reflexive dlopens.
+export_dynamic_flag_spec=""
+
+# Compiler flag to generate shared objects directly from archives.
+whole_archive_flag_spec="\`for conv in \$convenience\\\"\\\"; do test  -n \\\"\$conv\\\" && new_convenience=\\\"\$new_convenience \$wl-force_load,\$conv\\\"; done; func_echo_all \\\"\$new_convenience\\\"\`"
+
+# Whether the compiler copes with passing no objects directly.
+compiler_needs_object="no"
+
+# Create an old-style archive from a shared archive.
+old_archive_from_new_cmds=""
+
+# Create a temporary old-style archive to link instead of a shared archive.
+old_archive_from_expsyms_cmds=""
+
+# Commands used to build a shared archive.
+archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module"
+archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym"
+
+# Commands used to build a loadable module if different from building
+# a shared archive.
+module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags"
+module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym"
+
+# Whether we are building with GNU ld or not.
+with_gnu_ld="no"
+
+# Flag that allows shared libraries with undefined symbols to be built.
+allow_undefined_flag="\$wl-undefined \${wl}dynamic_lookup"
+
+# Flag that enforces no undefined symbols.
+no_undefined_flag=""
+
+# Flag to hardcode $libdir into a binary during linking.
+# This must work even if $libdir does not exist
+hardcode_libdir_flag_spec=""
+
+# Whether we need a single "-rpath" flag with a separated argument.
+hardcode_libdir_separator=""
+
+# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes
+# DIR into the resulting binary.
+hardcode_direct=no
+
+# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes
+# DIR into the resulting binary and the resulting library dependency is
+# "absolute", i.e. impossible to change by setting $shlibpath_var if the
+# library is relocated.
+hardcode_direct_absolute=no
+
+# Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+# into the resulting binary.
+hardcode_minus_L=no
+
+# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+# into the resulting binary.
+hardcode_shlibpath_var=unsupported
+
+# Set to "yes" if building a shared library automatically hardcodes DIR
+# into the library and all subsequent libraries and executables linked
+# against it.
+hardcode_automatic=yes
+
+# Set to yes if linker adds runtime paths of dependent libraries
+# to runtime path list.
+inherit_rpath=no
+
+# Whether libtool must link a program against all its dependency libraries.
+link_all_deplibs=yes
+
+# Set to "yes" if exported symbols are required.
+always_export_symbols=no
+
+# The commands to list exported symbols.
+export_symbols_cmds="\$NM \$libobjs \$convenience | \$global_symbol_pipe | \$SED 's/.* //' | sort | uniq > \$export_symbols"
+
+# Symbols that should not be listed in the preloaded symbols.
+exclude_expsyms="_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*"
+
+# Symbols that must always be exported.
+include_expsyms=""
+
+# Commands necessary for linking programs (against libraries) with templates.
+prelink_cmds=""
+
+# Commands necessary for finishing linking programs.
+postlink_cmds=""
+
+# Specify filename containing input files.
+file_list_spec=""
+
+# How to hardcode a shared library path into an executable.
+hardcode_action=immediate
+
+# The directories searched by this compiler when creating a shared library.
+compiler_lib_search_dirs=""
+
+# Dependencies to place before and after the objects being linked to
+# create a shared library.
+predep_objects=""
+postdep_objects=""
+predeps=""
+postdeps=""
+
+# The library search path used internally by the compiler when linking
+# a shared library.
+compiler_lib_search_path=""
+
+# ### END LIBTOOL TAG CONFIG: CXX

Property changes on: applgrid/tags/applgrid-01-06-36/libtool
___________________________________________________________________
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Index: applgrid/tags/applgrid-01-06-36/doc/nnlo-scale.tex
===================================================================
--- applgrid/tags/applgrid-01-06-36/doc/nnlo-scale.tex	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/doc/nnlo-scale.tex	(revision 2010)
@@ -0,0 +1,74 @@
+\documentclass[12pt]{article}
+
+\usepackage{a4wide}
+\usepackage{amsmath}
+\usepackage{amssymb}
+
+\newcommand{\as}{\alpha_s}
+\newcommand{\asbar}{{\bar \alpha}_s}
+\newcommand{\LO}{\text{LO}}
+\newcommand{\NLO}{\text{NLO}}
+\newcommand{\NNLO}{\text{NNLO}}
+\setlength\parskip{1em}
+
+\begin{document}
+%----------------------------------------------------------------------
+\section*{NNLO scale dependence terms}
+
+Incomplete DRAFT of NNLO scale terms; some terms are still to be
+double checked. Being put together by Mark, Pavel \& Gavin.
+
+First a few definitions
+\begin{align}
+  \label{eq:defs}
+  L_R &= \ln \frac{\mu_R^2}{Q^2}\\
+  L_F &= \ln \frac{\mu_F^2}{Q^2}\\
+  \beta_0 &= \frac{11C_A - 2 n_f}{6}
+\end{align}
+where $Q^2$ is the scale that was used in the grid originally.
+%
+Splitting functions are taken to multiply powers of $\asbar(\mu_R^2) \equiv
+\frac{\as(\mu_R^2)}{2\pi}$. 
+%
+If the renormalisation scale of a quantity is not written explicitly,
+it is taken to be $\mu_R^2$.
+%
+If the factorisation scale of a quantity is not written explicitly,
+it is taken to be $\mu_F^2$.
+
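+For orientation, in this normalisation the one-loop running of the
+coupling reads (a standard result, quoted here only as an aside)
+\begin{equation}
+  \asbar(Q^2) = \asbar(\mu_R^2)\left[ 1 + \beta_0 L_R \, \asbar(\mu_R^2)
+  + \mathcal{O}\!\left(\asbar^2\right) \right],
+\end{equation}
+which is what generates the explicit $p\,\beta_0 L_R W_\LO$ and
+$(p+1)\,\beta_0 L_R W_\NLO$ terms in the expression below.
+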
+Next we introduce the following combinations of splitting functions
+and PDFs
+\begin{align}
+  f_\LO   &\equiv f\\
+  f_\NLO  &= - L_F P_\LO \otimes f\\
+  f_\NNLO &=  \frac12 L_F^2 P_\LO \otimes P_\LO \otimes f
+             - L_F P_\NLO \otimes f
+             - (\beta_0(L_R L_F - L_F^2)) P_\LO \otimes f
+\end{align}
+
+Now call the hard perturbative coefficients $W_\LO$,
+$W_\NLO$ and $W_\NNLO$, multiplying powers of $\asbar^p$,
+$\asbar^{p+1}$ and $\asbar^{p+2}$ respectively.
+%
+We then have the full structure of the answer, which is
+\begin{multline}
+  \asbar^p W_\LO \otimes f_\LO^{(1)} \otimes f_\LO^{(2)}
+  \\
+  + \asbar^{p+1} \left[ 
+    (W_\NLO + p \beta_0 L_R W_\LO) \otimes f_\LO^{(1)}  \otimes f_\LO^{(2)}
+    + W_\LO \otimes (f_\NLO^{(1)}\otimes f_\LO^{(2)} + f_\LO^{(1)}\otimes f_\NLO^{(2)})
+  \right]
+  \\
+  + \asbar^{p+2} \left[ 
+    (W_\NNLO + (p+1) \beta_0 L_R W_\NLO + \cdots) \otimes f_\LO^{(1)}
+    \otimes f_\LO^{(2)}\right.
+  \\
+    + (W_\NLO + p \beta_0 L_R W_\LO) \otimes (f_\NLO^{(1)}\otimes f_\LO^{(2)}  + f_\LO^{(1)}\otimes f_\NLO^{(2)})
+    \\
+    \left.
+    + W_\LO  \otimes (f_\NNLO^{(1)}\otimes f_\LO^{(2)}  +
+    f_\NLO^{(1)}\otimes f_\NLO^{(2)} + f_\LO^{(1)}\otimes f_\NNLO^{(2)})
+  \right]
+\end{multline}
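+
+As a quick consistency check of the structure above, for the central
+scale choice $\mu_R = \mu_F = Q$ one has $L_R = L_F = 0$ and hence
+$f_\NLO = f_\NNLO = 0$; assuming that the terms hidden in the $\cdots$
+also carry at least one power of $L_R$ or $L_F$, the expression then
+collapses to
+\begin{equation}
+  \asbar^p\, W_\LO \otimes f^{(1)} \otimes f^{(2)}
+  + \asbar^{p+1}\, W_\NLO \otimes f^{(1)} \otimes f^{(2)}
+  + \asbar^{p+2}\, W_\NNLO \otimes f^{(1)} \otimes f^{(2)},
+\end{equation}
+i.e.\ the grid evaluated at its original scale $Q^2$.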
+
+\end{document}
Index: applgrid/tags/applgrid-01-06-36/doc
===================================================================
--- applgrid/tags/applgrid-01-06-36/doc	(revision 2009)
+++ applgrid/tags/applgrid-01-06-36/doc	(revision 2010)

Property changes on: applgrid/tags/applgrid-01-06-36/doc
___________________________________________________________________
Added: svn:ignore
## -0,0 +1,6 ##
+nnlo-scale.aux
+nnlo-scale.log
+nnlo-scale.fls
+nnlo-scale.fdb_latexmk
+
+nnlo-scale.pdf
Index: applgrid/tags/applgrid-01-06-36/INSTALL
===================================================================
--- applgrid/tags/applgrid-01-06-36/INSTALL	(revision 0)
+++ applgrid/tags/applgrid-01-06-36/INSTALL	(revision 2010)
@@ -0,0 +1,61 @@
+APPLgrid
+--------
+
+./configure
+make clean
+make
+make install
+
+These are the standard targets, so the
+library and headers will be installed in the usual place
+(i.e. change the location with
+
+ ./configure --prefix=<my_installation_path>
+
+if you want them somewhere else).
+
+You may need the 
+
+ make clean 
+
+to remove the ROOT dictionary files.
+
+make install installs a utility script 
+
+  applgrid-config
+
+in the 
+
+  bin
+
+directory of the installation path specified with --prefix
+during ./configure.
+
+This script is there to help with compilation and linking. The
+user should ensure that it is in the standard search path for
+executables so that it can be used to aid compilation and
+linking; a short example is given at the end of this file.
+
+ applgrid-config --help
+
+displays useful information ...
+
+applgrid-config: configuration tool for the APPLgrid
+                 fast cross section convolution code
+                 http://projects.hepforge.org/applgrid/
+
+Usage: applgrid-config [[--help|-h] | [--prefix] | [...]]
+Options:
+  --help | -h    : this help
+
+  --prefix       : installation prefix (cf. autoconf)
+  --incdir       : path to the APPLgrid header directory
+  --libdir       : path to the APPLgrid library directory
+  --cxxflags     : compiler flags for the C preprocessor
+  --ldflags      : compiler flags for the linker including the fortran interface
+  --ldcflags     : compiler flags for the linker just for C code
+
+  --version      : release version number
+
+
+
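+For example, a (hypothetical) user program myanalysis.cxx could be
+compiled and linked against APPLgrid as follows, assuming that
+applgrid-config is in the PATH:
+
+  # compile, picking up the APPLgrid header path and compiler flags
+  g++ `applgrid-config --cxxflags` -c myanalysis.cxx
+
+  # link against the APPLgrid libraries
+  g++ myanalysis.o `applgrid-config --ldflags` -o myanalysis
+
+Use --ldcflags instead of --ldflags if the fortran interface is not
+required.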