diff -Nru orig/Lib/ctypes/util.py modified/Lib/ctypes/util.py
--- orig/Lib/ctypes/util.py 2015-02-03 19:49:03.000000000 +0800
+++ modified/Lib/ctypes/util.py 2015-02-28 19:22:12.000000000 +0800
@@ -70,7 +70,7 @@
def find_library(name):
return name
-if os.name == "posix" and sys.platform == "darwin":
+if os.name == "posix" and sys.platform in ("darwin", "ios"):
from ctypes.macholib.dyld import dyld_find as _dyld_find
def find_library(name):
possible = ['lib%s.dylib' % name,
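
The hunk above extends the Darwin branch of ctypes.util.find_library() to a CPython
built for iOS, where sys.platform is assumed to be "ios"; lookup is then delegated to
ctypes.macholib.dyld.dyld_find(), exactly as on OS X. A minimal usage sketch (the
resolved path is illustrative and depends on the SDK and device):

    >>> from ctypes.util import find_library
    >>> find_library("c")   # tries libc.dylib, c.dylib, c.framework/c via dyld_find
    '/usr/lib/libc.dylib'

The new file below, Lib/plat-ios/IN.py, is h2py output from the iOS 8.1 SDK's
<netinet/in.h>. h2py translates every #define it encounters, including both arms of
#if/#else blocks, which is why names such as __DARWIN_UNIX03 and __PTHREAD_SIZE__ are
assigned several times in a row; in Python the last assignment wins, so the file is
imported as-is and is left verbatim here.
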
diff -Nru orig/Lib/plat-ios/IN.py modified/Lib/plat-ios/IN.py
--- orig/Lib/plat-ios/IN.py 1970-01-01 08:00:00.000000000 +0800
+++ modified/Lib/plat-ios/IN.py 2015-02-23 08:44:22.000000000 +0800
@@ -0,0 +1,6577 @@
+# Generated by h2py from /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS8.1.sdk/usr/include/netinet/in.h
+
+# Included from sys/appleapiopts.h
+
+# Included from sys/_types.h
+
+# Included from sys/cdefs.h
+def __P(protos): return protos
+
+def __STRING(x): return #x
+
+def __P(protos): return ()
+
+def __STRING(x): return "x"
+
+def __deprecated_msg(_msg): return __attribute__((deprecated(_msg)))
+
+def __deprecated_msg(_msg): return __attribute__((deprecated))
+
+def __deprecated_msg(_msg): return __attribute__((deprecated(_msg)))
+
+def __deprecated_msg(_msg): return __attribute__((deprecated))
+
+def __deprecated_enum_msg(_msg): return __deprecated_msg(_msg)
+
+def __COPYRIGHT(s): return __IDSTRING(copyright,s)
+
+def __RCSID(s): return __IDSTRING(rcsid,s)
+
+def __SCCSID(s): return __IDSTRING(sccsid,s)
+
+def __PROJECT_VERSION(s): return __IDSTRING(project_version,s)
+
+def __FBSDID(s): return
+
+__DARWIN_ONLY_64_BIT_INO_T = 1
+__DARWIN_ONLY_UNIX_CONFORMANCE = 1
+__DARWIN_ONLY_VERS_1050 = 1
+__DARWIN_ONLY_UNIX_CONFORMANCE = 1
+__DARWIN_ONLY_UNIX_CONFORMANCE = 0
+__DARWIN_UNIX03 = 1
+__DARWIN_UNIX03 = 0
+__DARWIN_UNIX03 = 1
+__DARWIN_UNIX03 = 0
+__DARWIN_UNIX03 = 0
+__DARWIN_UNIX03 = 1
+__DARWIN_64_BIT_INO_T = 1
+__DARWIN_64_BIT_INO_T = 0
+__DARWIN_64_BIT_INO_T = 1
+__DARWIN_64_BIT_INO_T = 0
+__DARWIN_64_BIT_INO_T = 1
+__DARWIN_VERS_1050 = 1
+__DARWIN_VERS_1050 = 0
+__DARWIN_VERS_1050 = 1
+__DARWIN_NON_CANCELABLE = 0
+__DARWIN_SUF_UNIX03 = "$UNIX2003"
+__DARWIN_SUF_64_BIT_INO_T = "$INODE64"
+__DARWIN_SUF_1050 = "$1050"
+__DARWIN_SUF_NON_CANCELABLE = "$NOCANCEL"
+__DARWIN_SUF_EXTSN = "$DARWIN_EXTSN"
+
+# Included from sys/_symbol_aliasing.h
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_2_0(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_2_1(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_2_2(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_3_0(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_3_1(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_3_2(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_4_0(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_4_1(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_4_2(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_4_3(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_5_0(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_5_1(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_6_0(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_6_1(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_7_0(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_7_1(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_8_0(x): return x
+
+def __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_8_1(x): return x
+
+def __DARWIN_ALIAS_STARTING_MAC___MAC_10_0(x): return x
+
+def __DARWIN_ALIAS_STARTING_MAC___MAC_10_1(x): return x
+
+def __DARWIN_ALIAS_STARTING_MAC___MAC_10_2(x): return x
+
+def __DARWIN_ALIAS_STARTING_MAC___MAC_10_3(x): return x
+
+def __DARWIN_ALIAS_STARTING_MAC___MAC_10_4(x): return x
+
+def __DARWIN_ALIAS_STARTING_MAC___MAC_10_5(x): return x
+
+def __DARWIN_ALIAS_STARTING_MAC___MAC_10_6(x): return x
+
+def __DARWIN_ALIAS_STARTING_MAC___MAC_10_7(x): return x
+
+def __DARWIN_ALIAS_STARTING_MAC___MAC_10_8(x): return x
+
+def __DARWIN_ALIAS_STARTING_MAC___MAC_10_9(x): return x
+
+def __DARWIN_ALIAS_STARTING_MAC___MAC_10_10(x): return x
+
+_POSIX_C_SOURCE = 199009
+_POSIX_C_SOURCE = 199209
+_POSIX_C_SOURCE = 200809
+_POSIX_C_SOURCE = 200112
+_POSIX_C_SOURCE = 199506
+_POSIX_C_SOURCE = 198808
+
+# Included from sys/_posix_availability.h
+def __POSIX_C_DEPRECATED(ver): return ___POSIX_C_DEPRECATED_STARTING_##ver
+
+__DARWIN_C_ANSI = 010000
+__DARWIN_C_FULL = 900000
+__DARWIN_C_LEVEL = __DARWIN_C_ANSI
+__DARWIN_C_LEVEL = _POSIX_C_SOURCE
+__DARWIN_C_LEVEL = __DARWIN_C_FULL
+__STDC_WANT_LIB_EXT1__ = 1
+_DARWIN_FEATURE_64_BIT_INODE = 1
+_DARWIN_FEATURE_ONLY_64_BIT_INODE = 1
+_DARWIN_FEATURE_ONLY_VERS_1050 = 1
+_DARWIN_FEATURE_ONLY_UNIX_CONFORMANCE = 1
+_DARWIN_FEATURE_UNIX_CONFORMANCE = 3
+
+# Included from machine/_types.h
+__DARWIN_NULL = (0)
+__DARWIN_NULL = 0
+
+# Included from sys/_pthread/_pthread_types.h
+__PTHREAD_SIZE__ = 8176
+__PTHREAD_ATTR_SIZE__ = 56
+__PTHREAD_MUTEXATTR_SIZE__ = 8
+__PTHREAD_MUTEX_SIZE__ = 56
+__PTHREAD_CONDATTR_SIZE__ = 8
+__PTHREAD_COND_SIZE__ = 40
+__PTHREAD_ONCE_SIZE__ = 8
+__PTHREAD_RWLOCK_SIZE__ = 192
+__PTHREAD_RWLOCKATTR_SIZE__ = 16
+__PTHREAD_SIZE__ = 4088
+__PTHREAD_ATTR_SIZE__ = 36
+__PTHREAD_MUTEXATTR_SIZE__ = 8
+__PTHREAD_MUTEX_SIZE__ = 40
+__PTHREAD_CONDATTR_SIZE__ = 4
+__PTHREAD_COND_SIZE__ = 24
+__PTHREAD_ONCE_SIZE__ = 4
+__PTHREAD_RWLOCK_SIZE__ = 124
+__PTHREAD_RWLOCKATTR_SIZE__ = 12
+
+# Included from stdint.h
+__WORDSIZE = 64
+__WORDSIZE = 32
+
+# Included from sys/_types/_int8_t.h
+
+# Included from sys/_types/_int16_t.h
+
+# Included from sys/_types/_int32_t.h
+
+# Included from sys/_types/_int64_t.h
+
+# Included from _types/_uint8_t.h
+
+# Included from _types/_uint16_t.h
+
+# Included from _types/_uint32_t.h
+
+# Included from _types/_uint64_t.h
+
+# Included from sys/_types/_intptr_t.h
+
+# Included from sys/_types/_uintptr_t.h
+
+# Included from _types/_intmax_t.h
+
+# Included from _types/_uintmax_t.h
+INT8_MAX = 127
+INT16_MAX = 32767
+INT32_MAX = 2147483647
+INT8_MIN = -128
+INT16_MIN = -32768
+INT32_MIN = (-INT32_MAX-1)
+UINT8_MAX = 255
+UINT16_MAX = 65535
+INT_LEAST8_MIN = INT8_MIN
+INT_LEAST16_MIN = INT16_MIN
+INT_LEAST32_MIN = INT32_MIN
+INT_LEAST8_MAX = INT8_MAX
+INT_LEAST16_MAX = INT16_MAX
+INT_LEAST32_MAX = INT32_MAX
+UINT_LEAST8_MAX = UINT8_MAX
+UINT_LEAST16_MAX = UINT16_MAX
+INT_FAST8_MIN = INT8_MIN
+INT_FAST16_MIN = INT16_MIN
+INT_FAST32_MIN = INT32_MIN
+INT_FAST8_MAX = INT8_MAX
+INT_FAST16_MAX = INT16_MAX
+INT_FAST32_MAX = INT32_MAX
+UINT_FAST8_MAX = UINT8_MAX
+UINT_FAST16_MAX = UINT16_MAX
+INTPTR_MIN = INT32_MIN
+INTPTR_MAX = INT32_MAX
+PTRDIFF_MIN = INT32_MIN
+PTRDIFF_MAX = INT32_MAX
+WCHAR_MAX = 0x7fffffff
+WCHAR_MIN = 0
+WCHAR_MIN = (-WCHAR_MAX-1)
+WINT_MIN = INT32_MIN
+WINT_MAX = INT32_MAX
+SIG_ATOMIC_MIN = INT32_MIN
+SIG_ATOMIC_MAX = INT32_MAX
+def INT8_C(v): return (v)
+
+def INT16_C(v): return (v)
+
+def INT32_C(v): return (v)
+
+
+# Included from Availability.h
+__MAC_10_0 = 1000
+__MAC_10_1 = 1010
+__MAC_10_2 = 1020
+__MAC_10_3 = 1030
+__MAC_10_4 = 1040
+__MAC_10_5 = 1050
+__MAC_10_6 = 1060
+__MAC_10_7 = 1070
+__MAC_10_8 = 1080
+__MAC_10_9 = 1090
+__MAC_10_10 = 101000
+__IPHONE_2_0 = 20000
+__IPHONE_2_1 = 20100
+__IPHONE_2_2 = 20200
+__IPHONE_3_0 = 30000
+__IPHONE_3_1 = 30100
+__IPHONE_3_2 = 30200
+__IPHONE_4_0 = 40000
+__IPHONE_4_1 = 40100
+__IPHONE_4_2 = 40200
+__IPHONE_4_3 = 40300
+__IPHONE_5_0 = 50000
+__IPHONE_5_1 = 50100
+__IPHONE_6_0 = 60000
+__IPHONE_6_1 = 60100
+__IPHONE_7_0 = 70000
+__IPHONE_7_1 = 70100
+__IPHONE_8_0 = 80000
+__IPHONE_8_1 = 80100
+
+# Included from AvailabilityInternal.h
+def __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg): return __attribute__((deprecated(_msg)))
+
+def __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg): return __attribute__((deprecated))
+
+def __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg): return __attribute__((deprecated(_msg)))
+
+def __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg): return __attribute__((deprecated))
+
+__IPHONE_OS_VERSION_MAX_ALLOWED = __IPHONE_8_1
+__IPHONE_OS_VERSION_MIN_REQUIRED = __IPHONE_2_0
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_2_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=2.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_2_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=2.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_2_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=2.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_2_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=2.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_2_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=2.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_2_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=2.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=3.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=3.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=3.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=3.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=3.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=3.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=4.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=4.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=4.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=4.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=4.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=4.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=4.3,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=4.3)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=5.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=5.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=5.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=5.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=6.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=6.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=6.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=6.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=7.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=7.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=7.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=2.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_2_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=2.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_2_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=2.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_2_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=2.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_2_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=2.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=3.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=3.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=3.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=3.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=3.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=3.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=4.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=4.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=4.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=4.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=4.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=4.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=4.3,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=4.3)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=5.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=5.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=5.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=5.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=6.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=6.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=6.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=6.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=7.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=7.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=7.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.1,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=2.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_2_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=2.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_2_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=2.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=3.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=3.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=3.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=3.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=3.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=3.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=4.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=4.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=4.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=4.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=4.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=4.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=4.3,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=4.3)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=5.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=5.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=5.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=5.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=6.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=6.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=6.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=6.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=7.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=7.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=7.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.2,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=2.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=3.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=3.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=3.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=3.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_2_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=3.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_2_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=3.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=4.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=4.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=4.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=4.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=4.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=4.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=4.3,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=4.3)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=5.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=5.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=5.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=5.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=6.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=6.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=6.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=6.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=7.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=7.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=7.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.0,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=3.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_3_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=3.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_3_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=3.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_3_2_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=3.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_3_2_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=3.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=4.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=4.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=4.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=4.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=4.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=4.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=4.3,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=4.3)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=5.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=5.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=5.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=5.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=6.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=6.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=6.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=6.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=7.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=7.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=7.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.1,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=3.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_3_2_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=3.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_3_2_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=3.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=4.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=4.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=4.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=4.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=4.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=4.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=4.3,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=4.3)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=5.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=5.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=5.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=5.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=6.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=6.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=6.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=6.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=7.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=7.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=7.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=3.2,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=3.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=4.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=4.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=4.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=4.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=4.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=4.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=4.3,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=4.3)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=5.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=5.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=5.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=5.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=6.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=6.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=6.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=6.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=7.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=7.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=7.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=4.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=4.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=4.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=4.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=4.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=4.3,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=4.3)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=5.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=5.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=5.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=5.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=6.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=6.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=6.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=6.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=7.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=7.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=7.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.1,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=4.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=4.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_2_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=4.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=4.3,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=4.3)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=5.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=5.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=5.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=5.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=6.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=6.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=6.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=6.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=7.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=7.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=7.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=4.2)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=4.3,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=4.3)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=5.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=5.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=5.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=5.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=6.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=6.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=6.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=6.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=7.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=7.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=7.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=4.3)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=5.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=5.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=5.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=5.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=6.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=6.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=6.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=6.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=7.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=7.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=7.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=5.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=5.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=5.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=6.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=6.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=6.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=6.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=7.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=7.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=7.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=5.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=6.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=6.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=6.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=6.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=7.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=7.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=7.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=6.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=6.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=6.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=7.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=7.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=7.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=6.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=7.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=7.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=7.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=7.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=7.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=7.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=8.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=8.0)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=8.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=8.1)))
+
+def __AVAILABILITY_INTERNAL__IPHONE_NA_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,unavailable)))
+
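+# NOTE: h2py emits one Python def per C "#define", including #defines that
+# live in mutually exclusive preprocessor branches, so many names in this
+# file are bound several times. The *_MSG helpers above each appear twice,
+# likely once per side of a __has_feature(attribute_availability_with_message)
+# check in the SDK's availability headers (AvailabilityInternal.h), and the
+# fallback aliases below are rebound once per __IPHONE_OS_VERSION_MIN_REQUIRED
+# branch. Python simply rebinds the name on each def, so only the last
+# definition of a given helper is in effect after import. These helpers are
+# records of the C macros rather than callable functions: __attribute__ is a
+# C construct with no definition in this module, so calling one of the defs
+# above would raise NameError.
+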
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_2_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_2_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_2_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_2_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_2_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_2_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_2_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_2_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_2_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_2_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_2_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_2_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_2_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_2_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_3_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_3_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_4_3_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
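+# The ladder below repeats the same pattern for APIs deprecated in iOS 5.0;
+# the generated file continues in this fashion for each introduced/deprecated
+# version pair up through the iOS 8.1 SDK it was produced from.
+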
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+# h2py note: Availability.h declares each of these deprecation stubs once per
+# preprocessor branch, and h2py emits every branch unconditionally.  Because
+# successive `def` statements rebind the same name in Python, only the last
+# definition of each stub takes effect; the shadowed duplicates are collapsed
+# to the effective definitions below.
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_5_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
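+
+# A minimal sketch of the rebinding rule the collapse above relies on
+# (illustrative only; `f` is not part of the generated header):
+#
+#     def f(_msg): return 1
+#     def f(_msg): return 2
+#     f("x")  # -> 2: a repeated `def` rebinds the name, so only the final
+#             #       definition of each stub is ever callable.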
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+# (shadowed h2py redefinitions collapsed; effective definitions below)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_5_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_5_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_5_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_5_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_5_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_5_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_5_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_5_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_5_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_5_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_5_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_5_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+# (shadowed h2py redefinitions collapsed; effective definitions below)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_6_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_6_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_6_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_6_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_6_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_6_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_6_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_6_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_6_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_6_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_6_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_6_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_6_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_1
+
+# (shadowed h2py redefinitions of the
+# __AVAILABILITY_INTERNAL__IPHONE_*_DEP__IPHONE_6_1_MSG stubs collapsed;
+# the bindings currently in effect follow)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_6_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
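+# NOTE: h2py does not evaluate C preprocessor conditionals, so every
+# "#if __IPHONE_OS_VERSION_MIN_REQUIRED ..." branch of Apple's
+# AvailabilityInternal.h is emitted as its own block of defs.  The same
+# function name is therefore redefined many times in this file; under
+# Python semantics only the last definition of each name takes effect.
+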
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_8_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_8_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_8_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_2_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_3_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_2
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_4_3
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_5_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_6_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_7_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_8_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_8_0
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_REGULAR
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL__IPHONE_8_1
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_2_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_3_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__IPHONE_NA_DEP__IPHONE_NA_MSG(_msg): return __AVAILABILITY_INTERNAL_UNAVAILABLE
+
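+# Note: h2py translates every #define it sees, so each branch of the header's
+# #if/#else ladders appears here as a redefinition of the same function; as in
+# the preprocessed C, the last definition in the file wins.  These availability
+# functions reference names (e.g. __attribute__) that only exist in C and are
+# not intended to be called from Python.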
+__MAC_OS_X_VERSION_MAX_ALLOWED = __MAC_10_10
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_0_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.0,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_0_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.0)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_1_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_1_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.1)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_2_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_2_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.2)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_3_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.3,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_3_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.3)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.4,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.4)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.5,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.5)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.6,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.6)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.7,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.7)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.8,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.8)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.9,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.9)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.10,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.10)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_1_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.1,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_1_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.1)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_2_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_2_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.2)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_3_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.3,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_3_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.3)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.4,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.4)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.5,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.5)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.6,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.6)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.7,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.7)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.8,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.8)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.9,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.9)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.10,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.10)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_2_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.2,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_2_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.2)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_3_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.3,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_3_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.3)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.4,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.4)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.5,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.5)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.6,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.6)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.7,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.7)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.8,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.8)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.9,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.9)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.10,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.10)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_3_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.3,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_3_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.3)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.4,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.4)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.5,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.5)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.6,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.6)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.7,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.7)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.8,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.8)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.9,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.9)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.10,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.10)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.4,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.4)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.5,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.5)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.6,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.6)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.7,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.7)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.8,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.8)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.9,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.9)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.10,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.10)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.5,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.5)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.6,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.6)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.7,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.7)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.8,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.8)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.9,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.9)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.10,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.10)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.6,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.6)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.7,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.7)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.8,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.8)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.9,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.9)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.10,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.10)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.7,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.7)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.8,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.8)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.9,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.9)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.10,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.10)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8,deprecated=10.8,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8,deprecated=10.8)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8,deprecated=10.9,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8,deprecated=10.9)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8,deprecated=10.10,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8,deprecated=10.10)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.9,deprecated=10.9,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.9,deprecated=10.9)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.9,deprecated=10.10,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.9,deprecated=10.10)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.9)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_10_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.10,deprecated=10.10,message=_msg)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_10_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.10,deprecated=10.10)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_10_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.10)))
+
+def __AVAILABILITY_INTERNAL__MAC_NA_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,unavailable)))
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_1_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_1_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_0
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_1_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_1
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_2_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_2_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_0
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_2_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_1
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_2_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_2
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_3_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_3_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_3_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_3_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_3_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_0
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_3_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_1
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_3_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_2
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_3_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_3
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_4_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_4_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_4_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_4_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_4_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_4_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_0
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_4_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_1
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_4_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_2
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_4_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_3
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_4_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_4
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_5_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_5_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_5_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_5_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_5_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_5_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_5_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_0
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_5_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_1
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_5_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_2
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_5_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_3
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_5_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_4
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_5_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_5
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_6_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_6_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_6_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_6_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_6_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_6_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_6_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_6_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_0
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_6_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_1
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_6_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_2
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_6_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_3
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_6_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_4
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_6_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_5
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_6_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_6
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_7_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_7_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_7_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_7_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_7_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_7_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_7_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_7_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_7_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_0
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_7_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_1
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_7_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_2
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_7_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_3
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_7_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_4
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_7_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_5
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_7_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_6
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_7_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_7
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_0
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_1
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_2
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_3
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_4
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_5
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_6
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_7
+
+def __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_8_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_8
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_0
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_1
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_2
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_3
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_4
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_5
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_6
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_7
+
+def __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_8
+
+def __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_10_9_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_9
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_10_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg)
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_0
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_1
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_2
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_3
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_4
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_5
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_6
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_7
+
+def __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_8
+
+def __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_9
+
+def __AVAILABILITY_INTERNAL__MAC_10_10_DEP__MAC_10_10_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_10
+
+def __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_0
+
+def __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_1
+
+def __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_2
+
+def __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_3
+
+def __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_4
+
+def __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_5
+
+def __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_6
+
+def __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_7
+
+def __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_8
+
+def __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_9
+
+def __AVAILABILITY_INTERNAL__MAC_10_10_DEP__MAC_NA_MSG(_msg): return __AVAILABILITY_INTERNAL__MAC_10_10
+
+def __AVAILABILITY_INTERNAL__MAC_NA_DEP__MAC_NA_MSG(_msg): return __AVAILABILITY_INTERNAL_UNAVAILABLE
+
+def __OSX_EXTENSION_UNAVAILABLE(_msg): return __OS_AVAILABILITY_MSG(macosx_app_extension,unavailable,_msg)
+
+def __IOS_EXTENSION_UNAVAILABLE(_msg): return __OS_AVAILABILITY_MSG(ios_app_extension,unavailable,_msg)
+
+
+# Included from sys/_types/_in_addr_t.h
+
+# Included from sys/_types/_in_port_t.h
+
+# Included from sys/socket.h
+
+# Included from sys/types.h
+
+# Included from machine/types.h
+
+# Included from machine/endian.h
+
+# Included from sys/_types/_dev_t.h
+
+# Included from sys/_types/_blkcnt_t.h
+
+# Included from sys/_types/_blksize_t.h
+
+# Included from sys/_types/_gid_t.h
+
+# Included from sys/_types/_ino_t.h
+
+# Included from sys/_types/_ino64_t.h
+
+# Included from sys/_types/_key_t.h
+
+# Included from sys/_types/_mode_t.h
+
+# Included from sys/_types/_nlink_t.h
+
+# Included from sys/_types/_id_t.h
+
+# Included from sys/_types/_pid_t.h
+
+# Included from sys/_types/_off_t.h
+
+# Included from sys/_types/_uid_t.h
+# The C casts (int32_t/u_int32_t) in the original major()/minor() macros have
+# no meaning in Python and would raise NameError; they are dropped here so the
+# functions are actually callable.
+def major(x): return (x >> 24) & 0xff
+
+def minor(x): return x & 0xffffff
+
+
+# Included from sys/_types/_clock_t.h
+
+# Included from sys/_types/_size_t.h
+
+# Included from sys/_types/_ssize_t.h
+
+# Included from sys/_types/_time_t.h
+
+# Included from sys/_types/_useconds_t.h
+
+# Included from sys/_types/_suseconds_t.h
+
+# Included from sys/_types/_rsize_t.h
+
+# Included from sys/_types/_errno_t.h
+
+# Included from sys/_types/_fd_def.h
+__DARWIN_FD_SETSIZE = 1024
+__DARWIN_NBBY = 8
+def __DARWIN_FD_ZERO(p): return __builtin_bzero(p, sizeof(*(p)))
+
+def __DARWIN_FD_ZERO(p): return bzero(p, sizeof(*(p)))
+
+NBBY = __DARWIN_NBBY
+
+# Included from sys/_types/_fd_setsize.h
+FD_SETSIZE = __DARWIN_FD_SETSIZE
+
+# Included from sys/_types/_fd_set.h
+
+# Included from sys/_types/_fd_clr.h
+
+# Included from sys/_types/_fd_zero.h
+def FD_ZERO(p): return __DARWIN_FD_ZERO(p)
+
+
+# Included from sys/_types/_fd_isset.h
+
+# Included from sys/_types/_fd_copy.h
+
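+# The __DARWIN_FD_* definitions above reference C builtins (bzero, sizeof)
+# and are not callable from Python.  A minimal, illustrative sketch of the
+# Pythonic equivalent -- the select module instead of the FD_* macros (not
+# part of the generated header translation):
+#
+#   import select, socket
+#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+#   readable, _, _ = select.select([s], [], [], 0.5)  # 0.5-second timeout
+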
+# Included from sys/_pthread/_pthread_attr_t.h
+
+# Included from sys/_pthread/_pthread_cond_t.h
+
+# Included from sys/_pthread/_pthread_condattr_t.h
+
+# Included from sys/_pthread/_pthread_mutex_t.h
+
+# Included from sys/_pthread/_pthread_mutexattr_t.h
+
+# Included from sys/_pthread/_pthread_once_t.h
+
+# Included from sys/_pthread/_pthread_rwlock_t.h
+
+# Included from sys/_pthread/_pthread_rwlockattr_t.h
+
+# Included from sys/_pthread/_pthread_t.h
+
+# Included from sys/_pthread/_pthread_key_t.h
+
+# Included from sys/_types/_fsblkcnt_t.h
+
+# Included from sys/_types/_fsfilcnt_t.h
+
+# Included from machine/_param.h
+
+# Included from sys/_types/_sa_family_t.h
+
+# Included from sys/_types/_socklen_t.h
+
+# Included from sys/_types/_iovec_t.h
+SOCK_STREAM = 1
+SOCK_DGRAM = 2
+SOCK_RAW = 3
+SOCK_RDM = 4
+SOCK_SEQPACKET = 5
+SO_DEBUG = 0x0001
+SO_ACCEPTCONN = 0x0002
+SO_REUSEADDR = 0x0004
+SO_KEEPALIVE = 0x0008
+SO_DONTROUTE = 0x0010
+SO_BROADCAST = 0x0020
+SO_USELOOPBACK = 0x0040
+SO_LINGER = 0x0080
+SO_LINGER = 0x1080
+SO_OOBINLINE = 0x0100
+SO_REUSEPORT = 0x0200
+SO_TIMESTAMP = 0x0400
+SO_TIMESTAMP_MONOTONIC = 0x0800
+SO_ACCEPTFILTER = 0x1000
+SO_DONTTRUNC = 0x2000
+SO_WANTMORE = 0x4000
+SO_WANTOOBFLAG = 0x8000
+SO_SNDBUF = 0x1001
+SO_RCVBUF = 0x1002
+SO_SNDLOWAT = 0x1003
+SO_RCVLOWAT = 0x1004
+SO_SNDTIMEO = 0x1005
+SO_RCVTIMEO = 0x1006
+SO_ERROR = 0x1007
+SO_TYPE = 0x1008
+SO_LABEL = 0x1010
+SO_PEERLABEL = 0x1011
+SO_NREAD = 0x1020
+SO_NKE = 0x1021
+SO_NOSIGPIPE = 0x1022
+SO_NOADDRERR = 0x1023
+SO_NWRITE = 0x1024
+SO_REUSESHAREUID = 0x1025
+SO_NOTIFYCONFLICT = 0x1026
+SO_UPCALLCLOSEWAIT = 0x1027
+SO_LINGER_SEC = 0x1080
+SO_RANDOMPORT = 0x1082
+SO_NP_EXTENSIONS = 0x1083
+SO_NUMRCVPKT = 0x1112
+SONPX_SETOPTSHUT = 0x000000001
+SOL_SOCKET = 0xffff
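+# Usage sketch (illustrative only, not part of the header): the SO_* option
+# values above are passed to setsockopt()/getsockopt() at the SOL_SOCKET
+# level, e.g.:
+#
+#   import socket
+#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+#   s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+#   assert s.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE) == socket.SOCK_STREAM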
+AF_UNSPEC = 0
+AF_UNIX = 1
+AF_LOCAL = AF_UNIX
+AF_INET = 2
+AF_IMPLINK = 3
+AF_PUP = 4
+AF_CHAOS = 5
+AF_NS = 6
+AF_ISO = 7
+AF_OSI = AF_ISO
+AF_ECMA = 8
+AF_DATAKIT = 9
+AF_CCITT = 10
+AF_SNA = 11
+AF_DECnet = 12
+AF_DLI = 13
+AF_LAT = 14
+AF_HYLINK = 15
+AF_APPLETALK = 16
+AF_ROUTE = 17
+AF_LINK = 18
+pseudo_AF_XTP = 19
+AF_COIP = 20
+AF_CNT = 21
+pseudo_AF_RTIP = 22
+AF_IPX = 23
+AF_SIP = 24
+pseudo_AF_PIP = 25
+AF_NDRV = 27
+AF_ISDN = 28
+AF_E164 = AF_ISDN
+pseudo_AF_KEY = 29
+AF_INET6 = 30
+AF_NATM = 31
+AF_SYSTEM = 32
+AF_NETBIOS = 33
+AF_PPP = 34
+pseudo_AF_HDRCMPLT = 35
+AF_RESERVED_36 = 36
+AF_IEEE80211 = 37
+AF_UTUN = 38
+AF_MAX = 40
+SOCK_MAXADDRLEN = 255
+_SS_MAXSIZE = 128
+PF_UNSPEC = AF_UNSPEC
+PF_LOCAL = AF_LOCAL
+PF_UNIX = PF_LOCAL
+PF_INET = AF_INET
+PF_IMPLINK = AF_IMPLINK
+PF_PUP = AF_PUP
+PF_CHAOS = AF_CHAOS
+PF_NS = AF_NS
+PF_ISO = AF_ISO
+PF_OSI = AF_ISO
+PF_ECMA = AF_ECMA
+PF_DATAKIT = AF_DATAKIT
+PF_CCITT = AF_CCITT
+PF_SNA = AF_SNA
+PF_DECnet = AF_DECnet
+PF_DLI = AF_DLI
+PF_LAT = AF_LAT
+PF_HYLINK = AF_HYLINK
+PF_APPLETALK = AF_APPLETALK
+PF_ROUTE = AF_ROUTE
+PF_LINK = AF_LINK
+PF_XTP = pseudo_AF_XTP
+PF_COIP = AF_COIP
+PF_CNT = AF_CNT
+PF_SIP = AF_SIP
+PF_IPX = AF_IPX
+PF_RTIP = pseudo_AF_RTIP
+PF_PIP = pseudo_AF_PIP
+PF_NDRV = AF_NDRV
+PF_ISDN = AF_ISDN
+PF_KEY = pseudo_AF_KEY
+PF_INET6 = AF_INET6
+PF_NATM = AF_NATM
+PF_SYSTEM = AF_SYSTEM
+PF_NETBIOS = AF_NETBIOS
+PF_PPP = AF_PPP
+PF_RESERVED_36 = AF_RESERVED_36
+PF_UTUN = AF_UTUN
+PF_MAX = AF_MAX
+NET_MAXID = AF_MAX
+NET_RT_DUMP = 1
+NET_RT_FLAGS = 2
+NET_RT_IFLIST = 3
+NET_RT_STAT = 4
+NET_RT_TRASH = 5
+NET_RT_IFLIST2 = 6
+NET_RT_DUMP2 = 7
+NET_RT_MAXID = 10
+SOMAXCONN = 128
+MSG_OOB = 0x1
+MSG_PEEK = 0x2
+MSG_DONTROUTE = 0x4
+MSG_EOR = 0x8
+MSG_TRUNC = 0x10
+MSG_CTRUNC = 0x20
+MSG_WAITALL = 0x40
+MSG_DONTWAIT = 0x80
+MSG_EOF = 0x100
+MSG_WAITSTREAM = 0x200
+MSG_FLUSH = 0x400
+MSG_HOLD = 0x800
+MSG_SEND = 0x1000
+MSG_HAVEMORE = 0x2000
+MSG_RCVMORE = 0x4000
+MSG_NEEDSA = 0x10000
+CMGROUP_MAX = 16
+def CMSG_FIRSTHDR(mhdr): return \
+
+SCM_RIGHTS = 0x01
+SCM_TIMESTAMP = 0x02
+SCM_CREDS = 0x03
+SCM_TIMESTAMP_MONOTONIC = 0x04
+SHUT_RD = 0
+SHUT_WR = 1
+SHUT_RDWR = 2
+
+# Included from sys/_endian.h
+def ntohl(x): return ((__uint32_t)(x))
+
+def ntohs(x): return ((__uint16_t)(x))
+
+def htonl(x): return ((__uint32_t)(x))
+
+def htons(x): return ((__uint16_t)(x))
+
+def ntohll(x): return ((__uint64_t)(x))
+
+def htonll(x): return ((__uint64_t)(x))
+
+def NTOHL(x): return (x)
+
+def NTOHS(x): return (x)
+
+def NTOHLL(x): return (x)
+
+def HTONL(x): return (x)
+
+def HTONS(x): return (x)
+
+def HTONLL(x): return (x)
+
+
+# Included from libkern/_OSByteOrder.h
+def __DARWIN_OSSwapConstInt16(x): return \
+
+def __DARWIN_OSSwapConstInt32(x): return \
+
+def __DARWIN_OSSwapConstInt64(x): return \
+
+
+# Included from libkern/arm/OSByteOrder.h
+
+# Included from arm/arch.h
+
+# Included from sys/_types/_os_inline.h
+def __DARWIN_OSSwapInt16(x): return \
+
+def __DARWIN_OSSwapInt32(x): return \
+
+def __DARWIN_OSSwapInt64(x): return \
+
+def __DARWIN_OSSwapInt16(x): return _OSSwapInt16(x)
+
+def __DARWIN_OSSwapInt32(x): return _OSSwapInt32(x)
+
+def __DARWIN_OSSwapInt64(x): return _OSSwapInt64(x)
+
+def ntohs(x): return __DARWIN_OSSwapInt16(x)
+
+def htons(x): return __DARWIN_OSSwapInt16(x)
+
+def ntohl(x): return __DARWIN_OSSwapInt32(x)
+
+def htonl(x): return __DARWIN_OSSwapInt32(x)
+
+def ntohll(x): return __DARWIN_OSSwapInt64(x)
+
+def htonll(x): return __DARWIN_OSSwapInt64(x)
+
+IPPROTO_IP = 0
+IPPROTO_HOPOPTS = 0
+IPPROTO_ICMP = 1
+IPPROTO_IGMP = 2
+IPPROTO_GGP = 3
+IPPROTO_IPV4 = 4
+IPPROTO_IPIP = IPPROTO_IPV4
+IPPROTO_TCP = 6
+IPPROTO_ST = 7
+IPPROTO_EGP = 8
+IPPROTO_PIGP = 9
+IPPROTO_RCCMON = 10
+IPPROTO_NVPII = 11
+IPPROTO_PUP = 12
+IPPROTO_ARGUS = 13
+IPPROTO_EMCON = 14
+IPPROTO_XNET = 15
+IPPROTO_CHAOS = 16
+IPPROTO_UDP = 17
+IPPROTO_MUX = 18
+IPPROTO_MEAS = 19
+IPPROTO_HMP = 20
+IPPROTO_PRM = 21
+IPPROTO_IDP = 22
+IPPROTO_TRUNK1 = 23
+IPPROTO_TRUNK2 = 24
+IPPROTO_LEAF1 = 25
+IPPROTO_LEAF2 = 26
+IPPROTO_RDP = 27
+IPPROTO_IRTP = 28
+IPPROTO_TP = 29
+IPPROTO_BLT = 30
+IPPROTO_NSP = 31
+IPPROTO_INP = 32
+IPPROTO_SEP = 33
+IPPROTO_3PC = 34
+IPPROTO_IDPR = 35
+IPPROTO_XTP = 36
+IPPROTO_DDP = 37
+IPPROTO_CMTP = 38
+IPPROTO_TPXX = 39
+IPPROTO_IL = 40
+IPPROTO_IPV6 = 41
+IPPROTO_SDRP = 42
+IPPROTO_ROUTING = 43
+IPPROTO_FRAGMENT = 44
+IPPROTO_IDRP = 45
+IPPROTO_RSVP = 46
+IPPROTO_GRE = 47
+IPPROTO_MHRP = 48
+IPPROTO_BHA = 49
+IPPROTO_ESP = 50
+IPPROTO_AH = 51
+IPPROTO_INLSP = 52
+IPPROTO_SWIPE = 53
+IPPROTO_NHRP = 54
+IPPROTO_ICMPV6 = 58
+IPPROTO_NONE = 59
+IPPROTO_DSTOPTS = 60
+IPPROTO_AHIP = 61
+IPPROTO_CFTP = 62
+IPPROTO_HELLO = 63
+IPPROTO_SATEXPAK = 64
+IPPROTO_KRYPTOLAN = 65
+IPPROTO_RVD = 66
+IPPROTO_IPPC = 67
+IPPROTO_ADFS = 68
+IPPROTO_SATMON = 69
+IPPROTO_VISA = 70
+IPPROTO_IPCV = 71
+IPPROTO_CPNX = 72
+IPPROTO_CPHB = 73
+IPPROTO_WSN = 74
+IPPROTO_PVP = 75
+IPPROTO_BRSATMON = 76
+IPPROTO_ND = 77
+IPPROTO_WBMON = 78
+IPPROTO_WBEXPAK = 79
+IPPROTO_EON = 80
+IPPROTO_VMTP = 81
+IPPROTO_SVMTP = 82
+IPPROTO_VINES = 83
+IPPROTO_TTP = 84
+IPPROTO_IGP = 85
+IPPROTO_DGP = 86
+IPPROTO_TCF = 87
+IPPROTO_IGRP = 88
+IPPROTO_OSPFIGP = 89
+IPPROTO_SRPC = 90
+IPPROTO_LARP = 91
+IPPROTO_MTP = 92
+IPPROTO_AX25 = 93
+IPPROTO_IPEIP = 94
+IPPROTO_MICP = 95
+IPPROTO_SCCSP = 96
+IPPROTO_ETHERIP = 97
+IPPROTO_ENCAP = 98
+IPPROTO_APES = 99
+IPPROTO_GMTP = 100
+IPPROTO_PIM = 103
+IPPROTO_IPCOMP = 108
+IPPROTO_PGM = 113
+IPPROTO_SCTP = 132
+IPPROTO_DIVERT = 254
+IPPROTO_RAW = 255
+IPPROTO_MAX = 256
+IPPROTO_DONE = 257
+__DARWIN_IPPORT_RESERVED = 1024
+IPPORT_RESERVED = __DARWIN_IPPORT_RESERVED
+IPPORT_USERRESERVED = 5000
+IPPORT_HIFIRSTAUTO = 49152
+IPPORT_HILASTAUTO = 65535
+IPPORT_RESERVEDSTART = 600
+def IN_CLASSA(i): return (((u_int32_t)(i) & 0x80000000) == 0)
+
+IN_CLASSA_NET = 0xff000000
+IN_CLASSA_NSHIFT = 24
+IN_CLASSA_HOST = 0x00ffffff
+IN_CLASSA_MAX = 128
+def IN_CLASSB(i): return (((u_int32_t)(i) & 0xc0000000) == 0x80000000)
+
+IN_CLASSB_NET = 0xffff0000
+IN_CLASSB_NSHIFT = 16
+IN_CLASSB_HOST = 0x0000ffff
+IN_CLASSB_MAX = 65536
+def IN_CLASSC(i): return (((u_int32_t)(i) & 0xe0000000) == 0xc0000000)
+
+IN_CLASSC_NET = 0xffffff00
+IN_CLASSC_NSHIFT = 8
+IN_CLASSC_HOST = 0x000000ff
+def IN_CLASSD(i): return (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)
+
+IN_CLASSD_NET = 0xf0000000
+IN_CLASSD_NSHIFT = 28
+IN_CLASSD_HOST = 0x0fffffff
+def IN_MULTICAST(i): return IN_CLASSD(i)
+
+def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
+
+def IN_BADCLASS(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
+
+INADDR_NONE = 0xffffffff
+def IN_LINKLOCAL(i): return (((u_int32_t)(i) & IN_CLASSB_NET) == IN_LINKLOCALNETNUM)
+
+def IN_LOOPBACK(i): return (((u_int32_t)(i) & 0xff000000) == 0x7f000000)
+
+def IN_ZERONET(i): return (((u_int32_t)(i) & 0xff000000) == 0)
+
+def IN_LOCAL_GROUP(i): return (((u_int32_t)(i) & 0xffffff00) == 0xe0000000)
+
+IN_LOOPBACKNET = 127
+INET_ADDRSTRLEN = 16
+IP_OPTIONS = 1
+IP_HDRINCL = 2
+IP_TOS = 3
+IP_TTL = 4
+IP_RECVOPTS = 5
+IP_RECVRETOPTS = 6
+IP_RECVDSTADDR = 7
+IP_RETOPTS = 8
+IP_MULTICAST_IF = 9
+IP_MULTICAST_TTL = 10
+IP_MULTICAST_LOOP = 11
+IP_ADD_MEMBERSHIP = 12
+IP_DROP_MEMBERSHIP = 13
+IP_MULTICAST_VIF = 14
+IP_RSVP_ON = 15
+IP_RSVP_OFF = 16
+IP_RSVP_VIF_ON = 17
+IP_RSVP_VIF_OFF = 18
+IP_PORTRANGE = 19
+IP_RECVIF = 20
+IP_IPSEC_POLICY = 21
+IP_FAITH = 22
+IP_STRIPHDR = 23
+IP_RECVTTL = 24
+IP_BOUND_IF = 25
+IP_PKTINFO = 26
+IP_RECVPKTINFO = IP_PKTINFO
+IP_FW_ADD = 40
+IP_FW_DEL = 41
+IP_FW_FLUSH = 42
+IP_FW_ZERO = 43
+IP_FW_GET = 44
+IP_FW_RESETLOG = 45
+IP_OLD_FW_ADD = 50
+IP_OLD_FW_DEL = 51
+IP_OLD_FW_FLUSH = 52
+IP_OLD_FW_ZERO = 53
+IP_OLD_FW_GET = 54
+IP_NAT__XXX = 55
+IP_OLD_FW_RESETLOG = 56
+IP_DUMMYNET_CONFIGURE = 60
+IP_DUMMYNET_DEL = 61
+IP_DUMMYNET_FLUSH = 62
+IP_DUMMYNET_GET = 64
+IP_TRAFFIC_MGT_BACKGROUND = 65
+IP_MULTICAST_IFINDEX = 66
+IP_ADD_SOURCE_MEMBERSHIP = 70
+IP_DROP_SOURCE_MEMBERSHIP = 71
+IP_BLOCK_SOURCE = 72
+IP_UNBLOCK_SOURCE = 73
+IP_MSFILTER = 74
+MCAST_JOIN_GROUP = 80
+MCAST_LEAVE_GROUP = 81
+MCAST_JOIN_SOURCE_GROUP = 82
+MCAST_LEAVE_SOURCE_GROUP = 83
+MCAST_BLOCK_SOURCE = 84
+MCAST_UNBLOCK_SOURCE = 85
+IP_DEFAULT_MULTICAST_TTL = 1
+IP_DEFAULT_MULTICAST_LOOP = 1
+IP_MIN_MEMBERSHIPS = 31
+IP_MAX_MEMBERSHIPS = 4095
+IP_MAX_GROUP_SRC_FILTER = 512
+IP_MAX_SOCK_SRC_FILTER = 128
+IP_MAX_SOCK_MUTE_FILTER = 128
+MCAST_UNDEFINED = 0
+MCAST_INCLUDE = 1
+MCAST_EXCLUDE = 2
+IP_PORTRANGE_DEFAULT = 0
+IP_PORTRANGE_HIGH = 1
+IP_PORTRANGE_LOW = 2
+IPPROTO_MAXID = (IPPROTO_AH + 1)
+IPCTL_FORWARDING = 1
+IPCTL_SENDREDIRECTS = 2
+IPCTL_DEFTTL = 3
+IPCTL_DEFMTU = 4
+IPCTL_RTEXPIRE = 5
+IPCTL_RTMINEXPIRE = 6
+IPCTL_RTMAXCACHE = 7
+IPCTL_SOURCEROUTE = 8
+IPCTL_DIRECTEDBROADCAST = 9
+IPCTL_INTRQMAXLEN = 10
+IPCTL_INTRQDROPS = 11
+IPCTL_STATS = 12
+IPCTL_ACCEPTSOURCEROUTE = 13
+IPCTL_FASTFORWARDING = 14
+IPCTL_KEEPFAITH = 15
+IPCTL_GIF_TTL = 16
+IPCTL_MAXID = 17
+
+# Included from netinet6/in6.h
+__KAME_VERSION = "2009/apple-darwin"
+IPV6PORT_RESERVED = 1024
+IPV6PORT_ANONMIN = 49152
+IPV6PORT_ANONMAX = 65535
+IPV6PORT_RESERVEDMIN = 600
+IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
+INET6_ADDRSTRLEN = 46
+def IN6_IS_ADDR_UNSPECIFIED(a): return \
+
+def IN6_IS_ADDR_LOOPBACK(a): return \
+
+def IN6_IS_ADDR_V4COMPAT(a): return \
+
+def IN6_IS_ADDR_V4MAPPED(a): return \
+
+__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
+__IPV6_ADDR_SCOPE_INTFACELOCAL = 0x01
+__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
+__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
+__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
+__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
+def IN6_IS_ADDR_LINKLOCAL(a): return \
+
+def IN6_IS_ADDR_SITELOCAL(a): return \
+
+def IN6_IS_ADDR_UNIQUE_LOCAL(a): return \
+
+def IN6_IS_ADDR_MC_NODELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_SITELOCAL(a): return \
+
+def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
+
+def IN6_IS_ADDR_MC_GLOBAL(a): return \
+
+IPV6_OPTIONS = 1
+IPV6_RECVOPTS = 5
+IPV6_RECVRETOPTS = 6
+IPV6_RECVDSTADDR = 7
+IPV6_RETOPTS = 8
+IPV6_SOCKOPT_RESERVED1 = 3
+IPV6_UNICAST_HOPS = 4
+IPV6_MULTICAST_IF = 9
+IPV6_MULTICAST_HOPS = 10
+IPV6_MULTICAST_LOOP = 11
+IPV6_JOIN_GROUP = 12
+IPV6_LEAVE_GROUP = 13
+IPV6_PORTRANGE = 14
+ICMP6_FILTER = 18
+IPV6_2292PKTINFO = 19
+IPV6_2292HOPLIMIT = 20
+IPV6_2292NEXTHOP = 21
+IPV6_2292HOPOPTS = 22
+IPV6_2292DSTOPTS = 23
+IPV6_2292RTHDR = 24
+IPV6_2292PKTOPTIONS = 25
+IPV6_PKTINFO = IPV6_2292PKTINFO
+IPV6_HOPLIMIT = IPV6_2292HOPLIMIT
+IPV6_NEXTHOP = IPV6_2292NEXTHOP
+IPV6_HOPOPTS = IPV6_2292HOPOPTS
+IPV6_DSTOPTS = IPV6_2292DSTOPTS
+IPV6_RTHDR = IPV6_2292RTHDR
+IPV6_PKTOPTIONS = IPV6_2292PKTOPTIONS
+IPV6_CHECKSUM = 26
+IPV6_V6ONLY = 27
+IPV6_BINDV6ONLY = IPV6_V6ONLY
+IPV6_IPSEC_POLICY = 28
+IPV6_FAITH = 29
+IPV6_FW_ADD = 30
+IPV6_FW_DEL = 31
+IPV6_FW_FLUSH = 32
+IPV6_FW_ZERO = 33
+IPV6_FW_GET = 34
+IPV6_RECVTCLASS = 35
+IPV6_TCLASS = 36
+IPV6_RTHDRDSTOPTS = 57
+IPV6_RECVPKTINFO = 61
+IPV6_RECVHOPLIMIT = 37
+IPV6_RECVRTHDR = 38
+IPV6_RECVHOPOPTS = 39
+IPV6_RECVDSTOPTS = 40
+IPV6_USE_MIN_MTU = 42
+IPV6_RECVPATHMTU = 43
+IPV6_PATHMTU = 44
+IPV6_REACHCONF = 45
+IPV6_3542PKTINFO = 46
+IPV6_3542HOPLIMIT = 47
+IPV6_3542NEXTHOP = 48
+IPV6_3542HOPOPTS = 49
+IPV6_3542DSTOPTS = 50
+IPV6_3542RTHDR = 51
+IPV6_PKTINFO = IPV6_3542PKTINFO
+IPV6_HOPLIMIT = IPV6_3542HOPLIMIT
+IPV6_NEXTHOP = IPV6_3542NEXTHOP
+IPV6_HOPOPTS = IPV6_3542HOPOPTS
+IPV6_DSTOPTS = IPV6_3542DSTOPTS
+IPV6_RTHDR = IPV6_3542RTHDR
+IPV6_AUTOFLOWLABEL = 59
+IPV6_DONTFRAG = 62
+IPV6_PREFER_TEMPADDR = 63
+IPV6_MSFILTER = 74
+IPV6_BOUND_IF = 125
+IPV6_RTHDR_LOOSE = 0
+IPV6_RTHDR_STRICT = 1
+IPV6_RTHDR_TYPE_0 = 0
+IPV6_DEFAULT_MULTICAST_HOPS = 1
+IPV6_DEFAULT_MULTICAST_LOOP = 1
+IPV6_MIN_MEMBERSHIPS = 31
+IPV6_MAX_MEMBERSHIPS = 4095
+IPV6_MAX_GROUP_SRC_FILTER = 512
+IPV6_MAX_SOCK_SRC_FILTER = 128
+IPV6_PORTRANGE_DEFAULT = 0
+IPV6_PORTRANGE_HIGH = 1
+IPV6_PORTRANGE_LOW = 2
+IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
+IPV6CTL_FORWARDING = 1
+IPV6CTL_SENDREDIRECTS = 2
+IPV6CTL_DEFHLIM = 3
+IPV6CTL_DEFMTU = 4
+IPV6CTL_FORWSRCRT = 5
+IPV6CTL_STATS = 6
+IPV6CTL_MRTSTATS = 7
+IPV6CTL_MRTPROTO = 8
+IPV6CTL_MAXFRAGPACKETS = 9
+IPV6CTL_SOURCECHECK = 10
+IPV6CTL_SOURCECHECK_LOGINT = 11
+IPV6CTL_ACCEPT_RTADV = 12
+IPV6CTL_KEEPFAITH = 13
+IPV6CTL_LOG_INTERVAL = 14
+IPV6CTL_HDRNESTLIMIT = 15
+IPV6CTL_DAD_COUNT = 16
+IPV6CTL_AUTO_FLOWLABEL = 17
+IPV6CTL_DEFMCASTHLIM = 18
+IPV6CTL_GIF_HLIM = 19
+IPV6CTL_KAME_VERSION = 20
+IPV6CTL_USE_DEPRECATED = 21
+IPV6CTL_RR_PRUNE = 22
+IPV6CTL_MAPPED_ADDR = 23
+IPV6CTL_V6ONLY = 24
+IPV6CTL_RTEXPIRE = 25
+IPV6CTL_RTMINEXPIRE = 26
+IPV6CTL_RTMAXCACHE = 27
+IPV6CTL_USETEMPADDR = 32
+IPV6CTL_TEMPPLTIME = 33
+IPV6CTL_TEMPVLTIME = 34
+IPV6CTL_AUTO_LINKLOCAL = 35
+IPV6CTL_RIP6STATS = 36
+IPV6CTL_PREFER_TEMPADDR = 37
+IPV6CTL_ADDRCTLPOLICY = 38
+IPV6CTL_USE_DEFAULTZONE = 39
+IPV6CTL_MAXFRAGS = 41
+IPV6CTL_MCAST_PMTU = 44
+IPV6CTL_NEIGHBORGCTHRESH = 46
+IPV6CTL_MAXIFPREFIXES = 47
+IPV6CTL_MAXIFDEFROUTERS = 48
+IPV6CTL_MAXDYNROUTES = 49
+ICMPV6CTL_ND6_ONLINKNSRFC4861 = 50
+IPV6CTL_MAXID = 51
diff -Nru orig/Lib/plat-ios/regen modified/Lib/plat-ios/regen
--- orig/Lib/plat-ios/regen 1970-01-01 08:00:00.000000000 +0800
+++ modified/Lib/plat-ios/regen 2015-02-15 08:51:58.000000000 +0800
@@ -0,0 +1,3 @@
+#! /bin/sh
+set -v
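+# Regenerate Lib/plat-ios/IN.py by running h2py over the iPhoneOS SDK's
+# netinet/in.h; per h2py's own usage notes, "-i '(u_long)'" makes it
+# ignore (u_long) casts in the translated macro bodies.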
+include=`xcrun --sdk iphoneos --show-sdk-path`/usr/include python$EXE ../../Tools/scripts/h2py.py -i '(u_long)' `xcrun --sdk iphoneos --show-sdk-path`/usr/include/netinet/in.h
diff -Nru orig/Lib/platform.py modified/Lib/platform.py
--- orig/Lib/platform.py 2015-02-03 19:49:03.000000000 +0800
+++ modified/Lib/platform.py 2015-02-28 19:30:28.000000000 +0800
@@ -841,7 +841,7 @@
""" Interface to the system's uname command.
"""
- if sys.platform in ('dos', 'win32', 'win16'):
+ if sys.platform in ('dos', 'win32', 'win16', 'ios'):
# XXX Others too ?
return default
try:
@@ -864,7 +864,7 @@
default in case the command should fail.
"""
- if sys.platform in ('dos', 'win32', 'win16'):
+ if sys.platform in ('dos', 'win32', 'win16', 'ios'):
# XXX Others too ?
return default
target = _follow_symlinks(target)
diff -Nru orig/Lib/webbrowser.py modified/Lib/webbrowser.py
--- orig/Lib/webbrowser.py 2015-02-03 19:49:04.000000000 +0800
+++ modified/Lib/webbrowser.py 2015-03-15 00:57:57.000000000 +0800
@@ -602,6 +602,57 @@
register("firefox", None, MacOSXOSAScript('firefox'), -1)
register("MacOSX", None, MacOSXOSAScript('default'), -1)
+#
+# Platform support for iOS
+#
+if sys.platform == 'ios':
+ class MobileSafari(BaseBrowser):
+ def open(self, url, new=0, autoraise=True):
+ # This code is the equivalent of:
+ # NSURL *nsurl = [NSURL URLWithString:url];
+ # [[UIApplication sharedApplication] openURL:nsurl];
+ from ctypes import cdll, c_void_p, c_char_p, c_uint32
+ from ctypes import util
+ objc = cdll.LoadLibrary(util.find_library('objc'))
+ cf = cdll.LoadLibrary(util.find_library('CoreFoundation'))
+ objc.objc_getClass.restype = c_void_p
+ objc.objc_getClass.argtypes = [c_char_p]
+ objc.sel_registerName.restype = c_void_p
+ objc.sel_registerName.argtypes = [c_char_p]
+ cf.CFStringCreateWithCString.restype = c_void_p
+ cf.CFStringCreateWithCString.argtypes = [c_void_p, c_char_p, c_uint32]
+
+ # Get an NSString describing the URL
+ kCFStringEncodingUTF8 = 0x08000100
+ url = c_void_p(cf.CFStringCreateWithCString(None, url.encode('utf-8'), kCFStringEncodingUTF8))
+ autorelease = c_void_p(objc.sel_registerName(b'autorelease'))
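+ # objc_msgSend's effective C signature depends on the selector being
+ # sent, so its argtypes/restype are re-declared before each distinct
+ # call below.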
+ objc.objc_msgSend.argtypes = [c_void_p, c_void_p]
+ objc.objc_msgSend.restype = c_void_p
+ objc.objc_msgSend(url, autorelease)
+
+ # Get an NSURL object representing the URL
+ NSURL = c_void_p(objc.objc_getClass(b'NSURL'))
+ urlWithString_ = c_void_p(objc.sel_registerName(b'URLWithString:'))
+ objc.objc_msgSend.restype = c_void_p
+ objc.objc_msgSend.argtypes = [c_void_p, c_void_p, c_void_p]
+ nsurl = c_void_p(objc.objc_msgSend(NSURL, urlWithString_, url))
+
+ # Get the shared UIApplication instance
+ UIApplication = c_void_p(objc.objc_getClass(b'UIApplication'))
+ sharedApplication = c_void_p(objc.sel_registerName(b'sharedApplication'))
+ objc.objc_msgSend.argtypes = [c_void_p, c_void_p]
+ objc.objc_msgSend.restype = c_void_p
+ shared_app = c_void_p(objc.objc_msgSend(UIApplication, sharedApplication))
+
+ # Open the URL on the shared application
+ openURL_ = c_void_p(objc.sel_registerName(b'openURL:'))
+ objc.objc_msgSend.argtypes = [c_void_p, c_void_p, c_void_p]
+ objc.objc_msgSend.restype = None
+ objc.objc_msgSend(shared_app, openURL_, nsurl)
+
+ return True
+
+ register("mobilesafari", None, MobileSafari(), -1)
# OK, now that we know what the default preference orders for each
# platform are, allow user to override them with the BROWSER variable.
diff -Nru orig/Modules/Setup.ios-aarch64 modified/Modules/Setup.ios-aarch64
--- orig/Modules/Setup.ios-aarch64 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/Setup.ios-aarch64 2015-02-28 16:58:11.000000000 +0800
@@ -0,0 +1,118 @@
+#####################################################################
+# Static compilation instructions for all binary modules.
+#####################################################################
+
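+# Each non-comment line follows the standard Modules/Setup syntax:
+#   <module name> <source file(s)...> [compiler flags] [linker flags]
+# e.g. the zlib line below compiles zlibmodule.c into the interpreter
+# statically and links against libz.
+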
+_bisect _bisectmodule.c
+_codecs_cn cjkcodecs/_codecs_cn.c
+_codecs_hk cjkcodecs/_codecs_hk.c
+_codecs_iso2022 cjkcodecs/_codecs_iso2022.c
+_codecs_jp cjkcodecs/_codecs_jp.c
+_codecs_kr cjkcodecs/_codecs_kr.c
+_codecs_tw cjkcodecs/_codecs_tw.c
+_crypt _cryptmodule.c
+_csv _csv.c
+_datetime _datetimemodule.c
+_elementtree _elementtree.c \
+ -I$(srcdir)/Modules/expat \
+ -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI
+_heapq _heapqmodule.c
+_json _json.c
+_lsprof _lsprof.c rotatingtree.c
+_md5 md5module.c
+_multibytecodec cjkcodecs/multibytecodec.c
+_multiprocessing _multiprocessing/multiprocessing.c _multiprocessing/semaphore.c
+_opcode _opcode.c
+_pickle _pickle.c
+_posixsubprocess _posixsubprocess.c
+_random _randommodule.c
+_sha1 sha1module.c
+_sha256 sha256module.c
+_sha512 sha512module.c
+_socket socketmodule.c
+_struct _struct.c
+array arraymodule.c
+audioop audioop.c
+binascii binascii.c
+cmath cmathmodule.c _math.c
+fcntl fcntlmodule.c
+grp grpmodule.c
+math mathmodule.c
+mmap mmapmodule.c
+parser parsermodule.c
+pyexpat expat/xmlparse.c \
+ expat/xmlrole.c \
+ expat/xmltok.c \
+ pyexpat.c \
+ -I$(srcdir)/Modules/expat \
+ -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI
+resource resource.c
+select selectmodule.c
+syslog syslogmodule.c
+termios termios.c
+time timemodule.c
+unicodedata unicodedata.c
+xxlimited xxlimited.c -DPy_LIMITED_API=0x03040000
+zlib zlibmodule.c -I$(prefix)/include -lz
+
+#####################################################################
+# Testing modules
+#####################################################################
+_ctypes_test _ctypes/_ctypes_test.c
+_testbuffer _testbuffer.c
+_testcapi _testcapimodule.c -I$(srcdir)/Include -DPy_BUILD_CORE=1
+_testimportmultiple _testimportmultiple.c
+
+#####################################################################
+# Platform specific configuration
+#####################################################################
+_ctypes _ctypes/_ctypes.c \
+ _ctypes/callbacks.c \
+ _ctypes/callproc.c \
+ _ctypes/stgdict.c \
+ _ctypes/cfield.c \
+ _ctypes/libffi_ios/prep_cif.c \
+ _ctypes/libffi_ios/types.c \
+ _ctypes/libffi_ios/raw_api.c \
+ _ctypes/libffi_ios/java_raw_api.c \
+ _ctypes/libffi_ios/closures.c \
+ _ctypes/libffi_ios/aarch64/ffi.c \
+ _ctypes/libffi_ios/aarch64/sysv.S \
+ -I$(srcdir)/Modules/_ctypes/libffi_ios/include
+
+_decimal _decimal/_decimal.c \
+ _decimal/libmpdec/basearith.c \
+ _decimal/libmpdec/constants.c \
+ _decimal/libmpdec/context.c \
+ _decimal/libmpdec/convolute.c \
+ _decimal/libmpdec/crt.c \
+ _decimal/libmpdec/difradix2.c \
+ _decimal/libmpdec/fnt.c \
+ _decimal/libmpdec/fourstep.c \
+ _decimal/libmpdec/io.c \
+ _decimal/libmpdec/memory.c \
+ _decimal/libmpdec/mpdecimal.c \
+ _decimal/libmpdec/numbertheory.c \
+ _decimal/libmpdec/sixstep.c \
+ _decimal/libmpdec/transpose.c \
+ -I$(srcdir)/Modules/_decimal/libmpdec \
+ -DCONFIG_64=1 -DANSI=1 -DHAVE_UINT128_T=1
+
+#####################################################################
+# Modules that require additional frameworks
+#####################################################################
+#_bz2 bz2module.c -lbz2
+#_curses _cursesmodule.c -lcurses -ltermcap
+#_curses_panel _curses_panel.c -lpanel -lncurses
+#_dbm _dbmmodule.c
+#_gdbm _gdbmmodule.c -I/usr/local/include -L/usr/local/lib -lgdbm
+#_lzma
+#_sqlite3
+#_ssl _ssl.c \
+# -I$(SSL)/include -I$(SSL)/include/openssl \
+# -L$(SSL)/lib -lssl -lcrypto
+# -DUSE_SSL
+#_tkinter _tkinter.c tkappinit.c -DWITH_APPINIT -I... -L...
+#nis nismodule.c -lnsl
+#ossaudiodev
+#readline readline.c -lreadline -ltermcap
+#spwd spwdmodule.c
diff -Nru orig/Modules/Setup.ios-arm modified/Modules/Setup.ios-arm
--- orig/Modules/Setup.ios-arm 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/Setup.ios-arm 2015-02-28 17:01:14.000000000 +0800
@@ -0,0 +1,119 @@
+#####################################################################
+# Static compilation instructions for all binary modules.
+#####################################################################
+
+_bisect _bisectmodule.c
+_codecs_cn cjkcodecs/_codecs_cn.c
+_codecs_hk cjkcodecs/_codecs_hk.c
+_codecs_iso2022 cjkcodecs/_codecs_iso2022.c
+_codecs_jp cjkcodecs/_codecs_jp.c
+_codecs_kr cjkcodecs/_codecs_kr.c
+_codecs_tw cjkcodecs/_codecs_tw.c
+_crypt _cryptmodule.c
+_csv _csv.c
+_datetime _datetimemodule.c
+_elementtree _elementtree.c \
+ -I$(srcdir)/Modules/expat \
+ -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI
+_heapq _heapqmodule.c
+_json _json.c
+_lsprof _lsprof.c rotatingtree.c
+_md5 md5module.c
+_multibytecodec cjkcodecs/multibytecodec.c
+_multiprocessing _multiprocessing/multiprocessing.c _multiprocessing/semaphore.c
+_opcode _opcode.c
+_pickle _pickle.c
+_posixsubprocess _posixsubprocess.c
+_random _randommodule.c
+_sha1 sha1module.c
+_sha256 sha256module.c
+_sha512 sha512module.c
+_socket socketmodule.c
+_struct _struct.c
+array arraymodule.c
+audioop audioop.c
+binascii binascii.c
+cmath cmathmodule.c _math.c
+fcntl fcntlmodule.c
+grp grpmodule.c
+math mathmodule.c
+mmap mmapmodule.c
+parser parsermodule.c
+pyexpat expat/xmlparse.c \
+ expat/xmlrole.c \
+ expat/xmltok.c \
+ pyexpat.c \
+ -I$(srcdir)/Modules/expat \
+ -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI
+resource resource.c
+select selectmodule.c
+syslog syslogmodule.c
+termios termios.c
+time timemodule.c
+unicodedata unicodedata.c
+xxlimited xxlimited.c -DPy_LIMITED_API=0x03040000
+zlib zlibmodule.c -I$(prefix)/include -lz
+
+#####################################################################
+# Testing modules
+#####################################################################
+_ctypes_test _ctypes/_ctypes_test.c
+_testbuffer _testbuffer.c
+_testcapi _testcapimodule.c -I$(srcdir)/Include -DPy_BUILD_CORE=1
+_testimportmultiple _testimportmultiple.c
+
+#####################################################################
+# Platform specific configuration
+#####################################################################
+_ctypes _ctypes/_ctypes.c \
+ _ctypes/callbacks.c \
+ _ctypes/callproc.c \
+ _ctypes/stgdict.c \
+ _ctypes/cfield.c \
+ _ctypes/libffi_ios/prep_cif.c \
+ _ctypes/libffi_ios/types.c \
+ _ctypes/libffi_ios/raw_api.c \
+ _ctypes/libffi_ios/java_raw_api.c \
+ _ctypes/libffi_ios/closures.c \
+ _ctypes/libffi_ios/arm/ffi.c \
+ _ctypes/libffi_ios/arm/sysv.S \
+ _ctypes/libffi_ios/arm/trampoline.S \
+ -I$(srcdir)/Modules/_ctypes/libffi_ios/include
+
+_decimal _decimal/_decimal.c \
+ _decimal/libmpdec/basearith.c \
+ _decimal/libmpdec/constants.c \
+ _decimal/libmpdec/context.c \
+ _decimal/libmpdec/convolute.c \
+ _decimal/libmpdec/crt.c \
+ _decimal/libmpdec/difradix2.c \
+ _decimal/libmpdec/fnt.c \
+ _decimal/libmpdec/fourstep.c \
+ _decimal/libmpdec/io.c \
+ _decimal/libmpdec/memory.c \
+ _decimal/libmpdec/mpdecimal.c \
+ _decimal/libmpdec/numbertheory.c \
+ _decimal/libmpdec/sixstep.c \
+ _decimal/libmpdec/transpose.c \
+ -I$(srcdir)/Modules/_decimal/libmpdec \
+ -DCONFIG_32=1 -DANSI=1
+
+#####################################################################
+# Modules that require additional frameworks
+#####################################################################
+#_bz2 bz2module.c -lbz2
+#_curses _cursesmodule.c -lcurses -ltermcap
+#_curses_panel _curses_panel.c -lpanel -lncurses
+#_dbm _dbmmodule.c
+#_gdbm _gdbmmodule.c -I/usr/local/include -L/usr/local/lib -lgdbm
+#_lzma
+#_sqlite3
+#_ssl _ssl.c \
+# -I$(SSL)/include -I$(SSL)/include/openssl \
+# -L$(SSL)/lib -lssl -lcrypto
+# -DUSE_SSL
+#_tkinter _tkinter.c tkappinit.c -DWITH_APPINIT -I... -L...
+#nis nismodule.c -lnsl
+#ossaudiodev
+#readline readline.c -lreadline -ltermcap
+#spwd spwdmodule.c
diff -Nru orig/Modules/Setup.ios-i386 modified/Modules/Setup.ios-i386
--- orig/Modules/Setup.ios-i386 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/Setup.ios-i386 2015-03-12 21:44:02.000000000 +0800
@@ -0,0 +1,118 @@
+#####################################################################
+# Static compilation instructions for all binary modules.
+#####################################################################
+
+_bisect _bisectmodule.c
+_codecs_cn cjkcodecs/_codecs_cn.c
+_codecs_hk cjkcodecs/_codecs_hk.c
+_codecs_iso2022 cjkcodecs/_codecs_iso2022.c
+_codecs_jp cjkcodecs/_codecs_jp.c
+_codecs_kr cjkcodecs/_codecs_kr.c
+_codecs_tw cjkcodecs/_codecs_tw.c
+_crypt _cryptmodule.c
+_csv _csv.c
+_datetime _datetimemodule.c
+_elementtree _elementtree.c \
+ -I$(srcdir)/Modules/expat \
+ -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI
+_heapq _heapqmodule.c
+_json _json.c
+_lsprof _lsprof.c rotatingtree.c
+_md5 md5module.c
+_multibytecodec cjkcodecs/multibytecodec.c
+_multiprocessing _multiprocessing/multiprocessing.c _multiprocessing/semaphore.c
+_opcode _opcode.c
+_pickle _pickle.c
+_posixsubprocess _posixsubprocess.c
+_random _randommodule.c
+_sha1 sha1module.c
+_sha256 sha256module.c
+_sha512 sha512module.c
+_socket socketmodule.c
+_struct _struct.c
+array arraymodule.c
+audioop audioop.c
+binascii binascii.c
+cmath cmathmodule.c _math.c
+fcntl fcntlmodule.c
+grp grpmodule.c
+math mathmodule.c
+mmap mmapmodule.c
+parser parsermodule.c
+pyexpat expat/xmlparse.c \
+ expat/xmlrole.c \
+ expat/xmltok.c \
+ pyexpat.c \
+ -I$(srcdir)/Modules/expat \
+ -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI
+resource resource.c
+select selectmodule.c
+syslog syslogmodule.c
+termios termios.c
+time timemodule.c
+unicodedata unicodedata.c
+xxlimited xxlimited.c -DPy_LIMITED_API=0x03040000
+zlib zlibmodule.c -I$(prefix)/include -lz
+
+#####################################################################
+# Testing modules
+#####################################################################
+_ctypes_test _ctypes/_ctypes_test.c
+_testbuffer _testbuffer.c
+_testcapi _testcapimodule.c -I$(srcdir)/Include -DPy_BUILD_CORE=1
+_testimportmultiple _testimportmultiple.c
+
+#####################################################################
+# Platform specific configuration
+#####################################################################
+_ctypes _ctypes/_ctypes.c \
+ _ctypes/callbacks.c \
+ _ctypes/callproc.c \
+ _ctypes/stgdict.c \
+ _ctypes/cfield.c \
+ _ctypes/libffi_ios/prep_cif.c \
+ _ctypes/libffi_ios/types.c \
+ _ctypes/libffi_ios/raw_api.c \
+ _ctypes/libffi_ios/java_raw_api.c \
+ _ctypes/libffi_ios/closures.c \
+ _ctypes/libffi_ios/x86/ffi.c \
+ _ctypes/libffi_ios/x86/sysv.S \
+ -I$(srcdir)/Modules/_ctypes/libffi_ios/include
+
+_decimal _decimal/_decimal.c \
+ _decimal/libmpdec/basearith.c \
+ _decimal/libmpdec/constants.c \
+ _decimal/libmpdec/context.c \
+ _decimal/libmpdec/convolute.c \
+ _decimal/libmpdec/crt.c \
+ _decimal/libmpdec/difradix2.c \
+ _decimal/libmpdec/fnt.c \
+ _decimal/libmpdec/fourstep.c \
+ _decimal/libmpdec/io.c \
+ _decimal/libmpdec/memory.c \
+ _decimal/libmpdec/mpdecimal.c \
+ _decimal/libmpdec/numbertheory.c \
+ _decimal/libmpdec/sixstep.c \
+ _decimal/libmpdec/transpose.c \
+ -I$(srcdir)/Modules/_decimal/libmpdec \
+ -DCONFIG_32=1 -DANSI=1
+
+#####################################################################
+# Modules that require additional frameworks
+#####################################################################
+#_bz2 bz2module.c -lbz2
+#_curses _cursesmodule.c -lcurses -ltermcap
+#_curses_panel _curses_panel.c -lpanel -lncurses
+#_dbm _dbmmodule.c
+#_gdbm _gdbmmodule.c -I/usr/local/include -L/usr/local/lib -lgdbm
+#_lzma
+#_sqlite3
+#_ssl _ssl.c \
+# -I$(SSL)/include -I$(SSL)/include/openssl \
+# -L$(SSL)/lib -lssl -lcrypto
+# -DUSE_SSL
+#_tkinter _tkinter.c tkappinit.c -DWITH_APPINIT -I... -L...
+#nis nismodule.c -lnsl
+#ossaudiodev
+#readline readline.c -lreadline -ltermcap
+#spwd spwdmodule.c
diff -Nru orig/Modules/Setup.ios-x86_64 modified/Modules/Setup.ios-x86_64
--- orig/Modules/Setup.ios-x86_64 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/Setup.ios-x86_64 2015-03-12 21:40:26.000000000 +0800
@@ -0,0 +1,118 @@
+#####################################################################
+# Static compilation instructions for all binary modules.
+#####################################################################
+
+_bisect _bisectmodule.c
+_codecs_cn cjkcodecs/_codecs_cn.c
+_codecs_hk cjkcodecs/_codecs_hk.c
+_codecs_iso2022 cjkcodecs/_codecs_iso2022.c
+_codecs_jp cjkcodecs/_codecs_jp.c
+_codecs_kr cjkcodecs/_codecs_kr.c
+_codecs_tw cjkcodecs/_codecs_tw.c
+_crypt _cryptmodule.c
+_csv _csv.c
+_datetime _datetimemodule.c
+_elementtree _elementtree.c \
+ -I$(srcdir)/Modules/expat \
+ -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI
+_heapq _heapqmodule.c
+_json _json.c
+_lsprof _lsprof.c rotatingtree.c
+_md5 md5module.c
+_multibytecodec cjkcodecs/multibytecodec.c
+_multiprocessing _multiprocessing/multiprocessing.c _multiprocessing/semaphore.c
+_opcode _opcode.c
+_pickle _pickle.c
+_posixsubprocess _posixsubprocess.c
+_random _randommodule.c
+_sha1 sha1module.c
+_sha256 sha256module.c
+_sha512 sha512module.c
+_socket socketmodule.c
+_struct _struct.c
+array arraymodule.c
+audioop audioop.c
+binascii binascii.c
+cmath cmathmodule.c _math.c
+fcntl fcntlmodule.c
+grp grpmodule.c
+math mathmodule.c
+mmap mmapmodule.c
+parser parsermodule.c
+pyexpat expat/xmlparse.c \
+ expat/xmlrole.c \
+ expat/xmltok.c \
+ pyexpat.c \
+ -I$(srcdir)/Modules/expat \
+ -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI
+resource resource.c
+select selectmodule.c
+syslog syslogmodule.c
+termios termios.c
+time timemodule.c
+unicodedata unicodedata.c
+xxlimited xxlimited.c -DPy_LIMITED_API=0x03040000
+zlib zlibmodule.c -I$(prefix)/include -lz
+
+#####################################################################
+# Testing modules
+#####################################################################
+_ctypes_test _ctypes/_ctypes_test.c
+_testbuffer _testbuffer.c
+_testcapi _testcapimodule.c -I$(srcdir)/Include -DPy_BUILD_CORE=1
+_testimportmultiple _testimportmultiple.c
+
+#####################################################################
+# Platform specific configuration
+#####################################################################
+_ctypes _ctypes/_ctypes.c \
+ _ctypes/callbacks.c \
+ _ctypes/callproc.c \
+ _ctypes/stgdict.c \
+ _ctypes/cfield.c \
+ _ctypes/libffi_ios/prep_cif.c \
+ _ctypes/libffi_ios/types.c \
+ _ctypes/libffi_ios/raw_api.c \
+ _ctypes/libffi_ios/java_raw_api.c \
+ _ctypes/libffi_ios/closures.c \
+ _ctypes/libffi_ios/x86/ffi64.c \
+ _ctypes/libffi_ios/x86/unix64.S \
+ -I$(srcdir)/Modules/_ctypes/libffi_ios/include
+
+_decimal _decimal/_decimal.c \
+ _decimal/libmpdec/basearith.c \
+ _decimal/libmpdec/constants.c \
+ _decimal/libmpdec/context.c \
+ _decimal/libmpdec/convolute.c \
+ _decimal/libmpdec/crt.c \
+ _decimal/libmpdec/difradix2.c \
+ _decimal/libmpdec/fnt.c \
+ _decimal/libmpdec/fourstep.c \
+ _decimal/libmpdec/io.c \
+ _decimal/libmpdec/memory.c \
+ _decimal/libmpdec/mpdecimal.c \
+ _decimal/libmpdec/numbertheory.c \
+ _decimal/libmpdec/sixstep.c \
+ _decimal/libmpdec/transpose.c \
+ -I$(srcdir)/Modules/_decimal/libmpdec \
+ -DCONFIG_64=1 -DANSI=1 -DHAVE_UINT128_T=1
+
+#####################################################################
+# Modules that require additional frameworks
+#####################################################################
+#_bz2 bz2module.c -lbz2
+#_curses _cursesmodule.c -lcurses -ltermcap
+#_curses_panel _curses_panel.c -lpanel -lncurses
+#_dbm _dbmmodule.c
+#_gdbm _gdbmmodule.c -I/usr/local/include -L/usr/local/lib -lgdbm
+#_lzma
+#_sqlite3
+#_ssl _ssl.c \
+# -I$(SSL)/include -I$(SSL)/include/openssl \
+# -L$(SSL)/lib -lssl -lcrypto
+# -DUSE_SSL
+#_tkinter _tkinter.c tkappinit.c -DWITH_APPINIT -I... -L...
+#nis nismodule.c -lnsl
+#ossaudiodev
+#readline readline.c -lreadline -ltermcap
+#spwd spwdmodule.c
diff -Nru orig/Modules/_ctypes/libffi_ios/LICENSE modified/Modules/_ctypes/libffi_ios/LICENSE
--- orig/Modules/_ctypes/libffi_ios/LICENSE 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/LICENSE 2015-02-26 10:00:15.000000000 +0800
@@ -0,0 +1,21 @@
+libffi - Copyright (c) 1996-2014 Anthony Green, Red Hat, Inc and others.
+See source files for details.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+``Software''), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff -Nru orig/Modules/_ctypes/libffi_ios/README modified/Modules/_ctypes/libffi_ios/README
--- orig/Modules/_ctypes/libffi_ios/README 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/README 2015-02-26 10:00:00.000000000 +0800
@@ -0,0 +1,447 @@
+Status
+======
+
+libffi-3.2.1 was released on November 12, 2014. Check the libffi web
+page for updates: <URL:http://sourceware.org/libffi/>.
+
+
+What is libffi?
+===============
+
+Compilers for high level languages generate code that follows certain
+conventions. These conventions are necessary, in part, for separate
+compilation to work. One such convention is the "calling
+convention". The "calling convention" is essentially a set of
+assumptions made by the compiler about where function arguments will
+be found on entry to a function. A "calling convention" also specifies
+where the return value for a function is found.
+
+Some programs may not know at the time of compilation what arguments
+are to be passed to a function. For instance, an interpreter may be
+told at run-time about the number and types of arguments used to call
+a given function. Libffi can be used in such programs to provide a
+bridge from the interpreter program to compiled code.
+
+The libffi library provides a portable, high level programming
+interface to various calling conventions. This allows a programmer to
+call any function specified by a call interface description at run
+time.
+
+FFI stands for Foreign Function Interface. A foreign function
+interface is the popular name for the interface that allows code
+written in one language to call code written in another language. The
+libffi library really only provides the lowest, machine dependent
+layer of a fully featured foreign function interface. A layer must
+exist above libffi that handles type conversions for values passed
+between the two languages.
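+
+As a minimal sketch of that run-time interface (the "add" function here
+is just a stand-in target), a caller describes a signature once with
+ffi_prep_cif() and may then invoke any matching function pointer with
+ffi_call():
+
+  #include <ffi.h>
+  #include <stdio.h>
+
+  static int add(int a, int b) { return a + b; }
+
+  int main(void)
+  {
+    ffi_cif cif;
+    ffi_type *args[2] = { &ffi_type_sint, &ffi_type_sint };
+    int a = 2, b = 3;
+    void *values[2] = { &a, &b };
+    ffi_arg result;
+
+    /* Describe the call: default ABI, two ints in, int out. */
+    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, args) == FFI_OK)
+      {
+        /* Marshal the arguments and perform the call. */
+        ffi_call(&cif, FFI_FN(add), &result, values);
+        printf("%d\n", (int)result);   /* prints 5 */
+      }
+    return 0;
+  }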
+
+
+Supported Platforms
+===================
+
+Libffi has been ported to many different platforms.
+For specific configuration details and testing status, please
+refer to the wiki page here:
+
+ http://www.moxielogic.org/wiki/index.php?title=Libffi_3.2
+
+At the time of release, the following basic configurations have been
+tested:
+
+|-----------------+------------------+-------------------------|
+| Architecture | Operating System | Compiler |
+|-----------------+------------------+-------------------------|
+| AArch64 (ARM64) | iOS | Clang |
+| AArch64 | Linux | GCC |
+| Alpha | Linux | GCC |
+| Alpha | Tru64 | GCC |
+| ARC | Linux | GCC |
+| ARM | Linux | GCC |
+| ARM | iOS | GCC |
+| AVR32 | Linux | GCC |
+| Blackfin | uClinux | GCC |
+| HPPA | HPUX | GCC |
+| IA-64 | Linux | GCC |
+| M68K | FreeMiNT | GCC |
+| M68K | Linux | GCC |
+| M68K | RTEMS | GCC |
+| M88K | OpenBSD/mvme88k | GCC |
+| Meta | Linux | GCC |
+| MicroBlaze | Linux | GCC |
+| MIPS | IRIX | GCC |
+| MIPS | Linux | GCC |
+| MIPS | RTEMS | GCC |
+| MIPS64 | Linux | GCC |
+| Moxie | Bare metal | GCC |
+| Nios II | Linux | GCC |
+| OpenRISC | Linux | GCC |
+| PowerPC 32-bit | AIX | IBM XL C |
+| PowerPC 64-bit | AIX | IBM XL C |
+| PowerPC | AMIGA | GCC |
+| PowerPC | Linux | GCC |
+| PowerPC | Mac OSX | GCC |
+| PowerPC | FreeBSD | GCC |
+| PowerPC 64-bit | FreeBSD | GCC |
+| PowerPC 64-bit | Linux ELFv1 | GCC |
+| PowerPC 64-bit | Linux ELFv2 | GCC |
+| S390 | Linux | GCC |
+| S390X | Linux | GCC |
+| SPARC | Linux | GCC |
+| SPARC | Solaris | GCC |
+| SPARC | Solaris | Oracle Solaris Studio C |
+| SPARC64 | Linux | GCC |
+| SPARC64 | FreeBSD | GCC |
+| SPARC64 | Solaris | Oracle Solaris Studio C |
+| TILE-Gx/TILEPro | Linux | GCC |
+| VAX | OpenBSD/vax | GCC |
+| X86 | FreeBSD | GCC |
+| X86 | GNU HURD | GCC |
+| X86 | Interix | GCC |
+| X86 | kFreeBSD | GCC |
+| X86 | Linux | GCC |
+| X86 | Mac OSX | GCC |
+| X86 | OpenBSD | GCC |
+| X86 | OS/2 | GCC |
+| X86 | Solaris | GCC |
+| X86 | Solaris | Oracle Solaris Studio C |
+| X86 | Windows/Cygwin | GCC |
+| X86 | Windows/MingW | GCC |
+| X86-64 | FreeBSD | GCC |
+| X86-64 | Linux | GCC |
+| X86-64 | Linux/x32 | GCC |
+| X86-64 | OpenBSD | GCC |
+| X86-64 | Solaris | Oracle Solaris Studio C |
+| X86-64 | Windows/Cygwin | GCC |
+| X86-64 | Windows/MingW | GCC |
+| Xtensa | Linux | GCC |
+|-----------------+------------------+-------------------------|
+
+Please send additional platform test results to
+libffi-discuss@sourceware.org and feel free to update the wiki page
+above.
+
+Installing libffi
+=================
+
+First you must configure the distribution for your particular
+system. Go to the directory you wish to build libffi in and run the
+"configure" program found in the root directory of the libffi source
+distribution.
+
+If you're building libffi directly from version control, configure won't
+exist yet; run ./autogen.sh first.
+
+You may want to tell configure where to install the libffi library and
+header files. To do that, use the --prefix configure switch. Libffi
+will install under /usr/local by default.
+
+If you want to enable extra run-time debugging checks, use the
+--enable-debug configure switch. This is useful when your program dies
+mysteriously while using libffi.
+
+Another useful configure switch is --enable-purify-safety. Using this
+will add some extra code which will suppress certain warnings when you
+are using Purify with libffi. Only use this switch when using
+Purify, as it will slow down the library.
+
+It's also possible to build libffi on Windows platforms with
+Microsoft's Visual C++ compiler. In this case, use the msvcc.sh
+wrapper script during configuration like so:
+
+path/to/configure CC=path/to/msvcc.sh CXX=path/to/msvcc.sh LD=link CPP="cl -nologo -EP"
+
+For 64-bit Windows builds, use CC="path/to/msvcc.sh -m64" and
+CXX="path/to/msvcc.sh -m64". You may also need to specify --build
+appropriately.
+
+It is also possible to build libffi on Windows platforms with the LLVM
+project's clang-cl compiler, like below:
+
+path/to/configure CC="path/to/msvcc.sh -clang-cl" CXX="path/to/msvcc.sh -clang-cl" LD=link CPP="clang-cl -EP"
+
+When building with MSVC under a MingW environment, you may need to
+remove the line in configure that sets 'fix_srcfile_path' to a 'cygpath'
+command. ('cygpath' is not present in MingW, and is not required when
+using MingW-style paths.)
+
+For iOS builds, the 'libffi.xcodeproj' Xcode project is available.
+
+Configure has many other options. Use "configure --help" to see them all.
+
+Once configure has finished, type "make". Note that you must be using
+GNU make. You can ftp GNU make from ftp.gnu.org:/pub/gnu/make.
+
+To ensure that libffi is working as advertised, type "make check".
+This will require that you have DejaGNU installed.
+
+To install the library and header files, type "make install".
+
+
+History
+=======
+
+See the git log for details at http://github.com/atgreen/libffi.
+
+3.2.1 Nov-12-14
+ Build fix for non-iOS AArch64 targets.
+
+3.2 Nov-11-14
+ Add C99 Complex Type support (currently only supported on
+ s390).
+ Add support for PASCAL and REGISTER calling conventions on x86
+ Windows/Linux.
+ Add OpenRISC and Cygwin-64 support.
+ Bug fixes.
+
+3.1 May-19-14
+ Add AArch64 (ARM64) iOS support.
+ Add Nios II support.
+ Add m88k and DEC VAX support.
+ Add support for stdcall, thiscall, and fastcall on non-Windows
+ 32-bit x86 targets such as Linux.
+ Various Android, MIPS N32, x86, FreeBSD and UltraSPARC IIi
+ fixes.
+ Make the testsuite more robust: eliminate several spurious
+ failures, and respect the $CC and $CXX environment variables.
+ Archive off the manually maintained ChangeLog in favor of git
+ log.
+
+3.0.13 Mar-17-13
+ Add Meta support.
+ Add missing Moxie bits.
+ Fix stack alignment bug on 32-bit x86.
+ Build fix for m68000 targets.
+ Build fix for soft-float Power targets.
+ Fix the install dir location for some platforms when building
+ with GCC (OS X, Solaris).
+ Fix Cygwin regression.
+
+3.0.12 Feb-11-13
+ Add Moxie support.
+ Add AArch64 support.
+ Add Blackfin support.
+ Add TILE-Gx/TILEPro support.
+ Add MicroBlaze support.
+ Add Xtensa support.
+ Add support for PaX enabled kernels with MPROTECT.
+ Add support for native vendor compilers on
+ Solaris and AIX.
+ Work around LLVM/GCC interoperability issue on x86_64.
+
+3.0.11 Apr-11-12
+ Lots of build fixes.
+ Add support for variadic functions (ffi_prep_cif_var).
+ Add Linux/x32 support.
+ Add thiscall, fastcall and MSVC cdecl support on Windows.
+ Add Amiga and newer MacOS support.
+ Add m68k FreeMiNT support.
+ Integration with iOS's Xcode build tools.
+ Fix Octeon and MC68881 support.
+ Fix code pessimizations.
+
+3.0.10 Aug-23-11
+ Add support for Apple's iOS.
+ Add support for ARM VFP ABI.
+ Add RTEMS support for MIPS and M68K.
+ Fix instruction cache clearing problems on
+ ARM and SPARC.
+ Fix the N64 build on mips-sgi-irix6.5.
+ Enable builds with Microsoft's compiler.
+ Enable x86 builds with Oracle's Solaris compiler.
+ Fix support for calling code compiled with Oracle's Sparc
+ Solaris compiler.
+ Testsuite fixes for Tru64 Unix.
+ Additional platform support.
+
+3.0.9 Dec-31-09
+ Add AVR32 and win64 ports. Add ARM softfp support.
+ Many fixes for AIX, Solaris, HP-UX, *BSD.
+ Several PowerPC and x86-64 bug fixes.
+ Build DLL for windows.
+
+3.0.8 Dec-19-08
+ Add *BSD, BeOS, and PA-Linux support.
+
+3.0.7 Nov-11-08
+ Fix for ppc FreeBSD.
+ (thanks to Andreas Tobler)
+
+3.0.6 Jul-17-08
+ Fix for closures on sh.
+ Mark the sh/sh64 stack as non-executable.
+ (both thanks to Kaz Kojima)
+
+3.0.5 Apr-3-08
+ Fix libffi.pc file.
+ Fix #define ARM for IcedTea users.
+ Fix x86 closure bug.
+
+3.0.4 Feb-24-08
+ Fix x86 OpenBSD configury.
+
+3.0.3 Feb-22-08
+ Enable x86 OpenBSD thanks to Thomas Heller, and
+ x86-64 FreeBSD thanks to Björn König and Andreas Tobler.
+ Clean up test instruction in README.
+
+3.0.2 Feb-21-08
+ Improved x86 FreeBSD support.
+ Thanks to Björn König.
+
+3.0.1 Feb-15-08
+ Fix instruction cache flushing bug on MIPS.
+ Thanks to David Daney.
+
+3.0.0 Feb-15-08
+ Many changes, mostly thanks to the GCC project.
+ Cygnus Solutions is now Red Hat.
+
+ [10 years go by...]
+
+1.20 Oct-5-98
+ Raffaele Sena produces ARM port.
+
+1.19 Oct-5-98
+ Fixed x86 long double and long long return support.
+ m68k bug fixes from Andreas Schwab.
+ Patch for DU assembler compatibility for the Alpha from Richard
+ Henderson.
+
+1.18 Apr-17-98
+ Bug fixes and MIPS configuration changes.
+
+1.17 Feb-24-98
+ Bug fixes and m68k port from Andreas Schwab. PowerPC port from
+ Geoffrey Keating. Various x86, Sparc and MIPS bug fixes.
+
+1.16 Feb-11-98
+ Richard Henderson produces Alpha port.
+
+1.15 Dec-4-97
+ Fixed an n32 ABI bug. New libtool, auto* support.
+
+1.14 May-13-97
+ libtool is now used to generate shared and static libraries.
+ Fixed a minor portability problem reported by Russ McManus.
+
+1.13 Dec-2-96
+ Added --enable-purify-safety to keep Purify from complaining
+ about certain low level code.
+ Sparc fix for calling functions with < 6 args.
+ Linux x86 a.out fix.
+
+1.12 Nov-22-96
+ Added missing ffi_type_void, needed for supporting void return
+ types. Fixed test case for non MIPS machines. Cygnus Support
+ is now Cygnus Solutions.
+
+1.11 Oct-30-96
+ Added notes about GNU make.
+
+1.10 Oct-29-96
+ Added configuration fix for non GNU compilers.
+
+1.09 Oct-29-96
+ Added --enable-debug configure switch. Clean-ups based on LCLint
+ feedback. ffi_mips.h is always installed. Many configuration
+ fixes. Fixed ffitest.c for sparc builds.
+
+1.08 Oct-15-96
+ Fixed n32 problem. Many clean-ups.
+
+1.07 Oct-14-96
+ Gordon Irlam rewrites v8.S again. Bug fixes.
+
+1.06 Oct-14-96
+ Gordon Irlam improved the sparc port.
+
+1.05 Oct-14-96
+ Interface changes based on feedback.
+
+1.04 Oct-11-96
+ Sparc port complete (modulo struct passing bug).
+
+1.03 Oct-10-96
+ Passing struct args, and returning struct values works for
+ all architectures/calling conventions. Expanded tests.
+
+1.02 Oct-9-96
+ Added SGI n32 support. Fixed bugs in both o32 and Linux support.
+ Added "make test".
+
+1.01 Oct-8-96
+ Fixed float passing bug in mips version. Restructured some
+ of the code. Builds cleanly with SGI tools.
+
+1.00 Oct-7-96
+ First release. No public announcement.
+
+
+Authors & Credits
+=================
+
+libffi was originally written by Anthony Green <green@moxielogic.com>.
+
+The developers of the GNU Compiler Collection project have made
+innumerable valuable contributions. See the ChangeLog file for
+details.
+
+Some of the ideas behind libffi were inspired by Gianni Mariani's free
+gencall library for Silicon Graphics machines.
+
+The closure mechanism was designed and implemented by Kresten Krab
+Thorup.
+
+Major processor architecture ports were contributed by the following
+developers:
+
+aarch64 Marcus Shawcroft, James Greenhalgh
+alpha Richard Henderson
+arm Raffaele Sena
+blackfin Alexandre Keunecke I. de Mendonca
+cris Simon Posnjak, Hans-Peter Nilsson
+frv Anthony Green
+ia64 Hans Boehm
+m32r Kazuhiro Inaoka
+m68k Andreas Schwab
+m88k Miod Vallat
+microblaze Nathan Rossi
+mips Anthony Green, Casey Marshall
+mips64 David Daney
+moxie Anthony Green
+nios ii Sandra Loosemore
+openrisc Sebastian Macke
+pa Randolph Chung, Dave Anglin, Andreas Tobler
+powerpc Geoffrey Keating, Andreas Tobler,
+ David Edelsohn, John Hornkvist
+powerpc64 Jakub Jelinek
+s390 Gerhard Tonn, Ulrich Weigand
+sh Kaz Kojima
+sh64 Kaz Kojima
+sparc Anthony Green, Gordon Irlam
+tile-gx/tilepro Walter Lee
+vax Miod Vallat
+x86 Anthony Green, Jon Beniston
+x86-64 Bo Thorsen
+xtensa Chris Zankel
+
+Jesper Skov and Andrew Haley both did more than their fair share of
+stepping through the code and tracking down bugs.
+
+Thanks also to Tom Tromey for bug fixes, documentation and
+configuration help.
+
+Thanks to Jim Blandy, who provided some useful feedback on the libffi
+interface.
+
+Andreas Tobler has done a tremendous amount of work on the testsuite.
+
+Alex Oliva solved the executable page problem for SElinux.
+
+The list above is almost certainly incomplete and inaccurate. I'm
+happy to make corrections or additions upon request.
+
+If you have a problem, or have found a bug, please send a note to the
+author at green@moxielogic.com, or the project mailing list at
+libffi-discuss@sourceware.org.
diff -Nru orig/Modules/_ctypes/libffi_ios/aarch64/ffi.c modified/Modules/_ctypes/libffi_ios/aarch64/ffi.c
--- orig/Modules/_ctypes/libffi_ios/aarch64/ffi.c 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/aarch64/ffi.c 2015-03-12 21:33:06.000000000 +0800
@@ -0,0 +1,1152 @@
+/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+``Software''), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <ffi.h>
+#include <ffi_common.h>
+#include "internal.h"
+
+/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE;
+ all further uses in this file will refer to the 128-bit type. */
+#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE
+# if FFI_TYPE_LONGDOUBLE != 4
+# error FFI_TYPE_LONGDOUBLE out of date
+# endif
+#else
+# undef FFI_TYPE_LONGDOUBLE
+# define FFI_TYPE_LONGDOUBLE 4
+#endif
+
+union _d
+{
+ UINT64 d;
+ UINT32 s[2];
+};
+
+struct _v
+{
+ union _d d[2] __attribute__((aligned(16)));
+};
+
+struct call_context
+{
+ struct _v v[N_V_ARG_REG];
+ UINT64 x[N_X_ARG_REG];
+};
+
+#if defined (__clang__) && defined (__APPLE__)
+extern void sys_icache_invalidate (void *start, size_t len);
+#endif
+
+static inline void
+ffi_clear_cache (void *start, void *end)
+{
+#if defined (__clang__) && defined (__APPLE__)
+ sys_icache_invalidate (start, (char *)end - (char *)start);
+#elif defined (__GNUC__)
+ __builtin___clear_cache (start, end);
+#else
+#error "Missing builtin to flush instruction cache"
+#endif
+}
+
+/* A subroutine of is_vfp_type. Given a structure type, return the type code
+ of the first non-structure element. Recurse for structure elements.
+ Return -1 if the structure is in fact empty, i.e. no nested elements. */
+
+static int
+is_hfa0 (const ffi_type *ty)
+{
+ ffi_type **elements = ty->elements;
+ int i, ret = -1;
+
+ if (elements != NULL)
+ for (i = 0; elements[i]; ++i)
+ {
+ ret = elements[i]->type;
+ if (ret == FFI_TYPE_STRUCT || ret == FFI_TYPE_COMPLEX)
+ {
+ ret = is_hfa0 (elements[i]);
+ if (ret < 0)
+ continue;
+ }
+ break;
+ }
+
+ return ret;
+}
+
+/* A subroutine of is_vfp_type. Given a structure type, return true if all
+ of the non-structure elements are the same as CANDIDATE. */
+
+static int
+is_hfa1 (const ffi_type *ty, int candidate)
+{
+ ffi_type **elements = ty->elements;
+ int i;
+
+ if (elements != NULL)
+ for (i = 0; elements[i]; ++i)
+ {
+ int t = elements[i]->type;
+ if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX)
+ {
+ if (!is_hfa1 (elements[i], candidate))
+ return 0;
+ }
+ else if (t != candidate)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Determine if TY may be allocated to the FP registers. This is both an
+ fp scalar type as well as a homogeneous floating point aggregate (HFA).
+ That is, a structure consisting of 1 to 4 members of all the same type,
+ where that type is an fp scalar.
+
+ Returns non-zero iff TY is an HFA. The result is the AARCH64_RET_*
+ constant for the type. */
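+
+/* For example, struct { float x, y; } is an HFA of two floats and is
+   passed in floating-point registers (s0/s1), while
+   struct { float f; int i; } mixes base types and is not an HFA. */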
+
+static int
+is_vfp_type (const ffi_type *ty)
+{
+ ffi_type **elements;
+ int candidate, i;
+ size_t size, ele_count;
+
+ /* Quickest tests first. */
+ candidate = ty->type;
+ switch (candidate)
+ {
+ default:
+ return 0;
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ ele_count = 1;
+ goto done;
+ case FFI_TYPE_COMPLEX:
+ candidate = ty->elements[0]->type;
+ switch (candidate)
+ {
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ ele_count = 2;
+ goto done;
+ }
+ return 0;
+ case FFI_TYPE_STRUCT:
+ break;
+ }
+
+ /* No HFA types are smaller than 4 bytes, or larger than 64 bytes. */
+ size = ty->size;
+ if (size < 4 || size > 64)
+ return 0;
+
+ /* Find the type of the first non-structure member. */
+ elements = ty->elements;
+ candidate = elements[0]->type;
+ if (candidate == FFI_TYPE_STRUCT || candidate == FFI_TYPE_COMPLEX)
+ {
+ for (i = 0; ; ++i)
+ {
+ candidate = is_hfa0 (elements[i]);
+ if (candidate >= 0)
+ break;
+ }
+ }
+
+ /* If the first member is not a floating point type, it's not an HFA.
+ Also quickly re-check the size of the structure. */
+ switch (candidate)
+ {
+ case FFI_TYPE_FLOAT:
+ ele_count = size / sizeof(float);
+ if (size != ele_count * sizeof(float))
+ return 0;
+ break;
+ case FFI_TYPE_DOUBLE:
+ ele_count = size / sizeof(double);
+ if (size != ele_count * sizeof(double))
+ return 0;
+ break;
+ case FFI_TYPE_LONGDOUBLE:
+ ele_count = size / sizeof(long double);
+ if (size != ele_count * sizeof(long double))
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ if (ele_count > 4)
+ return 0;
+
+ /* Finally, make sure that all scalar elements are the same type. */
+ for (i = 0; elements[i]; ++i)
+ {
+ int t = elements[i]->type;
+ if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX)
+ {
+ if (!is_hfa1 (elements[i], candidate))
+ return 0;
+ }
+ else if (t != candidate)
+ return 0;
+ }
+
+ /* All tests succeeded. Encode the result. */
+ done:
+ return candidate * 4 + (4 - ele_count);
+}
+
+/* Representation of the procedure call argument marshalling
+ state.
+
+ The terse state variable names match the names used in the AARCH64
+ PCS. */
+
+struct arg_state
+{
+ unsigned ngrn; /* Next general-purpose register number. */
+ unsigned nsrn; /* Next vector register number. */
+ size_t nsaa; /* Next stack offset. */
+
+#if defined (__APPLE__)
+ unsigned allocating_variadic;
+#endif
+};
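+
+/* For example, marshalling f(int a, double b, int c) assigns a to x0
+   (ngrn 0 -> 1), b to d0 (nsrn 0 -> 1) and c to x1 (ngrn 1 -> 2);
+   nsaa only starts advancing once the eight integer or eight vector
+   argument registers are exhausted. */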
+
+/* Initialize a procedure call argument marshalling state. */
+static void
+arg_init (struct arg_state *state)
+{
+ state->ngrn = 0;
+ state->nsrn = 0;
+ state->nsaa = 0;
+#if defined (__APPLE__)
+ state->allocating_variadic = 0;
+#endif
+}
+
+/* Allocate an aligned slot on the stack and return a pointer to it. */
+static void *
+allocate_to_stack (struct arg_state *state, void *stack,
+ size_t alignment, size_t size)
+{
+ size_t nsaa = state->nsaa;
+
+ /* Round up the NSAA to the larger of 8 or the natural
+ alignment of the argument's type. */
+#if defined (__APPLE__)
+ if (state->allocating_variadic && alignment < 8)
+ alignment = 8;
+#else
+ if (alignment < 8)
+ alignment = 8;
+#endif
+
+ nsaa = ALIGN (nsaa, alignment);
+ state->nsaa = nsaa + size;
+
+ return (char *)stack + nsaa;
+}
+
+static ffi_arg
+extend_integer_type (void *source, int type)
+{
+ switch (type)
+ {
+ case FFI_TYPE_UINT8:
+ return *(UINT8 *) source;
+ case FFI_TYPE_SINT8:
+ return *(SINT8 *) source;
+ case FFI_TYPE_UINT16:
+ return *(UINT16 *) source;
+ case FFI_TYPE_SINT16:
+ return *(SINT16 *) source;
+ case FFI_TYPE_UINT32:
+ return *(UINT32 *) source;
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ return *(SINT32 *) source;
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ return *(UINT64 *) source;
+ break;
+ case FFI_TYPE_POINTER:
+ return *(uintptr_t *) source;
+ default:
+ abort();
+ }
+}
+
+static void
+extend_hfa_type (void *dest, void *src, int h)
+{
+ int f = h - AARCH64_RET_S4;
+ void *x0;
+
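+  /* Computed branch: each HFA case in the table below is exactly three
+     instructions (12 bytes), so "adr 0f" plus f*12 lands on the
+     ldp/ldr sequence for this kind; every case then falls through to
+     the shared q16..q19 stores at labels 4..1. */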
+ asm volatile (
+ "adr %0, 0f\n"
+" add %0, %0, %1\n"
+" br %0\n"
+"0: ldp s16, s17, [%3]\n" /* S4 */
+" ldp s18, s19, [%3, #8]\n"
+" b 4f\n"
+" ldp s16, s17, [%3]\n" /* S3 */
+" ldr s18, [%3, #8]\n"
+" b 3f\n"
+" ldp s16, s17, [%3]\n" /* S2 */
+" b 2f\n"
+" nop\n"
+" ldr s16, [%3]\n" /* S1 */
+" b 1f\n"
+" nop\n"
+" ldp d16, d17, [%3]\n" /* D4 */
+" ldp d18, d19, [%3, #16]\n"
+" b 4f\n"
+" ldp d16, d17, [%3]\n" /* D3 */
+" ldr d18, [%3, #16]\n"
+" b 3f\n"
+" ldp d16, d17, [%3]\n" /* D2 */
+" b 2f\n"
+" nop\n"
+" ldr d16, [%3]\n" /* D1 */
+" b 1f\n"
+" nop\n"
+" ldp q16, q17, [%3]\n" /* Q4 */
+" ldp q18, q19, [%3, #16]\n"
+" b 4f\n"
+" ldp q16, q17, [%3]\n" /* Q3 */
+" ldr q18, [%3, #16]\n"
+" b 3f\n"
+" ldp q16, q17, [%3]\n" /* Q2 */
+" b 2f\n"
+" nop\n"
+" ldr q16, [%3]\n" /* Q1 */
+" b 1f\n"
+"4: str q19, [%2, #48]\n"
+"3: str q18, [%2, #32]\n"
+"2: str q17, [%2, #16]\n"
+"1: str q16, [%2]"
+ : "=&r"(x0)
+ : "r"(f * 12), "r"(dest), "r"(src)
+ : "memory", "v16", "v17", "v18", "v19");
+}
+
+static void *
+compress_hfa_type (void *dest, void *reg, int h)
+{
+ switch (h)
+ {
+ case AARCH64_RET_S1:
+ if (dest == reg)
+ {
+#ifdef __AARCH64EB__
+ dest += 12;
+#endif
+ }
+ else
+ *(float *)dest = *(float *)reg;
+ break;
+ case AARCH64_RET_S2:
+ asm ("ldp q16, q17, [%1]\n\t"
+ "st2 { v16.s, v17.s }[0], [%0]"
+ : : "r"(dest), "r"(reg) : "memory", "v16", "v17");
+ break;
+ case AARCH64_RET_S3:
+ asm ("ldp q16, q17, [%1]\n\t"
+ "ldr q18, [%1, #32]\n\t"
+ "st3 { v16.s, v17.s, v18.s }[0], [%0]"
+ : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18");
+ break;
+ case AARCH64_RET_S4:
+ asm ("ldp q16, q17, [%1]\n\t"
+ "ldp q18, q19, [%1, #32]\n\t"
+ "st4 { v16.s, v17.s, v18.s, v19.s }[0], [%0]"
+ : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18", "v19");
+ break;
+
+ case AARCH64_RET_D1:
+ if (dest == reg)
+ {
+#ifdef __AARCH64EB__
+ dest += 8;
+#endif
+ }
+ else
+ *(double *)dest = *(double *)reg;
+ break;
+ case AARCH64_RET_D2:
+ asm ("ldp q16, q17, [%1]\n\t"
+ "st2 { v16.d, v17.d }[0], [%0]"
+ : : "r"(dest), "r"(reg) : "memory", "v16", "v17");
+ break;
+ case AARCH64_RET_D3:
+ asm ("ldp q16, q17, [%1]\n\t"
+ "ldr q18, [%1, #32]\n\t"
+ "st3 { v16.d, v17.d, v18.d }[0], [%0]"
+ : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18");
+ break;
+ case AARCH64_RET_D4:
+ asm ("ldp q16, q17, [%1]\n\t"
+ "ldp q18, q19, [%1, #32]\n\t"
+ "st4 { v16.d, v17.d, v18.d, v19.d }[0], [%0]"
+ : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18", "v19");
+ break;
+
+ default:
+ if (dest != reg)
+ return memcpy (dest, reg, 16 * (4 - (h & 3)));
+ break;
+ }
+ return dest;
+}
+
+/* Either allocate an appropriate register for the argument type, or if
+ none are available, allocate a stack slot and return a pointer
+ to the allocated space. */
+
+static void *
+allocate_int_to_reg_or_stack (struct call_context *context,
+ struct arg_state *state,
+ void *stack, size_t size)
+{
+ if (state->ngrn < N_X_ARG_REG)
+ return &context->x[state->ngrn++];
+
+ state->ngrn = N_X_ARG_REG;
+ return allocate_to_stack (state, stack, size, size);
+}
+
+ffi_status
+ffi_prep_cif_machdep (ffi_cif *cif)
+{
+ ffi_type *rtype = cif->rtype;
+ size_t bytes = cif->bytes;
+ int flags, i, n;
+
+ switch (rtype->type)
+ {
+ case FFI_TYPE_VOID:
+ flags = AARCH64_RET_VOID;
+ break;
+ case FFI_TYPE_UINT8:
+ flags = AARCH64_RET_UINT8;
+ break;
+ case FFI_TYPE_UINT16:
+ flags = AARCH64_RET_UINT16;
+ break;
+ case FFI_TYPE_UINT32:
+ flags = AARCH64_RET_UINT32;
+ break;
+ case FFI_TYPE_SINT8:
+ flags = AARCH64_RET_SINT8;
+ break;
+ case FFI_TYPE_SINT16:
+ flags = AARCH64_RET_SINT16;
+ break;
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ flags = AARCH64_RET_SINT32;
+ break;
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_UINT64:
+ flags = AARCH64_RET_INT64;
+ break;
+ case FFI_TYPE_POINTER:
+ flags = (sizeof(void *) == 4 ? AARCH64_RET_UINT32 : AARCH64_RET_INT64);
+ break;
+
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ case FFI_TYPE_STRUCT:
+ case FFI_TYPE_COMPLEX:
+ flags = is_vfp_type (rtype);
+ if (flags == 0)
+ {
+ size_t s = rtype->size;
+ if (s > 16)
+ {
+ flags = AARCH64_RET_VOID | AARCH64_RET_IN_MEM;
+ bytes += 8;
+ }
+ else if (s == 16)
+ flags = AARCH64_RET_INT128;
+ else if (s == 8)
+ flags = AARCH64_RET_INT64;
+ else
+ flags = AARCH64_RET_INT128 | AARCH64_RET_NEED_COPY;
+ }
+ break;
+
+ default:
+ abort();
+ }
+
+ for (i = 0, n = cif->nargs; i < n; i++)
+ if (is_vfp_type (cif->arg_types[i]))
+ {
+ flags |= AARCH64_FLAG_ARG_V;
+ break;
+ }
+
+ /* Round the stack up to a multiple of the stack alignment requirement. */
+ cif->bytes = ALIGN(bytes, 16);
+ cif->flags = flags;
+#if defined (__APPLE__)
+ cif->aarch64_nfixedargs = 0;
+#endif
+
+ return FFI_OK;
+}
+
+#if defined (__APPLE__)
+/* Perform Apple-specific cif processing for variadic calls */
+ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs)
+{
+ ffi_status status = ffi_prep_cif_machdep (cif);
+ cif->aarch64_nfixedargs = nfixedargs;
+ return status;
+}
+#endif /* __APPLE__ */
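
For orientation, a hedged usage sketch, not part of the patched sources, assuming the standard public libffi entry point ffi_prep_cif_var (which invokes the machdep hook above): the nfixedargs recorded here is what later flips the argument marshaller into its stack-only variadic mode.

#include <ffi.h>
#include <stdio.h>

int
main (void)
{
  ffi_cif cif;
  ffi_type *args[3] = { &ffi_type_pointer, &ffi_type_sint, &ffi_type_double };
  const char *fmt = "%d %f\n";
  int i = 42;
  double d = 2.5;
  void *values[3] = { &fmt, &i, &d };
  ffi_arg rc;

  /* One fixed argument (the format string) out of three total. */
  if (ffi_prep_cif_var (&cif, FFI_DEFAULT_ABI, 1, 3,
                        &ffi_type_sint, args) == FFI_OK)
    ffi_call (&cif, FFI_FN (printf), &rc, values);
  return 0;
}
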
+
+extern void ffi_call_SYSV (struct call_context *context, void *frame,
+ void (*fn)(void), void *rvalue, int flags,
+ void *closure) FFI_HIDDEN;
+
+/* Call a function with the provided arguments and capture the return
+ value. */
+static void
+ffi_call_int (ffi_cif *cif, void (*fn)(void), void *orig_rvalue,
+ void **avalue, void *closure)
+{
+ struct call_context *context;
+ void *stack, *frame, *rvalue;
+ struct arg_state state;
+ size_t stack_bytes, rtype_size, rsize;
+ int i, nargs, flags;
+ ffi_type *rtype;
+
+ flags = cif->flags;
+ rtype = cif->rtype;
+ rtype_size = rtype->size;
+ stack_bytes = cif->bytes;
+
+ /* If the target function returns a structure via hidden pointer,
+ then we cannot allow a null rvalue. Otherwise, mash a null
+ rvalue to void return type. */
+ rsize = 0;
+ if (flags & AARCH64_RET_IN_MEM)
+ {
+ if (orig_rvalue == NULL)
+ rsize = rtype_size;
+ }
+ else if (orig_rvalue == NULL)
+ flags &= AARCH64_FLAG_ARG_V;
+ else if (flags & AARCH64_RET_NEED_COPY)
+ rsize = 16;
+
+ /* Allocate consecutive stack for everything we'll need. */
+ context = alloca (sizeof(struct call_context) + stack_bytes + 32 + rsize);
+ stack = context + 1;
+ frame = stack + stack_bytes;
+ rvalue = (rsize ? frame + 32 : orig_rvalue);
+
+ arg_init (&state);
+ for (i = 0, nargs = cif->nargs; i < nargs; i++)
+ {
+ ffi_type *ty = cif->arg_types[i];
+ size_t s = ty->size;
+ void *a = avalue[i];
+ int h, t;
+
+ t = ty->type;
+ switch (t)
+ {
+ case FFI_TYPE_VOID:
+ FFI_ASSERT (0);
+ break;
+
+ /* If the argument is a basic type the argument is allocated to an
+ appropriate register, or if none are available, to the stack. */
+ case FFI_TYPE_INT:
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_POINTER:
+ do_pointer:
+ {
+ ffi_arg ext = extend_integer_type (a, t);
+ if (state.ngrn < N_X_ARG_REG)
+ context->x[state.ngrn++] = ext;
+ else
+ {
+ void *d = allocate_to_stack (&state, stack, ty->alignment, s);
+ state.ngrn = N_X_ARG_REG;
+ /* Note that the default abi extends each argument
+ to a full 64-bit slot, while the iOS abi allocates
+ only enough space. */
+#ifdef __APPLE__
+ memcpy(d, a, s);
+#else
+ *(ffi_arg *)d = ext;
+#endif
+ }
+ }
+ break;
+
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ case FFI_TYPE_STRUCT:
+ case FFI_TYPE_COMPLEX:
+ {
+ void *dest;
+
+ h = is_vfp_type (ty);
+ if (h)
+ {
+ int elems = 4 - (h & 3);
+ if (state.nsrn + elems <= N_V_ARG_REG)
+ {
+ dest = &context->v[state.nsrn];
+ state.nsrn += elems;
+ extend_hfa_type (dest, a, h);
+ break;
+ }
+ state.nsrn = N_V_ARG_REG;
+ dest = allocate_to_stack (&state, stack, ty->alignment, s);
+ }
+ else if (s > 16)
+ {
+ /* If the argument is a composite type that is larger than 16
+ bytes, then the argument has been copied to memory, and
+ the argument is replaced by a pointer to the copy. */
+ a = &avalue[i];
+ t = FFI_TYPE_POINTER;
+ goto do_pointer;
+ }
+ else
+ {
+ size_t n = (s + 7) / 8;
+ if (state.ngrn + n <= N_X_ARG_REG)
+ {
+ /* If the argument is a composite type and the size in
+ double-words is not more than the number of available
+ X registers, then the argument is copied into
+ consecutive X registers. */
+ dest = &context->x[state.ngrn];
+ state.ngrn += n;
+ }
+ else
+ {
+ /* Otherwise, there are insufficient X registers. Further
+ X register allocations are prevented, the NSAA is
+ adjusted and the argument is copied to memory at the
+ adjusted NSAA. */
+ state.ngrn = N_X_ARG_REG;
+ dest = allocate_to_stack (&state, stack, ty->alignment, s);
+ }
+ }
+ memcpy (dest, a, s);
+ }
+ break;
+
+ default:
+ abort();
+ }
+
+#if defined (__APPLE__)
+ if (i + 1 == cif->aarch64_nfixedargs)
+ {
+ state.ngrn = N_X_ARG_REG;
+ state.nsrn = N_V_ARG_REG;
+ state.allocating_variadic = 1;
+ }
+#endif
+ }
+
+ ffi_call_SYSV (context, frame, fn, rvalue, flags, closure);
+
+ if (flags & AARCH64_RET_NEED_COPY)
+ memcpy (orig_rvalue, rvalue, rtype_size);
+}
+
+void
+ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue)
+{
+ ffi_call_int (cif, fn, rvalue, avalue, NULL);
+}
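
A hedged sketch, not part of the patched sources, of the HFA argument path above: a structure of two floats is classified by is_vfp_type(), so ffi_call_int() routes it through extend_hfa_type() into consecutive V registers rather than X registers. The struct layout and helper names are illustrative only.

#include <ffi.h>

typedef struct { float x, y; } point2f;

static float
point_len2 (point2f p)
{
  return p.x * p.x + p.y * p.y;
}

float
call_point_len2 (point2f p)
{
  ffi_type *elems[3] = { &ffi_type_float, &ffi_type_float, NULL };
  ffi_type point_type = { 0, 0, FFI_TYPE_STRUCT, elems };
  ffi_type *args[1] = { &point_type };
  void *values[1] = { &p };
  float result = 0.0f;
  ffi_cif cif;

  /* Size and alignment are left zero; ffi_prep_cif computes them. */
  if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 1,
                    &ffi_type_float, args) == FFI_OK)
    ffi_call (&cif, FFI_FN (point_len2), &result, values);
  return result;
}
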
+
+#ifdef FFI_GO_CLOSURES
+void
+ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue,
+ void **avalue, void *closure)
+{
+ ffi_call_int (cif, fn, rvalue, avalue, closure);
+}
+#endif /* FFI_GO_CLOSURES */
+
+/* Build a trampoline. */
+
+extern void ffi_closure_SYSV (void) FFI_HIDDEN;
+extern void ffi_closure_SYSV_V (void) FFI_HIDDEN;
+
+#if FFI_EXEC_TRAMPOLINE_TABLE
+
+#include <mach/mach.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+extern void *ffi_closure_trampoline_table_page;
+
+typedef struct ffi_trampoline_table ffi_trampoline_table;
+typedef struct ffi_trampoline_table_entry ffi_trampoline_table_entry;
+
+struct ffi_trampoline_table
+{
+ /* contiguous writable and executable pages */
+ vm_address_t config_page;
+ vm_address_t trampoline_page;
+
+ /* free list tracking */
+ uint16_t free_count;
+ ffi_trampoline_table_entry *free_list;
+ ffi_trampoline_table_entry *free_list_pool;
+
+ ffi_trampoline_table *prev;
+ ffi_trampoline_table *next;
+};
+
+struct ffi_trampoline_table_entry
+{
+ void *(*trampoline) ();
+ ffi_trampoline_table_entry *next;
+};
+
+/* The trampoline configuration is placed a page prior to the trampoline's entry point */
+#define FFI_TRAMPOLINE_CODELOC_CONFIG(codeloc) ((void **) (((uint8_t *) codeloc) - PAGE_SIZE));
+
+/* Total number of trampolines that fit in one trampoline table */
+#define FFI_TRAMPOLINE_COUNT (PAGE_SIZE / FFI_TRAMPOLINE_SIZE)
+
+static pthread_mutex_t ffi_trampoline_lock = PTHREAD_MUTEX_INITIALIZER;
+static ffi_trampoline_table *ffi_trampoline_tables = NULL;
+
+static ffi_trampoline_table *
+ffi_trampoline_table_alloc ()
+{
+ ffi_trampoline_table *table = NULL;
+
+ /* Loop until we can allocate two contiguous pages */
+ while (table == NULL)
+ {
+ vm_address_t config_page = 0x0;
+ kern_return_t kt;
+
+ /* Try to allocate two pages */
+ kt =
+ vm_allocate (mach_task_self (), &config_page, PAGE_SIZE * 2,
+ VM_FLAGS_ANYWHERE);
+ if (kt != KERN_SUCCESS)
+ {
+ fprintf (stderr, "vm_allocate() failure: %d at %s:%d\n", kt,
+ __FILE__, __LINE__);
+ break;
+ }
+
+ /* Now drop the second half of the allocation to make room for the trampoline table */
+ vm_address_t trampoline_page = config_page + PAGE_SIZE;
+ kt = vm_deallocate (mach_task_self (), trampoline_page, PAGE_SIZE);
+ if (kt != KERN_SUCCESS)
+ {
+ fprintf (stderr, "vm_deallocate() failure: %d at %s:%d\n", kt,
+ __FILE__, __LINE__);
+ break;
+ }
+
+ /* Remap the trampoline table to directly follow the config page */
+ vm_prot_t cur_prot;
+ vm_prot_t max_prot;
+
+ kt =
+ vm_remap (mach_task_self (), &trampoline_page, PAGE_SIZE, 0x0, FALSE,
+ mach_task_self (),
+ (vm_address_t) & ffi_closure_trampoline_table_page, FALSE,
+ &cur_prot, &max_prot, VM_INHERIT_SHARE);
+
+ /* If we lost access to the destination trampoline page, drop our config allocation mapping and retry */
+ if (kt != KERN_SUCCESS)
+ {
+ /* Log unexpected failures */
+ if (kt != KERN_NO_SPACE)
+ {
+ fprintf (stderr, "vm_remap() failure: %d at %s:%d\n", kt,
+ __FILE__, __LINE__);
+ }
+
+ vm_deallocate (mach_task_self (), config_page, PAGE_SIZE);
+ continue;
+ }
+
+ /* We have valid trampoline and config pages */
+ table = calloc (1, sizeof (ffi_trampoline_table));
+ table->free_count = FFI_TRAMPOLINE_COUNT;
+ table->config_page = config_page;
+ table->trampoline_page = trampoline_page;
+
+ /* Create and initialize the free list */
+ table->free_list_pool =
+ calloc (FFI_TRAMPOLINE_COUNT, sizeof (ffi_trampoline_table_entry));
+
+ uint16_t i;
+ for (i = 0; i < table->free_count; i++)
+ {
+ ffi_trampoline_table_entry *entry = &table->free_list_pool[i];
+ entry->trampoline =
+ (void *) (table->trampoline_page + (i * FFI_TRAMPOLINE_SIZE));
+
+ if (i < table->free_count - 1)
+ entry->next = &table->free_list_pool[i + 1];
+ }
+
+ table->free_list = table->free_list_pool;
+ }
+
+ return table;
+}
+
+void *
+ffi_closure_alloc (size_t size, void **code)
+{
+ /* Create the closure */
+ ffi_closure *closure = malloc (size);
+ if (closure == NULL)
+ return NULL;
+
+ pthread_mutex_lock (&ffi_trampoline_lock);
+
+ /* Check for an active trampoline table with available entries. */
+ ffi_trampoline_table *table = ffi_trampoline_tables;
+ if (table == NULL || table->free_list == NULL)
+ {
+ table = ffi_trampoline_table_alloc ();
+ if (table == NULL)
+ {
+ free (closure);
+ return NULL;
+ }
+
+ /* Insert the new table at the top of the list */
+ table->next = ffi_trampoline_tables;
+ if (table->next != NULL)
+ table->next->prev = table;
+
+ ffi_trampoline_tables = table;
+ }
+
+ /* Claim the free entry */
+ ffi_trampoline_table_entry *entry = ffi_trampoline_tables->free_list;
+ ffi_trampoline_tables->free_list = entry->next;
+ ffi_trampoline_tables->free_count--;
+ entry->next = NULL;
+
+ pthread_mutex_unlock (&ffi_trampoline_lock);
+
+ /* Initialize the return values */
+ *code = entry->trampoline;
+ closure->trampoline_table = table;
+ closure->trampoline_table_entry = entry;
+
+ return closure;
+}
+
+void
+ffi_closure_free (void *ptr)
+{
+ ffi_closure *closure = ptr;
+
+ pthread_mutex_lock (&ffi_trampoline_lock);
+
+ /* Fetch the table and entry references */
+ ffi_trampoline_table *table = closure->trampoline_table;
+ ffi_trampoline_table_entry *entry = closure->trampoline_table_entry;
+
+ /* Return the entry to the free list */
+ entry->next = table->free_list;
+ table->free_list = entry;
+ table->free_count++;
+
+ /* If all trampolines within this table are free, and at least one other table exists, deallocate
+ * the table */
+ if (table->free_count == FFI_TRAMPOLINE_COUNT
+ && ffi_trampoline_tables != table)
+ {
+ /* Remove from the list */
+ if (table->prev != NULL)
+ table->prev->next = table->next;
+
+ if (table->next != NULL)
+ table->next->prev = table->prev;
+
+ /* Deallocate pages */
+ kern_return_t kt;
+ kt = vm_deallocate (mach_task_self (), table->config_page, PAGE_SIZE);
+ if (kt != KERN_SUCCESS)
+ fprintf (stderr, "vm_deallocate() failure: %d at %s:%d\n", kt,
+ __FILE__, __LINE__);
+
+ kt =
+ vm_deallocate (mach_task_self (), table->trampoline_page, PAGE_SIZE);
+ if (kt != KERN_SUCCESS)
+ fprintf (stderr, "vm_deallocate() failure: %d at %s:%d\n", kt,
+ __FILE__, __LINE__);
+
+ /* Deallocate free list */
+ free (table->free_list_pool);
+ free (table);
+ }
+ else if (ffi_trampoline_tables != table)
+ {
+ /* Otherwise, bump this table to the top of the list */
+ table->prev = NULL;
+ table->next = ffi_trampoline_tables;
+ if (ffi_trampoline_tables != NULL)
+ ffi_trampoline_tables->prev = table;
+
+ ffi_trampoline_tables = table;
+ }
+
+ pthread_mutex_unlock (&ffi_trampoline_lock);
+
+ /* Free the closure */
+ free (closure);
+}
+
+#endif
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure *closure,
+ ffi_cif* cif,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data,
+ void *codeloc)
+{
+ if (cif->abi != FFI_SYSV)
+ return FFI_BAD_ABI;
+
+ void (*start)(void);
+
+ if (cif->flags & AARCH64_FLAG_ARG_V)
+ start = ffi_closure_SYSV_V;
+ else
+ start = ffi_closure_SYSV;
+
+#if FFI_EXEC_TRAMPOLINE_TABLE
+ void **config = FFI_TRAMPOLINE_CODELOC_CONFIG (codeloc);
+ config[0] = closure;
+ config[1] = start;
+#else
+ static const unsigned char trampoline[16] = {
+ 0x90, 0x00, 0x00, 0x58, /* ldr x16, tramp+16 */
+ 0xf1, 0xff, 0xff, 0x10, /* adr x17, tramp+0 */
+ 0x00, 0x02, 0x1f, 0xd6 /* br x16 */
+ };
+ char *tramp = closure->tramp;
+
+ memcpy (tramp, trampoline, sizeof(trampoline));
+
+ *(UINT64 *)(tramp + 16) = (uintptr_t)start;
+
+ ffi_clear_cache(tramp, tramp + FFI_TRAMPOLINE_SIZE);
+#endif
+
+ closure->cif = cif;
+ closure->fun = fun;
+ closure->user_data = user_data;
+
+ return FFI_OK;
+}
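
A hedged end-to-end sketch, not part of the patched sources, of the closure API implemented above: ffi_closure_alloc() returns a writable closure plus an executable trampoline entry (drawn from the remapped table when FFI_EXEC_TRAMPOLINE_TABLE is set), and ffi_prep_closure_loc() binds it to a user handler.

#include <ffi.h>
#include <stdio.h>

static void
add_handler (ffi_cif *cif, void *ret, void **args, void *user_data)
{
  /* Integral returns narrower than ffi_arg are written as ffi_arg. */
  *(ffi_arg *) ret = *(int *) args[0] + *(int *) args[1];
}

int
main (void)
{
  ffi_cif cif;
  ffi_type *args[2] = { &ffi_type_sint, &ffi_type_sint };
  void *code;
  ffi_closure *closure = ffi_closure_alloc (sizeof (ffi_closure), &code);

  if (closure == NULL)
    return 1;
  if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 2,
                    &ffi_type_sint, args) == FFI_OK
      && ffi_prep_closure_loc (closure, &cif, add_handler,
                               NULL, code) == FFI_OK)
    {
      int (*fn) (int, int) = (int (*)(int, int)) code;
      printf ("%d\n", fn (19, 23)); /* prints 42 */
    }
  ffi_closure_free (closure);
  return 0;
}
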
+
+#ifdef FFI_GO_CLOSURES
+extern void ffi_go_closure_SYSV (void) FFI_HIDDEN;
+extern void ffi_go_closure_SYSV_V (void) FFI_HIDDEN;
+
+ffi_status
+ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif* cif,
+ void (*fun)(ffi_cif*,void*,void**,void*))
+{
+ void (*start)(void);
+
+ if (cif->abi != FFI_SYSV)
+ return FFI_BAD_ABI;
+
+ if (cif->flags & AARCH64_FLAG_ARG_V)
+ start = ffi_go_closure_SYSV_V;
+ else
+ start = ffi_go_closure_SYSV;
+
+ closure->tramp = start;
+ closure->cif = cif;
+ closure->fun = fun;
+
+ return FFI_OK;
+}
+#endif /* FFI_GO_CLOSURES */
+
+/* Primary handler to setup and invoke a function within a closure.
+
+ A closure when invoked enters via the assembler wrapper
+ ffi_closure_SYSV(). The wrapper allocates a call context on the
+ stack, saves the interesting registers (from the perspective of
+ the calling convention) into the context then passes control to
+ ffi_closure_SYSV_inner() passing the saved context and a pointer to
+ the stack at the point ffi_closure_SYSV() was invoked.
+
+ On the return path the assembler wrapper will reload call context
+ registers.
+
+ ffi_closure_SYSV_inner() marshals the call context into ffi value
+ descriptors, invokes the wrapped function, then marshals the return
+ value back into the call context. */
+
+int FFI_HIDDEN
+ffi_closure_SYSV_inner (ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data,
+ struct call_context *context,
+ void *stack, void *rvalue, void *struct_rvalue)
+{
+ void **avalue = (void**) alloca (cif->nargs * sizeof (void*));
+ int i, h, nargs, flags;
+ struct arg_state state;
+
+ arg_init (&state);
+
+ for (i = 0, nargs = cif->nargs; i < nargs; i++)
+ {
+ ffi_type *ty = cif->arg_types[i];
+ int t = ty->type;
+ size_t n, s = ty->size;
+
+ switch (t)
+ {
+ case FFI_TYPE_VOID:
+ FFI_ASSERT (0);
+ break;
+
+ case FFI_TYPE_INT:
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_POINTER:
+ avalue[i] = allocate_int_to_reg_or_stack (context, &state, stack, s);
+ break;
+
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ case FFI_TYPE_STRUCT:
+ case FFI_TYPE_COMPLEX:
+ h = is_vfp_type (ty);
+ if (h)
+ {
+ n = 4 - (h & 3);
+ if (state.nsrn + n <= N_V_ARG_REG)
+ {
+ void *reg = &context->v[state.nsrn];
+ state.nsrn += n;
+
+ /* Eeek! We need a pointer to the structure, however the
+ homogeneous float elements are being passed in individual
+ registers, therefore for float and double the structure
+ is not represented as a contiguous sequence of bytes in
+ our saved register context. We don't need the original
+ contents of the register storage, so we reformat the
+ structure into the same memory. */
+ avalue[i] = compress_hfa_type (reg, reg, h);
+ }
+ else
+ {
+ state.nsrn = N_V_ARG_REG;
+ avalue[i] = allocate_to_stack (&state, stack,
+ ty->alignment, s);
+ }
+ }
+ else if (s > 16)
+ {
+ /* Replace Composite type of size greater than 16 with a
+ pointer. */
+ avalue[i] = *(void **)
+ allocate_int_to_reg_or_stack (context, &state, stack,
+ sizeof (void *));
+ }
+ else
+ {
+ n = (s + 7) / 8;
+ if (state.ngrn + n <= N_X_ARG_REG)
+ {
+ avalue[i] = &context->x[state.ngrn];
+ state.ngrn += n;
+ }
+ else
+ {
+ state.ngrn = N_X_ARG_REG;
+ avalue[i] = allocate_to_stack (&state, stack,
+ ty->alignment, s);
+ }
+ }
+ break;
+
+ default:
+ abort();
+ }
+ }
+
+ flags = cif->flags;
+ if (flags & AARCH64_RET_IN_MEM)
+ rvalue = struct_rvalue;
+
+ fun (cif, rvalue, avalue, user_data);
+
+ return flags;
+}
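
The flags value returned here drives a computed branch in sysv.S; a small sketch of that dispatch arithmetic follows (helper name hypothetical). Every table entry is two instructions, so entries sit eight bytes apart.

/* Target of the post-return branch: table base plus eight bytes per
   masked return-type code (AARCH64_RET_MASK == 31). */
static unsigned long
ret_table_target (unsigned long table_base, int flags)
{
  return table_base + (unsigned long) (flags & 31) * 8;
}
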
diff -Nru orig/Modules/_ctypes/libffi_ios/aarch64/ffitarget.h modified/Modules/_ctypes/libffi_ios/aarch64/ffitarget.h
--- orig/Modules/_ctypes/libffi_ios/aarch64/ffitarget.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/aarch64/ffitarget.h 2015-03-12 21:33:06.000000000 +0800
@@ -0,0 +1,73 @@
+/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+``Software''), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
+#ifndef LIBFFI_ASM
+#ifdef __ILP32__
+#define FFI_SIZEOF_ARG 8
+typedef unsigned long long ffi_arg;
+typedef signed long long ffi_sarg;
+#else
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+#endif
+
+typedef enum ffi_abi
+ {
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
+ } ffi_abi;
+#endif
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
+#if defined (__APPLE__)
+#define FFI_TRAMPOLINE_SIZE 20
+#define FFI_TRAMPOLINE_CLOSURE_OFFSET 16
+#else
+#define FFI_TRAMPOLINE_SIZE 24
+#define FFI_TRAMPOLINE_CLOSURE_OFFSET FFI_TRAMPOLINE_SIZE
+#endif
+#define FFI_NATIVE_RAW_API 0
+
+/* ---- Internal ---- */
+
+#if defined (__APPLE__)
+#define FFI_TARGET_SPECIFIC_VARIADIC
+#define FFI_EXTRA_CIF_FIELDS unsigned aarch64_nfixedargs
+#else
+/* iOS reserves x18, the register Go closures use as the static chain,
+ for the system, so Go closures are only enabled for non-Apple targets
+ until a new static chain register is chosen. */
+#define FFI_GO_CLOSURES 1
+#endif
+
+#define FFI_TARGET_HAS_COMPLEX_TYPE
+
+#endif
diff -Nru orig/Modules/_ctypes/libffi_ios/aarch64/internal.h modified/Modules/_ctypes/libffi_ios/aarch64/internal.h
--- orig/Modules/_ctypes/libffi_ios/aarch64/internal.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/aarch64/internal.h 2015-03-12 21:33:06.000000000 +0800
@@ -0,0 +1,67 @@
+/*
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+``Software''), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#define AARCH64_RET_VOID 0
+#define AARCH64_RET_INT64 1
+#define AARCH64_RET_INT128 2
+
+#define AARCH64_RET_UNUSED3 3
+#define AARCH64_RET_UNUSED4 4
+#define AARCH64_RET_UNUSED5 5
+#define AARCH64_RET_UNUSED6 6
+#define AARCH64_RET_UNUSED7 7
+
+/* Note that FFI_TYPE_FLOAT == 2, _DOUBLE == 3, _LONGDOUBLE == 4,
+ so _S4 through _Q1 are laid out as (TYPE * 4) + (4 - COUNT). */
+#define AARCH64_RET_S4 8
+#define AARCH64_RET_S3 9
+#define AARCH64_RET_S2 10
+#define AARCH64_RET_S1 11
+
+#define AARCH64_RET_D4 12
+#define AARCH64_RET_D3 13
+#define AARCH64_RET_D2 14
+#define AARCH64_RET_D1 15
+
+#define AARCH64_RET_Q4 16
+#define AARCH64_RET_Q3 17
+#define AARCH64_RET_Q2 18
+#define AARCH64_RET_Q1 19
+
+/* Note that each of the sub-64-bit integers gets two entries. */
+#define AARCH64_RET_UINT8 20
+#define AARCH64_RET_UINT16 22
+#define AARCH64_RET_UINT32 24
+
+#define AARCH64_RET_SINT8 26
+#define AARCH64_RET_SINT16 28
+#define AARCH64_RET_SINT32 30
+
+#define AARCH64_RET_MASK 31
+
+#define AARCH64_RET_IN_MEM (1 << 5)
+#define AARCH64_RET_NEED_COPY (1 << 6)
+
+#define AARCH64_FLAG_ARG_V_BIT 7
+#define AARCH64_FLAG_ARG_V (1 << AARCH64_FLAG_ARG_V_BIT)
+
+#define N_X_ARG_REG 8
+#define N_V_ARG_REG 8
+#define CALL_CONTEXT_SIZE (N_V_ARG_REG * 16 + N_X_ARG_REG * 8)
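
A worked sketch, not part of the patched sources, of the encoding noted above: with FFI_TYPE_FLOAT == 2, FFI_TYPE_DOUBLE == 3 and FFI_TYPE_LONGDOUBLE == 4, an HFA return of COUNT elements of TYPE maps onto these codes as (TYPE * 4) + (4 - COUNT). The helper name is hypothetical.

static int
aarch64_hfa_ret_code (int type, int count)
{
  return type * 4 + (4 - count);
}

/* aarch64_hfa_ret_code (2, 4) == 8  == AARCH64_RET_S4
   aarch64_hfa_ret_code (3, 1) == 15 == AARCH64_RET_D1
   aarch64_hfa_ret_code (4, 2) == 18 == AARCH64_RET_Q2 */
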
diff -Nru orig/Modules/_ctypes/libffi_ios/aarch64/sysv.S modified/Modules/_ctypes/libffi_ios/aarch64/sysv.S
--- orig/Modules/_ctypes/libffi_ios/aarch64/sysv.S 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/aarch64/sysv.S 2015-03-12 21:33:06.000000000 +0800
@@ -0,0 +1,434 @@
+/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+``Software''), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+#include <ffi_cfi.h>
+#include "internal.h"
+
+#ifdef HAVE_MACHINE_ASM_H
+#include <machine/asm.h>
+#else
+#ifdef __USER_LABEL_PREFIX__
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+#define CNAME(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+#else
+#define CNAME(x) x
+#endif
+#endif
+
+#ifdef __AARCH64EB__
+# define BE(X) X
+#else
+# define BE(X) 0
+#endif
+
+#ifdef __ILP32__
+#define PTR_REG(n) w##n
+#else
+#define PTR_REG(n) x##n
+#endif
+
+#ifdef __ILP32__
+#define PTR_SIZE 4
+#else
+#define PTR_SIZE 8
+#endif
+
+ .text
+ .align 4
+
+/* ffi_call_SYSV
+ extern void ffi_call_SYSV (void *stack, void *frame,
+ void (*fn)(void), void *rvalue,
+ int flags, void *closure);
+
+ Therefore on entry we have:
+
+ x0 stack
+ x1 frame
+ x2 fn
+ x3 rvalue
+ x4 flags
+ x5 closure
+*/
+
+ cfi_startproc
+CNAME(ffi_call_SYSV):
+ /* Use a stack frame allocated by our caller. */
+ cfi_def_cfa(x1, 32);
+ stp x29, x30, [x1]
+ mov x29, x1
+ mov sp, x0
+ cfi_def_cfa_register(x29)
+ cfi_rel_offset (x29, 0)
+ cfi_rel_offset (x30, 8)
+
+ mov x9, x2 /* save fn */
+ mov x8, x3 /* install structure return */
+#ifdef FFI_GO_CLOSURES
+ mov x18, x5 /* install static chain */
+#endif
+ stp x3, x4, [x29, #16] /* save rvalue and flags */
+
+ /* Load the vector argument passing registers, if necessary. */
+ tbz w4, #AARCH64_FLAG_ARG_V_BIT, 1f
+ ldp q0, q1, [sp, #0]
+ ldp q2, q3, [sp, #32]
+ ldp q4, q5, [sp, #64]
+ ldp q6, q7, [sp, #96]
+1:
+ /* Load the core argument passing registers, including
+ the structure return pointer. */
+ ldp x0, x1, [sp, #16*N_V_ARG_REG + 0]
+ ldp x2, x3, [sp, #16*N_V_ARG_REG + 16]
+ ldp x4, x5, [sp, #16*N_V_ARG_REG + 32]
+ ldp x6, x7, [sp, #16*N_V_ARG_REG + 48]
+
+ /* Deallocate the context, leaving the stacked arguments. */
+ add sp, sp, #CALL_CONTEXT_SIZE
+
+ blr x9 /* call fn */
+
+ ldp x3, x4, [x29, #16] /* reload rvalue and flags */
+
+ /* Partially deconstruct the stack frame. */
+ mov sp, x29
+ cfi_def_cfa_register (sp)
+ ldp x29, x30, [x29]
+
+ /* Save the return value as directed. */
+ adr x5, 0f
+ and w4, w4, #AARCH64_RET_MASK
+ add x5, x5, x4, lsl #3
+ br x5
+
+ /* Note that each table entry is 2 insns, and thus 8 bytes.
+ For integer data, note that we're storing into ffi_arg
+ and therefore we want to extend to 64 bits; these types
+ have two consecutive entries allocated for them. */
+ .align 4
+0: ret /* VOID */
+ nop
+1: str x0, [x3] /* INT64 */
+ ret
+2: stp x0, x1, [x3] /* INT128 */
+ ret
+3: brk #1000 /* UNUSED */
+ ret
+4: brk #1000 /* UNUSED */
+ ret
+5: brk #1000 /* UNUSED */
+ ret
+6: brk #1000 /* UNUSED */
+ ret
+7: brk #1000 /* UNUSED */
+ ret
+8: st4 { v0.s, v1.s, v2.s, v3.s }[0], [x3] /* S4 */
+ ret
+9: st3 { v0.s, v1.s, v2.s }[0], [x3] /* S3 */
+ ret
+10: stp s0, s1, [x3] /* S2 */
+ ret
+11: str s0, [x3] /* S1 */
+ ret
+12: st4 { v0.d, v1.d, v2.d, v3.d }[0], [x3] /* D4 */
+ ret
+13: st3 { v0.d, v1.d, v2.d }[0], [x3] /* D3 */
+ ret
+14: stp d0, d1, [x3] /* D2 */
+ ret
+15: str d0, [x3] /* D1 */
+ ret
+16: str q3, [x3, #48] /* Q4 */
+ nop
+17: str q2, [x3, #32] /* Q3 */
+ nop
+18: stp q0, q1, [x3] /* Q2 */
+ ret
+19: str q0, [x3] /* Q1 */
+ ret
+20: uxtb w0, w0 /* UINT8 */
+ str x0, [x3]
+21: ret /* reserved */
+ nop
+22: uxth w0, w0 /* UINT16 */
+ str x0, [x3]
+23: ret /* reserved */
+ nop
+24: mov w0, w0 /* UINT32 */
+ str x0, [x3]
+25: ret /* reserved */
+ nop
+26: sxtb x0, w0 /* SINT8 */
+ str x0, [x3]
+27: ret /* reserved */
+ nop
+28: sxth x0, w0 /* SINT16 */
+ str x0, [x3]
+29: ret /* reserved */
+ nop
+30: sxtw x0, w0 /* SINT32 */
+ str x0, [x3]
+31: ret /* reserved */
+ nop
+
+ cfi_endproc
+
+ .globl CNAME(ffi_call_SYSV)
+#ifdef __ELF__
+ .type CNAME(ffi_call_SYSV), #function
+ .hidden CNAME(ffi_call_SYSV)
+ .size CNAME(ffi_call_SYSV), .-CNAME(ffi_call_SYSV)
+#endif
+
+/* ffi_closure_SYSV
+
+ Closure invocation glue. This is the low level code invoked directly by
+ the closure trampoline to set up and call a closure.
+
+ On entry x17 points to a struct ffi_closure; x16 has been clobbered,
+ and all other registers are preserved.
+
+ We allocate a call context and save the argument passing registers,
+ then invoke the generic C ffi_closure_SYSV_inner() function to do all
+ the real work; on return we load the result passing registers back from
+ the call context.
+*/
+
+#define ffi_closure_SYSV_FS (8*2 + CALL_CONTEXT_SIZE + 64)
+
+ .align 4
+CNAME(ffi_closure_SYSV_V):
+ cfi_startproc
+ stp x29, x30, [sp, #-ffi_closure_SYSV_FS]!
+ cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
+ cfi_rel_offset (x29, 0)
+ cfi_rel_offset (x30, 8)
+
+ /* Save the argument passing vector registers. */
+ stp q0, q1, [sp, #16 + 0]
+ stp q2, q3, [sp, #16 + 32]
+ stp q4, q5, [sp, #16 + 64]
+ stp q6, q7, [sp, #16 + 96]
+ b 0f
+ cfi_endproc
+
+ .globl CNAME(ffi_closure_SYSV_V)
+#ifdef __ELF__
+ .type CNAME(ffi_closure_SYSV_V), #function
+ .hidden CNAME(ffi_closure_SYSV_V)
+ .size CNAME(ffi_closure_SYSV_V), . - CNAME(ffi_closure_SYSV_V)
+#endif
+
+ .align 4
+ cfi_startproc
+CNAME(ffi_closure_SYSV):
+ stp x29, x30, [sp, #-ffi_closure_SYSV_FS]!
+ cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
+ cfi_rel_offset (x29, 0)
+ cfi_rel_offset (x30, 8)
+0:
+ mov x29, sp
+
+ /* Save the argument passing core registers. */
+ stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0]
+ stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16]
+ stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32]
+ stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48]
+
+ /* Load ffi_closure_inner arguments. */
+ ldp PTR_REG(0), PTR_REG(1), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET] /* load cif, fn */
+ ldr PTR_REG(2), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET+PTR_SIZE*2] /* load user_data */
+.Ldo_closure:
+ add x3, sp, #16 /* load context */
+ add x4, sp, #ffi_closure_SYSV_FS /* load stack */
+ add x5, sp, #16+CALL_CONTEXT_SIZE /* load rvalue */
+ mov x6, x8 /* load struct_rval */
+ bl CNAME(ffi_closure_SYSV_inner)
+
+ /* Load the return value as directed. */
+ adr x1, 0f
+ and w0, w0, #AARCH64_RET_MASK
+ add x1, x1, x0, lsl #3
+ add x3, sp, #16+CALL_CONTEXT_SIZE
+ br x1
+
+ /* Note that each table entry is 2 insns, and thus 8 bytes. */
+ .align 4
+0: b 99f /* VOID */
+ nop
+1: ldr x0, [x3] /* INT64 */
+ b 99f
+2: ldp x0, x1, [x3] /* INT128 */
+ b 99f
+3: brk #1000 /* UNUSED */
+ nop
+4: brk #1000 /* UNUSED */
+ nop
+5: brk #1000 /* UNUSED */
+ nop
+6: brk #1000 /* UNUSED */
+ nop
+7: brk #1000 /* UNUSED */
+ nop
+8: ldr s3, [x3, #12] /* S4 */
+ nop
+9: ldr s2, [x3, #8] /* S3 */
+ nop
+10: ldp s0, s1, [x3] /* S2 */
+ b 99f
+11: ldr s0, [x3] /* S1 */
+ b 99f
+12: ldr d3, [x3, #24] /* D4 */
+ nop
+13: ldr d2, [x3, #16] /* D3 */
+ nop
+14: ldp d0, d1, [x3] /* D2 */
+ b 99f
+15: ldr d0, [x3] /* D1 */
+ b 99f
+16: ldr q3, [x3, #48] /* Q4 */
+ nop
+17: ldr q2, [x3, #32] /* Q3 */
+ nop
+18: ldp q0, q1, [x3] /* Q2 */
+ b 99f
+19: ldr q0, [x3] /* Q1 */
+ b 99f
+20: ldrb w0, [x3, #BE(7)] /* UINT8 */
+ b 99f
+21: brk #1000 /* reserved */
+ nop
+22: ldrh w0, [x3, #BE(6)] /* UINT16 */
+ b 99f
+23: brk #1000 /* reserved */
+ nop
+24: ldr w0, [x3, #BE(4)] /* UINT32 */
+ b 99f
+25: brk #1000 /* reserved */
+ nop
+26: ldrsb x0, [x3, #BE(7)] /* SINT8 */
+ b 99f
+27: brk #1000 /* reserved */
+ nop
+28: ldrsh x0, [x3, #BE(6)] /* SINT16 */
+ b 99f
+29: brk #1000 /* reserved */
+ nop
+30: ldrsw x0, [x3, #BE(4)] /* SINT32 */
+ nop
+31: /* reserved */
+99: ldp x29, x30, [sp], #ffi_closure_SYSV_FS
+ cfi_adjust_cfa_offset (-ffi_closure_SYSV_FS)
+ cfi_restore (x29)
+ cfi_restore (x30)
+ ret
+ cfi_endproc
+
+ .globl CNAME(ffi_closure_SYSV)
+#ifdef __ELF__
+ .type CNAME(ffi_closure_SYSV), #function
+ .hidden CNAME(ffi_closure_SYSV)
+ .size CNAME(ffi_closure_SYSV), . - CNAME(ffi_closure_SYSV)
+#endif
+
+#if FFI_EXEC_TRAMPOLINE_TABLE
+ .align 12
+CNAME(ffi_closure_trampoline_table_page):
+ .rept 16384 / FFI_TRAMPOLINE_SIZE
+ adr x17, -16384
+ adr x16, -16380
+ ldr x16, [x16]
+ ldr x17, [x17]
+ br x16
+ .endr
+
+ .globl CNAME(ffi_closure_trampoline_table_page)
+ #ifdef __ELF__
+ .type CNAME(ffi_closure_trampoline_table_page), #function
+ .hidden CNAME(ffi_closure_trampoline_table_page)
+ .size CNAME(ffi_closure_trampoline_table_page), . - CNAME(ffi_closure_trampoline_table_page)
+ #endif
+#endif
+
+#ifdef FFI_GO_CLOSURES
+ .align 4
+CNAME(ffi_go_closure_SYSV_V):
+ cfi_startproc
+ stp x29, x30, [sp, #-ffi_closure_SYSV_FS]!
+ cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
+ cfi_rel_offset (x29, 0)
+ cfi_rel_offset (x30, 8)
+
+ /* Save the argument passing vector registers. */
+ stp q0, q1, [sp, #16 + 0]
+ stp q2, q3, [sp, #16 + 32]
+ stp q4, q5, [sp, #16 + 64]
+ stp q6, q7, [sp, #16 + 96]
+ b 0f
+ cfi_endproc
+
+ .globl CNAME(ffi_go_closure_SYSV_V)
+#ifdef __ELF__
+ .type CNAME(ffi_go_closure_SYSV_V), #function
+ .hidden CNAME(ffi_go_closure_SYSV_V)
+ .size CNAME(ffi_go_closure_SYSV_V), . - CNAME(ffi_go_closure_SYSV_V)
+#endif
+
+ .align 4
+ cfi_startproc
+CNAME(ffi_go_closure_SYSV):
+ stp x29, x30, [sp, #-ffi_closure_SYSV_FS]!
+ cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
+ cfi_rel_offset (x29, 0)
+ cfi_rel_offset (x30, 8)
+0:
+ mov x29, sp
+
+ /* Save the argument passing core registers. */
+ stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0]
+ stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16]
+ stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32]
+ stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48]
+
+ /* Load ffi_closure_inner arguments. */
+ ldp PTR_REG(0), PTR_REG(1), [x18, #PTR_SIZE]/* load cif, fn */
+ mov x2, x18 /* load user_data */
+ b .Ldo_closure
+ cfi_endproc
+
+ .globl CNAME(ffi_go_closure_SYSV)
+#ifdef __ELF__
+ .type CNAME(ffi_go_closure_SYSV), #function
+ .hidden CNAME(ffi_go_closure_SYSV)
+ .size CNAME(ffi_go_closure_SYSV), . - CNAME(ffi_go_closure_SYSV)
+#endif
+#endif /* FFI_GO_CLOSURES */
+
+#if defined __ELF__ && defined __linux__
+ .section .note.GNU-stack,"",%progbits
+#endif
+
diff -Nru orig/Modules/_ctypes/libffi_ios/arm/ffi.c modified/Modules/_ctypes/libffi_ios/arm/ffi.c
--- orig/Modules/_ctypes/libffi_ios/arm/ffi.c 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/arm/ffi.c 2015-03-12 21:33:17.000000000 +0800
@@ -0,0 +1,1043 @@
+/* -----------------------------------------------------------------------
+ ffi.c - Copyright (c) 2011 Timothy Wall
+ Copyright (c) 2011 Plausible Labs Cooperative, Inc.
+ Copyright (c) 2011 Anthony Green
+ Copyright (c) 2011 Free Software Foundation
+ Copyright (c) 1998, 2008, 2011 Red Hat, Inc.
+
+ ARM Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#include <fficonfig.h>
+#include <ffi.h>
+#include <ffi_common.h>
+#include "internal.h"
+
+/* Forward declares. */
+static int vfp_type_p (const ffi_type *);
+static void layout_vfp_args (ffi_cif *);
+
+static void *
+ffi_align (ffi_type *ty, void *p)
+{
+ /* Align if necessary */
+ size_t alignment;
+#ifdef _WIN32_WCE
+ alignment = 4;
+#else
+ alignment = ty->alignment;
+ if (alignment < 4)
+ alignment = 4;
+#endif
+ return (void *) ALIGN (p, alignment);
+}
+
+static size_t
+ffi_put_arg (ffi_type *ty, void *src, void *dst)
+{
+ size_t z = ty->size;
+
+ switch (ty->type)
+ {
+ case FFI_TYPE_SINT8:
+ *(UINT32 *)dst = *(SINT8 *)src;
+ break;
+ case FFI_TYPE_UINT8:
+ *(UINT32 *)dst = *(UINT8 *)src;
+ break;
+ case FFI_TYPE_SINT16:
+ *(UINT32 *)dst = *(SINT16 *)src;
+ break;
+ case FFI_TYPE_UINT16:
+ *(UINT32 *)dst = *(UINT16 *)src;
+ break;
+
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_FLOAT:
+ *(UINT32 *)dst = *(UINT32 *)src;
+ break;
+
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_DOUBLE:
+ *(UINT64 *)dst = *(UINT64 *)src;
+ break;
+
+ case FFI_TYPE_STRUCT:
+ case FFI_TYPE_COMPLEX:
+ memcpy (dst, src, z);
+ break;
+
+ default:
+ abort();
+ }
+
+ return ALIGN (z, 4);
+}
+
+/* ffi_prep_args is called once stack space has been allocated
+ for the function's arguments.
+
+ The vfp_space parameter is the load area for VFP regs, the return
+ value is cif->vfp_used (word bitset of VFP regs used for passing
+ arguments). These are only used for the VFP hard-float ABI.
+*/
+static void
+ffi_prep_args_SYSV (ffi_cif *cif, int flags, void *rvalue,
+ void **avalue, char *argp)
+{
+ ffi_type **arg_types = cif->arg_types;
+ int i, n;
+
+ if (flags == ARM_TYPE_STRUCT)
+ {
+ *(void **) argp = rvalue;
+ argp += 4;
+ }
+
+ for (i = 0, n = cif->nargs; i < n; i++)
+ {
+ ffi_type *ty = arg_types[i];
+ argp = ffi_align (ty, argp);
+ argp += ffi_put_arg (ty, avalue[i], argp);
+ }
+}
+
+static void
+ffi_prep_args_VFP (ffi_cif *cif, int flags, void *rvalue,
+ void **avalue, char *stack, char *vfp_space)
+{
+ ffi_type **arg_types = cif->arg_types;
+ int i, n, vi = 0;
+ char *argp, *regp, *eo_regp;
+ char stack_used = 0;
+ char done_with_regs = 0;
+
+ /* The first 4 words on the stack are used for values
+ passed in core registers. */
+ regp = stack;
+ eo_regp = argp = regp + 16;
+
+ /* If the function returns an FFI_TYPE_STRUCT in memory,
+ that address is passed in r0 to the function. */
+ if (flags == ARM_TYPE_STRUCT)
+ {
+ *(void **) regp = rvalue;
+ regp += 4;
+ }
+
+ for (i = 0, n = cif->nargs; i < n; i++)
+ {
+ ffi_type *ty = arg_types[i];
+ void *a = avalue[i];
+ int is_vfp_type = vfp_type_p (ty);
+
+ /* Allocated in VFP registers. */
+ if (vi < cif->vfp_nargs && is_vfp_type)
+ {
+ char *vfp_slot = vfp_space + cif->vfp_args[vi++] * 4;
+ ffi_put_arg (ty, a, vfp_slot);
+ continue;
+ }
+ /* Try allocating in core registers. */
+ else if (!done_with_regs && !is_vfp_type)
+ {
+ char *tregp = ffi_align (ty, regp);
+ size_t size = ty->size;
+ size = (size < 4) ? 4 : size; // pad
+ /* Check if there is space left in the aligned register
+ area to place the argument. */
+ if (tregp + size <= eo_regp)
+ {
+ regp = tregp + ffi_put_arg (ty, a, tregp);
+ done_with_regs = (regp == argp);
+ // ensure we did not write into the stack area
+ FFI_ASSERT (regp <= argp);
+ continue;
+ }
+ /* In case there are no arguments in the stack area yet,
+ the argument is passed in the remaining core registers
+ and on the stack. */
+ else if (!stack_used)
+ {
+ stack_used = 1;
+ done_with_regs = 1;
+ argp = tregp + ffi_put_arg (ty, a, tregp);
+ FFI_ASSERT (eo_regp < argp);
+ continue;
+ }
+ }
+ /* Base case, arguments are passed on the stack */
+ stack_used = 1;
+ argp = ffi_align (ty, argp);
+ argp += ffi_put_arg (ty, a, argp);
+ }
+}
+
+/* Perform machine dependent cif processing */
+ffi_status
+ffi_prep_cif_machdep (ffi_cif *cif)
+{
+ int flags = 0, cabi = cif->abi;
+ size_t bytes = cif->bytes;
+
+ /* Map out the register placements of VFP register args. The VFP
+ hard-float calling conventions are slightly more sophisticated
+ than the base calling conventions, so we do it here instead of
+ in ffi_prep_args(). */
+ if (cabi == FFI_VFP)
+ layout_vfp_args (cif);
+
+ /* Set the return type flag */
+ switch (cif->rtype->type)
+ {
+ case FFI_TYPE_VOID:
+ flags = ARM_TYPE_VOID;
+ break;
+
+ case FFI_TYPE_INT:
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_POINTER:
+ flags = ARM_TYPE_INT;
+ break;
+
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_UINT64:
+ flags = ARM_TYPE_INT64;
+ break;
+
+ case FFI_TYPE_FLOAT:
+ flags = (cabi == FFI_VFP ? ARM_TYPE_VFP_S : ARM_TYPE_INT);
+ break;
+ case FFI_TYPE_DOUBLE:
+ flags = (cabi == FFI_VFP ? ARM_TYPE_VFP_D : ARM_TYPE_INT64);
+ break;
+
+ case FFI_TYPE_STRUCT:
+ case FFI_TYPE_COMPLEX:
+ if (cabi == FFI_VFP)
+ {
+ int h = vfp_type_p (cif->rtype);
+
+ flags = ARM_TYPE_VFP_N;
+ if (h == 0x100 + FFI_TYPE_FLOAT)
+ flags = ARM_TYPE_VFP_S;
+ if (h == 0x100 + FFI_TYPE_DOUBLE)
+ flags = ARM_TYPE_VFP_D;
+ if (h != 0)
+ break;
+ }
+
+ /* A Composite Type not larger than 4 bytes is returned in r0.
+ A Composite Type larger than 4 bytes, or whose size cannot
+ be determined statically ... is stored in memory at an
+ address passed [in r0]. */
+ if (cif->rtype->size <= 4)
+ flags = ARM_TYPE_INT;
+ else
+ {
+ flags = ARM_TYPE_STRUCT;
+ bytes += 4;
+ }
+ break;
+
+ default:
+ abort();
+ }
+
+ /* Round the stack up to a multiple of 8 bytes. This isn't needed
+ everywhere, but it is on some platforms, and it doesn't harm anything
+ when it isn't needed. */
+ bytes = ALIGN (bytes, 8);
+
+ /* Minimum stack space is the 4 register arguments that we pop. */
+ if (bytes < 4*4)
+ bytes = 4*4;
+
+ cif->bytes = bytes;
+ cif->flags = flags;
+
+ return FFI_OK;
+}
+
+/* Perform machine dependent cif processing for variadic calls */
+ffi_status
+ffi_prep_cif_machdep_var (ffi_cif * cif,
+ unsigned int nfixedargs, unsigned int ntotalargs)
+{
+ /* VFP variadic calls actually use the SYSV ABI */
+ if (cif->abi == FFI_VFP)
+ cif->abi = FFI_SYSV;
+
+ return ffi_prep_cif_machdep (cif);
+}
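
A hedged sketch, not part of the patched sources, of the observable effect: preparing a variadic cif on a hard-float build through the public ffi_prep_cif_var downgrades the ABI, so floating-point variadic arguments travel in core registers and on the stack as the base AAPCS requires. FFI_VFP is assumed to be the configured ABI on this build.

#include <ffi.h>
#include <assert.h>

void
demo_variadic_abi_rewrite (void)
{
  ffi_cif cif;
  ffi_type *args[2] = { &ffi_type_pointer, &ffi_type_double };

  /* One fixed argument out of two total. */
  if (ffi_prep_cif_var (&cif, FFI_VFP, 1, 2,
                        &ffi_type_sint, args) == FFI_OK)
    assert (cif.abi == FFI_SYSV); /* rewritten by the hook above */
}
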
+
+/* Prototypes for assembly functions, in sysv.S. */
+
+struct call_frame
+{
+ void *fp;
+ void *lr;
+ void *rvalue;
+ int flags;
+ void *closure;
+};
+
+extern void ffi_call_SYSV (void *stack, struct call_frame *,
+ void (*fn) (void)) FFI_HIDDEN;
+extern void ffi_call_VFP (void *vfp_space, struct call_frame *,
+ void (*fn) (void), unsigned vfp_used) FFI_HIDDEN;
+
+static void
+ffi_call_int (ffi_cif * cif, void (*fn) (void), void *rvalue,
+ void **avalue, void *closure)
+{
+ int flags = cif->flags;
+ ffi_type *rtype = cif->rtype;
+ size_t bytes, rsize, vfp_size;
+ char *stack, *vfp_space, *new_rvalue;
+ struct call_frame *frame;
+
+ rsize = 0;
+ if (rvalue == NULL)
+ {
+ /* If the return value is a struct and we don't have a return
+ value address then we need to make one. Otherwise the return
+ value is in registers and we can ignore them. */
+ if (flags == ARM_TYPE_STRUCT)
+ rsize = rtype->size;
+ else
+ flags = ARM_TYPE_VOID;
+ }
+ else if (flags == ARM_TYPE_VFP_N)
+ {
+ /* Largest case is double x 4. */
+ rsize = 32;
+ }
+ else if (flags == ARM_TYPE_INT && rtype->type == FFI_TYPE_STRUCT)
+ rsize = 4;
+
+ /* Largest case. */
+ vfp_size = (cif->abi == FFI_VFP && cif->vfp_used ? 8*8: 0);
+
+ bytes = cif->bytes;
+ stack = alloca (vfp_size + bytes + sizeof(struct call_frame) + rsize);
+
+ vfp_space = NULL;
+ if (vfp_size)
+ {
+ vfp_space = stack;
+ stack += vfp_size;
+ }
+
+ frame = (struct call_frame *)(stack + bytes);
+
+ new_rvalue = rvalue;
+ if (rsize)
+ new_rvalue = (void *)(frame + 1);
+
+ frame->rvalue = new_rvalue;
+ frame->flags = flags;
+ frame->closure = closure;
+
+ if (vfp_space)
+ {
+ ffi_prep_args_VFP (cif, flags, new_rvalue, avalue, stack, vfp_space);
+ ffi_call_VFP (vfp_space, frame, fn, cif->vfp_used);
+ }
+ else
+ {
+ ffi_prep_args_SYSV (cif, flags, new_rvalue, avalue, stack);
+ ffi_call_SYSV (stack, frame, fn);
+ }
+
+ if (rvalue && rvalue != new_rvalue)
+ memcpy (rvalue, new_rvalue, rtype->size);
+}
+
+void
+ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue)
+{
+ ffi_call_int (cif, fn, rvalue, avalue, NULL);
+}
+
+void
+ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue,
+ void **avalue, void *closure)
+{
+ ffi_call_int (cif, fn, rvalue, avalue, closure);
+}
+
+static void *
+ffi_prep_incoming_args_SYSV (ffi_cif *cif, void *rvalue,
+ char *argp, void **avalue)
+{
+ ffi_type **arg_types = cif->arg_types;
+ int i, n;
+
+ if (cif->flags == ARM_TYPE_STRUCT)
+ {
+ rvalue = *(void **) argp;
+ argp += 4;
+ }
+
+ for (i = 0, n = cif->nargs; i < n; i++)
+ {
+ ffi_type *ty = arg_types[i];
+ size_t z = ty->size;
+
+ argp = ffi_align (ty, argp);
+ avalue[i] = (void *) argp;
+ argp += z;
+ }
+
+ return rvalue;
+}
+
+static void *
+ffi_prep_incoming_args_VFP (ffi_cif *cif, void *rvalue, char *stack,
+ char *vfp_space, void **avalue)
+{
+ ffi_type **arg_types = cif->arg_types;
+ int i, n, vi = 0;
+ char *argp, *regp, *eo_regp;
+ char done_with_regs = 0;
+ char stack_used = 0;
+
+ regp = stack;
+ eo_regp = argp = regp + 16;
+
+ if (cif->flags == ARM_TYPE_STRUCT)
+ {
+ rvalue = *(void **) regp;
+ regp += 4;
+ }
+
+ for (i = 0, n = cif->nargs; i < n; i++)
+ {
+ ffi_type *ty = arg_types[i];
+ int is_vfp_type = vfp_type_p (ty);
+ size_t z = ty->size;
+
+ if (vi < cif->vfp_nargs && is_vfp_type)
+ {
+ avalue[i] = vfp_space + cif->vfp_args[vi++] * 4;
+ continue;
+ }
+ else if (!done_with_regs && !is_vfp_type)
+ {
+ char *tregp = ffi_align (ty, regp);
+
+ z = (z < 4) ? 4 : z; // pad
+
+ /* If the argument either fits entirely into the registers, or spills
+ from the registers onto the stack while nothing has yet been read
+ from the stack area */
+ if (tregp + z <= eo_regp || !stack_used)
+ {
+ /* Because we're little endian, this is what it turns into. */
+ avalue[i] = (void *) tregp;
+ regp = tregp + z;
+
+ /* If we read past the last core register, make sure we
+ have not read from the stack before and continue
+ reading after regp. */
+ if (regp > eo_regp)
+ {
+ FFI_ASSERT (!stack_used);
+ argp = regp;
+ }
+ if (regp >= eo_regp)
+ {
+ done_with_regs = 1;
+ stack_used = 1;
+ }
+ continue;
+ }
+ }
+
+ stack_used = 1;
+ argp = ffi_align (ty, argp);
+ avalue[i] = (void *) argp;
+ argp += z;
+ }
+
+ return rvalue;
+}
+
+struct closure_frame
+{
+ char vfp_space[8*8] __attribute__((aligned(8)));
+ char result[8*4];
+ char argp[];
+};
+
+int FFI_HIDDEN
+ffi_closure_inner_SYSV (ffi_cif *cif,
+ void (*fun) (ffi_cif *, void *, void **, void *),
+ void *user_data,
+ struct closure_frame *frame)
+{
+ void **avalue = (void **) alloca (cif->nargs * sizeof (void *));
+ void *rvalue = ffi_prep_incoming_args_SYSV (cif, frame->result,
+ frame->argp, avalue);
+ fun (cif, rvalue, avalue, user_data);
+ return cif->flags;
+}
+
+int FFI_HIDDEN
+ffi_closure_inner_VFP (ffi_cif *cif,
+ void (*fun) (ffi_cif *, void *, void **, void *),
+ void *user_data,
+ struct closure_frame *frame)
+{
+ void **avalue = (void **) alloca (cif->nargs * sizeof (void *));
+ void *rvalue = ffi_prep_incoming_args_VFP (cif, frame->result, frame->argp,
+ frame->vfp_space, avalue);
+ fun (cif, rvalue, avalue, user_data);
+ return cif->flags;
+}
+
+void ffi_closure_SYSV (void) FFI_HIDDEN;
+void ffi_closure_VFP (void) FFI_HIDDEN;
+void ffi_go_closure_SYSV (void) FFI_HIDDEN;
+void ffi_go_closure_VFP (void) FFI_HIDDEN;
+
+#if FFI_EXEC_TRAMPOLINE_TABLE
+
+#include <mach/mach.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+extern void *ffi_closure_trampoline_table_page;
+
+typedef struct ffi_trampoline_table ffi_trampoline_table;
+typedef struct ffi_trampoline_table_entry ffi_trampoline_table_entry;
+
+struct ffi_trampoline_table
+{
+ /* contiguous writable and executable pages */
+ vm_address_t config_page;
+ vm_address_t trampoline_page;
+
+ /* free list tracking */
+ uint16_t free_count;
+ ffi_trampoline_table_entry *free_list;
+ ffi_trampoline_table_entry *free_list_pool;
+
+ ffi_trampoline_table *prev;
+ ffi_trampoline_table *next;
+};
+
+struct ffi_trampoline_table_entry
+{
+ void *(*trampoline) ();
+ ffi_trampoline_table_entry *next;
+};
+
+/* Override the standard architecture trampoline size */
+// XXX TODO - Fix
+#undef FFI_TRAMPOLINE_SIZE
+#define FFI_TRAMPOLINE_SIZE 12
+
+/* The trampoline configuration is placed at 4080 bytes prior to the trampoline's entry point */
+#define FFI_TRAMPOLINE_CODELOC_CONFIG(codeloc) ((void **) (((uint8_t *) codeloc) - 4080));
+
+/* The first 16 bytes of the config page are unused, as they are unaddressable from the trampoline page. */
+#define FFI_TRAMPOLINE_CONFIG_PAGE_OFFSET 16
+
+/* Total number of trampolines that fit in one trampoline table */
+#define FFI_TRAMPOLINE_COUNT ((PAGE_SIZE - FFI_TRAMPOLINE_CONFIG_PAGE_OFFSET) / FFI_TRAMPOLINE_SIZE)
+
+static pthread_mutex_t ffi_trampoline_lock = PTHREAD_MUTEX_INITIALIZER;
+static ffi_trampoline_table *ffi_trampoline_tables = NULL;
+
+static ffi_trampoline_table *
+ffi_trampoline_table_alloc ()
+{
+ ffi_trampoline_table *table = NULL;
+
+ /* Loop until we can allocate two contiguous pages */
+ while (table == NULL)
+ {
+ vm_address_t config_page = 0x0;
+ kern_return_t kt;
+
+ /* Try to allocate two pages */
+ kt =
+ vm_allocate (mach_task_self (), &config_page, PAGE_SIZE * 2,
+ VM_FLAGS_ANYWHERE);
+ if (kt != KERN_SUCCESS)
+ {
+ fprintf (stderr, "vm_allocate() failure: %d at %s:%d\n", kt,
+ __FILE__, __LINE__);
+ break;
+ }
+
+ /* Now drop the second half of the allocation to make room for the trampoline table */
+ vm_address_t trampoline_page = config_page + PAGE_SIZE;
+ kt = vm_deallocate (mach_task_self (), trampoline_page, PAGE_SIZE);
+ if (kt != KERN_SUCCESS)
+ {
+ fprintf (stderr, "vm_deallocate() failure: %d at %s:%d\n", kt,
+ __FILE__, __LINE__);
+ break;
+ }
+
+ /* Remap the trampoline table to directly follow the config page */
+ vm_prot_t cur_prot;
+ vm_prot_t max_prot;
+
+ kt =
+ vm_remap (mach_task_self (), &trampoline_page, PAGE_SIZE, 0x0, FALSE,
+ mach_task_self (),
+ (vm_address_t) & ffi_closure_trampoline_table_page, FALSE,
+ &cur_prot, &max_prot, VM_INHERIT_SHARE);
+
+ /* If we lost access to the destination trampoline page, drop our config allocation mapping and retry */
+ if (kt != KERN_SUCCESS)
+ {
+ /* Log unexpected failures */
+ if (kt != KERN_NO_SPACE)
+ {
+ fprintf (stderr, "vm_remap() failure: %d at %s:%d\n", kt,
+ __FILE__, __LINE__);
+ }
+
+ vm_deallocate (mach_task_self (), config_page, PAGE_SIZE);
+ continue;
+ }
+
+ /* We have valid trampoline and config pages */
+ table = calloc (1, sizeof (ffi_trampoline_table));
+ table->free_count = FFI_TRAMPOLINE_COUNT;
+ table->config_page = config_page;
+ table->trampoline_page = trampoline_page;
+
+ /* Create and initialize the free list */
+ table->free_list_pool =
+ calloc (FFI_TRAMPOLINE_COUNT, sizeof (ffi_trampoline_table_entry));
+
+ uint16_t i;
+ for (i = 0; i < table->free_count; i++)
+ {
+ ffi_trampoline_table_entry *entry = &table->free_list_pool[i];
+ entry->trampoline =
+ (void *) (table->trampoline_page + (i * FFI_TRAMPOLINE_SIZE));
+
+ if (i < table->free_count - 1)
+ entry->next = &table->free_list_pool[i + 1];
+ }
+
+ table->free_list = table->free_list_pool;
+ }
+
+ return table;
+}
+
+void *
+ffi_closure_alloc (size_t size, void **code)
+{
+ /* Create the closure */
+ ffi_closure *closure = malloc (size);
+ if (closure == NULL)
+ return NULL;
+
+ pthread_mutex_lock (&ffi_trampoline_lock);
+
+ /* Check for an active trampoline table with available entries. */
+ ffi_trampoline_table *table = ffi_trampoline_tables;
+ if (table == NULL || table->free_list == NULL)
+ {
+ table = ffi_trampoline_table_alloc ();
+ if (table == NULL)
+ {
+ free (closure);
+ return NULL;
+ }
+
+ /* Insert the new table at the top of the list */
+ table->next = ffi_trampoline_tables;
+ if (table->next != NULL)
+ table->next->prev = table;
+
+ ffi_trampoline_tables = table;
+ }
+
+ /* Claim the free entry */
+ ffi_trampoline_table_entry *entry = ffi_trampoline_tables->free_list;
+ ffi_trampoline_tables->free_list = entry->next;
+ ffi_trampoline_tables->free_count--;
+ entry->next = NULL;
+
+ pthread_mutex_unlock (&ffi_trampoline_lock);
+
+ /* Initialize the return values */
+ *code = entry->trampoline;
+ closure->trampoline_table = table;
+ closure->trampoline_table_entry = entry;
+
+ return closure;
+}
+
+void
+ffi_closure_free (void *ptr)
+{
+ ffi_closure *closure = ptr;
+
+ pthread_mutex_lock (&ffi_trampoline_lock);
+
+ /* Fetch the table and entry references */
+ ffi_trampoline_table *table = closure->trampoline_table;
+ ffi_trampoline_table_entry *entry = closure->trampoline_table_entry;
+
+ /* Return the entry to the free list */
+ entry->next = table->free_list;
+ table->free_list = entry;
+ table->free_count++;
+
+ /* If all trampolines within this table are free, and at least one other table exists, deallocate
+ * the table */
+ if (table->free_count == FFI_TRAMPOLINE_COUNT
+ && ffi_trampoline_tables != table)
+ {
+ /* Remove from the list */
+ if (table->prev != NULL)
+ table->prev->next = table->next;
+
+ if (table->next != NULL)
+ table->next->prev = table->prev;
+
+ /* Deallocate pages */
+ kern_return_t kt;
+ kt = vm_deallocate (mach_task_self (), table->config_page, PAGE_SIZE);
+ if (kt != KERN_SUCCESS)
+ fprintf (stderr, "vm_deallocate() failure: %d at %s:%d\n", kt,
+ __FILE__, __LINE__);
+
+ kt =
+ vm_deallocate (mach_task_self (), table->trampoline_page, PAGE_SIZE);
+ if (kt != KERN_SUCCESS)
+ fprintf (stderr, "vm_deallocate() failure: %d at %s:%d\n", kt,
+ __FILE__, __LINE__);
+
+ /* Deallocate free list */
+ free (table->free_list_pool);
+ free (table);
+ }
+ else if (ffi_trampoline_tables != table)
+ {
+ /* Otherwise, bump this table to the top of the list */
+ table->prev = NULL;
+ table->next = ffi_trampoline_tables;
+ if (ffi_trampoline_tables != NULL)
+ ffi_trampoline_tables->prev = table;
+
+ ffi_trampoline_tables = table;
+ }
+
+ pthread_mutex_unlock (&ffi_trampoline_lock);
+
+ /* Free the closure */
+ free (closure);
+}
+
+#else
+
+extern unsigned int ffi_arm_trampoline[2] FFI_HIDDEN;
+
+#endif
+
+/* the cif must already be prep'ed */
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure * closure,
+ ffi_cif * cif,
+ void (*fun) (ffi_cif *, void *, void **, void *),
+ void *user_data, void *codeloc)
+{
+ void (*closure_func) (void) = ffi_closure_SYSV;
+
+ if (cif->abi == FFI_VFP)
+ {
+ /* We only need to take the vfp path if there are vfp arguments. */
+ if (cif->vfp_used)
+ closure_func = ffi_closure_VFP;
+ }
+ else if (cif->abi != FFI_SYSV)
+ return FFI_BAD_ABI;
+
+#if FFI_EXEC_TRAMPOLINE_TABLE
+ void **config = FFI_TRAMPOLINE_CODELOC_CONFIG (codeloc);
+ config[0] = closure;
+ config[1] = closure_func;
+#else
+ memcpy (closure->tramp, ffi_arm_trampoline, 8);
+ __clear_cache(closure->tramp, closure->tramp + 8); /* clear data map */
+ __clear_cache(codeloc, codeloc + 8); /* clear insn map */
+ *(void (**)(void))(closure->tramp + 8) = closure_func;
+#endif
+
+ closure->cif = cif;
+ closure->fun = fun;
+ closure->user_data = user_data;
+
+ return FFI_OK;
+}
+
+ffi_status
+ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif,
+ void (*fun) (ffi_cif *, void *, void **, void *))
+{
+ void (*closure_func) (void) = ffi_go_closure_SYSV;
+
+ if (cif->abi == FFI_VFP)
+ {
+ /* We only need to take the vfp path if there are vfp arguments. */
+ if (cif->vfp_used)
+ closure_func = ffi_go_closure_VFP;
+ }
+ else if (cif->abi != FFI_SYSV)
+ return FFI_BAD_ABI;
+
+ closure->tramp = closure_func;
+ closure->cif = cif;
+ closure->fun = fun;
+
+ return FFI_OK;
+}
+
+/* Below are routines for VFP hard-float support. */
+
+/* A subroutine of vfp_type_p. Given a structure type, return the type code
+ of the first non-structure element. Recurse for structure elements.
+ Return -1 if the structure is in fact empty, i.e. no nested elements. */
+
+static int
+is_hfa0 (const ffi_type *ty)
+{
+ ffi_type **elements = ty->elements;
+ int i, ret = -1;
+
+ if (elements != NULL)
+ for (i = 0; elements[i]; ++i)
+ {
+ ret = elements[i]->type;
+ if (ret == FFI_TYPE_STRUCT || ret == FFI_TYPE_COMPLEX)
+ {
+ ret = is_hfa0 (elements[i]);
+ if (ret < 0)
+ continue;
+ }
+ break;
+ }
+
+ return ret;
+}
+
+/* A subroutine of vfp_type_p. Given a structure type, return true if all
+ of the non-structure elements are the same as CANDIDATE. */
+
+static int
+is_hfa1 (const ffi_type *ty, int candidate)
+{
+ ffi_type **elements = ty->elements;
+ int i;
+
+ if (elements != NULL)
+ for (i = 0; elements[i]; ++i)
+ {
+ int t = elements[i]->type;
+ if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX)
+ {
+ if (!is_hfa1 (elements[i], candidate))
+ return 0;
+ }
+ else if (t != candidate)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Determine if TY is a homogeneous floating point aggregate (HFA).
+ That is, a structure consisting of 1 to 4 members of all the same type,
+ where that type is a floating point scalar.
+
+ Returns non-zero iff TY is an HFA. The result is an encoded value where
+ bits 0-7 contain the type code, and bits 8-10 contain the element count. */
+
+static int
+vfp_type_p (const ffi_type *ty)
+{
+ ffi_type **elements;
+ int candidate, i;
+ size_t size, ele_count;
+
+ /* Quickest tests first. */
+ candidate = ty->type;
+ switch (ty->type)
+ {
+ default:
+ return 0;
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ ele_count = 1;
+ goto done;
+ case FFI_TYPE_COMPLEX:
+ candidate = ty->elements[0]->type;
+ if (candidate != FFI_TYPE_FLOAT && candidate != FFI_TYPE_DOUBLE)
+ return 0;
+ ele_count = 2;
+ goto done;
+ case FFI_TYPE_STRUCT:
+ break;
+ }
+
+ /* No HFA types are smaller than 4 bytes, or larger than 32 bytes. */
+ size = ty->size;
+ if (size < 4 || size > 32)
+ return 0;
+
+ /* Find the type of the first non-structure member. */
+ elements = ty->elements;
+ candidate = elements[0]->type;
+ if (candidate == FFI_TYPE_STRUCT || candidate == FFI_TYPE_COMPLEX)
+ {
+ for (i = 0; ; ++i)
+ {
+ candidate = is_hfa0 (elements[i]);
+ if (candidate >= 0)
+ break;
+ }
+ }
+
+ /* If the first member is not a floating point type, it's not an HFA.
+ Also quickly re-check the size of the structure. */
+ switch (candidate)
+ {
+ case FFI_TYPE_FLOAT:
+ ele_count = size / sizeof(float);
+ if (size != ele_count * sizeof(float))
+ return 0;
+ break;
+ case FFI_TYPE_DOUBLE:
+ ele_count = size / sizeof(double);
+ if (size != ele_count * sizeof(double))
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ if (ele_count > 4)
+ return 0;
+
+ /* Finally, make sure that all scalar elements are the same type. */
+ for (i = 0; elements[i]; ++i)
+ {
+ int t = elements[i]->type;
+ if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX)
+ {
+ if (!is_hfa1 (elements[i], candidate))
+ return 0;
+ }
+ else if (t != candidate)
+ return 0;
+ }
+
+ /* All tests succeeded. Encode the result. */
+ done:
+ return (ele_count << 8) | candidate;
+}
+
+static int
+place_vfp_arg (ffi_cif *cif, int h)
+{
+ unsigned short reg = cif->vfp_reg_free;
+ int align = 1, nregs = h >> 8;
+
+ if ((h & 0xff) == FFI_TYPE_DOUBLE)
+ align = 2, nregs *= 2;
+
+ /* Align register number. */
+ if ((reg & 1) && align == 2)
+ reg++;
+
+ while (reg + nregs <= 16)
+ {
+ int s, new_used = 0;
+ for (s = reg; s < reg + nregs; s++)
+ {
+ new_used |= (1 << s);
+ if (cif->vfp_used & (1 << s))
+ {
+ reg += align;
+ goto next_reg;
+ }
+ }
+ /* Found regs to allocate. */
+ cif->vfp_used |= new_used;
+ cif->vfp_args[cif->vfp_nargs++] = reg;
+
+ /* Update vfp_reg_free. */
+ if (cif->vfp_used & (1 << cif->vfp_reg_free))
+ {
+ reg += nregs;
+ while (cif->vfp_used & (1 << reg))
+ reg += 1;
+ cif->vfp_reg_free = reg;
+ }
+ return 0;
+ next_reg:;
+ }
+ /* Done: mark all registers as used. */
+ cif->vfp_reg_free = 16;
+ cif->vfp_used = 0xFFFF;
+ return 1;
+}
+
+static void
+layout_vfp_args (ffi_cif * cif)
+{
+ int i;
+ /* Init VFP fields */
+ cif->vfp_used = 0;
+ cif->vfp_nargs = 0;
+ cif->vfp_reg_free = 0;
+ memset (cif->vfp_args, -1, 16); /* Init to -1. */
+
+ for (i = 0; i < cif->nargs; i++)
+ {
+ int h = vfp_type_p (cif->arg_types[i]);
+ if (h && place_vfp_arg (cif, h) == 1)
+ break;
+ }
+}
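The HFA encoding computed by vfp_type_p() above is easiest to see on a concrete case. The following standalone C sketch mirrors the return value for struct { float x, y, z; }; the numeric value of FFI_TYPE_FLOAT (2) is an assumption taken from ffi.h, and vfp_type_p itself is file-static, so its arithmetic is reproduced here rather than called:

/* Sketch: what vfp_type_p() would return for struct { float x, y, z; },
   an HFA of three floats.  SKETCH_FFI_TYPE_FLOAT is a hypothetical stand-in
   for ffi.h's FFI_TYPE_FLOAT. */
#include <stdio.h>

#define SKETCH_FFI_TYPE_FLOAT 2

int main(void)
{
  int h = (3 << 8) | SKETCH_FFI_TYPE_FLOAT;  /* bits 8-10: count, 0-7: type */
  printf("count=%d type=%d\n", h >> 8, h & 0xff);  /* prints: count=3 type=2 */
  /* place_vfp_arg() would then reserve s0-s2 (nregs = 3, align = 1). */
  return 0;
}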
diff -Nru orig/Modules/_ctypes/libffi_ios/arm/ffitarget.h modified/Modules/_ctypes/libffi_ios/arm/ffitarget.h
--- orig/Modules/_ctypes/libffi_ios/arm/ffitarget.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/arm/ffitarget.h 2015-03-12 21:33:17.000000000 +0800
@@ -0,0 +1,69 @@
+/* -----------------------------------------------------------------*-C-*-
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 2010 CodeSourcery
+ Copyright (c) 1996-2003 Red Hat, Inc.
+
+ Target configuration macros for ARM.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+ ----------------------------------------------------------------------- */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
+#ifndef LIBFFI_ASM
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+
+typedef enum ffi_abi {
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV,
+ FFI_VFP,
+ FFI_LAST_ABI,
+#ifdef __ARM_PCS_VFP
+ FFI_DEFAULT_ABI = FFI_VFP,
+#else
+ FFI_DEFAULT_ABI = FFI_SYSV,
+#endif
+} ffi_abi;
+#endif
+
+#define FFI_EXTRA_CIF_FIELDS \
+ int vfp_used; \
+ unsigned short vfp_reg_free, vfp_nargs; \
+ signed char vfp_args[16] \
+
+#define FFI_TARGET_SPECIFIC_VARIADIC
+#define FFI_TARGET_HAS_COMPLEX_TYPE
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
+#define FFI_GO_CLOSURES 1
+#define FFI_TRAMPOLINE_SIZE 12
+#define FFI_NATIVE_RAW_API 0
+
+#endif
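The ffi_abi enum above is the value callers hand to ffi_prep_cif; FFI_DEFAULT_ABI resolves to FFI_VFP when the compiler targets the hard-float ABI (__ARM_PCS_VFP) and to FFI_SYSV otherwise. A minimal usage sketch, assuming only the public libffi API (fabsf is just an illustrative callee):

#include <ffi.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
  ffi_cif cif;
  ffi_type *args[1] = { &ffi_type_float };
  float in = -2.5f, out = 0.0f;
  void *argv[1] = { &in };

  /* FFI_DEFAULT_ABI is FFI_VFP or FFI_SYSV depending on __ARM_PCS_VFP. */
  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_float, args) == FFI_OK)
    {
      ffi_call(&cif, FFI_FN(fabsf), &out, argv);
      printf("%f\n", out);  /* 2.500000 */
    }
  return 0;
}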
diff -Nru orig/Modules/_ctypes/libffi_ios/arm/internal.h modified/Modules/_ctypes/libffi_ios/arm/internal.h
--- orig/Modules/_ctypes/libffi_ios/arm/internal.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/arm/internal.h 2015-03-12 21:33:17.000000000 +0800
@@ -0,0 +1,7 @@
+#define ARM_TYPE_VFP_S 0
+#define ARM_TYPE_VFP_D 1
+#define ARM_TYPE_VFP_N 2
+#define ARM_TYPE_INT64 3
+#define ARM_TYPE_INT 4
+#define ARM_TYPE_VOID 5
+#define ARM_TYPE_STRUCT 6
diff -Nru orig/Modules/_ctypes/libffi_ios/arm/sysv.S modified/Modules/_ctypes/libffi_ios/arm/sysv.S
--- orig/Modules/_ctypes/libffi_ios/arm/sysv.S 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/arm/sysv.S 2015-03-12 21:33:17.000000000 +0800
@@ -0,0 +1,335 @@
+/* -----------------------------------------------------------------------
+ sysv.S - Copyright (c) 1998, 2008, 2011 Red Hat, Inc.
+ Copyright (c) 2011 Plausible Labs Cooperative, Inc.
+
+ ARM Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+#include <ffi_cfi.h>
+#include "internal.h"
+
+/* GCC 4.8 provides __ARM_ARCH; construct it otherwise. */
+#ifndef __ARM_ARCH
+# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
+ || defined(__ARM_ARCH_7EM__)
+# define __ARM_ARCH 7
+# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
+ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
+ || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \
+ || defined(__ARM_ARCH_6M__)
+# define __ARM_ARCH 6
+# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
+ || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
+ || defined(__ARM_ARCH_5TEJ__)
+# define __ARM_ARCH 5
+# else
+# define __ARM_ARCH 4
+# endif
+#endif
+
+/* Conditionally compile unwinder directives. */
+.macro UNWIND text:vararg
+#ifdef __ARM_EABI__
+ \text
+#endif
+.endm
+#if defined(HAVE_AS_CFI_PSEUDO_OP) && defined(__ARM_EABI__)
+ .cfi_sections .debug_frame
+#endif
+
+#define CONCAT(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+#ifdef __USER_LABEL_PREFIX__
+# define CNAME(X) CONCAT (__USER_LABEL_PREFIX__, X)
+#else
+# define CNAME(X) X
+#endif
+#ifdef __ELF__
+# define SIZE(X) .size CNAME(X), . - CNAME(X)
+# define TYPE(X, Y) .type CNAME(X), Y
+#else
+# define SIZE(X)
+# define TYPE(X, Y)
+#endif
+
+#define ARM_FUNC_START(name, gl) \
+ .align 3; \
+ .ifne gl; .globl CNAME(name); FFI_HIDDEN(CNAME(name)); .endif; \
+ TYPE(name, %function); \
+ CNAME(name):
+
+#define ARM_FUNC_END(name) \
+ SIZE(name)
+
+/* Aid in defining a jump table with 8 bytes between entries. */
+.macro E index
+ .if . - 0b - 8*\index
+ .error "type table out of sync"
+ .endif
+.endm
+
+ .text
+ .syntax unified
+ .arm
+
+ /* We require interworking on LDM, which implies ARMv5T,
+ which implies the existence of BLX. */
+ .arch armv5t
+
+ /* Note that we use STC and LDC to encode VFP instructions,
+ so that we do not need ".fpu vfp", nor get that added to
+ the object file attributes. These will not be executed
+ unless the FFI_VFP abi is used. */
+
+ @ r0: stack
+ @ r1: frame
+ @ r2: fn
+ @ r3: vfp_used
+
+ARM_FUNC_START(ffi_call_VFP, 1)
+ UNWIND .fnstart
+ cfi_startproc
+
+ cmp r3, #3 @ load only d0 if possible
+ ldcle p11, cr0, [r0] @ vldrle d0, [sp]
+ ldcgt p11, cr0, [r0], {16} @ vldmgt sp, {d0-d7}
+ add r0, r0, #64 @ discard the vfp register args
+ /* FALLTHRU */
+ARM_FUNC_END(ffi_call_VFP)
+
+ARM_FUNC_START(ffi_call_SYSV, 1)
+ stm r1, {fp, lr}
+ mov fp, r1
+
+ @ This is a bit of a lie wrt the origin of the unwind info, but
+ @ now we've got the usual frame pointer and two saved registers.
+ UNWIND .save {fp,lr}
+ UNWIND .setfp fp, sp
+ cfi_def_cfa(fp, 8)
+ cfi_rel_offset(fp, 0)
+ cfi_rel_offset(lr, 4)
+
+ mov sp, r0 @ install the stack pointer
+ mov lr, r2 @ move the fn pointer out of the way
+ ldr ip, [fp, #16] @ install the static chain
+ ldmia sp!, {r0-r3} @ move first 4 parameters in registers.
+ blx lr @ call fn
+
+ @ Load r2 with the pointer to storage for the return value
+ @ Load r3 with the return type code
+ ldr r2, [fp, #8]
+ ldr r3, [fp, #12]
+
+ @ Deallocate the stack with the arguments.
+ mov sp, fp
+ cfi_def_cfa_register(sp)
+
+ @ Store values stored in registers.
+ .align 3
+ add pc, pc, r3, lsl #3
+ nop
+0:
+E ARM_TYPE_VFP_S
+ stc p10, cr0, [r2] @ vstr s0, [r2]
+ pop {fp,pc}
+E ARM_TYPE_VFP_D
+ stc p11, cr0, [r2] @ vstr d0, [r2]
+ pop {fp,pc}
+E ARM_TYPE_VFP_N
+ stc p11, cr0, [r2], {8} @ vstm r2, {d0-d3}
+ pop {fp,pc}
+E ARM_TYPE_INT64
+ str r1, [r2, #4]
+ nop
+E ARM_TYPE_INT
+ str r0, [r2]
+ pop {fp,pc}
+E ARM_TYPE_VOID
+ pop {fp,pc}
+ nop
+E ARM_TYPE_STRUCT
+ pop {fp,pc}
+
+ cfi_endproc
+ UNWIND .fnend
+ARM_FUNC_END(ffi_call_SYSV)
+
+
+/*
+ int ffi_closure_inner_* (cif, fun, user_data, frame)
+*/
+
+ARM_FUNC_START(ffi_go_closure_SYSV, 1)
+ cfi_startproc
+ stmdb sp!, {r0-r3} @ save argument regs
+ cfi_adjust_cfa_offset(16)
+ ldr r0, [ip, #4] @ load cif
+ ldr r1, [ip, #8] @ load fun
+ mov r2, ip @ load user_data
+ b 0f
+ cfi_endproc
+ARM_FUNC_END(ffi_go_closure_SYSV)
+
+ARM_FUNC_START(ffi_closure_SYSV, 1)
+ UNWIND .fnstart
+ cfi_startproc
+ stmdb sp!, {r0-r3} @ save argument regs
+ cfi_adjust_cfa_offset(16)
+ ldr r0, [ip, #FFI_TRAMPOLINE_SIZE] @ load cif
+ ldr r1, [ip, #FFI_TRAMPOLINE_SIZE+4] @ load fun
+ ldr r2, [ip, #FFI_TRAMPOLINE_SIZE+8] @ load user_data
+0:
+ add ip, sp, #16 @ compute entry sp
+ sub sp, sp, #64+32 @ allocate frame
+ cfi_adjust_cfa_offset(64+32)
+ stmdb sp!, {ip,lr}
+
+ /* Remember that EABI unwind info only applies at call sites.
+ We need do nothing except note the save of the stack pointer
+ and the link register. */
+ UNWIND .save {sp,lr}
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(lr, 4)
+
+ add r3, sp, #8 @ load frame
+ bl CNAME(ffi_closure_inner_SYSV)
+
+ @ Load values returned in registers.
+ add r2, sp, #8+64 @ load result
+ adr r3, CNAME(ffi_closure_ret)
+ add pc, r3, r0, lsl #3
+ cfi_endproc
+ UNWIND .fnend
+ARM_FUNC_END(ffi_closure_SYSV)
+
+ARM_FUNC_START(ffi_go_closure_VFP, 1)
+ cfi_startproc
+ stmdb sp!, {r0-r3} @ save argument regs
+ cfi_adjust_cfa_offset(16)
+ ldr r0, [ip, #4] @ load cif
+ ldr r1, [ip, #8] @ load fun
+ mov r2, ip @ load user_data
+ b 0f
+ cfi_endproc
+ARM_FUNC_END(ffi_go_closure_VFP)
+
+ARM_FUNC_START(ffi_closure_VFP, 1)
+ UNWIND .fnstart
+ cfi_startproc
+ stmdb sp!, {r0-r3} @ save argument regs
+ cfi_adjust_cfa_offset(16)
+ ldr r0, [ip, #FFI_TRAMPOLINE_SIZE] @ load cif
+ ldr r1, [ip, #FFI_TRAMPOLINE_SIZE+4] @ load fun
+ ldr r2, [ip, #FFI_TRAMPOLINE_SIZE+8] @ load user_data
+0:
+ add ip, sp, #16
+ sub sp, sp, #64+32 @ allocate frame
+ cfi_adjust_cfa_offset(64+32)
+ stc p11, cr0, [sp], {16} @ vstm sp, {d0-d7}
+ stmdb sp!, {ip,lr}
+
+ /* See above. */
+ UNWIND .save {sp,lr}
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(lr, 4)
+
+ add r3, sp, #8 @ load frame
+ bl CNAME(ffi_closure_inner_VFP)
+
+ @ Load values returned in registers.
+ add r2, sp, #8+64 @ load result
+ adr r3, CNAME(ffi_closure_ret)
+ add pc, r3, r0, lsl #3
+ cfi_endproc
+ UNWIND .fnend
+ARM_FUNC_END(ffi_closure_VFP)
+
+/* Load values returned in registers for both closure entry points.
+ Note that we use LDM with SP in the register set. This is deprecated
+ by ARM, but not yet unpredictable. */
+
+ARM_FUNC_START(ffi_closure_ret, 0)
+ cfi_startproc
+ cfi_rel_offset(sp, 0)
+ cfi_rel_offset(lr, 4)
+0:
+E ARM_TYPE_VFP_S
+ ldc p10, cr0, [r2] @ vldr s0, [r2]
+ ldm sp, {sp,pc}
+E ARM_TYPE_VFP_D
+ ldc p11, cr0, [r2] @ vldr d0, [r2]
+ ldm sp, {sp,pc}
+E ARM_TYPE_VFP_N
+ ldc p11, cr0, [r2], {8} @ vldm r2, {d0-d3}
+ ldm sp, {sp,pc}
+E ARM_TYPE_INT64
+ ldr r1, [r2, #4]
+ nop
+E ARM_TYPE_INT
+ ldr r0, [r2]
+ ldm sp, {sp,pc}
+E ARM_TYPE_VOID
+ ldm sp, {sp,pc}
+ nop
+E ARM_TYPE_STRUCT
+ ldm sp, {sp,pc}
+ cfi_endproc
+ARM_FUNC_END(ffi_closure_ret)
+
+#if FFI_EXEC_TRAMPOLINE_TABLE
+
+/* ??? The iOS support should be updated. The first insn used to
+ be STMFD, but that's been moved into ffi_closure_SYSV. If the
+ writable page is put after this one we can make use of the
+ pc+8 feature of the architecture. We can also reduce the size
+ of the thunk to 8 and pack more of these into the page.
+
+ In the meantime, simply replace the STMFD with a NOP so as to
+ keep all the magic numbers the same within ffi.c. */
+
+ .align 12
+ARM_FUNC_START(ffi_closure_trampoline_table_page)
+.rept 4096 / 12
+ nop
+ ldr ip, [pc, #-4092]
+ ldr pc, [pc, #-4092]
+.endr
+
+#else
+
+ARM_FUNC_START(ffi_arm_trampoline, 1)
+0: adr ip, 0b
+ ldr pc, 1f
+1: .long 0
+ARM_FUNC_END(ffi_arm_trampoline)
+
+#endif /* FFI_EXEC_TRAMPOLINE_TABLE */
+
+#if defined __ELF__ && defined __linux__
+ .section .note.GNU-stack,"",%progbits
+#endif
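The return-type dispatch in ffi_call_SYSV above ("add pc, pc, r3, lsl #3") relies on ARM-state pc reading as the current instruction address plus 8: the add sits 8 bytes before label 0:, so the branch target is exactly label0 + 8 * type_code, one two-instruction slot per ARM_TYPE_* code, which the E macro verifies at assembly time. A sketch of that arithmetic (the addresses are hypothetical):

/* Sketch: branch-target arithmetic of "add pc, pc, r3, lsl #3". */
#include <stdio.h>

#define ARM_TYPE_INT64 3   /* from arm/internal.h above */

int main(void)
{
  unsigned add_insn = 0x8000;        /* hypothetical address of the add  */
  unsigned label0   = add_insn + 8;  /* pc reads as insn address + 8     */
  unsigned target   = add_insn + 8 + (ARM_TYPE_INT64 << 3);
  printf("ARM_TYPE_INT64 entry at label0 + %u\n", target - label0);  /* 24 */
  return 0;
}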
diff -Nru orig/Modules/_ctypes/libffi_ios/closures.c modified/Modules/_ctypes/libffi_ios/closures.c
--- orig/Modules/_ctypes/libffi_ios/closures.c 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/closures.c 2015-03-12 21:34:00.000000000 +0800
@@ -0,0 +1,688 @@
+/* -----------------------------------------------------------------------
+ closures.c - Copyright (c) 2007, 2009, 2010 Red Hat, Inc.
+ Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc
+ Copyright (c) 2011 Plausible Labs Cooperative, Inc.
+
+ Code to allocate and deallocate memory for closures.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#if defined __linux__ && !defined _GNU_SOURCE
+#define _GNU_SOURCE 1
+#endif
+
+#include <fficonfig.h>
+#include <ffi.h>
+
+#if !FFI_MMAP_EXEC_WRIT && !FFI_EXEC_TRAMPOLINE_TABLE
+# if __gnu_linux__ && !defined(__ANDROID__)
+/* This macro indicates it may be forbidden to map anonymous memory
+ with both write and execute permission. Code compiled when this
+ option is defined will attempt to map such pages once, but if it
+ fails, it falls back to creating a temporary file in a writable and
+ executable filesystem and mapping pages from it into separate
+ locations in the virtual memory space, one location writable and
+ another executable. */
+# define FFI_MMAP_EXEC_WRIT 1
+# define HAVE_MNTENT 1
+# endif
+# if defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)
+/* Windows systems may have Data Execution Protection (DEP) enabled,
+ which requires the use of VirtualMalloc/VirtualFree to alloc/free
+ executable memory. */
+# define FFI_MMAP_EXEC_WRIT 1
+# endif
+#endif
+
+#if FFI_MMAP_EXEC_WRIT && !defined FFI_MMAP_EXEC_SELINUX
+# ifdef __linux__
+/* When defined to 1 check for SELinux and if SELinux is active,
+ don't attempt PROT_EXEC|PROT_WRITE mapping at all, as that
+ might cause audit messages. */
+# define FFI_MMAP_EXEC_SELINUX 1
+# endif
+#endif
+
+#if FFI_CLOSURES
+
+# if FFI_EXEC_TRAMPOLINE_TABLE
+
+// Per-target implementation; it's unclear what can reasonably be shared between two OS/architecture implementations.
+
+# elif FFI_MMAP_EXEC_WRIT /* !FFI_EXEC_TRAMPOLINE_TABLE */
+
+#define USE_LOCKS 1
+#define USE_DL_PREFIX 1
+#ifdef __GNUC__
+#ifndef USE_BUILTIN_FFS
+#define USE_BUILTIN_FFS 1
+#endif
+#endif
+
+/* We need to use mmap, not sbrk. */
+#define HAVE_MORECORE 0
+
+/* We could, in theory, support mremap, but it wouldn't buy us anything. */
+#define HAVE_MREMAP 0
+
+/* We have no use for this, so save some code and data. */
+#define NO_MALLINFO 1
+
+/* We need all allocations to be in regular segments, otherwise we
+ lose track of the corresponding code address. */
+#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
+
+/* Don't allocate more than a page unless needed. */
+#define DEFAULT_GRANULARITY ((size_t)malloc_getpagesize)
+
+#if FFI_CLOSURE_TEST
+/* Don't release single pages, to avoid a worst-case scenario of
+ continuously allocating and releasing single pages, but release
+ pairs of pages, which should do just as well given that allocations
+ are likely to be small. */
+#define DEFAULT_TRIM_THRESHOLD ((size_t)malloc_getpagesize)
+#endif
+
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <unistd.h>
+#endif
+#include <fcntl.h>
+#include <errno.h>
+#if !defined(X86_WIN32) && !defined(X86_WIN64)
+#ifdef HAVE_MNTENT
+#include <mntent.h>
+#endif /* HAVE_MNTENT */
+#include <sys/param.h>
+#include <pthread.h>
+
+/* We don't want sys/mman.h to be included after we redefine mmap and
+ dlmunmap. */
+#include <sys/mman.h>
+#define LACKS_SYS_MMAN_H 1
+
+#if FFI_MMAP_EXEC_SELINUX
+#include <sys/statfs.h>
+#include <stdlib.h>
+
+static int selinux_enabled = -1;
+
+static int
+selinux_enabled_check (void)
+{
+ struct statfs sfs;
+ FILE *f;
+ char *buf = NULL;
+ size_t len = 0;
+
+ if (statfs ("/selinux", &sfs) >= 0
+ && (unsigned int) sfs.f_type == 0xf97cff8cU)
+ return 1;
+ f = fopen ("/proc/mounts", "r");
+ if (f == NULL)
+ return 0;
+ while (getline (&buf, &len, f) >= 0)
+ {
+ char *p = strchr (buf, ' ');
+ if (p == NULL)
+ break;
+ p = strchr (p + 1, ' ');
+ if (p == NULL)
+ break;
+ if (strncmp (p + 1, "selinuxfs ", 10) == 0)
+ {
+ free (buf);
+ fclose (f);
+ return 1;
+ }
+ }
+ free (buf);
+ fclose (f);
+ return 0;
+}
+
+#define is_selinux_enabled() (selinux_enabled >= 0 ? selinux_enabled \
+ : (selinux_enabled = selinux_enabled_check ()))
+
+#else
+
+#define is_selinux_enabled() 0
+
+#endif /* !FFI_MMAP_EXEC_SELINUX */
+
+/* On PaX-enabled kernels that have MPROTECT enabled we can't use PROT_EXEC. */
+#ifdef FFI_MMAP_EXEC_EMUTRAMP_PAX
+#include <stdlib.h>
+
+static int emutramp_enabled = -1;
+
+static int
+emutramp_enabled_check (void)
+{
+ char *buf = NULL;
+ size_t len = 0;
+ FILE *f;
+ int ret;
+ f = fopen ("/proc/self/status", "r");
+ if (f == NULL)
+ return 0;
+ ret = 0;
+
+ while (getline (&buf, &len, f) != -1)
+ if (!strncmp (buf, "PaX:", 4))
+ {
+ char emutramp;
+ if (sscanf (buf, "%*s %*c%c", &emutramp) == 1)
+ ret = (emutramp == 'E');
+ break;
+ }
+ free (buf);
+ fclose (f);
+ return ret;
+}
+
+#define is_emutramp_enabled() (emutramp_enabled >= 0 ? emutramp_enabled \
+ : (emutramp_enabled = emutramp_enabled_check ()))
+#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */
+
+#elif defined (__CYGWIN__) || defined(__INTERIX)
+
+#include <sys/mman.h>
+
+/* Cygwin is Linux-like, but not quite that Linux-like. */
+#define is_selinux_enabled() 0
+
+#endif /* !defined(X86_WIN32) && !defined(X86_WIN64) */
+
+#ifndef FFI_MMAP_EXEC_EMUTRAMP_PAX
+#define is_emutramp_enabled() 0
+#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */
+
+/* Declare all functions defined in dlmalloc.c as static. */
+static void *dlmalloc(size_t);
+static void dlfree(void*);
+static void *dlcalloc(size_t, size_t) MAYBE_UNUSED;
+static void *dlrealloc(void *, size_t) MAYBE_UNUSED;
+static void *dlmemalign(size_t, size_t) MAYBE_UNUSED;
+static void *dlvalloc(size_t) MAYBE_UNUSED;
+static int dlmallopt(int, int) MAYBE_UNUSED;
+static size_t dlmalloc_footprint(void) MAYBE_UNUSED;
+static size_t dlmalloc_max_footprint(void) MAYBE_UNUSED;
+static void** dlindependent_calloc(size_t, size_t, void**) MAYBE_UNUSED;
+static void** dlindependent_comalloc(size_t, size_t*, void**) MAYBE_UNUSED;
+static void *dlpvalloc(size_t) MAYBE_UNUSED;
+static int dlmalloc_trim(size_t) MAYBE_UNUSED;
+static size_t dlmalloc_usable_size(void*) MAYBE_UNUSED;
+static void dlmalloc_stats(void) MAYBE_UNUSED;
+
+#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
+/* Use these for mmap and munmap within dlmalloc.c. */
+static void *dlmmap(void *, size_t, int, int, int, off_t);
+static int dlmunmap(void *, size_t);
+#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */
+
+#define mmap dlmmap
+#define munmap dlmunmap
+
+#include "dlmalloc.c"
+
+#undef mmap
+#undef munmap
+
+#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
+
+/* A mutex used to synchronize access to *exec* variables in this file. */
+static pthread_mutex_t open_temp_exec_file_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/* A file descriptor of a temporary file from which we'll map
+ executable pages. */
+static int execfd = -1;
+
+/* The amount of space already allocated from the temporary file. */
+static size_t execsize = 0;
+
+/* Open a temporary file name, and immediately unlink it. */
+static int
+open_temp_exec_file_name (char *name, int flags)
+{
+ int fd;
+
+#ifdef HAVE_MKOSTEMP
+ fd = mkostemp (name, flags);
+#else
+ fd = mkstemp (name);
+#endif
+
+ if (fd != -1)
+ unlink (name);
+
+ return fd;
+}
+
+/* Open a temporary file in the named directory. */
+static int
+open_temp_exec_file_dir (const char *dir)
+{
+ static const char suffix[] = "/ffiXXXXXX";
+ int lendir, flags;
+ char *tempname;
+#ifdef O_TMPFILE
+ int fd;
+#endif
+
+#ifdef O_CLOEXEC
+ flags = O_CLOEXEC;
+#else
+ flags = 0;
+#endif
+
+#ifdef O_TMPFILE
+ fd = open (dir, flags | O_RDWR | O_EXCL | O_TMPFILE, 0700);
+ /* If the running system does not support the O_TMPFILE flag then retry without it. */
+ if (fd != -1 || (errno != EINVAL && errno != EISDIR && errno != EOPNOTSUPP)) {
+ return fd;
+ } else {
+ errno = 0;
+ }
+#endif
+
+ lendir = strlen (dir);
+ tempname = __builtin_alloca (lendir + sizeof (suffix));
+
+ if (!tempname)
+ return -1;
+
+ memcpy (tempname, dir, lendir);
+ memcpy (tempname + lendir, suffix, sizeof (suffix));
+
+ return open_temp_exec_file_name (tempname, flags);
+}
+
+/* Open a temporary file in the directory in the named environment
+ variable. */
+static int
+open_temp_exec_file_env (const char *envvar)
+{
+ const char *value = getenv (envvar);
+
+ if (!value)
+ return -1;
+
+ return open_temp_exec_file_dir (value);
+}
+
+#ifdef HAVE_MNTENT
+/* Open a temporary file in an executable and writable mount point
+ listed in the mounts file. Subsequent calls with the same mounts
+ keep searching for mount points in the same file. Providing NULL
+ as the mounts file closes the file. */
+static int
+open_temp_exec_file_mnt (const char *mounts)
+{
+ static const char *last_mounts;
+ static FILE *last_mntent;
+
+ if (mounts != last_mounts)
+ {
+ if (last_mntent)
+ endmntent (last_mntent);
+
+ last_mounts = mounts;
+
+ if (mounts)
+ last_mntent = setmntent (mounts, "r");
+ else
+ last_mntent = NULL;
+ }
+
+ if (!last_mntent)
+ return -1;
+
+ for (;;)
+ {
+ int fd;
+ struct mntent mnt;
+ char buf[MAXPATHLEN * 3];
+
+ if (getmntent_r (last_mntent, &mnt, buf, sizeof (buf)) == NULL)
+ return -1;
+
+ if (hasmntopt (&mnt, "ro")
+ || hasmntopt (&mnt, "noexec")
+ || access (mnt.mnt_dir, W_OK))
+ continue;
+
+ fd = open_temp_exec_file_dir (mnt.mnt_dir);
+
+ if (fd != -1)
+ return fd;
+ }
+}
+#endif /* HAVE_MNTENT */
+
+/* Instructions to look for a location to hold a temporary file that
+ can be mapped in for execution. */
+static struct
+{
+ int (*func)(const char *);
+ const char *arg;
+ int repeat;
+} open_temp_exec_file_opts[] = {
+ { open_temp_exec_file_env, "TMPDIR", 0 },
+ { open_temp_exec_file_dir, "/tmp", 0 },
+ { open_temp_exec_file_dir, "/var/tmp", 0 },
+ { open_temp_exec_file_dir, "/dev/shm", 0 },
+ { open_temp_exec_file_env, "HOME", 0 },
+#ifdef HAVE_MNTENT
+ { open_temp_exec_file_mnt, "/etc/mtab", 1 },
+ { open_temp_exec_file_mnt, "/proc/mounts", 1 },
+#endif /* HAVE_MNTENT */
+};
+
+/* Current index into open_temp_exec_file_opts. */
+static int open_temp_exec_file_opts_idx = 0;
+
+/* Reset a current multi-call func, then advance to the next entry.
+ If we're at the last one, go back to the first and return nonzero;
+ otherwise return zero. */
+static int
+open_temp_exec_file_opts_next (void)
+{
+ if (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat)
+ open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func (NULL);
+
+ open_temp_exec_file_opts_idx++;
+ if (open_temp_exec_file_opts_idx
+ == (sizeof (open_temp_exec_file_opts)
+ / sizeof (*open_temp_exec_file_opts)))
+ {
+ open_temp_exec_file_opts_idx = 0;
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return a file descriptor of a temporary zero-sized file in a
+ writable and executable filesystem. */
+static int
+open_temp_exec_file (void)
+{
+ int fd;
+
+ do
+ {
+ fd = open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func
+ (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].arg);
+
+ if (!open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat
+ || fd == -1)
+ {
+ if (open_temp_exec_file_opts_next ())
+ break;
+ }
+ }
+ while (fd == -1);
+
+ return fd;
+}
+
+/* Map in a chunk of memory from the temporary exec file into separate
+ locations in the virtual memory address space, one writable and one
+ executable. Returns the address of the writable portion, after
+ storing an offset to the corresponding executable portion at the
+ last word of the requested chunk. */
+static void *
+dlmmap_locked (void *start, size_t length, int prot, int flags, off_t offset)
+{
+ void *ptr;
+
+ if (execfd == -1)
+ {
+ open_temp_exec_file_opts_idx = 0;
+ retry_open:
+ execfd = open_temp_exec_file ();
+ if (execfd == -1)
+ return MFAIL;
+ }
+
+ offset = execsize;
+
+ if (ftruncate (execfd, offset + length))
+ return MFAIL;
+
+ flags &= ~(MAP_PRIVATE | MAP_ANONYMOUS);
+ flags |= MAP_SHARED;
+
+ ptr = mmap (NULL, length, (prot & ~PROT_WRITE) | PROT_EXEC,
+ flags, execfd, offset);
+ if (ptr == MFAIL)
+ {
+ if (!offset)
+ {
+ close (execfd);
+ goto retry_open;
+ }
+ ftruncate (execfd, offset);
+ return MFAIL;
+ }
+ else if (!offset
+ && open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat)
+ open_temp_exec_file_opts_next ();
+
+ start = mmap (start, length, prot, flags, execfd, offset);
+
+ if (start == MFAIL)
+ {
+ munmap (ptr, length);
+ ftruncate (execfd, offset);
+ return start;
+ }
+
+ mmap_exec_offset ((char *)start, length) = (char*)ptr - (char*)start;
+
+ execsize += length;
+
+ return start;
+}
+
+/* Map in a writable and executable chunk of memory if possible.
+ Failing that, fall back to dlmmap_locked. */
+static void *
+dlmmap (void *start, size_t length, int prot,
+ int flags, int fd, off_t offset)
+{
+ void *ptr;
+
+ assert (start == NULL && length % malloc_getpagesize == 0
+ && prot == (PROT_READ | PROT_WRITE)
+ && flags == (MAP_PRIVATE | MAP_ANONYMOUS)
+ && fd == -1 && offset == 0);
+
+#if FFI_CLOSURE_TEST
+ printf ("mapping in %zi\n", length);
+#endif
+
+ if (execfd == -1 && is_emutramp_enabled ())
+ {
+ ptr = mmap (start, length, prot & ~PROT_EXEC, flags, fd, offset);
+ return ptr;
+ }
+
+ if (execfd == -1 && !is_selinux_enabled ())
+ {
+ ptr = mmap (start, length, prot | PROT_EXEC, flags, fd, offset);
+
+ if (ptr != MFAIL || (errno != EPERM && errno != EACCES))
+ /* Cool, no need to mess with separate segments. */
+ return ptr;
+
+ /* If MREMAP_DUP is ever introduced and implemented, try mmap
+ with ((prot & ~PROT_WRITE) | PROT_EXEC) and mremap with
+ MREMAP_DUP and prot at this point. */
+ }
+
+ if (execsize == 0 || execfd == -1)
+ {
+ pthread_mutex_lock (&open_temp_exec_file_mutex);
+ ptr = dlmmap_locked (start, length, prot, flags, offset);
+ pthread_mutex_unlock (&open_temp_exec_file_mutex);
+
+ return ptr;
+ }
+
+ return dlmmap_locked (start, length, prot, flags, offset);
+}
+
+/* Release memory at the given address, as well as the corresponding
+ executable page if it's separate. */
+static int
+dlmunmap (void *start, size_t length)
+{
+ /* We don't bother decreasing execsize or truncating the file, since
+ we can't quite tell whether we're unmapping the end of the file.
+ We don't expect frequent deallocation anyway. If we did, we
+ could locate pages in the file by writing to the pages being
+ deallocated and checking that the file contents change.
+ Yuck. */
+ msegmentptr seg = segment_holding (gm, start);
+ void *code;
+
+#if FFI_CLOSURE_TEST
+ printf ("unmapping %zi\n", length);
+#endif
+
+ if (seg && (code = add_segment_exec_offset (start, seg)) != start)
+ {
+ int ret = munmap (code, length);
+ if (ret)
+ return ret;
+ }
+
+ return munmap (start, length);
+}
+
+#if FFI_CLOSURE_FREE_CODE
+/* Return segment holding given code address. */
+static msegmentptr
+segment_holding_code (mstate m, char* addr)
+{
+ msegmentptr sp = &m->seg;
+ for (;;) {
+ if (addr >= add_segment_exec_offset (sp->base, sp)
+ && addr < add_segment_exec_offset (sp->base, sp) + sp->size)
+ return sp;
+ if ((sp = sp->next) == 0)
+ return 0;
+ }
+}
+#endif
+
+#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */
+
+/* Allocate a chunk of memory with the given size. Returns a pointer
+ to the writable address, and sets *CODE to the executable
+ corresponding virtual address. */
+void *
+ffi_closure_alloc (size_t size, void **code)
+{
+ void *ptr;
+
+ if (!code)
+ return NULL;
+
+ ptr = dlmalloc (size);
+
+ if (ptr)
+ {
+ msegmentptr seg = segment_holding (gm, ptr);
+
+ *code = add_segment_exec_offset (ptr, seg);
+ }
+
+ return ptr;
+}
+
+/* Release a chunk of memory allocated with ffi_closure_alloc. If
+ FFI_CLOSURE_FREE_CODE is nonzero, the given address can be the
+ writable or the executable address given. Otherwise, only the
+ writable address can be provided here. */
+void
+ffi_closure_free (void *ptr)
+{
+#if FFI_CLOSURE_FREE_CODE
+ msegmentptr seg = segment_holding_code (gm, ptr);
+
+ if (seg)
+ ptr = sub_segment_exec_offset (ptr, seg);
+#endif
+
+ dlfree (ptr);
+}
+
+
+#if FFI_CLOSURE_TEST
+/* Do some internal sanity testing to make sure allocation and
+ deallocation of pages are working as intended. */
+int main ()
+{
+ void *p[3];
+#define GET(idx, len) do { p[idx] = dlmalloc (len); printf ("allocated %zi for p[%i]\n", (len), (idx)); } while (0)
+#define PUT(idx) do { printf ("freeing p[%i]\n", (idx)); dlfree (p[idx]); } while (0)
+ GET (0, malloc_getpagesize / 2);
+ GET (1, 2 * malloc_getpagesize - 64 * sizeof (void*));
+ PUT (1);
+ GET (1, 2 * malloc_getpagesize);
+ GET (2, malloc_getpagesize / 2);
+ PUT (1);
+ PUT (0);
+ PUT (2);
+ return 0;
+}
+#endif /* FFI_CLOSURE_TEST */
+# else /* ! FFI_MMAP_EXEC_WRIT */
+
+/* On many systems, memory returned by malloc is writable and
+ executable, so just use it. */
+
+#include <stdlib.h>
+
+void *
+ffi_closure_alloc (size_t size, void **code)
+{
+ if (!code)
+ return NULL;
+
+ return *code = malloc (size);
+}
+
+void
+ffi_closure_free (void *ptr)
+{
+ free (ptr);
+}
+
+# endif /* ! FFI_MMAP_EXEC_WRIT */
+#endif /* FFI_CLOSURES */
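Together with ffi_prep_closure_loc in arm/ffi.c above, ffi_closure_alloc and ffi_closure_free are used in the standard libffi closure pattern: allocate a writable/executable pair, prepare the closure at the writable address, and call through the executable one. A sketch using only the public API (the handler is illustrative):

#include <ffi.h>
#include <stdio.h>

static void handler(ffi_cif *cif, void *ret, void **args, void *user_data)
{
  (void)cif; (void)user_data;
  *(ffi_arg *)ret = *(int *)args[0] + 1;  /* integral returns widen to ffi_arg */
}

int main(void)
{
  void *codeloc;
  ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &codeloc);
  ffi_cif cif;
  ffi_type *args[1] = { &ffi_type_sint };

  if (closure
      && ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, args) == FFI_OK
      && ffi_prep_closure_loc(closure, &cif, handler, NULL, codeloc) == FFI_OK)
    {
      int (*fn)(int) = (int (*)(int)) codeloc;
      printf("%d\n", fn(41));  /* 42 */
    }
  if (closure)
    ffi_closure_free(closure);
  return 0;
}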
diff -Nru orig/Modules/_ctypes/libffi_ios/debug.c modified/Modules/_ctypes/libffi_ios/debug.c
--- orig/Modules/_ctypes/libffi_ios/debug.c 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/debug.c 2015-03-12 21:34:00.000000000 +0800
@@ -0,0 +1,64 @@
+/* -----------------------------------------------------------------------
+ debug.c - Copyright (c) 1996 Red Hat, Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#include <ffi.h>
+#include <ffi_common.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+/* General debugging routines */
+
+void ffi_stop_here(void)
+{
+ /* This function is only useful for debugging purposes.
+ Place a breakpoint on ffi_stop_here to be notified of
+ significant events. */
+}
+
+/* This function should only be called via the FFI_ASSERT() macro */
+
+void ffi_assert(char *expr, char *file, int line)
+{
+ fprintf(stderr, "ASSERTION FAILURE: %s at %s:%d\n", expr, file, line);
+ ffi_stop_here();
+ abort();
+}
+
+/* Perform a sanity check on an ffi_type structure */
+
+void ffi_type_test(ffi_type *a, char *file, int line)
+{
+ FFI_ASSERT_AT(a != NULL, file, line);
+
+ FFI_ASSERT_AT(a->type <= FFI_TYPE_LAST, file, line);
+ FFI_ASSERT_AT(a->type == FFI_TYPE_VOID || a->size > 0, file, line);
+ FFI_ASSERT_AT(a->type == FFI_TYPE_VOID || a->alignment > 0, file, line);
+ FFI_ASSERT_AT((a->type != FFI_TYPE_STRUCT && a->type != FFI_TYPE_COMPLEX)
+ || a->elements != NULL, file, line);
+ FFI_ASSERT_AT(a->type != FFI_TYPE_COMPLEX
+ || (a->elements != NULL
+ && a->elements[0] != NULL && a->elements[1] == NULL),
+ file, line);
+
+}
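ffi_assert and ffi_type_test are only reached through macros defined in ffi_common.h, which is not part of this hunk. A sketch of their shape, assuming the upstream definitions (they compile to nothing unless FFI_DEBUG is defined):

/* Sketch, assuming upstream ffi_common.h: all three are no-ops in
   non-debug builds. */
#ifdef FFI_DEBUG
# define FFI_ASSERT(x)            ((x) ? (void)0 : ffi_assert(#x, __FILE__, __LINE__))
# define FFI_ASSERT_AT(x, f, l)   ((x) ? (void)0 : ffi_assert(#x, (f), (l)))
# define FFI_ASSERT_VALID_TYPE(x) ffi_type_test(x, __FILE__, __LINE__)
#else
# define FFI_ASSERT(x)
# define FFI_ASSERT_AT(x, f, l)
# define FFI_ASSERT_VALID_TYPE(x)
#endif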
diff -Nru orig/Modules/_ctypes/libffi_ios/dlmalloc.c modified/Modules/_ctypes/libffi_ios/dlmalloc.c
--- orig/Modules/_ctypes/libffi_ios/dlmalloc.c 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/dlmalloc.c 2015-03-12 21:34:00.000000000 +0800
@@ -0,0 +1,5161 @@
+/*
+ This is a version (aka dlmalloc) of malloc/free/realloc written by
+ Doug Lea and released to the public domain, as explained at
+ http://creativecommons.org/licenses/publicdomain. Send questions,
+ comments, complaints, performance data, etc to dl@cs.oswego.edu
+
+* Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee)
+
+ Note: There may be an updated version of this malloc obtainable at
+ ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+ Check before installing!
+
+* Quickstart
+
+ This library is all in one file to simplify the most common usage:
+ ftp it, compile it (-O3), and link it into another program. All of
+ the compile-time options default to reasonable values for use on
+ most platforms. You might later want to step through various
+ compile-time and dynamic tuning options.
+
+ For convenience, an include file for code using this malloc is at:
+ ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h
+ You don't really need this .h file unless you call functions not
+ defined in your system include files. The .h file contains only the
+ excerpts from this file needed for using this malloc on ANSI C/C++
+ systems, so long as you haven't changed compile-time options about
+ naming and tuning parameters. If you do, then you can create your
+ own malloc.h that does include all settings by cutting at the point
+ indicated below. Note that you may already by default be using a C
+ library containing a malloc that is based on some version of this
+ malloc (for example in linux). You might still want to use the one
+ in this file to customize settings or to avoid overheads associated
+ with library versions.
+
+* Vital statistics:
+
+ Supported pointer/size_t representation: 4 or 8 bytes
+ size_t MUST be an unsigned type of the same width as
+ pointers. (If you are using an ancient system that declares
+ size_t as a signed type, or need it to be a different width
+ than pointers, you can use a previous release of this malloc
+ (e.g. 2.7.2) supporting these.)
+
+ Alignment: 8 bytes (default)
+ This suffices for nearly all current machines and C compilers.
+ However, you can define MALLOC_ALIGNMENT to be wider than this
+ if necessary (up to 128bytes), at the expense of using more space.
+
+ Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes)
+ 8 or 16 bytes (if 8byte sizes)
+ Each malloced chunk has a hidden word of overhead holding size
+ and status information, and additional cross-check word
+ if FOOTERS is defined.
+
+ Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead)
+ 8-byte ptrs: 32 bytes (including overhead)
+
+ Even a request for zero bytes (i.e., malloc(0)) returns a
+ pointer to something of the minimum allocatable size.
+ The maximum overhead wastage (i.e., number of extra bytes
+ allocated than were requested in malloc) is less than or equal
+ to the minimum size, except for requests >= mmap_threshold that
+ are serviced via mmap(), where the worst case wastage is about
+ 32 bytes plus the remainder from a system page (the minimal
+ mmap unit); typically 4096 or 8192 bytes.
+
+ Security: static-safe; optionally more or less
+ The "security" of malloc refers to the ability of malicious
+ code to accentuate the effects of errors (for example, freeing
+ space that is not currently malloc'ed or overwriting past the
+ ends of chunks) in code that calls malloc. This malloc
+ guarantees not to modify any memory locations below the base of
+ heap, i.e., static variables, even in the presence of usage
+ errors. The routines additionally detect most improper frees
+ and reallocs. All this holds as long as the static bookkeeping
+ for malloc itself is not corrupted by some other means. This
+ is only one aspect of security -- these checks do not, and
+ cannot, detect all possible programming errors.
+
+ If FOOTERS is defined nonzero, then each allocated chunk
+ carries an additional check word to verify that it was malloced
+ from its space. These check words are the same within each
+ execution of a program using malloc, but differ across
+ executions, so externally crafted fake chunks cannot be
+ freed. This improves security by rejecting frees/reallocs that
+ could corrupt heap memory, in addition to the checks preventing
+ writes to statics that are always on. This may further improve
+ security at the expense of time and space overhead. (Note that
+ FOOTERS may also be worth using with MSPACES.)
+
+ By default detected errors cause the program to abort (calling
+ "abort()"). You can override this to instead proceed past
+ errors by defining PROCEED_ON_ERROR. In this case, a bad free
+ has no effect, and a malloc that encounters a bad address
+ caused by user overwrites will ignore the bad address by
+ dropping pointers and indices to all known memory. This may
+ be appropriate for programs that should continue if at all
+ possible in the face of programming errors, although they may
+ run out of memory because dropped memory is never reclaimed.
+
+ If you don't like either of these options, you can define
+ CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
+ else. And if you are sure that your program using malloc has
+ no errors or vulnerabilities, you can define INSECURE to 1,
+ which might (or might not) provide a small performance improvement.
+
+ Thread-safety: NOT thread-safe unless USE_LOCKS defined
+ When USE_LOCKS is defined, each public call to malloc, free,
+ etc is surrounded with either a pthread mutex or a win32
+ spinlock (depending on WIN32). This is not especially fast, and
+ can be a major bottleneck. It is designed only to provide
+ minimal protection in concurrent environments, and to provide a
+ basis for extensions. If you are using malloc in a concurrent
+ program, consider instead using ptmalloc, which is derived from
+ a version of this malloc. (See http://www.malloc.de).
+
+ System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
+ This malloc can use unix sbrk or any emulation (invoked using
+ the CALL_MORECORE macro) and/or mmap/munmap or any emulation
+ (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
+ memory. On most unix systems, it tends to work best if both
+ MORECORE and MMAP are enabled. On Win32, it uses emulations
+ based on VirtualAlloc. It also uses common C library functions
+ like memset.
+
+ Compliance: I believe it is compliant with the Single Unix Specification
+ (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
+ others as well.
+
+* Overview of algorithms
+
+ This is not the fastest, most space-conserving, most portable, or
+ most tunable malloc ever written. However it is among the fastest
+ while also being among the most space-conserving, portable and
+ tunable. Consistent balance across these factors results in a good
+ general-purpose allocator for malloc-intensive programs.
+
+ In most ways, this malloc is a best-fit allocator. Generally, it
+ chooses the best-fitting existing chunk for a request, with ties
+ broken in approximately least-recently-used order. (This strategy
+ normally maintains low fragmentation.) However, for requests less
+ than 256bytes, it deviates from best-fit when there is not an
+ exactly fitting available chunk by preferring to use space adjacent
+ to that used for the previous small request, as well as by breaking
+ ties in approximately most-recently-used order. (These enhance
+ locality of series of small allocations.) And for very large requests
+ (>= 256Kb by default), it relies on system memory mapping
+ facilities, if supported. (This helps avoid carrying around and
+ possibly fragmenting memory used only for large chunks.)
+
+ All operations (except malloc_stats and mallinfo) have execution
+ times that are bounded by a constant factor of the number of bits in
+ a size_t, not counting any clearing in calloc or copying in realloc,
+ or actions surrounding MORECORE and MMAP that have times
+ proportional to the number of non-contiguous regions returned by
+ system allocation routines, which is often just 1.
+
+ The implementation is not very modular and seriously overuses
+ macros. Perhaps someday all C compilers will do as good a job
+ inlining modular code as can now be done by brute-force expansion,
+ but now, enough of them seem not to.
+
+ Some compilers issue a lot of warnings about code that is
+ dead/unreachable only on some platforms, and also about intentional
+ uses of negation on unsigned types. All known cases of each can be
+ ignored.
+
+ For a longer but out of date high-level description, see
+ http://gee.cs.oswego.edu/dl/html/malloc.html
+
+* MSPACES
+ If MSPACES is defined, then in addition to malloc, free, etc.,
+ this file also defines mspace_malloc, mspace_free, etc. These
+ are versions of malloc routines that take an "mspace" argument
+ obtained using create_mspace, to control all internal bookkeeping.
+ If ONLY_MSPACES is defined, only these versions are compiled.
+ So if you would like to use this allocator for only some allocations,
+ and your system malloc for others, you can compile with
+ ONLY_MSPACES and then do something like...
+ static mspace mymspace = create_mspace(0,0); // for example
+ #define mymalloc(bytes) mspace_malloc(mymspace, bytes)
+
+ (Note: If you only need one instance of an mspace, you can instead
+ use "USE_DL_PREFIX" to relabel the global malloc.)
+
+ You can similarly create thread-local allocators by storing
+ mspaces as thread-locals. For example:
+ static __thread mspace tlms = 0;
+ void* tlmalloc(size_t bytes) {
+ if (tlms == 0) tlms = create_mspace(0, 0);
+ return mspace_malloc(tlms, bytes);
+ }
+ void tlfree(void* mem) { mspace_free(tlms, mem); }
+
+ Unless FOOTERS is defined, each mspace is completely independent.
+ You cannot allocate from one and free to another (although
+ conformance is only weakly checked, so usage errors are not always
+ caught). If FOOTERS is defined, then each chunk carries around a tag
+ indicating its originating mspace, and frees are directed to their
+ originating spaces.
+
+ ------------------------- Compile-time options ---------------------------
+
+Be careful in setting #define values for numerical constants of type
+size_t. On some systems, literal values are not automatically extended
+to size_t precision unless they are explicitly cast.
+
+WIN32 default: defined if _WIN32 defined
+ Defining WIN32 sets up defaults for MS environment and compilers.
+ Otherwise defaults are for unix.
+
+MALLOC_ALIGNMENT default: (size_t)8
+ Controls the minimum alignment for malloc'ed chunks. It must be a
+ power of two and at least 8, even on machines for which smaller
+ alignments would suffice. It may be defined as larger than this
+ though. Note however that code and data structures are optimized for
+ the case of 8-byte alignment.
+
+MSPACES default: 0 (false)
+ If true, compile in support for independent allocation spaces.
+ This is only supported if HAVE_MMAP is true.
+
+ONLY_MSPACES default: 0 (false)
+ If true, only compile in mspace versions, not regular versions.
+
+USE_LOCKS default: 0 (false)
+ Causes each call to each public routine to be surrounded with
+ pthread or WIN32 mutex lock/unlock. (If set true, this can be
+ overridden on a per-mspace basis for mspace versions.)
+
+FOOTERS default: 0
+ If true, provide extra checking and dispatching by placing
+ information in the footers of allocated chunks. This adds
+ space and time overhead.
+
+INSECURE default: 0
+ If true, omit checks for usage errors and heap space overwrites.
+
+USE_DL_PREFIX default: NOT defined
+ Causes compiler to prefix all public routines with the string 'dl'.
+ This can be useful when you only want to use this malloc in one part
+ of a program, using your regular system malloc elsewhere.
+
+ABORT default: defined as abort()
+ Defines how to abort on failed checks. On most systems, a failed
+ check cannot die with an "assert" or even print an informative
+ message, because the underlying print routines in turn call malloc,
+ which will fail again. Generally, the best policy is to simply call
+ abort(). It's not very useful to do more than this because many
+ errors due to overwriting will show up as address faults (null, odd
+ addresses etc) rather than malloc-triggered checks, so will also
+ abort. Also, most compilers know that abort() does not return, so
+ can better optimize code conditionally calling it.
+
+PROCEED_ON_ERROR default: defined as 0 (false)
+ Controls whether detected bad addresses cause them to be bypassed
+ rather than aborting. If set, detected bad arguments to free and
+ realloc are ignored. And all bookkeeping information is zeroed out
+ upon a detected overwrite of freed heap space, thus losing the
+ ability to ever return it from malloc again, but enabling the
+ application to proceed. If PROCEED_ON_ERROR is defined, the
+ static variable malloc_corruption_error_count is compiled in
+ and can be examined to see if errors have occurred. This option
+ generates slower code than the default abort policy.
+
+DEBUG default: NOT defined
+ The DEBUG setting is mainly intended for people trying to modify
+ this code or diagnose problems when porting to new platforms.
+ However, it may also be able to better isolate user errors than just
+ using runtime checks. The assertions in the check routines spell
+ out in more detail the assumptions and invariants underlying the
+ algorithms. The checking is fairly extensive, and will slow down
+ execution noticeably. Calling malloc_stats or mallinfo with DEBUG
+ set will attempt to check every non-mmapped allocated and free chunk
+ in the course of computing the summaries.
+
+ABORT_ON_ASSERT_FAILURE default: defined as 1 (true)
+ Debugging assertion failures can be nearly impossible if your
+ version of the assert macro causes malloc to be called, which will
+ lead to a cascade of further failures, blowing the runtime stack.
+ ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
+ which will usually make debugging easier.
+
+MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32
+ The action to take before "return 0" when malloc fails to
+ return memory because none is available.
+
+HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES
+ True if this system supports sbrk or an emulation of it.
+
+MORECORE default: sbrk
+ The name of the sbrk-style system routine to call to obtain more
+ memory. See below for guidance on writing custom MORECORE
+ functions. The type of the argument to sbrk/MORECORE varies across
+ systems. It cannot be size_t, because it supports negative
+ arguments, so it is normally the signed type of the same width as
+ size_t (sometimes declared as "intptr_t"). It doesn't much matter
+ though. Internally, we only call it with arguments less than half
+ the max value of a size_t, which should work across all reasonable
+ possibilities, although sometimes generating compiler warnings. See
+ near the end of this file for guidelines for creating a custom
+ version of MORECORE.
+
+MORECORE_CONTIGUOUS default: 1 (true)
+ If true, take advantage of fact that consecutive calls to MORECORE
+ with positive arguments always return contiguous increasing
+ addresses. This is true of unix sbrk. It does not hurt too much to
+ set it true anyway, since malloc copes with non-contiguities.
+ Setting it false when definitely non-contiguous saves time
+ and possibly wasted space it would take to discover this though.
+
+MORECORE_CANNOT_TRIM default: NOT defined
+ True if MORECORE cannot release space back to the system when given
+ negative arguments. This is generally necessary only if you are
+ using a hand-crafted MORECORE function that cannot handle negative
+ arguments.
+
+HAVE_MMAP default: 1 (true)
+ True if this system supports mmap or an emulation of it. If so, and
+ HAVE_MORECORE is not true, MMAP is used for all system
+ allocation. If set and HAVE_MORECORE is true as well, MMAP is
+ primarily used to directly allocate very large blocks. It is also
+ used as a backup strategy in cases where MORECORE fails to provide
+ space from system. Note: A single call to MUNMAP is assumed to be
+ able to unmap memory that may have been allocated using multiple calls
+ to MMAP, so long as they are adjacent.
+
+HAVE_MREMAP default: 1 on linux, else 0
+ If true realloc() uses mremap() to re-allocate large blocks and
+ extend or shrink allocation spaces.
+
+MMAP_CLEARS default: 1 on unix
+ True if mmap clears memory so calloc doesn't need to. This is true
+ for standard unix mmap using /dev/zero.
+
+USE_BUILTIN_FFS default: 0 (i.e., not used)
+ Causes malloc to use the builtin ffs() function to compute indices.
+ Some compilers may recognize and intrinsify ffs to be faster than the
+ supplied C version. Also, the case of x86 using gcc is special-cased
+ to an asm instruction, so is already as fast as it can be, and so
+ this setting has no effect. (On most x86s, the asm version is only
+ slightly faster than the C version.)
+
+malloc_getpagesize default: derive from system includes, or 4096.
+ The system page size. To the extent possible, this malloc manages
+ memory from the system in page-size units. This may be (and
+ usually is) a function rather than a constant. This is ignored
+ if WIN32, where page size is determined using getSystemInfo during
+ initialization.
+
+USE_DEV_RANDOM default: 0 (i.e., not used)
+ Causes malloc to use /dev/random to initialize secure magic seed for
+ stamping footers. Otherwise, the current time is used.
+
+NO_MALLINFO default: 0
+ If defined, don't compile "mallinfo". This can be a simple way
+ of dealing with mismatches between system declarations and
+ those in this file.
+
+MALLINFO_FIELD_TYPE default: size_t
+ The type of the fields in the mallinfo struct. This was originally
+ defined as "int" in SVID etc, but is more usefully defined as
+ size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set
+
+REALLOC_ZERO_BYTES_FREES default: not defined
+ This should be set if a call to realloc with zero bytes should
+ be the same as a call to free. Some people think it should. Otherwise,
+ since this malloc returns a unique pointer for malloc(0), so does
+ realloc(p, 0).
+
+LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
+LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H
+LACKS_STDLIB_H default: NOT defined unless on WIN32
+ Define these if your system does not have these header files.
+ You might need to manually insert some of the declarations they provide.
+
+DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS,
+ system_info.dwAllocationGranularity in WIN32,
+ otherwise 64K.
+ Also settable using mallopt(M_GRANULARITY, x)
+ The unit for allocating and deallocating memory from the system. On
+ most systems with contiguous MORECORE, there is no reason to
+ make this more than a page. However, systems with MMAP tend to
+ either require or encourage larger granularities. You can increase
+ this value to prevent system allocation functions from being called so
+ often, especially if they are slow. The value must be at least one
+ page and must be a power of two. Setting to 0 causes initialization
+ to either page size or win32 region size. (Note: In previous
+ versions of malloc, the equivalent of this option was called
+ "TOP_PAD")
+
+DEFAULT_TRIM_THRESHOLD default: 2MB
+ Also settable using mallopt(M_TRIM_THRESHOLD, x)
+ The maximum amount of unused top-most memory to keep before
+ releasing via malloc_trim in free(). Automatic trimming is mainly
+ useful in long-lived programs using contiguous MORECORE. Because
+ trimming via sbrk can be slow on some systems, and can sometimes be
+ wasteful (in cases where programs immediately afterward allocate
+ more large chunks) the value should be high enough so that your
+ overall system performance would improve by releasing this much
+ memory. As a rough guide, you might set to a value close to the
+ average size of a process (program) running on your system.
+ Releasing this much memory would allow such a process to run in
+ memory. Generally, it is worth tuning trim thresholds when a
+ program undergoes phases where several large chunks are allocated
+ and released in ways that can reuse each other's storage, perhaps
+ mixed with phases where there are no such chunks at all. The trim
+ value must be greater than page size to have any useful effect. To
+ disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
+ some people use of mallocing a huge space and then freeing it at
+ program startup, in an attempt to reserve system memory, doesn't
+ have the intended effect under automatic trimming, since that memory
+ will immediately be returned to the system.
+
+DEFAULT_MMAP_THRESHOLD default: 256K
+ Also settable using mallopt(M_MMAP_THRESHOLD, x)
+ The request size threshold for using MMAP to directly service a
+ request. Requests of at least this size that cannot be allocated
+ using already-existing space will be serviced via mmap. (If enough
+ normal freed space already exists it is used instead.) Using mmap
+ segregates relatively large chunks of memory so that they can be
+ individually obtained and released from the host system. A request
+ serviced through mmap is never reused by any other request (at least
+ not directly; the system may just so happen to remap successive
+ requests to the same locations). Segregating space in this way has
+ the benefits that: Mmapped space can always be individually released
+ back to the system, which helps keep the system level memory demands
+ of a long-lived program low. Also, mapped memory doesn't become
+ `locked' between other chunks, as can happen with normally allocated
+ chunks, which means that even trimming via malloc_trim would not
+ release them. However, it has the disadvantage that the space
+ cannot be reclaimed, consolidated, and then used to service later
+ requests, as happens with normal chunks. The advantages of mmap
+ nearly always outweigh disadvantages for "large" chunks, but the
+ value of "large" may vary across systems. The default is an
+ empirically derived value that works well in most systems. You can
+ disable mmap by setting to MAX_SIZE_T.
+
+*/
+
+#ifndef WIN32
+#ifdef _WIN32
+#define WIN32 1
+#endif /* _WIN32 */
+#endif /* WIN32 */
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#define HAVE_MMAP 1
+#define HAVE_MORECORE 0
+#define LACKS_UNISTD_H
+#define LACKS_SYS_PARAM_H
+#define LACKS_SYS_MMAN_H
+#define LACKS_STRING_H
+#define LACKS_STRINGS_H
+#define LACKS_SYS_TYPES_H
+#define LACKS_ERRNO_H
+#define MALLOC_FAILURE_ACTION
+#define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
+#endif /* WIN32 */
+
+#ifdef __OS2__
+#define INCL_DOS
+#include <os2.h>
+#define HAVE_MMAP 1
+#define HAVE_MORECORE 0
+#define LACKS_SYS_MMAN_H
+#endif /* __OS2__ */
+
+#if defined(DARWIN) || defined(_DARWIN)
+/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
+#ifndef HAVE_MORECORE
+#define HAVE_MORECORE 0
+#define HAVE_MMAP 1
+#endif /* HAVE_MORECORE */
+#endif /* DARWIN */
+
+#ifndef LACKS_SYS_TYPES_H
+#include <sys/types.h>  /* For size_t */
+#endif /* LACKS_SYS_TYPES_H */
+
+/* The maximum possible size_t value has all bits set */
+#define MAX_SIZE_T (~(size_t)0)
+
+#ifndef ONLY_MSPACES
+#define ONLY_MSPACES 0
+#endif /* ONLY_MSPACES */
+#ifndef MSPACES
+#if ONLY_MSPACES
+#define MSPACES 1
+#else /* ONLY_MSPACES */
+#define MSPACES 0
+#endif /* ONLY_MSPACES */
+#endif /* MSPACES */
+#ifndef MALLOC_ALIGNMENT
+#define MALLOC_ALIGNMENT ((size_t)8U)
+#endif /* MALLOC_ALIGNMENT */
+#ifndef FOOTERS
+#define FOOTERS 0
+#endif /* FOOTERS */
+#ifndef ABORT
+#define ABORT abort()
+#endif /* ABORT */
+#ifndef ABORT_ON_ASSERT_FAILURE
+#define ABORT_ON_ASSERT_FAILURE 1
+#endif /* ABORT_ON_ASSERT_FAILURE */
+#ifndef PROCEED_ON_ERROR
+#define PROCEED_ON_ERROR 0
+#endif /* PROCEED_ON_ERROR */
+#ifndef USE_LOCKS
+#define USE_LOCKS 0
+#endif /* USE_LOCKS */
+#ifndef INSECURE
+#define INSECURE 0
+#endif /* INSECURE */
+#ifndef HAVE_MMAP
+#define HAVE_MMAP 1
+#endif /* HAVE_MMAP */
+#ifndef MMAP_CLEARS
+#define MMAP_CLEARS 1
+#endif /* MMAP_CLEARS */
+#ifndef HAVE_MREMAP
+#ifdef linux
+#define HAVE_MREMAP 1
+#else /* linux */
+#define HAVE_MREMAP 0
+#endif /* linux */
+#endif /* HAVE_MREMAP */
+#ifndef MALLOC_FAILURE_ACTION
+#define MALLOC_FAILURE_ACTION errno = ENOMEM;
+#endif /* MALLOC_FAILURE_ACTION */
+#ifndef HAVE_MORECORE
+#if ONLY_MSPACES
+#define HAVE_MORECORE 0
+#else /* ONLY_MSPACES */
+#define HAVE_MORECORE 1
+#endif /* ONLY_MSPACES */
+#endif /* HAVE_MORECORE */
+#if !HAVE_MORECORE
+#define MORECORE_CONTIGUOUS 0
+#else /* !HAVE_MORECORE */
+#ifndef MORECORE
+#define MORECORE sbrk
+#endif /* MORECORE */
+#ifndef MORECORE_CONTIGUOUS
+#define MORECORE_CONTIGUOUS 1
+#endif /* MORECORE_CONTIGUOUS */
+#endif /* HAVE_MORECORE */
+#ifndef DEFAULT_GRANULARITY
+#if MORECORE_CONTIGUOUS
+#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */
+#else /* MORECORE_CONTIGUOUS */
+#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
+#endif /* MORECORE_CONTIGUOUS */
+#endif /* DEFAULT_GRANULARITY */
+#ifndef DEFAULT_TRIM_THRESHOLD
+#ifndef MORECORE_CANNOT_TRIM
+#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
+#else /* MORECORE_CANNOT_TRIM */
+#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
+#endif /* MORECORE_CANNOT_TRIM */
+#endif /* DEFAULT_TRIM_THRESHOLD */
+#ifndef DEFAULT_MMAP_THRESHOLD
+#if HAVE_MMAP
+#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
+#else /* HAVE_MMAP */
+#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
+#endif /* HAVE_MMAP */
+#endif /* DEFAULT_MMAP_THRESHOLD */
+#ifndef USE_BUILTIN_FFS
+#define USE_BUILTIN_FFS 0
+#endif /* USE_BUILTIN_FFS */
+#ifndef USE_DEV_RANDOM
+#define USE_DEV_RANDOM 0
+#endif /* USE_DEV_RANDOM */
+#ifndef NO_MALLINFO
+#define NO_MALLINFO 0
+#endif /* NO_MALLINFO */
+#ifndef MALLINFO_FIELD_TYPE
+#define MALLINFO_FIELD_TYPE size_t
+#endif /* MALLINFO_FIELD_TYPE */
+
+/*
+ mallopt tuning options. SVID/XPG defines four standard parameter
+ numbers for mallopt, normally defined in malloc.h. None of these
+ are used in this malloc, so setting them has no effect. But this
+ malloc does support the following options.
+*/
+
+#define M_TRIM_THRESHOLD (-1)
+#define M_GRANULARITY (-2)
+#define M_MMAP_THRESHOLD (-3)
+
+/* ------------------------ Mallinfo declarations ------------------------ */
+
+#if !NO_MALLINFO
+/*
+ This version of malloc supports the standard SVID/XPG mallinfo
+ routine that returns a struct containing usage properties and
+ statistics. It should work on any system that has a
+ /usr/include/malloc.h defining struct mallinfo. The main
+ declaration needed is the mallinfo struct that is returned (by-copy)
+ by mallinfo(). The mallinfo struct contains a bunch of fields that
+ are not even meaningful in this version of malloc. These fields
+ are instead filled by mallinfo() with other numbers that might be of
+ interest.
+
+ HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
+ /usr/include/malloc.h file that includes a declaration of struct
+ mallinfo. If so, it is included; else a compliant version is
+ declared below. These must be precisely the same for mallinfo() to
+ work. The original SVID version of this struct, defined on most
+ systems with mallinfo, declares all fields as ints. But some others
+ define as unsigned long. If your system defines the fields using a
+ type of different width than listed here, you MUST #include your
+ system version and #define HAVE_USR_INCLUDE_MALLOC_H.
+*/
+
+/* #define HAVE_USR_INCLUDE_MALLOC_H */
+
+#ifdef HAVE_USR_INCLUDE_MALLOC_H
+#include "/usr/include/malloc.h"
+#else /* HAVE_USR_INCLUDE_MALLOC_H */
+
+/* HP-UX's stdlib.h redefines mallinfo unless _STRUCT_MALLINFO is defined */
+#define _STRUCT_MALLINFO
+
+struct mallinfo {
+ MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
+ MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
+ MALLINFO_FIELD_TYPE smblks; /* always 0 */
+ MALLINFO_FIELD_TYPE hblks; /* always 0 */
+ MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
+ MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
+ MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
+ MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
+ MALLINFO_FIELD_TYPE fordblks; /* total free space */
+ MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
+};
+
+#endif /* HAVE_USR_INCLUDE_MALLOC_H */
+#endif /* NO_MALLINFO */
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#if !ONLY_MSPACES
+
+/* ------------------- Declarations of public routines ------------------- */
+
+#ifndef USE_DL_PREFIX
+#define dlcalloc calloc
+#define dlfree free
+#define dlmalloc malloc
+#define dlmemalign memalign
+#define dlrealloc realloc
+#define dlvalloc valloc
+#define dlpvalloc pvalloc
+#define dlmallinfo mallinfo
+#define dlmallopt mallopt
+#define dlmalloc_trim malloc_trim
+#define dlmalloc_stats malloc_stats
+#define dlmalloc_usable_size malloc_usable_size
+#define dlmalloc_footprint malloc_footprint
+#define dlmalloc_max_footprint malloc_max_footprint
+#define dlindependent_calloc independent_calloc
+#define dlindependent_comalloc independent_comalloc
+#endif /* USE_DL_PREFIX */
+
+
+/*
+ malloc(size_t n)
+ Returns a pointer to a newly allocated chunk of at least n bytes, or
+ null if no space is available, in which case errno is set to ENOMEM
+ on ANSI C systems.
+
+ If n is zero, malloc returns a minimum-sized chunk. (The minimum
+ size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
+ systems.) Note that size_t is an unsigned type, so calls with
+ arguments that would be negative if signed are interpreted as
+ requests for huge amounts of space, which will often fail. The
+ maximum supported value of n differs across systems, but is in all
+ cases less than the maximum representable value of a size_t.
+*/
+void* dlmalloc(size_t);
+
+/*
+ free(void* p)
+ Releases the chunk of memory pointed to by p, that had been previously
+ allocated using malloc or a related routine such as realloc.
+ It has no effect if p is null. If p was not malloced or already
+ freed, free(p) will by default cause the current program to abort.
+*/
+void dlfree(void*);
+
+/*
+ calloc(size_t n_elements, size_t element_size);
+ Returns a pointer to n_elements * element_size bytes, with all locations
+ set to zero.
+*/
+void* dlcalloc(size_t, size_t);
+
+/*
+ realloc(void* p, size_t n)
+ Returns a pointer to a chunk of size n that contains the same data
+ as does chunk p up to the minimum of (n, p's size) bytes, or null
+ if no space is available.
+
+ The returned pointer may or may not be the same as p. The algorithm
+ prefers extending p in most cases when possible, otherwise it
+ employs the equivalent of a malloc-copy-free sequence.
+
+ If p is null, realloc is equivalent to malloc.
+
+ If space is not available, realloc returns null, errno is set (if on
+ ANSI) and p is NOT freed.
+
+ If n is for fewer bytes than already held by p, the newly unused
+ space is lopped off and freed if possible. realloc with a size
+ argument of zero (re)allocates a minimum-sized chunk.
+
+ The old unix realloc convention of allowing the last-free'd chunk
+ to be used as an argument to realloc is not supported.
+*/
+
+void* dlrealloc(void*, size_t);
+
+/*
+ memalign(size_t alignment, size_t n);
+ Returns a pointer to a newly allocated chunk of n bytes, aligned
+ in accord with the alignment argument.
+
+ The alignment argument should be a power of two. If the argument is
+ not a power of two, the nearest greater power is used.
+ 8-byte alignment is guaranteed by normal malloc calls, so don't
+ bother calling memalign with an argument of 8 or less.
+
+ Overreliance on memalign is a sure way to fragment space.
+*/
+void* dlmemalign(size_t, size_t);
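+
+/*
+  A hedged usage sketch (the size and alignment below are illustrative,
+  not from this file): obtain a 64-byte-aligned block, e.g. for
+  cache-line-sized structures:
+
+    void* p = dlmemalign(64, 1024);
+    assert(((size_t)p & 63) == 0);  // alignment guaranteed on success
+*/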
+
+/*
+ valloc(size_t n);
+ Equivalent to memalign(pagesize, n), where pagesize is the page
+ size of the system. If the pagesize is unknown, 4096 is used.
+*/
+void* dlvalloc(size_t);
+
+/*
+ mallopt(int parameter_number, int parameter_value)
+ Sets tunable parameters. The format is to provide a
+ (parameter-number, parameter-value) pair. mallopt then sets the
+ corresponding parameter to the argument value if it can (i.e., so
+ long as the value is meaningful), and returns 1 if successful else
+ 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
+ normally defined in malloc.h. None of these are used in this malloc,
+ so setting them has no effect. But this malloc also supports other
+ options in mallopt. See below for details. Briefly, supported
+ parameters are as follows (listed defaults are for "typical"
+ configurations).
+
+ Symbol param # default allowed param values
+ M_TRIM_THRESHOLD -1 2*1024*1024 any (MAX_SIZE_T disables)
+ M_GRANULARITY -2 page size any power of 2 >= page size
+ M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
+*/
+int dlmallopt(int, int);
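+
+/*
+  Illustrative tuning sketch (the values are examples only, not
+  recommendations):
+
+    dlmallopt(M_TRIM_THRESHOLD, 1024*1024); // trim when >1MB idle at top
+    dlmallopt(M_GRANULARITY,    65536);     // grab system memory 64K at a time
+*/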
+
+/*
+ malloc_footprint();
+ Returns the number of bytes obtained from the system. The total
+ number of bytes allocated by malloc, realloc etc., is less than this
+ value. Unlike mallinfo, this function returns only a precomputed
+ result, so can be called frequently to monitor memory consumption.
+ Even if locks are otherwise defined, this function does not use them,
+ so results might not be up to date.
+*/
+size_t dlmalloc_footprint(void);
+
+/*
+ malloc_max_footprint();
+ Returns the maximum number of bytes obtained from the system. This
+ value will be greater than current footprint if deallocated space
+ has been reclaimed by the system. The peak number of bytes allocated
+ by malloc, realloc etc., is less than this value. Unlike mallinfo,
+ this function returns only a precomputed result, so can be called
+ frequently to monitor memory consumption. Even if locks are
+ otherwise defined, this function does not use them, so results might
+ not be up to date.
+*/
+size_t dlmalloc_max_footprint(void);
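+
+/*
+  Since both calls return precomputed values, a cheap monitoring sketch
+  (illustrative) is simply:
+
+    size_t cur  = dlmalloc_footprint();
+    size_t peak = dlmalloc_max_footprint();
+    fprintf(stderr, "heap: %zu bytes now, %zu at peak\n", cur, peak);
+*/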
+
+#if !NO_MALLINFO
+/*
+ mallinfo()
+ Returns (by copy) a struct containing various summary statistics:
+
+ arena: current total non-mmapped bytes allocated from system
+ ordblks: the number of free chunks
+ smblks: always zero.
+ hblks: current number of mmapped regions
+ hblkhd: total bytes held in mmapped regions
+ usmblks: the maximum total allocated space. This will be greater
+ than current total if trimming has occurred.
+ fsmblks: always zero
+ uordblks: current total allocated space (normal or mmapped)
+ fordblks: total free space
+ keepcost: the maximum number of bytes that could ideally be released
+ back to system via malloc_trim. ("ideally" means that
+ it ignores page restrictions etc.)
+
+ Because these fields are ints, but internal bookkeeping may
+ be kept as longs, the reported values may wrap around zero and
+ thus be inaccurate.
+*/
+struct mallinfo dlmallinfo(void);
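+
+/*
+  Illustrative poll of the statistics described above (the fields are
+  MALLINFO_FIELD_TYPE, size_t by default here, hence the casts):
+
+    struct mallinfo mi = dlmallinfo();
+    fprintf(stderr, "in use: %zu free: %zu\n",
+            (size_t)mi.uordblks, (size_t)mi.fordblks);
+*/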
+#endif /* NO_MALLINFO */
+
+/*
+ independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
+
+ independent_calloc is similar to calloc, but instead of returning a
+ single cleared space, it returns an array of pointers to n_elements
+ independent elements that can hold contents of size elem_size, each
+ of which starts out cleared, and can be independently freed,
+ realloc'ed etc. The elements are guaranteed to be adjacently
+ allocated (this is not guaranteed to occur with multiple callocs or
+ mallocs), which may also improve cache locality in some
+ applications.
+
+ The "chunks" argument is optional (i.e., may be null, which is
+ probably the most typical usage). If it is null, the returned array
+ is itself dynamically allocated and should also be freed when it is
+ no longer needed. Otherwise, the chunks array must be of at least
+ n_elements in length. It is filled in with the pointers to the
+ chunks.
+
+ In either case, independent_calloc returns this pointer array, or
+ null if the allocation failed. If n_elements is zero and "chunks"
+ is null, it returns a chunk representing an array with zero elements
+ (which should be freed if not wanted).
+
+ Each element must be individually freed when it is no longer
+ needed. If you'd like to instead be able to free all at once, you
+ should instead use regular calloc and assign pointers into this
+ space to represent elements. (In this case though, you cannot
+ independently free elements.)
+
+ independent_calloc simplifies and speeds up implementations of many
+ kinds of pools. It may also be useful when constructing large data
+ structures that initially have a fixed number of fixed-sized nodes,
+ but the number is not known at compile time, and some of the nodes
+ may later need to be freed. For example:
+
+ struct Node { int item; struct Node* next; };
+
+ struct Node* build_list() {
+ struct Node** pool;
+ int n = read_number_of_nodes_needed();
+ if (n <= 0) return 0;
+ pool = (struct Node**)independent_calloc(n, sizeof(struct Node), 0);
+ if (pool == 0) die();
+ // organize into a linked list...
+ struct Node* first = pool[0];
+ for (int i = 0; i < n-1; ++i)
+ pool[i]->next = pool[i+1];
+ free(pool); // Can now free the array (or not, if it is needed later)
+ return first;
+ }
+*/
+void** dlindependent_calloc(size_t, size_t, void**);
+
+/*
+ independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
+
+ independent_comalloc allocates, all at once, a set of n_elements
+ chunks with sizes indicated in the "sizes" array. It returns
+ an array of pointers to these elements, each of which can be
+ independently freed, realloc'ed etc. The elements are guaranteed to
+ be adjacently allocated (this is not guaranteed to occur with
+ multiple callocs or mallocs), which may also improve cache locality
+ in some applications.
+
+ The "chunks" argument is optional (i.e., may be null). If it is null
+ the returned array is itself dynamically allocated and should also
+ be freed when it is no longer needed. Otherwise, the chunks array
+ must be of at least n_elements in length. It is filled in with the
+ pointers to the chunks.
+
+ In either case, independent_comalloc returns this pointer array, or
+ null if the allocation failed. If n_elements is zero and chunks is
+ null, it returns a chunk representing an array with zero elements
+ (which should be freed if not wanted).
+
+ Each element must be individually freed when it is no longer
+ needed. If you'd like to instead be able to free all at once, you
+ should instead use a single regular malloc, and assign pointers at
+ particular offsets in the aggregate space. (In this case though, you
+ cannot independently free elements.)
+
+ independent_comalloc differs from independent_calloc in that each
+ element may have a different size, and also that it does not
+ automatically clear elements.
+
+ independent_comalloc can be used to speed up allocation in cases
+ where several structs or objects must always be allocated at the
+ same time. For example:
+
+ struct Head { ... };
+ struct Foot { ... };
+
+ void send_message(char* msg) {
+ int msglen = strlen(msg);
+ size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
+ void* chunks[3];
+ if (independent_comalloc(3, sizes, chunks) == 0)
+ die();
+ struct Head* head = (struct Head*)(chunks[0]);
+ char* body = (char*)(chunks[1]);
+ struct Foot* foot = (struct Foot*)(chunks[2]);
+ // ...
+ }
+
+ In general though, independent_comalloc is worth using only for
+ larger values of n_elements. For small values, you probably won't
+ detect enough difference from series of malloc calls to bother.
+
+ Overuse of independent_comalloc can increase overall memory usage,
+ since it cannot reuse existing noncontiguous small chunks that
+ might be available for some of the elements.
+*/
+void** dlindependent_comalloc(size_t, size_t*, void**);
+
+
+/*
+ pvalloc(size_t n);
+ Equivalent to valloc(minimum-page-that-holds(n)), that is,
+ round up n to nearest pagesize.
+ */
+void* dlpvalloc(size_t);
+
+/*
+ malloc_trim(size_t pad);
+
+ If possible, gives memory back to the system (via negative arguments
+ to sbrk) if there is unused memory at the `high' end of the malloc
+ pool or in unused MMAP segments. You can call this after freeing
+ large blocks of memory to potentially reduce the system-level memory
+ requirements of a program. However, it cannot guarantee to reduce
+ memory. Under some allocation patterns, some large free blocks of
+ memory will be locked between two used chunks, so they cannot be
+ given back to the system.
+
+ The `pad' argument to malloc_trim represents the amount of free
+ trailing space to leave untrimmed. If this argument is zero, only
+ the minimum amount of memory to maintain internal data structures
+ will be left. Non-zero arguments can be supplied to maintain enough
+ trailing space to service future expected allocations without having
+ to re-obtain memory from the system.
+
+ Malloc_trim returns 1 if it actually released any memory, else 0.
+*/
+int dlmalloc_trim(size_t);
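+
+/*
+  Hedged usage sketch (big_buffer and the pad value are illustrative):
+  after dropping a large working set, return idle top-most memory while
+  keeping 64K of headroom for upcoming allocations:
+
+    free(big_buffer);
+    if (dlmalloc_trim((size_t)64 * 1024) == 0)
+      ; // nothing could be released (e.g., memory locked between chunks)
+*/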
+
+/*
+ malloc_usable_size(void* p);
+
+ Returns the number of bytes you can actually use in
+ an allocated chunk, which may be more than you requested (although
+ often not) due to alignment and minimum size constraints.
+ You can use this many bytes without worrying about
+ overwriting other allocated objects. This is not a particularly great
+ programming practice. malloc_usable_size can be more useful in
+ debugging and assertions, for example:
+
+ p = malloc(n);
+ assert(malloc_usable_size(p) >= 256);
+*/
+size_t dlmalloc_usable_size(void*);
+
+/*
+ malloc_stats();
+ Prints on stderr the amount of space obtained from the system (both
+ via sbrk and mmap), the maximum amount (which may be more than
+ current if malloc_trim and/or munmap got called), and the current
+ number of bytes allocated via malloc (or realloc, etc) but not yet
+ freed. Note that this is the number of bytes allocated, not the
+ number requested. It will be larger than the number requested
+ because of alignment and bookkeeping overhead. Because it includes
+ alignment wastage as being in use, this figure may be greater than
+ zero even when no user-level chunks are allocated.
+
+ The reported current and maximum system memory can be inaccurate if
+ a program makes other calls to system memory allocation functions
+ (normally sbrk) outside of malloc.
+
+ malloc_stats prints only the most commonly interesting statistics.
+ More information can be obtained by calling mallinfo.
+*/
+void dlmalloc_stats(void);
+
+#endif /* ONLY_MSPACES */
+
+#if MSPACES
+
+/*
+ mspace is an opaque type representing an independent
+ region of space that supports mspace_malloc, etc.
+*/
+typedef void* mspace;
+
+/*
+ create_mspace creates and returns a new independent space with the
+ given initial capacity, or, if 0, the default granularity size. It
+ returns null if there is no system memory available to create the
+ space. If argument locked is non-zero, the space uses a separate
+ lock to control access. The capacity of the space will grow
+ dynamically as needed to service mspace_malloc requests. You can
+ control the sizes of incremental increases of this space by
+ compiling with a different DEFAULT_GRANULARITY or dynamically
+ setting with mallopt(M_GRANULARITY, value).
+*/
+mspace create_mspace(size_t capacity, int locked);
+
+/*
+ destroy_mspace destroys the given space, and attempts to return all
+ of its memory back to the system, returning the total number of
+ bytes freed. After destruction, the results of access to all memory
+ used by the space become undefined.
+*/
+size_t destroy_mspace(mspace msp);
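+
+/*
+  Illustrative lifecycle sketch: give a subsystem its own space so that
+  all of its allocations can be released in one call (sizes arbitrary):
+
+    mspace ms = create_mspace(0, 0);  // default capacity, no locking
+    void* a = mspace_malloc(ms, 128);
+    void* b = mspace_malloc(ms, 4096);
+    // ... use a and b ...
+    destroy_mspace(ms);               // releases a, b, and all bookkeeping
+*/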
+
+/*
+ create_mspace_with_base uses the memory supplied as the initial base
+ of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
+ space is used for bookkeeping, so the capacity must be at least this
+ large. (Otherwise 0 is returned.) When this initial space is
+ exhausted, additional memory will be obtained from the system.
+ Destroying this space will deallocate all additionally allocated
+ space (if possible) but not the initial base.
+*/
+mspace create_mspace_with_base(void* base, size_t capacity, int locked);
+
+/*
+ mspace_malloc behaves as malloc, but operates within
+ the given space.
+*/
+void* mspace_malloc(mspace msp, size_t bytes);
+
+/*
+ mspace_free behaves as free, but operates within
+ the given space.
+
+ If compiled with FOOTERS==1, mspace_free is not actually needed.
+ free may be called instead of mspace_free because freed chunks from
+ any space are handled by their originating spaces.
+*/
+void mspace_free(mspace msp, void* mem);
+
+/*
+ mspace_realloc behaves as realloc, but operates within
+ the given space.
+
+ If compiled with FOOTERS==1, mspace_realloc is not actually
+ needed. realloc may be called instead of mspace_realloc because
+ realloced chunks from any space are handled by their originating
+ spaces.
+*/
+void* mspace_realloc(mspace msp, void* mem, size_t newsize);
+
+/*
+ mspace_calloc behaves as calloc, but operates within
+ the given space.
+*/
+void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
+
+/*
+ mspace_memalign behaves as memalign, but operates within
+ the given space.
+*/
+void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
+
+/*
+ mspace_independent_calloc behaves as independent_calloc, but
+ operates within the given space.
+*/
+void** mspace_independent_calloc(mspace msp, size_t n_elements,
+ size_t elem_size, void* chunks[]);
+
+/*
+ mspace_independent_comalloc behaves as independent_comalloc, but
+ operates within the given space.
+*/
+void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+ size_t sizes[], void* chunks[]);
+
+/*
+ mspace_footprint() returns the number of bytes obtained from the
+ system for this space.
+*/
+size_t mspace_footprint(mspace msp);
+
+/*
+ mspace_max_footprint() returns the peak number of bytes obtained from the
+ system for this space.
+*/
+size_t mspace_max_footprint(mspace msp);
+
+
+#if !NO_MALLINFO
+/*
+ mspace_mallinfo behaves as mallinfo, but reports properties of
+ the given space.
+*/
+struct mallinfo mspace_mallinfo(mspace msp);
+#endif /* NO_MALLINFO */
+
+/*
+ mspace_malloc_stats behaves as malloc_stats, but reports
+ properties of the given space.
+*/
+void mspace_malloc_stats(mspace msp);
+
+/*
+ mspace_trim behaves as malloc_trim, but
+ operates within the given space.
+*/
+int mspace_trim(mspace msp, size_t pad);
+
+/*
+ An alias for mallopt.
+*/
+int mspace_mallopt(int, int);
+
+#endif /* MSPACES */
+
+#ifdef __cplusplus
+}; /* end of extern "C" */
+#endif /* __cplusplus */
+
+/*
+ ========================================================================
+ To make a fully customizable malloc.h header file, cut everything
+ above this line, put into file malloc.h, edit to suit, and #include it
+ on the next line, as well as in programs that use this malloc.
+ ========================================================================
+*/
+
+/* #include "malloc.h" */
+
+/*------------------------------ internal #includes ---------------------- */
+
+#ifdef _MSC_VER
+#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
+#endif /* _MSC_VER */
+
+#include <stdio.h>       /* for printing in malloc_stats */
+
+#ifndef LACKS_ERRNO_H
+#include <errno.h>       /* for MALLOC_FAILURE_ACTION */
+#endif /* LACKS_ERRNO_H */
+#if FOOTERS
+#include <time.h>        /* for magic initialization */
+#endif /* FOOTERS */
+#ifndef LACKS_STDLIB_H
+#include <stdlib.h>      /* for abort() */
+#endif /* LACKS_STDLIB_H */
+#ifdef DEBUG
+#if ABORT_ON_ASSERT_FAILURE
+#define assert(x) if(!(x)) ABORT
+#else /* ABORT_ON_ASSERT_FAILURE */
+#include <assert.h>
+#endif /* ABORT_ON_ASSERT_FAILURE */
+#else /* DEBUG */
+#define assert(x)
+#endif /* DEBUG */
+#ifndef LACKS_STRING_H
+#include <string.h>      /* for memset etc */
+#endif /* LACKS_STRING_H */
+#if USE_BUILTIN_FFS
+#ifndef LACKS_STRINGS_H
+#include <strings.h>     /* for ffs */
+#endif /* LACKS_STRINGS_H */
+#endif /* USE_BUILTIN_FFS */
+#if HAVE_MMAP
+#ifndef LACKS_SYS_MMAN_H
+#include <sys/mman.h>    /* for mmap */
+#endif /* LACKS_SYS_MMAN_H */
+#ifndef LACKS_FCNTL_H
+#include <fcntl.h>
+#endif /* LACKS_FCNTL_H */
+#endif /* HAVE_MMAP */
+#if HAVE_MORECORE
+#ifndef LACKS_UNISTD_H
+#include <unistd.h>      /* for sbrk */
+#else /* LACKS_UNISTD_H */
+#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
+extern void* sbrk(ptrdiff_t);
+#endif /* FreeBSD etc */
+#endif /* LACKS_UNISTD_H */
+#endif /* HAVE_MORECORE */
+
+#ifndef WIN32
+#ifndef malloc_getpagesize
+# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
+# ifndef _SC_PAGE_SIZE
+# define _SC_PAGE_SIZE _SC_PAGESIZE
+# endif
+# endif
+# ifdef _SC_PAGE_SIZE
+# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
+# else
+# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
+ extern size_t getpagesize();
+# define malloc_getpagesize getpagesize()
+# else
+# ifdef WIN32 /* use supplied emulation of getpagesize */
+# define malloc_getpagesize getpagesize()
+# else
+# ifndef LACKS_SYS_PARAM_H
+# include <sys/param.h>
+# endif
+# ifdef EXEC_PAGESIZE
+# define malloc_getpagesize EXEC_PAGESIZE
+# else
+# ifdef NBPG
+# ifndef CLSIZE
+# define malloc_getpagesize NBPG
+# else
+# define malloc_getpagesize (NBPG * CLSIZE)
+# endif
+# else
+# ifdef NBPC
+# define malloc_getpagesize NBPC
+# else
+# ifdef PAGESIZE
+# define malloc_getpagesize PAGESIZE
+# else /* just guess */
+# define malloc_getpagesize ((size_t)4096U)
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+#endif
+#endif
+
+/* ------------------- size_t and alignment properties -------------------- */
+
+/* The byte and bit size of a size_t */
+#define SIZE_T_SIZE (sizeof(size_t))
+#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
+
+/* Some constants coerced to size_t */
+/* Annoying but necessary to avoid errors on some platforms */
+#define SIZE_T_ZERO ((size_t)0)
+#define SIZE_T_ONE ((size_t)1)
+#define SIZE_T_TWO ((size_t)2)
+#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
+#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
+#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
+#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
+
+/* The bit mask value corresponding to MALLOC_ALIGNMENT */
+#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
+
+/* True if address a has acceptable alignment */
+#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
+
+/* the number of bytes to offset an address to align it */
+#define align_offset(A)\
+ ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
+ ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
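+
+/*
+  Worked example (with the default MALLOC_ALIGNMENT of 8, so
+  CHUNK_ALIGN_MASK == 7):
+    align_offset(0x1005) == (8 - (0x1005 & 7)) & 7 == 3
+    align_offset(0x1008) == 0   // already aligned, no offset needed
+*/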
+
+/* -------------------------- MMAP preliminaries ------------------------- */
+
+/*
+ If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
+ checks to fail so compiler optimizer can delete code rather than
+ using so many "#if"s.
+*/
+
+
+/* MORECORE and MMAP must return MFAIL on failure */
+#define MFAIL ((void*)(MAX_SIZE_T))
+#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
+
+#if !HAVE_MMAP
+#define IS_MMAPPED_BIT (SIZE_T_ZERO)
+#define USE_MMAP_BIT (SIZE_T_ZERO)
+#define CALL_MMAP(s) MFAIL
+#define CALL_MUNMAP(a, s) (-1)
+#define DIRECT_MMAP(s) MFAIL
+
+#else /* HAVE_MMAP */
+#define IS_MMAPPED_BIT (SIZE_T_ONE)
+#define USE_MMAP_BIT (SIZE_T_ONE)
+
+#if !defined(WIN32) && !defined (__OS2__)
+#define CALL_MUNMAP(a, s) munmap((a), (s))
+#define MMAP_PROT (PROT_READ|PROT_WRITE)
+#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+#define MAP_ANONYMOUS MAP_ANON
+#endif /* MAP_ANON */
+#ifdef MAP_ANONYMOUS
+#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
+#define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
+#else /* MAP_ANONYMOUS */
+/*
+ Nearly all versions of mmap support MAP_ANONYMOUS, so the following
+ is unlikely to be needed, but is supplied just in case.
+*/
+#define MMAP_FLAGS (MAP_PRIVATE)
+static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
+#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
+ (dev_zero_fd = open("/dev/zero", O_RDWR), \
+ mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
+ mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
+#endif /* MAP_ANONYMOUS */
+
+#define DIRECT_MMAP(s) CALL_MMAP(s)
+
+#elif defined(__OS2__)
+
+/* OS/2 MMAP via DosAllocMem */
+static void* os2mmap(size_t size) {
+ void* ptr;
+ if (DosAllocMem(&ptr, size, OBJ_ANY|PAG_COMMIT|PAG_READ|PAG_WRITE) &&
+ DosAllocMem(&ptr, size, PAG_COMMIT|PAG_READ|PAG_WRITE))
+ return MFAIL;
+ return ptr;
+}
+
+#define os2direct_mmap(n) os2mmap(n)
+
+/* This function supports releasing coalesced segments */
+static int os2munmap(void* ptr, size_t size) {
+ while (size) {
+ ULONG ulSize = size;
+ ULONG ulFlags = 0;
+ if (DosQueryMem(ptr, &ulSize, &ulFlags) != 0)
+ return -1;
+ if ((ulFlags & PAG_BASE) == 0 || (ulFlags & PAG_COMMIT) == 0 ||
+ ulSize > size)
+ return -1;
+ if (DosFreeMem(ptr) != 0)
+ return -1;
+ ptr = (void*)((char*)ptr + ulSize);
+ size -= ulSize;
+ }
+ return 0;
+}
+
+#define CALL_MMAP(s) os2mmap(s)
+#define CALL_MUNMAP(a, s) os2munmap((a), (s))
+#define DIRECT_MMAP(s) os2direct_mmap(s)
+
+#else /* WIN32 */
+
+/* Win32 MMAP via VirtualAlloc */
+static void* win32mmap(size_t size) {
+ void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_EXECUTE_READWRITE);
+ return (ptr != 0)? ptr: MFAIL;
+}
+
+/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
+static void* win32direct_mmap(size_t size) {
+ void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
+ PAGE_EXECUTE_READWRITE);
+ return (ptr != 0)? ptr: MFAIL;
+}
+
+/* This function supports releasing coalesced segments */
+static int win32munmap(void* ptr, size_t size) {
+ MEMORY_BASIC_INFORMATION minfo;
+ char* cptr = ptr;
+ while (size) {
+ if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
+ return -1;
+ if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
+ minfo.State != MEM_COMMIT || minfo.RegionSize > size)
+ return -1;
+ if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
+ return -1;
+ cptr += minfo.RegionSize;
+ size -= minfo.RegionSize;
+ }
+ return 0;
+}
+
+#define CALL_MMAP(s) win32mmap(s)
+#define CALL_MUNMAP(a, s) win32munmap((a), (s))
+#define DIRECT_MMAP(s) win32direct_mmap(s)
+#endif /* WIN32 */
+#endif /* HAVE_MMAP */
+
+#if HAVE_MMAP && HAVE_MREMAP
+#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
+#else /* HAVE_MMAP && HAVE_MREMAP */
+#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
+#endif /* HAVE_MMAP && HAVE_MREMAP */
+
+#if HAVE_MORECORE
+#define CALL_MORECORE(S) MORECORE(S)
+#else /* HAVE_MORECORE */
+#define CALL_MORECORE(S) MFAIL
+#endif /* HAVE_MORECORE */
+
+/* mstate bit set if contiguous morecore disabled or failed */
+#define USE_NONCONTIGUOUS_BIT (4U)
+
+/* segment bit set in create_mspace_with_base */
+#define EXTERN_BIT (8U)
+
+
+/* --------------------------- Lock preliminaries ------------------------ */
+
+#if USE_LOCKS
+
+/*
+ When locks are defined, there are up to two global locks:
+
+ * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
+ MORECORE. In many cases sys_alloc requires two calls, that should
+ not be interleaved with calls by other threads. This does not
+ protect against direct calls to MORECORE by other threads not
+ using this lock, so there is still code to cope as best we can with
+ interference.
+
+ * magic_init_mutex ensures that mparams.magic and other
+ unique mparams values are initialized only once.
+*/
+
+#if !defined(WIN32) && !defined(__OS2__)
+/* By default use posix locks */
+#include <pthread.h>
+#define MLOCK_T pthread_mutex_t
+#define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
+#define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
+#define RELEASE_LOCK(l) pthread_mutex_unlock(l)
+
+#if HAVE_MORECORE
+static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif /* HAVE_MORECORE */
+
+static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+#elif defined(__OS2__)
+#define MLOCK_T HMTX
+#define INITIAL_LOCK(l) DosCreateMutexSem(0, l, 0, FALSE)
+#define ACQUIRE_LOCK(l) DosRequestMutexSem(*l, SEM_INDEFINITE_WAIT)
+#define RELEASE_LOCK(l) DosReleaseMutexSem(*l)
+#if HAVE_MORECORE
+static MLOCK_T morecore_mutex;
+#endif /* HAVE_MORECORE */
+static MLOCK_T magic_init_mutex;
+
+#else /* WIN32 */
+/*
+ Because lock-protected regions have bounded times, and there
+ are no recursive lock calls, we can use simple spinlocks.
+*/
+
+#define MLOCK_T long
+static int win32_acquire_lock (MLOCK_T *sl) {
+ for (;;) {
+#ifdef InterlockedCompareExchangePointer
+ if (!InterlockedCompareExchange(sl, 1, 0))
+ return 0;
+#else /* Use older void* version */
+ if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0))
+ return 0;
+#endif /* InterlockedCompareExchangePointer */
+ Sleep (0);
+ }
+}
+
+static void win32_release_lock (MLOCK_T *sl) {
+ InterlockedExchange (sl, 0);
+}
+
+#define INITIAL_LOCK(l) *(l)=0
+#define ACQUIRE_LOCK(l) win32_acquire_lock(l)
+#define RELEASE_LOCK(l) win32_release_lock(l)
+#if HAVE_MORECORE
+static MLOCK_T morecore_mutex;
+#endif /* HAVE_MORECORE */
+static MLOCK_T magic_init_mutex;
+#endif /* WIN32 */
+
+#define USE_LOCK_BIT (2U)
+#else /* USE_LOCKS */
+#define USE_LOCK_BIT (0U)
+#define INITIAL_LOCK(l)
+#endif /* USE_LOCKS */
+
+#if USE_LOCKS && HAVE_MORECORE
+#define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex);
+#define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex);
+#else /* USE_LOCKS && HAVE_MORECORE */
+#define ACQUIRE_MORECORE_LOCK()
+#define RELEASE_MORECORE_LOCK()
+#endif /* USE_LOCKS && HAVE_MORECORE */
+
+#if USE_LOCKS
+#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
+#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
+#else /* USE_LOCKS */
+#define ACQUIRE_MAGIC_INIT_LOCK()
+#define RELEASE_MAGIC_INIT_LOCK()
+#endif /* USE_LOCKS */
+
+
+/* ----------------------- Chunk representations ------------------------ */
+
+/*
+ (The following includes lightly edited explanations by Colin Plumb.)
+
+ The malloc_chunk declaration below is misleading (but accurate and
+ necessary). It declares a "view" into memory allowing access to
+ necessary fields at known offsets from a given base.
+
+ Chunks of memory are maintained using a `boundary tag' method as
+ originally described by Knuth. (See the paper by Paul Wilson
+ ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
+ techniques.) Sizes of free chunks are stored both in the front of
+ each chunk and at the end. This makes consolidating fragmented
+ chunks into bigger chunks fast. The head fields also hold bits
+ representing whether chunks are free or in use.
+
+ Here are some pictures to make it clearer. They are "exploded" to
+ show that the state of a chunk can be thought of as extending from
+ the high 31 bits of the head field of its header through the
+ prev_foot and PINUSE_BIT bit of the following chunk header.
+
+ A chunk that's in use looks like:
+
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of previous chunk (if P = 1) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
+ | Size of this chunk 1| +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |
+ +- -+
+ | |
+ +- -+
+ | :
+ +- size - sizeof(size_t) available payload bytes -+
+ : |
+ chunk-> +- -+
+ | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
+ | Size of next chunk (may or may not be in use) | +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ And if it's free, it looks like this:
+
+ chunk-> +- -+
+ | User payload (must be in use, or we would have merged!) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
+ | Size of this chunk 0| +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Next pointer |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Prev pointer |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | :
+ +- size - sizeof(struct chunk) unused bytes -+
+ : |
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of this chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
+ | Size of next chunk (must be in use, or we would have merged)| +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | :
+ +- User payload -+
+ : |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |0|
+ +-+
+ Note that since we always merge adjacent free chunks, the chunks
+ adjacent to a free chunk must be in use.
+
+ Given a pointer to a chunk (which can be derived trivially from the
+ payload pointer) we can, in O(1) time, find out whether the adjacent
+ chunks are free, and if so, unlink them from the lists that they
+ are on and merge them with the current chunk.
+
+ Chunks always begin on even word boundaries, so the mem portion
+ (which is returned to the user) is also on an even word boundary, and
+ thus at least double-word aligned.
+
+ The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
+ chunk size (which is always a multiple of two words), is an in-use
+ bit for the *previous* chunk. If that bit is *clear*, then the
+ word before the current chunk size contains the previous chunk
+ size, and can be used to find the front of the previous chunk.
+ The very first chunk allocated always has this bit set, preventing
+ access to non-existent (or non-owned) memory. If pinuse is set for
+ any given chunk, then you CANNOT determine the size of the
+ previous chunk, and might even get a memory addressing fault when
+ trying to do so.
+
+ The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
+ the chunk size redundantly records whether the current chunk is
+ inuse. This redundancy enables usage checks within free and realloc,
+ and reduces indirection when freeing and consolidating chunks.
+
+ Each freshly allocated chunk must have both cinuse and pinuse set.
+ That is, each allocated chunk borders either a previously allocated
+ and still in-use chunk, or the base of its memory arena. This is
+ ensured by making all allocations from the `lowest' part of any
+ found chunk. Further, no free chunk physically borders another one,
+ so each free chunk is known to be preceded and followed by either
+ inuse chunks or the ends of memory.
+
+ Note that the `foot' of the current chunk is actually represented
+ as the prev_foot of the NEXT chunk. This makes it easier to
+ deal with alignments etc but can be very confusing when trying
+ to extend or adapt this code.
+
+ The exceptions to all this are
+
+ 1. The special chunk `top' is the top-most available chunk (i.e.,
+ the one bordering the end of available memory). It is treated
+ specially. Top is never included in any bin, is used only if
+ no other chunk is available, and is released back to the
+ system if it is very large (see M_TRIM_THRESHOLD). In effect,
+ the top chunk is treated as larger (and thus less well
+ fitting) than any other available chunk. The top chunk
+ doesn't update its trailing size field since there is no next
+ contiguous chunk that would have to index off it. However,
+ space is still allocated for it (TOP_FOOT_SIZE) to enable
+ separation or merging when space is extended.
+
+ 2. Chunks allocated via mmap, which have the lowest-order bit
+ (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
+ PINUSE_BIT in their head fields. Because they are allocated
+ one-by-one, each must carry its own prev_foot field, which is
+ also used to hold the offset this chunk has within its mmapped
+ region, which is needed to preserve alignment. Each mmapped
+ chunk is trailed by the first two fields of a fake next-chunk
+ for sake of usage checks.
+
+*/
+
+struct malloc_chunk {
+ size_t prev_foot; /* Size of previous chunk (if free). */
+ size_t head; /* Size and inuse bits. */
+ struct malloc_chunk* fd; /* double links -- used only if free. */
+ struct malloc_chunk* bk;
+};
+
+typedef struct malloc_chunk mchunk;
+typedef struct malloc_chunk* mchunkptr;
+typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
+typedef size_t bindex_t; /* Described below */
+typedef unsigned int binmap_t; /* Described below */
+typedef unsigned int flag_t; /* The type of various bit flag sets */
+
+/* ------------------- Chunks sizes and alignments ----------------------- */
+
+#define MCHUNK_SIZE (sizeof(mchunk))
+
+#if FOOTERS
+#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
+#else /* FOOTERS */
+#define CHUNK_OVERHEAD (SIZE_T_SIZE)
+#endif /* FOOTERS */
+
+/* MMapped chunks need a second word of overhead ... */
+#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
+/* ... and additional padding for fake next-chunk at foot */
+#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
+
+/* The smallest size we can malloc is an aligned minimal chunk */
+#define MIN_CHUNK_SIZE\
+ ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* conversion from malloc headers to user pointers, and back */
+#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
+#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
+/* chunk associated with aligned address A */
+#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
+
+/* Bounds on request (not chunk) sizes. */
+#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
+#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
+
+/* pad request bytes into a usable size */
+#define pad_request(req) \
+ (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* pad request, checking for minimum (but not maximum) */
+#define request2size(req) \
+ (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
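+
+/*
+  Worked example, assuming 32-bit size_t and pointers, MALLOC_ALIGNMENT
+  == 8 and FOOTERS == 0 (so CHUNK_OVERHEAD == 4, MIN_CHUNK_SIZE == 16):
+    pad_request(13) == (13 + 4 + 7) & ~7 == 24
+    request2size(1) == MIN_CHUNK_SIZE  == 16  // tiny requests round up
+*/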
+
+
+/* ------------------ Operations on head and foot fields ----------------- */
+
+/*
+ The head field of a chunk is or'ed with PINUSE_BIT when previous
+ adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
+ use. If the chunk was obtained with mmap, the prev_foot field has
+ IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
+ mmapped region to the base of the chunk.
+*/
+
+#define PINUSE_BIT (SIZE_T_ONE)
+#define CINUSE_BIT (SIZE_T_TWO)
+#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
+
+/* Head value for fenceposts */
+#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
+
+/* extraction of fields from head words */
+#define cinuse(p) ((p)->head & CINUSE_BIT)
+#define pinuse(p) ((p)->head & PINUSE_BIT)
+#define chunksize(p) ((p)->head & ~(INUSE_BITS))
+
+#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
+#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
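+
+/*
+  Illustrative head value: a 48-byte in-use chunk whose predecessor is
+  also in use stores head == 48 | PINUSE_BIT | CINUSE_BIT == 51;
+  chunksize() masks the two flag bits back off to recover 48.
+*/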
+
+/* Treat space at ptr +/- offset as a chunk */
+#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
+#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
+
+/* Ptr to next or previous physical malloc_chunk. */
+#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
+#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
+
+/* extract next chunk's pinuse bit */
+#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
+
+/* Get/set size at footer */
+#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
+#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
+
+/* Set size, pinuse bit, and foot */
+#define set_size_and_pinuse_of_free_chunk(p, s)\
+ ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
+
+/* Set size, pinuse bit, foot, and clear next pinuse */
+#define set_free_with_pinuse(p, s, n)\
+ (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
+
+#define is_mmapped(p)\
+ (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
+
+/* Get the internal overhead associated with chunk p */
+#define overhead_for(p)\
+ (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
+
+/* Return true if malloced space is not necessarily cleared */
+#if MMAP_CLEARS
+#define calloc_must_clear(p) (!is_mmapped(p))
+#else /* MMAP_CLEARS */
+#define calloc_must_clear(p) (1)
+#endif /* MMAP_CLEARS */
+
+/* ---------------------- Overlaid data structures ----------------------- */
+
+/*
+ When chunks are not in use, they are treated as nodes of either
+ lists or trees.
+
+ "Small" chunks are stored in circular doubly-linked lists, and look
+ like this:
+
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of previous chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ `head:' | Size of chunk, in bytes |P|
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Forward pointer to next chunk in list |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Back pointer to previous chunk in list |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Unused space (may be 0 bytes long) .
+ . .
+ . |
+nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ `foot:' | Size of chunk, in bytes |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Larger chunks are kept in a form of bitwise digital trees (aka
+ tries) keyed on chunksizes. Because malloc_tree_chunks are only for
+ free chunks greater than 256 bytes, their size doesn't impose any
+ constraints on user chunk sizes. Each node looks like:
+
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of previous chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ `head:' | Size of chunk, in bytes |P|
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Forward pointer to next chunk of same size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Back pointer to previous chunk of same size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Pointer to left child (child[0]) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Pointer to right child (child[1]) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Pointer to parent |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | bin index of this chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Unused space .
+ . |
+nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ `foot:' | Size of chunk, in bytes |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Each tree holding treenodes is a tree of unique chunk sizes. Chunks
+ of the same size are arranged in a circularly-linked list, with only
+ the oldest chunk (the next to be used, in our FIFO ordering)
+ actually in the tree. (Tree members are distinguished by a non-null
+ parent pointer.) If a chunk with the same size as an existing node
+ is inserted, it is linked off the existing node using pointers that
+ work in the same way as fd/bk pointers of small chunks.
+
+ Each tree contains a power of 2 sized range of chunk sizes (the
+ smallest is 0x100 <= x < 0x180), which is divided in half at each
+ tree level, with the chunks in the smaller half of the range (0x100
+ <= x < 0x140 for the top node) in the left subtree and the larger
+ half (0x140 <= x < 0x180) in the right subtree. This is, of course,
+ done by inspecting individual bits.
+
+ Using these rules, each node's left subtree contains all smaller
+ sizes than its right subtree. However, the node at the root of each
+ subtree has no particular ordering relationship to either. (The
+ dividing line between the subtree sizes is based on trie relation.)
+ If we remove the last chunk of a given size from the interior of the
+ tree, we need to replace it with a leaf node. The tree ordering
+ rules permit a node to be replaced by any leaf below it.
+
+ The smallest chunk in a tree (a common operation in a best-fit
+ allocator) can be found by walking a path to the leftmost leaf in
+ the tree. Unlike a usual binary tree, where we follow left child
+ pointers until we reach a null, here we follow the right child
+ pointer any time the left one is null, until we reach a leaf with
+ both child pointers null. The smallest chunk in the tree will be
+ somewhere along that path.
+
+ The worst case number of steps to add, find, or remove a node is
+ bounded by the number of bits differentiating chunks within
+ bins. Under current bin calculations, this ranges from 6 up to 21
+ (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
+ is of course much better.
+*/
+
+struct malloc_tree_chunk {
+ /* The first four fields must be compatible with malloc_chunk */
+ size_t prev_foot;
+ size_t head;
+ struct malloc_tree_chunk* fd;
+ struct malloc_tree_chunk* bk;
+
+ struct malloc_tree_chunk* child[2];
+ struct malloc_tree_chunk* parent;
+ bindex_t index;
+};
+
+typedef struct malloc_tree_chunk tchunk;
+typedef struct malloc_tree_chunk* tchunkptr;
+typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
+
+/* A little helper macro for trees */
+#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
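+
+/*
+  A minimal sketch of the least-fit walk described above (root is a
+  hypothetical non-null tree root; this is not code used elsewhere in
+  this file):
+
+    tchunkptr t = root;
+    while (leftmost_child(t) != 0)
+      t = leftmost_child(t);
+    // t ends the path along which the smallest chunk in the tree lies
+*/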
+
+/* ----------------------------- Segments -------------------------------- */
+
+/*
+ Each malloc space may include non-contiguous segments, held in a
+ list headed by an embedded malloc_segment record representing the
+ top-most space. Segments also include flags holding properties of
+ the space. Large chunks that are directly allocated by mmap are not
+ included in this list. They are instead independently created and
+ destroyed without otherwise keeping track of them.
+
+ Segment management mainly comes into play for spaces allocated by
+ MMAP. Any call to MMAP might or might not return memory that is
+ adjacent to an existing segment. MORECORE normally contiguously
+ extends the current space, so this space is almost always adjacent,
+ which is simpler and faster to deal with. (This is why MORECORE is
+ used preferentially to MMAP when both are available -- see
+ sys_alloc.) When allocating using MMAP, we don't use any of the
+ hinting mechanisms (inconsistently) supported in various
+ implementations of unix mmap, or distinguish reserving from
+ committing memory. Instead, we just ask for space, and exploit
+ contiguity when we get it. It is probably possible to do
+ better than this on some systems, but no general scheme seems
+ to be significantly better.
+
+ Management entails a simpler variant of the consolidation scheme
+ used for chunks to reduce fragmentation -- new adjacent memory is
+ normally prepended or appended to an existing segment. However,
+ there are limitations compared to chunk consolidation that mostly
+ reflect the fact that segment processing is relatively infrequent
+ (occurring only when getting memory from system) and that we
+ don't expect to have huge numbers of segments:
+
+ * Segments are not indexed, so traversal requires linear scans. (It
+ would be possible to index these, but is not worth the extra
+ overhead and complexity for most programs on most platforms.)
+ * New segments are only appended to old ones when holding top-most
+ memory; if they cannot be prepended to others, they are held in
+ different segments.
+
+ Except for the top-most segment of an mstate, each segment record
+ is kept at the tail of its segment. Segments are added by pushing
+ segment records onto the list headed by &mstate.seg for the
+ containing mstate.
+
+ Segment flags control allocation/merge/deallocation policies:
+ * If EXTERN_BIT set, then we did not allocate this segment,
+ and so should not try to deallocate or merge with others.
+ (This currently holds only for the initial segment passed
+ into create_mspace_with_base.)
+ * If IS_MMAPPED_BIT set, the segment may be merged with
+ other surrounding mmapped segments and trimmed/de-allocated
+ using munmap.
+ * If neither bit is set, then the segment was obtained using
+ MORECORE so can be merged with surrounding MORECORE'd segments
+ and deallocated/trimmed using MORECORE with negative arguments.
+*/
+
+struct malloc_segment {
+ char* base; /* base address */
+ size_t size; /* allocated size */
+ struct malloc_segment* next; /* ptr to next segment */
+#if FFI_MMAP_EXEC_WRIT
+ /* The mmap magic is supposed to store the address of the executable
+ segment at the very end of the requested block. */
+
+# define mmap_exec_offset(b,s) (*(ptrdiff_t*)((b)+(s)-sizeof(ptrdiff_t)))
+
+ /* We can only merge segments if their corresponding executable
+ segments are at identical offsets. */
+# define check_segment_merge(S,b,s) \
+ (mmap_exec_offset((b),(s)) == (S)->exec_offset)
+
+# define add_segment_exec_offset(p,S) ((char*)(p) + (S)->exec_offset)
+# define sub_segment_exec_offset(p,S) ((char*)(p) - (S)->exec_offset)
+
+ /* The removal of sflags only works with HAVE_MORECORE == 0. */
+
+# define get_segment_flags(S) (IS_MMAPPED_BIT)
+# define set_segment_flags(S,v) \
+ (((v) != IS_MMAPPED_BIT) ? (ABORT, (v)) : \
+ (((S)->exec_offset = \
+ mmap_exec_offset((S)->base, (S)->size)), \
+ (mmap_exec_offset((S)->base + (S)->exec_offset, (S)->size) != \
+ (S)->exec_offset) ? (ABORT, (v)) : \
+ (mmap_exec_offset((S)->base, (S)->size) = 0), (v)))
+
+ /* We use an offset here, instead of a pointer, because then, when
+ base changes, we don't have to modify this. On architectures
+ with segmented addresses, this might not work. */
+ ptrdiff_t exec_offset;
+#else
+
+# define get_segment_flags(S) ((S)->sflags)
+# define set_segment_flags(S,v) ((S)->sflags = (v))
+# define check_segment_merge(S,b,s) (1)
+
+ flag_t sflags; /* mmap and extern flag */
+#endif
+};
+
+#define is_mmapped_segment(S) (get_segment_flags(S) & IS_MMAPPED_BIT)
+#define is_extern_segment(S) (get_segment_flags(S) & EXTERN_BIT)
+
+typedef struct malloc_segment msegment;
+typedef struct malloc_segment* msegmentptr;
+
+/* ---------------------------- malloc_state ----------------------------- */
+
+/*
+ A malloc_state holds all of the bookkeeping for a space.
+ The main fields are:
+
+ Top
+ The topmost chunk of the currently active segment. Its size is
+ cached in topsize. The actual size of topmost space is
+ topsize+TOP_FOOT_SIZE, which includes space reserved for adding
+ fenceposts and segment records if necessary when getting more
+ space from the system. The size at which to autotrim top is
+ cached from mparams in trim_check, except that it is disabled if
+ an autotrim fails.
+
+ Designated victim (dv)
+ This is the preferred chunk for servicing small requests that
+ don't have exact fits. It is normally the chunk split off most
+ recently to service another small request. Its size is cached in
+ dvsize. The link fields of this chunk are not maintained since it
+ is not kept in a bin.
+
+ SmallBins
+ An array of bin headers for free chunks. These bins hold chunks
+ with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
+ chunks of all the same size, spaced 8 bytes apart. To simplify
+ use in double-linked lists, each bin header acts as a malloc_chunk
+ pointing to the real first node, if it exists (else pointing to
+ itself). This avoids special-casing for headers. But to avoid
+ waste, we allocate only the fd/bk pointers of bins, and then use
+ repositioning tricks to treat these as the fields of a chunk.
+
+ TreeBins
+ Treebins are pointers to the roots of trees holding a range of
+ sizes. There are 2 equally spaced treebins for each power of two
+ from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds
+ anything larger.
+
+ Bin maps
+ There is one bit map for small bins ("smallmap") and one for
+ treebins ("treemap). Each bin sets its bit when non-empty, and
+ clears the bit when empty. Bit operations are then used to avoid
+ bin-by-bin searching -- nearly all "search" is done without ever
+ looking at bins that won't be selected. The bit maps
+ conservatively use 32 bits per map word, even on 64-bit systems.
+ For a good description of some of the bit-based techniques used
+ here, see Henry S. Warren Jr's book "Hacker's Delight" (and
+ supplement at http://hackersdelight.org/). Many of these are
+ intended to reduce the branchiness of paths through malloc etc, as
+ well as to reduce the number of memory locations read or written.
+
+ Segments
+ A list of segments headed by an embedded malloc_segment record
+ representing the initial space.
+
+ Address check support
+ The least_addr field is the least address ever obtained from
+ MORECORE or MMAP. Attempted frees and reallocs of any address less
+ than this are trapped (unless INSECURE is defined).
+
+ Magic tag
+ A cross-check field that should always hold the same value as mparams.magic.
+
+ Flags
+ Bits recording whether to use MMAP, locks, or contiguous MORECORE
+
+ Statistics
+ Each space keeps track of current and maximum system memory
+ obtained via MORECORE or MMAP.
+
+ Locking
+ If USE_LOCKS is defined, the "mutex" lock is acquired and released
+ around every public call using this mspace.
+*/
+
+/* Bin types, widths and sizes */
+#define NSMALLBINS (32U)
+#define NTREEBINS (32U)
+#define SMALLBIN_SHIFT (3U)
+#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
+#define TREEBIN_SHIFT (8U)
+#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
+#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
+#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
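+
+/* Worked example (macro arithmetic only): with SMALLBIN_SHIFT == 3,
+ small_index(64) == 8 and small_index2size(8) == 64, so a 64-byte
+ chunk lives in smallbin 8. is_small(s) holds exactly when
+ s < MIN_LARGE_SIZE == 256; anything at or above that is handled
+ by the treebins. */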
+
+struct malloc_state {
+ binmap_t smallmap;
+ binmap_t treemap;
+ size_t dvsize;
+ size_t topsize;
+ char* least_addr;
+ mchunkptr dv;
+ mchunkptr top;
+ size_t trim_check;
+ size_t magic;
+ mchunkptr smallbins[(NSMALLBINS+1)*2];
+ tbinptr treebins[NTREEBINS];
+ size_t footprint;
+ size_t max_footprint;
+ flag_t mflags;
+#if USE_LOCKS
+ MLOCK_T mutex; /* locate lock among fields that rarely change */
+#endif /* USE_LOCKS */
+ msegment seg;
+};
+
+typedef struct malloc_state* mstate;
+
+/* ------------- Global malloc_state and malloc_params ------------------- */
+
+/*
+ malloc_params holds global properties, including those that can be
+ dynamically set using mallopt. There is a single instance, mparams,
+ initialized in init_mparams.
+*/
+
+struct malloc_params {
+ size_t magic;
+ size_t page_size;
+ size_t granularity;
+ size_t mmap_threshold;
+ size_t trim_threshold;
+ flag_t default_mflags;
+};
+
+static struct malloc_params mparams;
+
+/* The global malloc_state used for all non-"mspace" calls */
+static struct malloc_state _gm_;
+#define gm (&_gm_)
+#define is_global(M) ((M) == &_gm_)
+#define is_initialized(M) ((M)->top != 0)
+
+/* -------------------------- system alloc setup ------------------------- */
+
+/* Operations on mflags */
+
+#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
+#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
+#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
+
+#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
+#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
+#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
+
+#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
+#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
+
+#define set_lock(M,L)\
+ ((M)->mflags = (L)?\
+ ((M)->mflags | USE_LOCK_BIT) :\
+ ((M)->mflags & ~USE_LOCK_BIT))
+
+/* page-align a size */
+#define page_align(S)\
+ (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
+
+/* granularity-align a size */
+#define granularity_align(S)\
+ (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
+
+#define is_page_aligned(S)\
+ (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
+#define is_granularity_aligned(S)\
+ (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
+
+/* True if segment S holds address A */
+#define segment_holds(S, A)\
+ ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
+
+/* Return segment holding given address */
+static msegmentptr segment_holding(mstate m, char* addr) {
+ msegmentptr sp = &m->seg;
+ for (;;) {
+ if (addr >= sp->base && addr < sp->base + sp->size)
+ return sp;
+ if ((sp = sp->next) == 0)
+ return 0;
+ }
+}
+
+/* Return true if segment contains a segment link */
+static int has_segment_link(mstate m, msegmentptr ss) {
+ msegmentptr sp = &m->seg;
+ for (;;) {
+ if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
+ return 1;
+ if ((sp = sp->next) == 0)
+ return 0;
+ }
+}
+
+#ifndef MORECORE_CANNOT_TRIM
+#define should_trim(M,s) ((s) > (M)->trim_check)
+#else /* MORECORE_CANNOT_TRIM */
+#define should_trim(M,s) (0)
+#endif /* MORECORE_CANNOT_TRIM */
+
+/*
+ TOP_FOOT_SIZE is padding at the end of a segment, including space
+ that may be needed to place segment records and fenceposts when new
+ noncontiguous segments are added.
+*/
+#define TOP_FOOT_SIZE\
+ (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
+
+
+/* ------------------------------- Hooks -------------------------------- */
+
+/*
+ PREACTION should be defined to return 0 on success, and nonzero on
+ failure. If you are not using locking, you can redefine these to do
+ anything you like.
+*/
+
+#if USE_LOCKS
+
+/* Ensure locks are initialized */
+#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
+
+#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
+#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
+#else /* USE_LOCKS */
+
+#ifndef PREACTION
+#define PREACTION(M) (0)
+#endif /* PREACTION */
+
+#ifndef POSTACTION
+#define POSTACTION(M)
+#endif /* POSTACTION */
+
+#endif /* USE_LOCKS */
+
+/*
+ CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
+ USAGE_ERROR_ACTION is triggered on detected bad frees and
+ reallocs. The argument p is an address that might have triggered the
+ fault. It is ignored by the two predefined actions, but might be
+ useful in custom actions that try to help diagnose errors.
+*/
+
+#if PROCEED_ON_ERROR
+
+/* A count of the number of corruption errors causing resets */
+int malloc_corruption_error_count;
+
+/* default corruption action */
+static void reset_on_error(mstate m);
+
+#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
+#define USAGE_ERROR_ACTION(m, p)
+
+#else /* PROCEED_ON_ERROR */
+
+#ifndef CORRUPTION_ERROR_ACTION
+#define CORRUPTION_ERROR_ACTION(m) ABORT
+#endif /* CORRUPTION_ERROR_ACTION */
+
+#ifndef USAGE_ERROR_ACTION
+#define USAGE_ERROR_ACTION(m,p) ABORT
+#endif /* USAGE_ERROR_ACTION */
+
+#endif /* PROCEED_ON_ERROR */
+
+/* -------------------------- Debugging setup ---------------------------- */
+
+#if ! DEBUG
+
+#define check_free_chunk(M,P)
+#define check_inuse_chunk(M,P)
+#define check_malloced_chunk(M,P,N)
+#define check_mmapped_chunk(M,P)
+#define check_malloc_state(M)
+#define check_top_chunk(M,P)
+
+#else /* DEBUG */
+#define check_free_chunk(M,P) do_check_free_chunk(M,P)
+#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
+#define check_top_chunk(M,P) do_check_top_chunk(M,P)
+#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
+#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
+#define check_malloc_state(M) do_check_malloc_state(M)
+
+static void do_check_any_chunk(mstate m, mchunkptr p);
+static void do_check_top_chunk(mstate m, mchunkptr p);
+static void do_check_mmapped_chunk(mstate m, mchunkptr p);
+static void do_check_inuse_chunk(mstate m, mchunkptr p);
+static void do_check_free_chunk(mstate m, mchunkptr p);
+static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
+static void do_check_tree(mstate m, tchunkptr t);
+static void do_check_treebin(mstate m, bindex_t i);
+static void do_check_smallbin(mstate m, bindex_t i);
+static void do_check_malloc_state(mstate m);
+static int bin_find(mstate m, mchunkptr x);
+static size_t traverse_and_check(mstate m);
+#endif /* DEBUG */
+
+/* ---------------------------- Indexing Bins ---------------------------- */
+
+#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
+#define small_index(s) ((s) >> SMALLBIN_SHIFT)
+#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
+#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
+
+/* addressing by index. See above about smallbin repositioning */
+#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
+#define treebin_at(M,i) (&((M)->treebins[i]))
+
+/* assign tree index for size S to variable I */
+#if defined(__GNUC__) && defined(__i386__)
+#define compute_tree_index(S, I)\
+{\
+ size_t X = S >> TREEBIN_SHIFT;\
+ if (X == 0)\
+ I = 0;\
+ else if (X > 0xFFFF)\
+ I = NTREEBINS-1;\
+ else {\
+ unsigned int K;\
+ __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
+ I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
+ }\
+}
+#else /* GNUC */
+#define compute_tree_index(S, I)\
+{\
+ size_t X = S >> TREEBIN_SHIFT;\
+ if (X == 0)\
+ I = 0;\
+ else if (X > 0xFFFF)\
+ I = NTREEBINS-1;\
+ else {\
+ unsigned int Y = (unsigned int)X;\
+ unsigned int N = ((Y - 0x100) >> 16) & 8;\
+ unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
+ N += K;\
+ N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
+ K = 14 - N + ((Y <<= K) >> 15);\
+ I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
+ }\
+}
+#endif /* GNUC */
+
+/* Bit representing maximum resolved size in a treebin at i */
+#define bit_for_tree_index(i) \
+ (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
+
+/* Shift placing maximum resolved bit in a treebin at i as sign bit */
+#define leftshift_for_tree_index(i) \
+ ((i == NTREEBINS-1)? 0 : \
+ ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
+
+/* The size of the smallest chunk held in bin with index i */
+#define minsize_for_tree_index(i) \
+ ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
+ (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
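+
+/* Illustrative values (exposition only): compute_tree_index maps
+ sizes 256, 384, 512, 768 to indices 0, 1, 2, 3, and inversely
+ minsize_for_tree_index(0..3) == 256, 384, 512, 768 -- i.e. two bins
+ per power of two, each covering half of a range [2^k, 2^(k+1)). */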
+
+
+/* ------------------------ Operations on bin maps ----------------------- */
+
+/* bit corresponding to given index */
+#define idx2bit(i) ((binmap_t)(1) << (i))
+
+/* Mark/Clear bits with given index */
+#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
+#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
+#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
+
+#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
+#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
+#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
+
+/* index corresponding to given bit */
+
+#if defined(__GNUC__) && defined(__i386__)
+#define compute_bit2idx(X, I)\
+{\
+ unsigned int J;\
+ __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
+ I = (bindex_t)J;\
+}
+
+#else /* GNUC */
+#if USE_BUILTIN_FFS
+#define compute_bit2idx(X, I) I = ffs(X)-1
+
+#else /* USE_BUILTIN_FFS */
+#define compute_bit2idx(X, I)\
+{\
+ unsigned int Y = X - 1;\
+ unsigned int K = Y >> (16-4) & 16;\
+ unsigned int N = K; Y >>= K;\
+ N += K = Y >> (8-3) & 8; Y >>= K;\
+ N += K = Y >> (4-2) & 4; Y >>= K;\
+ N += K = Y >> (2-1) & 2; Y >>= K;\
+ N += K = Y >> (1-0) & 1; Y >>= K;\
+ I = (bindex_t)(N + Y);\
+}
+#endif /* USE_BUILTIN_FFS */
+#endif /* GNUC */
+
+/* isolate the least set bit of a bitmap */
+#define least_bit(x) ((x) & -(x))
+
+/* mask with all bits to left of least bit of x on */
+#define left_bits(x) ((x<<1) | -(x<<1))
+
+/* mask with all bits to left of or equal to least bit of x on */
+#define same_or_left_bits(x) ((x) | -(x))
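+
+/* Example (exposition only): for x == 0x14 (bits 2 and 4 set),
+ least_bit(x) == 0x04 and left_bits(x) == ~(binmap_t)0x07, i.e. all
+ bits strictly above the least set bit. Feeding such masks through
+ compute_bit2idx locates the next non-empty bin without a
+ bin-by-bin scan. */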
+
+
+/* ----------------------- Runtime Check Support ------------------------- */
+
+/*
+ For security, the main invariant is that malloc/free/etc never
+ writes to a static address other than malloc_state, unless static
+ malloc_state itself has been corrupted, which cannot occur via
+ malloc (because of these checks). In essence this means that we
+ believe all pointers, sizes, maps etc held in malloc_state, but
+ check all of those linked or offsetted from other embedded data
+ structures. These checks are interspersed with main code in a way
+ that tends to minimize their run-time cost.
+
+ When FOOTERS is defined, in addition to range checking, we also
+ verify footer fields of inuse chunks, which can be used to
+ guarantee that the mstate controlling malloc/free is intact. This
+ is a streamlined version of the approach described by William
+ Robertson et al in "Run-time Detection of Heap-based Overflows"
+ (LISA '03, http://www.usenix.org/events/lisa03/tech/robertson.html).
+ The footer of an inuse chunk holds the xor of its mstate and a
+ random seed, which is checked upon calls to free() and realloc().
+ This is (probabilistically) unguessable from outside the program,
+ but can be computed by any code successfully malloc'ing any chunk,
+ so does not itself provide protection against code that has already
+ broken security through some other means. Unlike Robertson et al,
+ we always dynamically check addresses of all offset chunks
+ (previous, next, etc). This turns out to be cheaper than relying on
+ hashes.
+*/
+
+#if !INSECURE
+/* Check if address a is no lower than the least address obtained from MORECORE or MMAP */
+#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
+/* Check if address of next chunk n is higher than base chunk p */
+#define ok_next(p, n) ((char*)(p) < (char*)(n))
+/* Check if p has its cinuse bit on */
+#define ok_cinuse(p) cinuse(p)
+/* Check if p has its pinuse bit on */
+#define ok_pinuse(p) pinuse(p)
+
+#else /* !INSECURE */
+#define ok_address(M, a) (1)
+#define ok_next(b, n) (1)
+#define ok_cinuse(p) (1)
+#define ok_pinuse(p) (1)
+#endif /* !INSECURE */
+
+#if (FOOTERS && !INSECURE)
+/* Check if (alleged) mstate m has expected magic field */
+#define ok_magic(M) ((M)->magic == mparams.magic)
+#else /* (FOOTERS && !INSECURE) */
+#define ok_magic(M) (1)
+#endif /* (FOOTERS && !INSECURE) */
+
+
+/* In gcc, use __builtin_expect to minimize impact of checks */
+#if !INSECURE
+#if defined(__GNUC__) && __GNUC__ >= 3
+#define RTCHECK(e) __builtin_expect(e, 1)
+#else /* GNUC */
+#define RTCHECK(e) (e)
+#endif /* GNUC */
+#else /* !INSECURE */
+#define RTCHECK(e) (1)
+#endif /* !INSECURE */
+
+/* macros to set up inuse chunks with or without footers */
+
+#if !FOOTERS
+
+#define mark_inuse_foot(M,p,s)
+
+/* Set cinuse bit and pinuse bit of next chunk */
+#define set_inuse(M,p,s)\
+ ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
+ ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
+
+/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
+#define set_inuse_and_pinuse(M,p,s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+ ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
+
+/* Set size, cinuse and pinuse bit of this chunk */
+#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
+
+#else /* FOOTERS */
+
+/* Set foot of inuse chunk to be xor of mstate and seed */
+#define mark_inuse_foot(M,p,s)\
+ (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
+
+#define get_mstate_for(p)\
+ ((mstate)(((mchunkptr)((char*)(p) +\
+ (chunksize(p))))->prev_foot ^ mparams.magic))
+
+#define set_inuse(M,p,s)\
+ ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
+ (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
+ mark_inuse_foot(M,p,s))
+
+#define set_inuse_and_pinuse(M,p,s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+ (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
+ mark_inuse_foot(M,p,s))
+
+#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+ mark_inuse_foot(M, p, s))
+
+#endif /* !FOOTERS */
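+
+/* Exposition only: the footer round trip under FOOTERS is
+ mark_inuse_foot: foot == (size_t)M ^ mparams.magic
+ get_mstate_for: (mstate)(foot ^ mparams.magic) == M
+ so a call to free() or realloc() on a chunk whose footer has been
+ overwritten recovers a bogus mstate, which then fails ok_magic(). */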
+
+/* ---------------------------- setting mparams -------------------------- */
+
+/* Initialize mparams */
+static int init_mparams(void) {
+ if (mparams.page_size == 0) {
+ size_t s;
+
+ mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
+ mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
+#if MORECORE_CONTIGUOUS
+ mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
+#else /* MORECORE_CONTIGUOUS */
+ mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
+#endif /* MORECORE_CONTIGUOUS */
+
+#if (FOOTERS && !INSECURE)
+ {
+#if USE_DEV_RANDOM
+ int fd;
+ unsigned char buf[sizeof(size_t)];
+ /* Try to use /dev/urandom, else fall back on using time */
+ if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
+ read(fd, buf, sizeof(buf)) == sizeof(buf)) {
+ s = *((size_t *) buf);
+ close(fd);
+ }
+ else
+#endif /* USE_DEV_RANDOM */
+ s = (size_t)(time(0) ^ (size_t)0x55555555U);
+
+ s |= (size_t)8U; /* ensure nonzero */
+ s &= ~(size_t)7U; /* improve chances of fault for bad values */
+
+ }
+#else /* (FOOTERS && !INSECURE) */
+ s = (size_t)0x58585858U;
+#endif /* (FOOTERS && !INSECURE) */
+ ACQUIRE_MAGIC_INIT_LOCK();
+ if (mparams.magic == 0) {
+ mparams.magic = s;
+ /* Set up lock for main malloc area */
+ INITIAL_LOCK(&gm->mutex);
+ gm->mflags = mparams.default_mflags;
+ }
+ RELEASE_MAGIC_INIT_LOCK();
+
+#if !defined(WIN32) && !defined(__OS2__)
+ mparams.page_size = malloc_getpagesize;
+ mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
+ DEFAULT_GRANULARITY : mparams.page_size);
+#elif defined (__OS2__)
+ /* if low memory is used, os2munmap() would break
+ if the granularity were anything other than 64k */
+ mparams.page_size = 4096u;
+ mparams.granularity = 65536u;
+#else /* WIN32 */
+ {
+ SYSTEM_INFO system_info;
+ GetSystemInfo(&system_info);
+ mparams.page_size = system_info.dwPageSize;
+ mparams.granularity = system_info.dwAllocationGranularity;
+ }
+#endif /* WIN32 */
+
+ /* Sanity-check configuration:
+ size_t must be unsigned and as wide as pointer type.
+ ints must be at least 4 bytes.
+ alignment must be at least 8.
+ Alignment, min chunk size, and page size must all be powers of 2.
+ */
+ if ((sizeof(size_t) != sizeof(char*)) ||
+ (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
+ (sizeof(int) < 4) ||
+ (MALLOC_ALIGNMENT < (size_t)8U) ||
+ ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
+ ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
+ ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
+ ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0))
+ ABORT;
+ }
+ return 0;
+}
+
+/* support for mallopt */
+static int change_mparam(int param_number, int value) {
+ size_t val = (size_t)value;
+ init_mparams();
+ switch(param_number) {
+ case M_TRIM_THRESHOLD:
+ mparams.trim_threshold = val;
+ return 1;
+ case M_GRANULARITY:
+ if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
+ mparams.granularity = val;
+ return 1;
+ }
+ else
+ return 0;
+ case M_MMAP_THRESHOLD:
+ mparams.mmap_threshold = val;
+ return 1;
+ default:
+ return 0;
+ }
+}
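+
+/* Usage sketch (hypothetical caller): mallopt(M_GRANULARITY, 1 << 16)
+ succeeds only if the value is a power of two no smaller than the
+ page size, while M_TRIM_THRESHOLD and M_MMAP_THRESHOLD accept any
+ value, per the checks above. */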
+
+#if DEBUG
+/* ------------------------- Debugging Support --------------------------- */
+
+/* Check properties of any chunk, whether free, inuse, mmapped etc */
+static void do_check_any_chunk(mstate m, mchunkptr p) {
+ assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
+ assert(ok_address(m, p));
+}
+
+/* Check properties of top chunk */
+static void do_check_top_chunk(mstate m, mchunkptr p) {
+ msegmentptr sp = segment_holding(m, (char*)p);
+ size_t sz = chunksize(p);
+ assert(sp != 0);
+ assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
+ assert(ok_address(m, p));
+ assert(sz == m->topsize);
+ assert(sz > 0);
+ assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
+ assert(pinuse(p));
+ assert(!next_pinuse(p));
+}
+
+/* Check properties of (inuse) mmapped chunks */
+static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
+ size_t sz = chunksize(p);
+ size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
+ assert(is_mmapped(p));
+ assert(use_mmap(m));
+ assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
+ assert(ok_address(m, p));
+ assert(!is_small(sz));
+ assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
+ assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
+ assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
+}
+
+/* Check properties of inuse chunks */
+static void do_check_inuse_chunk(mstate m, mchunkptr p) {
+ do_check_any_chunk(m, p);
+ assert(cinuse(p));
+ assert(next_pinuse(p));
+ /* If not pinuse and not mmapped, previous chunk has OK offset */
+ assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
+ if (is_mmapped(p))
+ do_check_mmapped_chunk(m, p);
+}
+
+/* Check properties of free chunks */
+static void do_check_free_chunk(mstate m, mchunkptr p) {
+ size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
+ mchunkptr next = chunk_plus_offset(p, sz);
+ do_check_any_chunk(m, p);
+ assert(!cinuse(p));
+ assert(!next_pinuse(p));
+ assert (!is_mmapped(p));
+ if (p != m->dv && p != m->top) {
+ if (sz >= MIN_CHUNK_SIZE) {
+ assert((sz & CHUNK_ALIGN_MASK) == 0);
+ assert(is_aligned(chunk2mem(p)));
+ assert(next->prev_foot == sz);
+ assert(pinuse(p));
+ assert (next == m->top || cinuse(next));
+ assert(p->fd->bk == p);
+ assert(p->bk->fd == p);
+ }
+ else /* markers are always of size SIZE_T_SIZE */
+ assert(sz == SIZE_T_SIZE);
+ }
+}
+
+/* Check properties of malloced chunks at the point they are malloced */
+static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
+ if (mem != 0) {
+ mchunkptr p = mem2chunk(mem);
+ size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
+ do_check_inuse_chunk(m, p);
+ assert((sz & CHUNK_ALIGN_MASK) == 0);
+ assert(sz >= MIN_CHUNK_SIZE);
+ assert(sz >= s);
+ /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
+ assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
+ }
+}
+
+/* Check a tree and its subtrees. */
+static void do_check_tree(mstate m, tchunkptr t) {
+ tchunkptr head = 0;
+ tchunkptr u = t;
+ bindex_t tindex = t->index;
+ size_t tsize = chunksize(t);
+ bindex_t idx;
+ compute_tree_index(tsize, idx);
+ assert(tindex == idx);
+ assert(tsize >= MIN_LARGE_SIZE);
+ assert(tsize >= minsize_for_tree_index(idx));
+ assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
+
+ do { /* traverse through chain of same-sized nodes */
+ do_check_any_chunk(m, ((mchunkptr)u));
+ assert(u->index == tindex);
+ assert(chunksize(u) == tsize);
+ assert(!cinuse(u));
+ assert(!next_pinuse(u));
+ assert(u->fd->bk == u);
+ assert(u->bk->fd == u);
+ if (u->parent == 0) {
+ assert(u->child[0] == 0);
+ assert(u->child[1] == 0);
+ }
+ else {
+ assert(head == 0); /* only one node on chain has parent */
+ head = u;
+ assert(u->parent != u);
+ assert (u->parent->child[0] == u ||
+ u->parent->child[1] == u ||
+ *((tbinptr*)(u->parent)) == u);
+ if (u->child[0] != 0) {
+ assert(u->child[0]->parent == u);
+ assert(u->child[0] != u);
+ do_check_tree(m, u->child[0]);
+ }
+ if (u->child[1] != 0) {
+ assert(u->child[1]->parent == u);
+ assert(u->child[1] != u);
+ do_check_tree(m, u->child[1]);
+ }
+ if (u->child[0] != 0 && u->child[1] != 0) {
+ assert(chunksize(u->child[0]) < chunksize(u->child[1]));
+ }
+ }
+ u = u->fd;
+ } while (u != t);
+ assert(head != 0);
+}
+
+/* Check all the chunks in a treebin. */
+static void do_check_treebin(mstate m, bindex_t i) {
+ tbinptr* tb = treebin_at(m, i);
+ tchunkptr t = *tb;
+ int empty = (m->treemap & (1U << i)) == 0;
+ if (t == 0)
+ assert(empty);
+ if (!empty)
+ do_check_tree(m, t);
+}
+
+/* Check all the chunks in a smallbin. */
+static void do_check_smallbin(mstate m, bindex_t i) {
+ sbinptr b = smallbin_at(m, i);
+ mchunkptr p = b->bk;
+ unsigned int empty = (m->smallmap & (1U << i)) == 0;
+ if (p == b)
+ assert(empty);
+ if (!empty) {
+ for (; p != b; p = p->bk) {
+ size_t size = chunksize(p);
+ mchunkptr q;
+ /* each chunk claims to be free */
+ do_check_free_chunk(m, p);
+ /* chunk belongs in bin */
+ assert(small_index(size) == i);
+ assert(p->bk == b || chunksize(p->bk) == chunksize(p));
+ /* chunk is followed by an inuse chunk */
+ q = next_chunk(p);
+ if (q->head != FENCEPOST_HEAD)
+ do_check_inuse_chunk(m, q);
+ }
+ }
+}
+
+/* Find x in a bin. Used in other check functions. */
+static int bin_find(mstate m, mchunkptr x) {
+ size_t size = chunksize(x);
+ if (is_small(size)) {
+ bindex_t sidx = small_index(size);
+ sbinptr b = smallbin_at(m, sidx);
+ if (smallmap_is_marked(m, sidx)) {
+ mchunkptr p = b;
+ do {
+ if (p == x)
+ return 1;
+ } while ((p = p->fd) != b);
+ }
+ }
+ else {
+ bindex_t tidx;
+ compute_tree_index(size, tidx);
+ if (treemap_is_marked(m, tidx)) {
+ tchunkptr t = *treebin_at(m, tidx);
+ size_t sizebits = size << leftshift_for_tree_index(tidx);
+ while (t != 0 && chunksize(t) != size) {
+ t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
+ sizebits <<= 1;
+ }
+ if (t != 0) {
+ tchunkptr u = t;
+ do {
+ if (u == (tchunkptr)x)
+ return 1;
+ } while ((u = u->fd) != t);
+ }
+ }
+ }
+ return 0;
+}
+
+/* Traverse each chunk and check it; return total */
+static size_t traverse_and_check(mstate m) {
+ size_t sum = 0;
+ if (is_initialized(m)) {
+ msegmentptr s = &m->seg;
+ sum += m->topsize + TOP_FOOT_SIZE;
+ while (s != 0) {
+ mchunkptr q = align_as_chunk(s->base);
+ mchunkptr lastq = 0;
+ assert(pinuse(q));
+ while (segment_holds(s, q) &&
+ q != m->top && q->head != FENCEPOST_HEAD) {
+ sum += chunksize(q);
+ if (cinuse(q)) {
+ assert(!bin_find(m, q));
+ do_check_inuse_chunk(m, q);
+ }
+ else {
+ assert(q == m->dv || bin_find(m, q));
+ assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
+ do_check_free_chunk(m, q);
+ }
+ lastq = q;
+ q = next_chunk(q);
+ }
+ s = s->next;
+ }
+ }
+ return sum;
+}
+
+/* Check all properties of malloc_state. */
+static void do_check_malloc_state(mstate m) {
+ bindex_t i;
+ size_t total;
+ /* check bins */
+ for (i = 0; i < NSMALLBINS; ++i)
+ do_check_smallbin(m, i);
+ for (i = 0; i < NTREEBINS; ++i)
+ do_check_treebin(m, i);
+
+ if (m->dvsize != 0) { /* check dv chunk */
+ do_check_any_chunk(m, m->dv);
+ assert(m->dvsize == chunksize(m->dv));
+ assert(m->dvsize >= MIN_CHUNK_SIZE);
+ assert(bin_find(m, m->dv) == 0);
+ }
+
+ if (m->top != 0) { /* check top chunk */
+ do_check_top_chunk(m, m->top);
+ assert(m->topsize == chunksize(m->top));
+ assert(m->topsize > 0);
+ assert(bin_find(m, m->top) == 0);
+ }
+
+ total = traverse_and_check(m);
+ assert(total <= m->footprint);
+ assert(m->footprint <= m->max_footprint);
+}
+#endif /* DEBUG */
+
+/* ----------------------------- statistics ------------------------------ */
+
+#if !NO_MALLINFO
+static struct mallinfo internal_mallinfo(mstate m) {
+ struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ if (!PREACTION(m)) {
+ check_malloc_state(m);
+ if (is_initialized(m)) {
+ size_t nfree = SIZE_T_ONE; /* top always free */
+ size_t mfree = m->topsize + TOP_FOOT_SIZE;
+ size_t sum = mfree;
+ msegmentptr s = &m->seg;
+ while (s != 0) {
+ mchunkptr q = align_as_chunk(s->base);
+ while (segment_holds(s, q) &&
+ q != m->top && q->head != FENCEPOST_HEAD) {
+ size_t sz = chunksize(q);
+ sum += sz;
+ if (!cinuse(q)) {
+ mfree += sz;
+ ++nfree;
+ }
+ q = next_chunk(q);
+ }
+ s = s->next;
+ }
+
+ nm.arena = sum;
+ nm.ordblks = nfree;
+ nm.hblkhd = m->footprint - sum;
+ nm.usmblks = m->max_footprint;
+ nm.uordblks = m->footprint - mfree;
+ nm.fordblks = mfree;
+ nm.keepcost = m->topsize;
+ }
+
+ POSTACTION(m);
+ }
+ return nm;
+}
+#endif /* !NO_MALLINFO */
+
+static void internal_malloc_stats(mstate m) {
+ if (!PREACTION(m)) {
+ size_t maxfp = 0;
+ size_t fp = 0;
+ size_t used = 0;
+ check_malloc_state(m);
+ if (is_initialized(m)) {
+ msegmentptr s = &m->seg;
+ maxfp = m->max_footprint;
+ fp = m->footprint;
+ used = fp - (m->topsize + TOP_FOOT_SIZE);
+
+ while (s != 0) {
+ mchunkptr q = align_as_chunk(s->base);
+ while (segment_holds(s, q) &&
+ q != m->top && q->head != FENCEPOST_HEAD) {
+ if (!cinuse(q))
+ used -= chunksize(q);
+ q = next_chunk(q);
+ }
+ s = s->next;
+ }
+ }
+
+ fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
+ fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));
+ fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));
+
+ POSTACTION(m);
+ }
+}
+
+/* ----------------------- Operations on smallbins ----------------------- */
+
+/*
+ Various forms of linking and unlinking are defined as macros. Even
+ the ones for trees, which are very long but have very short typical
+ paths. This is ugly but reduces reliance on inlining support of
+ compilers.
+*/
+
+/* Link a free chunk into a smallbin */
+#define insert_small_chunk(M, P, S) {\
+ bindex_t I = small_index(S);\
+ mchunkptr B = smallbin_at(M, I);\
+ mchunkptr F = B;\
+ assert(S >= MIN_CHUNK_SIZE);\
+ if (!smallmap_is_marked(M, I))\
+ mark_smallmap(M, I);\
+ else if (RTCHECK(ok_address(M, B->fd)))\
+ F = B->fd;\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ B->fd = P;\
+ F->bk = P;\
+ P->fd = F;\
+ P->bk = B;\
+}
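+
+/* Exposition only: after insert_small_chunk the bin list is circular
+ with the new chunk at the front: B->fd == P, P->fd == F, F->bk == P
+ and P->bk == B; when the bin was previously empty, F == B, so the
+ chunk links to the bin header on both sides. */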
+
+/* Unlink a chunk from a smallbin */
+#define unlink_small_chunk(M, P, S) {\
+ mchunkptr F = P->fd;\
+ mchunkptr B = P->bk;\
+ bindex_t I = small_index(S);\
+ assert(P != B);\
+ assert(P != F);\
+ assert(chunksize(P) == small_index2size(I));\
+ if (F == B)\
+ clear_smallmap(M, I);\
+ else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
+ (B == smallbin_at(M,I) || ok_address(M, B)))) {\
+ F->bk = B;\
+ B->fd = F;\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+}
+
+/* Unlink the first chunk from a smallbin */
+#define unlink_first_small_chunk(M, B, P, I) {\
+ mchunkptr F = P->fd;\
+ assert(P != B);\
+ assert(P != F);\
+ assert(chunksize(P) == small_index2size(I));\
+ if (B == F)\
+ clear_smallmap(M, I);\
+ else if (RTCHECK(ok_address(M, F))) {\
+ B->fd = F;\
+ F->bk = B;\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+}
+
+/* Replace dv node, binning the old one */
+/* Used only when dvsize known to be small */
+#define replace_dv(M, P, S) {\
+ size_t DVS = M->dvsize;\
+ if (DVS != 0) {\
+ mchunkptr DV = M->dv;\
+ assert(is_small(DVS));\
+ insert_small_chunk(M, DV, DVS);\
+ }\
+ M->dvsize = S;\
+ M->dv = P;\
+}
+
+/* ------------------------- Operations on trees ------------------------- */
+
+/* Insert chunk into tree */
+#define insert_large_chunk(M, X, S) {\
+ tbinptr* H;\
+ bindex_t I;\
+ compute_tree_index(S, I);\
+ H = treebin_at(M, I);\
+ X->index = I;\
+ X->child[0] = X->child[1] = 0;\
+ if (!treemap_is_marked(M, I)) {\
+ mark_treemap(M, I);\
+ *H = X;\
+ X->parent = (tchunkptr)H;\
+ X->fd = X->bk = X;\
+ }\
+ else {\
+ tchunkptr T = *H;\
+ size_t K = S << leftshift_for_tree_index(I);\
+ for (;;) {\
+ if (chunksize(T) != S) {\
+ tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
+ K <<= 1;\
+ if (*C != 0)\
+ T = *C;\
+ else if (RTCHECK(ok_address(M, C))) {\
+ *C = X;\
+ X->parent = T;\
+ X->fd = X->bk = X;\
+ break;\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ break;\
+ }\
+ }\
+ else {\
+ tchunkptr F = T->fd;\
+ if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
+ T->fd = F->bk = X;\
+ X->fd = F;\
+ X->bk = T;\
+ X->parent = 0;\
+ break;\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ break;\
+ }\
+ }\
+ }\
+ }\
+}
+
+/*
+ Unlink steps:
+
+ 1. If x is a chained node, unlink it from its same-sized fd/bk links
+ and choose its bk node as its replacement.
+ 2. If x was the last node of its size, but not a leaf node, it must
+ be replaced with a leaf node (not merely one with an open left or
+ right), to make sure that lefts and rights of descendants
+ correspond properly to bit masks. We use the rightmost descendant
+ of x. We could use any other leaf, but this is easy to locate and
+ tends to counteract removal of leftmosts elsewhere, and so keeps
+ paths shorter than minimally guaranteed. This doesn't loop much
+ because on average a node in a tree is near the bottom.
+ 3. If x is the base of a chain (i.e., has parent links) relink
+ x's parent and children to x's replacement (or null if none).
+*/
+
+#define unlink_large_chunk(M, X) {\
+ tchunkptr XP = X->parent;\
+ tchunkptr R;\
+ if (X->bk != X) {\
+ tchunkptr F = X->fd;\
+ R = X->bk;\
+ if (RTCHECK(ok_address(M, F))) {\
+ F->bk = R;\
+ R->fd = F;\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+ else {\
+ tchunkptr* RP;\
+ if (((R = *(RP = &(X->child[1]))) != 0) ||\
+ ((R = *(RP = &(X->child[0]))) != 0)) {\
+ tchunkptr* CP;\
+ while ((*(CP = &(R->child[1])) != 0) ||\
+ (*(CP = &(R->child[0])) != 0)) {\
+ R = *(RP = CP);\
+ }\
+ if (RTCHECK(ok_address(M, RP)))\
+ *RP = 0;\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+ }\
+ if (XP != 0) {\
+ tbinptr* H = treebin_at(M, X->index);\
+ if (X == *H) {\
+ if ((*H = R) == 0) \
+ clear_treemap(M, X->index);\
+ }\
+ else if (RTCHECK(ok_address(M, XP))) {\
+ if (XP->child[0] == X) \
+ XP->child[0] = R;\
+ else \
+ XP->child[1] = R;\
+ }\
+ else\
+ CORRUPTION_ERROR_ACTION(M);\
+ if (R != 0) {\
+ if (RTCHECK(ok_address(M, R))) {\
+ tchunkptr C0, C1;\
+ R->parent = XP;\
+ if ((C0 = X->child[0]) != 0) {\
+ if (RTCHECK(ok_address(M, C0))) {\
+ R->child[0] = C0;\
+ C0->parent = R;\
+ }\
+ else\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ if ((C1 = X->child[1]) != 0) {\
+ if (RTCHECK(ok_address(M, C1))) {\
+ R->child[1] = C1;\
+ C1->parent = R;\
+ }\
+ else\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+ else\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+}
+
+/* Relays to large vs small bin operations */
+
+#define insert_chunk(M, P, S)\
+ if (is_small(S)) insert_small_chunk(M, P, S)\
+ else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
+
+#define unlink_chunk(M, P, S)\
+ if (is_small(S)) unlink_small_chunk(M, P, S)\
+ else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
+
+
+/* Relays to internal calls to malloc/free from realloc, memalign etc */
+
+#if ONLY_MSPACES
+#define internal_malloc(m, b) mspace_malloc(m, b)
+#define internal_free(m, mem) mspace_free(m,mem);
+#else /* ONLY_MSPACES */
+#if MSPACES
+#define internal_malloc(m, b)\
+ (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
+#define internal_free(m, mem)\
+ if (m == gm) dlfree(mem); else mspace_free(m,mem);
+#else /* MSPACES */
+#define internal_malloc(m, b) dlmalloc(b)
+#define internal_free(m, mem) dlfree(mem)
+#endif /* MSPACES */
+#endif /* ONLY_MSPACES */
+
+/* ----------------------- Direct-mmapping chunks ----------------------- */
+
+/*
+ Directly mmapped chunks are set up with an offset to the start of
+ the mmapped region stored in the prev_foot field of the chunk. This
+ allows reconstruction of the required argument to MUNMAP when freed,
+ and also allows adjustment of the returned chunk to meet alignment
+ requirements (especially in memalign). There is also enough space
+ allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
+ the PINUSE bit so frees can be checked.
+*/
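+
+/* Exposition only: given a directly mmapped chunk p, the original
+ mapping is reconstructed as
+ base = (char*)p - (p->prev_foot & ~IS_MMAPPED_BIT);
+ len = chunksize(p) + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD;
+ which is exactly what mmap_resize below computes before calling
+ CALL_MREMAP. */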
+
+/* Malloc using mmap */
+static void* mmap_alloc(mstate m, size_t nb) {
+ size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
+ if (mmsize > nb) { /* Check for wrap around 0 */
+ char* mm = (char*)(DIRECT_MMAP(mmsize));
+ if (mm != CMFAIL) {
+ size_t offset = align_offset(chunk2mem(mm));
+ size_t psize = mmsize - offset - MMAP_FOOT_PAD;
+ mchunkptr p = (mchunkptr)(mm + offset);
+ p->prev_foot = offset | IS_MMAPPED_BIT;
+ (p)->head = (psize|CINUSE_BIT);
+ mark_inuse_foot(m, p, psize);
+ chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
+ chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
+
+ if (mm < m->least_addr)
+ m->least_addr = mm;
+ if ((m->footprint += mmsize) > m->max_footprint)
+ m->max_footprint = m->footprint;
+ assert(is_aligned(chunk2mem(p)));
+ check_mmapped_chunk(m, p);
+ return chunk2mem(p);
+ }
+ }
+ return 0;
+}
+
+/* Realloc using mmap */
+static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
+ size_t oldsize = chunksize(oldp);
+ if (is_small(nb)) /* Can't shrink mmap regions below small size */
+ return 0;
+ /* Keep old chunk if big enough but not too big */
+ if (oldsize >= nb + SIZE_T_SIZE &&
+ (oldsize - nb) <= (mparams.granularity << 1))
+ return oldp;
+ else {
+ size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
+ size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
+ size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
+ CHUNK_ALIGN_MASK);
+ char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
+ oldmmsize, newmmsize, 1);
+ if (cp != CMFAIL) {
+ mchunkptr newp = (mchunkptr)(cp + offset);
+ size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
+ newp->head = (psize|CINUSE_BIT);
+ mark_inuse_foot(m, newp, psize);
+ chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
+ chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
+
+ if (cp < m->least_addr)
+ m->least_addr = cp;
+ if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
+ m->max_footprint = m->footprint;
+ check_mmapped_chunk(m, newp);
+ return newp;
+ }
+ }
+ return 0;
+}
+
+/* -------------------------- mspace management -------------------------- */
+
+/* Initialize top chunk and its size */
+static void init_top(mstate m, mchunkptr p, size_t psize) {
+ /* Ensure alignment */
+ size_t offset = align_offset(chunk2mem(p));
+ p = (mchunkptr)((char*)p + offset);
+ psize -= offset;
+
+ m->top = p;
+ m->topsize = psize;
+ p->head = psize | PINUSE_BIT;
+ /* set size of fake trailing chunk holding overhead space only once */
+ chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
+ m->trim_check = mparams.trim_threshold; /* reset on each update */
+}
+
+/* Initialize bins for a new mstate that is otherwise zeroed out */
+static void init_bins(mstate m) {
+ /* Establish circular links for smallbins */
+ bindex_t i;
+ for (i = 0; i < NSMALLBINS; ++i) {
+ sbinptr bin = smallbin_at(m,i);
+ bin->fd = bin->bk = bin;
+ }
+}
+
+#if PROCEED_ON_ERROR
+
+/* default corruption action */
+static void reset_on_error(mstate m) {
+ int i;
+ ++malloc_corruption_error_count;
+ /* Reinitialize fields to forget about all memory */
+ m->smallmap = m->treemap = 0; /* clear bin maps; the bins themselves are rebuilt below */
+ m->dvsize = m->topsize = 0;
+ m->seg.base = 0;
+ m->seg.size = 0;
+ m->seg.next = 0;
+ m->top = m->dv = 0;
+ for (i = 0; i < NTREEBINS; ++i)
+ *treebin_at(m, i) = 0;
+ init_bins(m);
+}
+#endif /* PROCEED_ON_ERROR */
+
+/* Allocate a chunk from newbase, consolidating the remainder with the first chunk of oldbase. */
+static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
+ size_t nb) {
+ mchunkptr p = align_as_chunk(newbase);
+ mchunkptr oldfirst = align_as_chunk(oldbase);
+ size_t psize = (char*)oldfirst - (char*)p;
+ mchunkptr q = chunk_plus_offset(p, nb);
+ size_t qsize = psize - nb;
+ set_size_and_pinuse_of_inuse_chunk(m, p, nb);
+
+ assert((char*)oldfirst > (char*)q);
+ assert(pinuse(oldfirst));
+ assert(qsize >= MIN_CHUNK_SIZE);
+
+ /* consolidate remainder with first chunk of old base */
+ if (oldfirst == m->top) {
+ size_t tsize = m->topsize += qsize;
+ m->top = q;
+ q->head = tsize | PINUSE_BIT;
+ check_top_chunk(m, q);
+ }
+ else if (oldfirst == m->dv) {
+ size_t dsize = m->dvsize += qsize;
+ m->dv = q;
+ set_size_and_pinuse_of_free_chunk(q, dsize);
+ }
+ else {
+ if (!cinuse(oldfirst)) {
+ size_t nsize = chunksize(oldfirst);
+ unlink_chunk(m, oldfirst, nsize);
+ oldfirst = chunk_plus_offset(oldfirst, nsize);
+ qsize += nsize;
+ }
+ set_free_with_pinuse(q, qsize, oldfirst);
+ insert_chunk(m, q, qsize);
+ check_free_chunk(m, q);
+ }
+
+ check_malloced_chunk(m, chunk2mem(p), nb);
+ return chunk2mem(p);
+}
+
+
+/* Add a segment to hold a new noncontiguous region */
+static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
+ /* Determine locations and sizes of segment, fenceposts, old top */
+ char* old_top = (char*)m->top;
+ msegmentptr oldsp = segment_holding(m, old_top);
+ char* old_end = oldsp->base + oldsp->size;
+ size_t ssize = pad_request(sizeof(struct malloc_segment));
+ char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
+ size_t offset = align_offset(chunk2mem(rawsp));
+ char* asp = rawsp + offset;
+ char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
+ mchunkptr sp = (mchunkptr)csp;
+ msegmentptr ss = (msegmentptr)(chunk2mem(sp));
+ mchunkptr tnext = chunk_plus_offset(sp, ssize);
+ mchunkptr p = tnext;
+ int nfences = 0;
+
+ /* reset top to new space */
+ init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
+
+ /* Set up segment record */
+ assert(is_aligned(ss));
+ set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
+ *ss = m->seg; /* Push current record */
+ m->seg.base = tbase;
+ m->seg.size = tsize;
+ (void)set_segment_flags(&m->seg, mmapped);
+ m->seg.next = ss;
+
+ /* Insert trailing fenceposts */
+ for (;;) {
+ mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
+ p->head = FENCEPOST_HEAD;
+ ++nfences;
+ if ((char*)(&(nextp->head)) < old_end)
+ p = nextp;
+ else
+ break;
+ }
+ assert(nfences >= 2);
+
+ /* Insert the rest of old top into a bin as an ordinary free chunk */
+ if (csp != old_top) {
+ mchunkptr q = (mchunkptr)old_top;
+ size_t psize = csp - old_top;
+ mchunkptr tn = chunk_plus_offset(q, psize);
+ set_free_with_pinuse(q, psize, tn);
+ insert_chunk(m, q, psize);
+ }
+
+ check_top_chunk(m, m->top);
+}
+
+/* -------------------------- System allocation -------------------------- */
+
+/* Get memory from system using MORECORE or MMAP */
+static void* sys_alloc(mstate m, size_t nb) {
+ char* tbase = CMFAIL;
+ size_t tsize = 0;
+ flag_t mmap_flag = 0;
+
+ init_mparams();
+
+ /* Directly map large chunks */
+ if (use_mmap(m) && nb >= mparams.mmap_threshold) {
+ void* mem = mmap_alloc(m, nb);
+ if (mem != 0)
+ return mem;
+ }
+
+ /*
+ Try getting memory in any of three ways (in most-preferred to
+ least-preferred order):
+ 1. A call to MORECORE that can normally contiguously extend memory.
+ (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE,
+ or main space is mmapped or a previous contiguous call failed)
+ 2. A call to MMAP new space (disabled if not HAVE_MMAP).
+ Note that under the default settings, if MORECORE is unable to
+ fulfill a request, and HAVE_MMAP is true, then mmap is
+ used as a noncontiguous system allocator. This is a useful backup
+ strategy for systems with holes in address spaces -- in this case
+ sbrk cannot contiguously expand the heap, but mmap may be able to
+ find space.
+ 3. A call to MORECORE that cannot usually contiguously extend memory.
+ (disabled if not HAVE_MORECORE)
+ */
+
+ if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
+ char* br = CMFAIL;
+ msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
+ size_t asize = 0;
+ ACQUIRE_MORECORE_LOCK();
+
+ if (ss == 0) { /* First time through or recovery */
+ char* base = (char*)CALL_MORECORE(0);
+ if (base != CMFAIL) {
+ asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
+ /* Adjust to end on a page boundary */
+ if (!is_page_aligned(base))
+ asize += (page_align((size_t)base) - (size_t)base);
+ /* Can't call MORECORE if size is negative when treated as signed */
+ if (asize < HALF_MAX_SIZE_T &&
+ (br = (char*)(CALL_MORECORE(asize))) == base) {
+ tbase = base;
+ tsize = asize;
+ }
+ }
+ }
+ else {
+ /* Subtract out existing available top space from MORECORE request. */
+ asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
+ /* Use mem here only if it did contiguously extend old space */
+ if (asize < HALF_MAX_SIZE_T &&
+ (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
+ tbase = br;
+ tsize = asize;
+ }
+ }
+
+ if (tbase == CMFAIL) { /* Cope with partial failure */
+ if (br != CMFAIL) { /* Try to use/extend the space we did get */
+ if (asize < HALF_MAX_SIZE_T &&
+ asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
+ size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
+ if (esize < HALF_MAX_SIZE_T) {
+ char* end = (char*)CALL_MORECORE(esize);
+ if (end != CMFAIL)
+ asize += esize;
+ else { /* Can't use; try to release */
+ (void)CALL_MORECORE(-asize);
+ br = CMFAIL;
+ }
+ }
+ }
+ }
+ if (br != CMFAIL) { /* Use the space we did get */
+ tbase = br;
+ tsize = asize;
+ }
+ else
+ disable_contiguous(m); /* Don't try contiguous path in the future */
+ }
+
+ RELEASE_MORECORE_LOCK();
+ }
+
+ if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
+ size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
+ size_t rsize = granularity_align(req);
+ if (rsize > nb) { /* Fail if wraps around zero */
+ char* mp = (char*)(CALL_MMAP(rsize));
+ if (mp != CMFAIL) {
+ tbase = mp;
+ tsize = rsize;
+ mmap_flag = IS_MMAPPED_BIT;
+ }
+ }
+ }
+
+ if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
+ size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
+ if (asize < HALF_MAX_SIZE_T) {
+ char* br = CMFAIL;
+ char* end = CMFAIL;
+ ACQUIRE_MORECORE_LOCK();
+ br = (char*)(CALL_MORECORE(asize));
+ end = (char*)(CALL_MORECORE(0));
+ RELEASE_MORECORE_LOCK();
+ if (br != CMFAIL && end != CMFAIL && br < end) {
+ size_t ssize = end - br;
+ if (ssize > nb + TOP_FOOT_SIZE) {
+ tbase = br;
+ tsize = ssize;
+ }
+ }
+ }
+ }
+
+ if (tbase != CMFAIL) {
+
+ if ((m->footprint += tsize) > m->max_footprint)
+ m->max_footprint = m->footprint;
+
+ if (!is_initialized(m)) { /* first-time initialization */
+ m->seg.base = m->least_addr = tbase;
+ m->seg.size = tsize;
+ (void)set_segment_flags(&m->seg, mmap_flag);
+ m->magic = mparams.magic;
+ init_bins(m);
+ if (is_global(m))
+ init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
+ else {
+ /* Offset top by embedded malloc_state */
+ mchunkptr mn = next_chunk(mem2chunk(m));
+ init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
+ }
+ }
+
+ else {
+ /* Try to merge with an existing segment */
+ msegmentptr sp = &m->seg;
+ while (sp != 0 && tbase != sp->base + sp->size)
+ sp = sp->next;
+ if (sp != 0 &&
+ !is_extern_segment(sp) &&
+ check_segment_merge(sp, tbase, tsize) &&
+ (get_segment_flags(sp) & IS_MMAPPED_BIT) == mmap_flag &&
+ segment_holds(sp, m->top)) { /* append */
+ sp->size += tsize;
+ init_top(m, m->top, m->topsize + tsize);
+ }
+ else {
+ if (tbase < m->least_addr)
+ m->least_addr = tbase;
+ sp = &m->seg;
+ while (sp != 0 && sp->base != tbase + tsize)
+ sp = sp->next;
+ if (sp != 0 &&
+ !is_extern_segment(sp) &&
+ check_segment_merge(sp, tbase, tsize) &&
+ (get_segment_flags(sp) & IS_MMAPPED_BIT) == mmap_flag) {
+ char* oldbase = sp->base;
+ sp->base = tbase;
+ sp->size += tsize;
+ return prepend_alloc(m, tbase, oldbase, nb);
+ }
+ else
+ add_segment(m, tbase, tsize, mmap_flag);
+ }
+ }
+
+ if (nb < m->topsize) { /* Allocate from new or extended top space */
+ size_t rsize = m->topsize -= nb;
+ mchunkptr p = m->top;
+ mchunkptr r = m->top = chunk_plus_offset(p, nb);
+ r->head = rsize | PINUSE_BIT;
+ set_size_and_pinuse_of_inuse_chunk(m, p, nb);
+ check_top_chunk(m, m->top);
+ check_malloced_chunk(m, chunk2mem(p), nb);
+ return chunk2mem(p);
+ }
+ }
+
+ MALLOC_FAILURE_ACTION;
+ return 0;
+}
+
+/* ----------------------- system deallocation -------------------------- */
+
+/* Unmap and unlink any mmapped segments that don't contain used chunks */
+static size_t release_unused_segments(mstate m) {
+ size_t released = 0;
+ msegmentptr pred = &m->seg;
+ msegmentptr sp = pred->next;
+ while (sp != 0) {
+ char* base = sp->base;
+ size_t size = sp->size;
+ msegmentptr next = sp->next;
+ if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
+ mchunkptr p = align_as_chunk(base);
+ size_t psize = chunksize(p);
+ /* Can unmap if first chunk holds entire segment and not pinned */
+ if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
+ tchunkptr tp = (tchunkptr)p;
+ assert(segment_holds(sp, (char*)sp));
+ if (p == m->dv) {
+ m->dv = 0;
+ m->dvsize = 0;
+ }
+ else {
+ unlink_large_chunk(m, tp);
+ }
+ if (CALL_MUNMAP(base, size) == 0) {
+ released += size;
+ m->footprint -= size;
+ /* unlink obsoleted record */
+ sp = pred;
+ sp->next = next;
+ }
+ else { /* back out if cannot unmap */
+ insert_large_chunk(m, tp, psize);
+ }
+ }
+ }
+ pred = sp;
+ sp = next;
+ }
+ return released;
+}
+
+static int sys_trim(mstate m, size_t pad) {
+ size_t released = 0;
+ if (pad < MAX_REQUEST && is_initialized(m)) {
+ pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
+
+ if (m->topsize > pad) {
+ /* Shrink top space in granularity-size units, keeping at least one */
+ size_t unit = mparams.granularity;
+ size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
+ SIZE_T_ONE) * unit;
+ msegmentptr sp = segment_holding(m, (char*)m->top);
+
+ if (!is_extern_segment(sp)) {
+ if (is_mmapped_segment(sp)) {
+ if (HAVE_MMAP &&
+ sp->size >= extra &&
+ !has_segment_link(m, sp)) { /* can't shrink if pinned */
+ size_t newsize = sp->size - extra;
+ /* Prefer mremap, fall back to munmap */
+ if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
+ (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
+ released = extra;
+ }
+ }
+ }
+ else if (HAVE_MORECORE) {
+ if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
+ extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
+ ACQUIRE_MORECORE_LOCK();
+ {
+ /* Make sure end of memory is where we last set it. */
+ char* old_br = (char*)(CALL_MORECORE(0));
+ if (old_br == sp->base + sp->size) {
+ char* rel_br = (char*)(CALL_MORECORE(-extra));
+ char* new_br = (char*)(CALL_MORECORE(0));
+ if (rel_br != CMFAIL && new_br < old_br)
+ released = old_br - new_br;
+ }
+ }
+ RELEASE_MORECORE_LOCK();
+ }
+ }
+
+ if (released != 0) {
+ sp->size -= released;
+ m->footprint -= released;
+ init_top(m, m->top, m->topsize - released);
+ check_top_chunk(m, m->top);
+ }
+ }
+
+ /* Unmap any unused mmapped segments */
+ if (HAVE_MMAP)
+ released += release_unused_segments(m);
+
+ /* On failure, disable autotrim to avoid repeated failed future calls */
+ if (released == 0)
+ m->trim_check = MAX_SIZE_T;
+ }
+
+ return (released != 0)? 1 : 0;
+}
+
+/* ---------------------------- malloc support --------------------------- */
+
+/* allocate a large request from the best fitting chunk in a treebin */
+static void* tmalloc_large(mstate m, size_t nb) {
+ tchunkptr v = 0;
+ size_t rsize = -nb; /* Unsigned negation */
+ tchunkptr t;
+ bindex_t idx;
+ compute_tree_index(nb, idx);
+
+ if ((t = *treebin_at(m, idx)) != 0) {
+ /* Traverse tree for this bin looking for node with size == nb */
+ size_t sizebits = nb << leftshift_for_tree_index(idx);
+ tchunkptr rst = 0; /* The deepest untaken right subtree */
+ for (;;) {
+ tchunkptr rt;
+ size_t trem = chunksize(t) - nb;
+ if (trem < rsize) {
+ v = t;
+ if ((rsize = trem) == 0)
+ break;
+ }
+ rt = t->child[1];
+ t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
+ if (rt != 0 && rt != t)
+ rst = rt;
+ if (t == 0) {
+ t = rst; /* set t to least subtree holding sizes > nb */
+ break;
+ }
+ sizebits <<= 1;
+ }
+ }
+
+ if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
+ binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
+ if (leftbits != 0) {
+ bindex_t i;
+ binmap_t leastbit = least_bit(leftbits);
+ compute_bit2idx(leastbit, i);
+ t = *treebin_at(m, i);
+ }
+ }
+
+ while (t != 0) { /* find smallest of tree or subtree */
+ size_t trem = chunksize(t) - nb;
+ if (trem < rsize) {
+ rsize = trem;
+ v = t;
+ }
+ t = leftmost_child(t);
+ }
+
+ /* If dv is a better fit, return 0 so malloc will use it */
+ if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
+ if (RTCHECK(ok_address(m, v))) { /* split */
+ mchunkptr r = chunk_plus_offset(v, nb);
+ assert(chunksize(v) == rsize + nb);
+ if (RTCHECK(ok_next(v, r))) {
+ unlink_large_chunk(m, v);
+ if (rsize < MIN_CHUNK_SIZE)
+ set_inuse_and_pinuse(m, v, (rsize + nb));
+ else {
+ set_size_and_pinuse_of_inuse_chunk(m, v, nb);
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ insert_chunk(m, r, rsize);
+ }
+ return chunk2mem(v);
+ }
+ }
+ CORRUPTION_ERROR_ACTION(m);
+ }
+ return 0;
+}
+
+/* allocate a small request from the best fitting chunk in a treebin */
+static void* tmalloc_small(mstate m, size_t nb) {
+ tchunkptr t, v;
+ size_t rsize;
+ bindex_t i;
+ binmap_t leastbit = least_bit(m->treemap);
+ compute_bit2idx(leastbit, i);
+
+ v = t = *treebin_at(m, i);
+ rsize = chunksize(t) - nb;
+
+ while ((t = leftmost_child(t)) != 0) {
+ size_t trem = chunksize(t) - nb;
+ if (trem < rsize) {
+ rsize = trem;
+ v = t;
+ }
+ }
+
+ if (RTCHECK(ok_address(m, v))) {
+ mchunkptr r = chunk_plus_offset(v, nb);
+ assert(chunksize(v) == rsize + nb);
+ if (RTCHECK(ok_next(v, r))) {
+ unlink_large_chunk(m, v);
+ if (rsize < MIN_CHUNK_SIZE)
+ set_inuse_and_pinuse(m, v, (rsize + nb));
+ else {
+ set_size_and_pinuse_of_inuse_chunk(m, v, nb);
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ replace_dv(m, r, rsize);
+ }
+ return chunk2mem(v);
+ }
+ }
+
+ CORRUPTION_ERROR_ACTION(m);
+ return 0;
+}
+
+/* --------------------------- realloc support --------------------------- */
+
+static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
+ if (bytes >= MAX_REQUEST) {
+ MALLOC_FAILURE_ACTION;
+ return 0;
+ }
+ if (!PREACTION(m)) {
+ mchunkptr oldp = mem2chunk(oldmem);
+ size_t oldsize = chunksize(oldp);
+ mchunkptr next = chunk_plus_offset(oldp, oldsize);
+ mchunkptr newp = 0;
+ void* extra = 0;
+
+ /* Try to either shrink or extend into top. Else malloc-copy-free */
+
+ if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
+ ok_next(oldp, next) && ok_pinuse(next))) {
+ size_t nb = request2size(bytes);
+ if (is_mmapped(oldp))
+ newp = mmap_resize(m, oldp, nb);
+ else if (oldsize >= nb) { /* already big enough */
+ size_t rsize = oldsize - nb;
+ newp = oldp;
+ if (rsize >= MIN_CHUNK_SIZE) {
+ mchunkptr remainder = chunk_plus_offset(newp, nb);
+ set_inuse(m, newp, nb);
+ set_inuse(m, remainder, rsize);
+ extra = chunk2mem(remainder);
+ }
+ }
+ else if (next == m->top && oldsize + m->topsize > nb) {
+ /* Expand into top */
+ size_t newsize = oldsize + m->topsize;
+ size_t newtopsize = newsize - nb;
+ mchunkptr newtop = chunk_plus_offset(oldp, nb);
+ set_inuse(m, oldp, nb);
+ newtop->head = newtopsize |PINUSE_BIT;
+ m->top = newtop;
+ m->topsize = newtopsize;
+ newp = oldp;
+ }
+ }
+ else {
+ USAGE_ERROR_ACTION(m, oldmem);
+ POSTACTION(m);
+ return 0;
+ }
+
+ POSTACTION(m);
+
+ if (newp != 0) {
+ if (extra != 0) {
+ internal_free(m, extra);
+ }
+ check_inuse_chunk(m, newp);
+ return chunk2mem(newp);
+ }
+ else {
+ void* newmem = internal_malloc(m, bytes);
+ if (newmem != 0) {
+ size_t oc = oldsize - overhead_for(oldp);
+ memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
+ internal_free(m, oldmem);
+ }
+ return newmem;
+ }
+ }
+ return 0;
+}
+
+/* --------------------------- memalign support -------------------------- */
+
+static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
+ if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */
+ return internal_malloc(m, bytes);
+ if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
+ alignment = MIN_CHUNK_SIZE;
+ if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
+ size_t a = MALLOC_ALIGNMENT << 1;
+ while (a < alignment) a <<= 1;
+ alignment = a;
+ }
+
+ if (bytes >= MAX_REQUEST - alignment) {
+ if (m != 0) { /* Test isn't needed but avoids compiler warning */
+ MALLOC_FAILURE_ACTION;
+ }
+ }
+ else {
+ size_t nb = request2size(bytes);
+ size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
+ char* mem = (char*)internal_malloc(m, req);
+ if (mem != 0) {
+ void* leader = 0;
+ void* trailer = 0;
+ mchunkptr p = mem2chunk(mem);
+
+ if (PREACTION(m)) return 0;
+ if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
+ /*
+ Find an aligned spot inside chunk. Since we need to give
+ back leading space in a chunk of at least MIN_CHUNK_SIZE, if
+ the first calculation places us at a spot with less than
+ MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
+ We've allocated enough total room so that this is always
+ possible.
+ */
+ char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
+ alignment -
+ SIZE_T_ONE)) &
+ -alignment));
+ char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
+ br : br+alignment;
+ mchunkptr newp = (mchunkptr)pos;
+ size_t leadsize = pos - (char*)(p);
+ size_t newsize = chunksize(p) - leadsize;
+
+ if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
+ newp->prev_foot = p->prev_foot + leadsize;
+ newp->head = (newsize|CINUSE_BIT);
+ }
+ else { /* Otherwise, give back leader, use the rest */
+ set_inuse(m, newp, newsize);
+ set_inuse(m, p, leadsize);
+ leader = chunk2mem(p);
+ }
+ p = newp;
+ }
+
+ /* Give back spare room at the end */
+ if (!is_mmapped(p)) {
+ size_t size = chunksize(p);
+ if (size > nb + MIN_CHUNK_SIZE) {
+ size_t remainder_size = size - nb;
+ mchunkptr remainder = chunk_plus_offset(p, nb);
+ set_inuse(m, p, nb);
+ set_inuse(m, remainder, remainder_size);
+ trailer = chunk2mem(remainder);
+ }
+ }
+
+ assert (chunksize(p) >= nb);
+ assert((((size_t)(chunk2mem(p))) % alignment) == 0);
+ check_inuse_chunk(m, p);
+ POSTACTION(m);
+ if (leader != 0) {
+ internal_free(m, leader);
+ }
+ if (trailer != 0) {
+ internal_free(m, trailer);
+ }
+ return chunk2mem(p);
+ }
+ }
+ return 0;
+}
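+
+/*
+ A small self-contained illustration (not part of the allocator) of
+ the alignment normalization above: a non-power-of-two alignment is
+ rounded up to the next power of two using the same (a & (a-1)) test
+ internal_memalign applies. round_up_pow2 is an illustrative name:
+
+ #include <stdio.h>
+ #include <stddef.h>
+
+ static size_t round_up_pow2(size_t alignment, size_t min_align) {
+   if ((alignment & (alignment - 1)) != 0) { // not a power of two
+     size_t a = min_align << 1;
+     while (a < alignment) a <<= 1;
+     alignment = a;
+   }
+   return alignment;
+ }
+
+ int main(void) {
+   printf("%zu\n", round_up_pow2(24, 8)); // 32
+   printf("%zu\n", round_up_pow2(64, 8)); // already a power of two: 64
+   return 0;
+ }
+*/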
+
+/* ------------------------ comalloc/coalloc support --------------------- */
+
+static void** ialloc(mstate m,
+ size_t n_elements,
+ size_t* sizes,
+ int opts,
+ void* chunks[]) {
+ /*
+ This provides common support for independent_X routines, handling
+ all of the combinations that can result.
+
+ The opts arg has:
+ bit 0 set if all elements are same size (using sizes[0])
+ bit 1 set if elements should be zeroed
+ */
+
+ size_t element_size; /* chunksize of each element, if all same */
+ size_t contents_size; /* total size of elements */
+ size_t array_size; /* request size of pointer array */
+ void* mem; /* malloced aggregate space */
+ mchunkptr p; /* corresponding chunk */
+ size_t remainder_size; /* remaining bytes while splitting */
+ void** marray; /* either "chunks" or malloced ptr array */
+ mchunkptr array_chunk; /* chunk for malloced ptr array */
+ flag_t was_enabled; /* to disable mmap */
+ size_t size;
+ size_t i;
+
+ /* compute array length, if needed */
+ if (chunks != 0) {
+ if (n_elements == 0)
+ return chunks; /* nothing to do */
+ marray = chunks;
+ array_size = 0;
+ }
+ else {
+ /* if empty req, must still return chunk representing empty array */
+ if (n_elements == 0)
+ return (void**)internal_malloc(m, 0);
+ marray = 0;
+ array_size = request2size(n_elements * (sizeof(void*)));
+ }
+
+ /* compute total element size */
+ if (opts & 0x1) { /* all-same-size */
+ element_size = request2size(*sizes);
+ contents_size = n_elements * element_size;
+ }
+ else { /* add up all the sizes */
+ element_size = 0;
+ contents_size = 0;
+ for (i = 0; i != n_elements; ++i)
+ contents_size += request2size(sizes[i]);
+ }
+
+ size = contents_size + array_size;
+
+ /*
+ Allocate the aggregate chunk. First disable direct-mmapping so
+ malloc won't use it, since we would not be able to later
+ free/realloc space internal to a segregated mmap region.
+ */
+ was_enabled = use_mmap(m);
+ disable_mmap(m);
+ mem = internal_malloc(m, size - CHUNK_OVERHEAD);
+ if (was_enabled)
+ enable_mmap(m);
+ if (mem == 0)
+ return 0;
+
+ if (PREACTION(m)) return 0;
+ p = mem2chunk(mem);
+ remainder_size = chunksize(p);
+
+ assert(!is_mmapped(p));
+
+ if (opts & 0x2) { /* optionally clear the elements */
+ memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
+ }
+
+ /* If not provided, allocate the pointer array as final part of chunk */
+ if (marray == 0) {
+ size_t array_chunk_size;
+ array_chunk = chunk_plus_offset(p, contents_size);
+ array_chunk_size = remainder_size - contents_size;
+ marray = (void**) (chunk2mem(array_chunk));
+ set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
+ remainder_size = contents_size;
+ }
+
+ /* split out elements */
+ for (i = 0; ; ++i) {
+ marray[i] = chunk2mem(p);
+ if (i != n_elements-1) {
+ if (element_size != 0)
+ size = element_size;
+ else
+ size = request2size(sizes[i]);
+ remainder_size -= size;
+ set_size_and_pinuse_of_inuse_chunk(m, p, size);
+ p = chunk_plus_offset(p, size);
+ }
+ else { /* the final element absorbs any overallocation slop */
+ set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
+ break;
+ }
+ }
+
+#if DEBUG
+ if (marray != chunks) {
+ /* final element must have exactly exhausted chunk */
+ if (element_size != 0) {
+ assert(remainder_size == element_size);
+ }
+ else {
+ assert(remainder_size == request2size(sizes[i]));
+ }
+ check_inuse_chunk(m, mem2chunk(marray));
+ }
+ for (i = 0; i != n_elements; ++i)
+ check_inuse_chunk(m, mem2chunk(marray[i]));
+
+#endif /* DEBUG */
+
+ POSTACTION(m);
+ return marray;
+}
+
+
+/* -------------------------- public routines ---------------------------- */
+
+#if !ONLY_MSPACES
+
+void* dlmalloc(size_t bytes) {
+ /*
+ Basic algorithm:
+ If a small request (< 256 bytes minus per-chunk overhead):
+ 1. If one exists, use a remainderless chunk in associated smallbin.
+ (Remainderless means that there are too few excess bytes to
+ represent as a chunk.)
+ 2. If it is big enough, use the dv chunk, which is normally the
+ chunk adjacent to the one used for the most recent small request.
+ 3. If one exists, split the smallest available chunk in a bin,
+ saving remainder in dv.
+ 4. If it is big enough, use the top chunk.
+ 5. If available, get memory from system and use it.
+ Otherwise, for a large request:
+ 1. Find the smallest available binned chunk that fits, and use it
+ if it is better fitting than dv chunk, splitting if necessary.
+ 2. If better fitting than any binned chunk, use the dv chunk.
+ 3. If it is big enough, use the top chunk.
+ 4. If request size >= mmap threshold, try to directly mmap this chunk.
+ 5. If available, get memory from system and use it.
+
+ The ugly goto's here ensure that postaction occurs along all paths.
+ */
+
+ if (!PREACTION(gm)) {
+ void* mem;
+ size_t nb;
+ if (bytes <= MAX_SMALL_REQUEST) {
+ bindex_t idx;
+ binmap_t smallbits;
+ nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
+ idx = small_index(nb);
+ smallbits = gm->smallmap >> idx;
+
+ if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
+ mchunkptr b, p;
+ idx += ~smallbits & 1; /* Uses next bin if idx empty */
+ b = smallbin_at(gm, idx);
+ p = b->fd;
+ assert(chunksize(p) == small_index2size(idx));
+ unlink_first_small_chunk(gm, b, p, idx);
+ set_inuse_and_pinuse(gm, p, small_index2size(idx));
+ mem = chunk2mem(p);
+ check_malloced_chunk(gm, mem, nb);
+ goto postaction;
+ }
+
+ else if (nb > gm->dvsize) {
+ if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
+ mchunkptr b, p, r;
+ size_t rsize;
+ bindex_t i;
+ binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
+ binmap_t leastbit = least_bit(leftbits);
+ compute_bit2idx(leastbit, i);
+ b = smallbin_at(gm, i);
+ p = b->fd;
+ assert(chunksize(p) == small_index2size(i));
+ unlink_first_small_chunk(gm, b, p, i);
+ rsize = small_index2size(i) - nb;
+ /* Fit here cannot be remainderless if 4-byte sizes */
+ if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
+ set_inuse_and_pinuse(gm, p, small_index2size(i));
+ else {
+ set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
+ r = chunk_plus_offset(p, nb);
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ replace_dv(gm, r, rsize);
+ }
+ mem = chunk2mem(p);
+ check_malloced_chunk(gm, mem, nb);
+ goto postaction;
+ }
+
+ else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
+ check_malloced_chunk(gm, mem, nb);
+ goto postaction;
+ }
+ }
+ }
+ else if (bytes >= MAX_REQUEST)
+ nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
+ else {
+ nb = pad_request(bytes);
+ if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
+ check_malloced_chunk(gm, mem, nb);
+ goto postaction;
+ }
+ }
+
+ if (nb <= gm->dvsize) {
+ size_t rsize = gm->dvsize - nb;
+ mchunkptr p = gm->dv;
+ if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
+ mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
+ gm->dvsize = rsize;
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
+ }
+ else { /* exhaust dv */
+ size_t dvs = gm->dvsize;
+ gm->dvsize = 0;
+ gm->dv = 0;
+ set_inuse_and_pinuse(gm, p, dvs);
+ }
+ mem = chunk2mem(p);
+ check_malloced_chunk(gm, mem, nb);
+ goto postaction;
+ }
+
+ else if (nb < gm->topsize) { /* Split top */
+ size_t rsize = gm->topsize -= nb;
+ mchunkptr p = gm->top;
+ mchunkptr r = gm->top = chunk_plus_offset(p, nb);
+ r->head = rsize | PINUSE_BIT;
+ set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
+ mem = chunk2mem(p);
+ check_top_chunk(gm, gm->top);
+ check_malloced_chunk(gm, mem, nb);
+ goto postaction;
+ }
+
+ mem = sys_alloc(gm, nb);
+
+ postaction:
+ POSTACTION(gm);
+ return mem;
+ }
+
+ return 0;
+}
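+
+/*
+ A hedged usage sketch, assuming this file is built with its default
+ settings: small requests are padded up to chunk granularity by
+ pad_request/MIN_CHUNK_SIZE, which is observable through
+ dlmalloc_usable_size (defined later in this file):
+
+ void* p = dlmalloc(10); // small request
+ if (p != 0) {
+   size_t usable = dlmalloc_usable_size(p); // typically > 10
+   dlfree(p);
+ }
+*/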
+
+void dlfree(void* mem) {
+ /*
+ Consolidate freed chunks with preceding or succeeding bordering
+ free chunks, if they exist, and then place in a bin. Intermixed
+ with special cases for top, dv, mmapped chunks, and usage errors.
+ */
+
+ if (mem != 0) {
+ mchunkptr p = mem2chunk(mem);
+#if FOOTERS
+ mstate fm = get_mstate_for(p);
+ if (!ok_magic(fm)) {
+ USAGE_ERROR_ACTION(fm, p);
+ return;
+ }
+#else /* FOOTERS */
+#define fm gm
+#endif /* FOOTERS */
+ if (!PREACTION(fm)) {
+ check_inuse_chunk(fm, p);
+ if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
+ size_t psize = chunksize(p);
+ mchunkptr next = chunk_plus_offset(p, psize);
+ if (!pinuse(p)) {
+ size_t prevsize = p->prev_foot;
+ if ((prevsize & IS_MMAPPED_BIT) != 0) {
+ prevsize &= ~IS_MMAPPED_BIT;
+ psize += prevsize + MMAP_FOOT_PAD;
+ if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
+ fm->footprint -= psize;
+ goto postaction;
+ }
+ else {
+ mchunkptr prev = chunk_minus_offset(p, prevsize);
+ psize += prevsize;
+ p = prev;
+ if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
+ if (p != fm->dv) {
+ unlink_chunk(fm, p, prevsize);
+ }
+ else if ((next->head & INUSE_BITS) == INUSE_BITS) {
+ fm->dvsize = psize;
+ set_free_with_pinuse(p, psize, next);
+ goto postaction;
+ }
+ }
+ else
+ goto erroraction;
+ }
+ }
+
+ if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
+ if (!cinuse(next)) { /* consolidate forward */
+ if (next == fm->top) {
+ size_t tsize = fm->topsize += psize;
+ fm->top = p;
+ p->head = tsize | PINUSE_BIT;
+ if (p == fm->dv) {
+ fm->dv = 0;
+ fm->dvsize = 0;
+ }
+ if (should_trim(fm, tsize))
+ sys_trim(fm, 0);
+ goto postaction;
+ }
+ else if (next == fm->dv) {
+ size_t dsize = fm->dvsize += psize;
+ fm->dv = p;
+ set_size_and_pinuse_of_free_chunk(p, dsize);
+ goto postaction;
+ }
+ else {
+ size_t nsize = chunksize(next);
+ psize += nsize;
+ unlink_chunk(fm, next, nsize);
+ set_size_and_pinuse_of_free_chunk(p, psize);
+ if (p == fm->dv) {
+ fm->dvsize = psize;
+ goto postaction;
+ }
+ }
+ }
+ else
+ set_free_with_pinuse(p, psize, next);
+ insert_chunk(fm, p, psize);
+ check_free_chunk(fm, p);
+ goto postaction;
+ }
+ }
+ erroraction:
+ USAGE_ERROR_ACTION(fm, p);
+ postaction:
+ POSTACTION(fm);
+ }
+ }
+#if !FOOTERS
+#undef fm
+#endif /* FOOTERS */
+}
+
+void* dlcalloc(size_t n_elements, size_t elem_size) {
+ void* mem;
+ size_t req = 0;
+ if (n_elements != 0) {
+ req = n_elements * elem_size;
+ if (((n_elements | elem_size) & ~(size_t)0xffff) &&
+ (req / n_elements != elem_size))
+ req = MAX_SIZE_T; /* force downstream failure on overflow */
+ }
+ mem = dlmalloc(req);
+ if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
+ memset(mem, 0, req);
+ return mem;
+}
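+
+/*
+ A worked example of the overflow screen above, modeling a 32-bit
+ size_t with uint32_t. The cheap test ((n | e) & ~0xffff) skips the
+ division whenever both operands fit in 16 bits, since the product
+ then cannot wrap; otherwise the division re-derives elem_size only
+ if no wrap occurred:
+
+ #include <stdio.h>
+ #include <stdint.h>
+
+ int main(void) {
+   uint32_t n = 0x10000, e = 0x10001;
+   uint32_t req = (uint32_t)((uint64_t)n * e); // wraps to 0x10000
+   int overflowed = ((n | e) & ~(uint32_t)0xffff) && (req / n != e);
+   printf("req=%#x overflow=%d\n", req, overflowed); // overflow=1
+   return 0;
+ }
+*/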
+
+void* dlrealloc(void* oldmem, size_t bytes) {
+ if (oldmem == 0)
+ return dlmalloc(bytes);
+#ifdef REALLOC_ZERO_BYTES_FREES
+ if (bytes == 0) {
+ dlfree(oldmem);
+ return 0;
+ }
+#endif /* REALLOC_ZERO_BYTES_FREES */
+ else {
+#if ! FOOTERS
+ mstate m = gm;
+#else /* FOOTERS */
+ mstate m = get_mstate_for(mem2chunk(oldmem));
+ if (!ok_magic(m)) {
+ USAGE_ERROR_ACTION(m, oldmem);
+ return 0;
+ }
+#endif /* FOOTERS */
+ return internal_realloc(m, oldmem, bytes);
+ }
+}
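+
+/*
+ A short usage sketch of the strategy documented in internal_realloc:
+ grow in place when the neighboring space allows it, otherwise fall
+ back to malloc-copy-free. Either way the prefix of the old contents
+ is preserved, and on failure the original block stays valid:
+
+ char* p = (char*)dlmalloc(16);
+ if (p != 0) {
+   memcpy(p, "abcdefghijklmnop", 16);
+   char* q = (char*)dlrealloc(p, 4096); // may move; 16 bytes kept
+   if (q != 0)
+     dlfree(q);
+   else
+     dlfree(p);
+ }
+*/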
+
+void* dlmemalign(size_t alignment, size_t bytes) {
+ return internal_memalign(gm, alignment, bytes);
+}
+
+void** dlindependent_calloc(size_t n_elements, size_t elem_size,
+ void* chunks[]) {
+ size_t sz = elem_size; /* serves as 1-element array */
+ return ialloc(gm, n_elements, &sz, 3, chunks);
+}
+
+void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
+ void* chunks[]) {
+ return ialloc(gm, n_elements, sizes, 0, chunks);
+}
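+
+/*
+ A usage sketch for the two independent_* wrappers, matching the opts
+ encoding documented in ialloc (bit 0 = all elements the same size,
+ bit 1 = zero the elements; hence 3 for the calloc-style wrapper and
+ 0 for comalloc):
+
+ // ten zeroed 32-byte elements, individually freeable
+ void** elems = dlindependent_calloc(10, 32, 0);
+
+ // three elements of differing sizes carved from one chunk
+ size_t sizes[3] = { 16, 64, 256 };
+ void** parts = dlindependent_comalloc(3, sizes, 0);
+ if (parts != 0) {
+   dlfree(parts[0]); dlfree(parts[1]); dlfree(parts[2]);
+   dlfree(parts); // the pointer array itself was malloced
+ }
+*/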
+
+void* dlvalloc(size_t bytes) {
+ size_t pagesz;
+ init_mparams();
+ pagesz = mparams.page_size;
+ return dlmemalign(pagesz, bytes);
+}
+
+void* dlpvalloc(size_t bytes) {
+ size_t pagesz;
+ init_mparams();
+ pagesz = mparams.page_size;
+ return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
+}
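+
+/*
+ dlpvalloc rounds the request up to a whole number of pages before
+ delegating to dlmemalign. The mask arithmetic, assuming a 4096-byte
+ page:
+
+ // (bytes + pagesz - 1) & ~(pagesz - 1)
+ // bytes = 1    -> 4096
+ // bytes = 4096 -> 4096
+ // bytes = 4097 -> 8192
+*/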
+
+int dlmalloc_trim(size_t pad) {
+ int result = 0;
+ if (!PREACTION(gm)) {
+ result = sys_trim(gm, pad);
+ POSTACTION(gm);
+ }
+ return result;
+}
+
+size_t dlmalloc_footprint(void) {
+ return gm->footprint;
+}
+
+size_t dlmalloc_max_footprint(void) {
+ return gm->max_footprint;
+}
+
+#if !NO_MALLINFO
+struct mallinfo dlmallinfo(void) {
+ return internal_mallinfo(gm);
+}
+#endif /* NO_MALLINFO */
+
+void dlmalloc_stats() {
+ internal_malloc_stats(gm);
+}
+
+size_t dlmalloc_usable_size(void* mem) {
+ if (mem != 0) {
+ mchunkptr p = mem2chunk(mem);
+ if (cinuse(p))
+ return chunksize(p) - overhead_for(p);
+ }
+ return 0;
+}
+
+int dlmallopt(int param_number, int value) {
+ return change_mparam(param_number, value);
+}
+
+#endif /* !ONLY_MSPACES */
+
+/* ----------------------------- user mspaces ---------------------------- */
+
+#if MSPACES
+
+static mstate init_user_mstate(char* tbase, size_t tsize) {
+ size_t msize = pad_request(sizeof(struct malloc_state));
+ mchunkptr mn;
+ mchunkptr msp = align_as_chunk(tbase);
+ mstate m = (mstate)(chunk2mem(msp));
+ memset(m, 0, msize);
+ INITIAL_LOCK(&m->mutex);
+ msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
+ m->seg.base = m->least_addr = tbase;
+ m->seg.size = m->footprint = m->max_footprint = tsize;
+ m->magic = mparams.magic;
+ m->mflags = mparams.default_mflags;
+ disable_contiguous(m);
+ init_bins(m);
+ mn = next_chunk(mem2chunk(m));
+ init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
+ check_top_chunk(m, m->top);
+ return m;
+}
+
+mspace create_mspace(size_t capacity, int locked) {
+ mstate m = 0;
+ size_t msize = pad_request(sizeof(struct malloc_state));
+ init_mparams(); /* Ensure pagesize etc initialized */
+
+ if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
+ size_t rs = ((capacity == 0)? mparams.granularity :
+ (capacity + TOP_FOOT_SIZE + msize));
+ size_t tsize = granularity_align(rs);
+ char* tbase = (char*)(CALL_MMAP(tsize));
+ if (tbase != CMFAIL) {
+ m = init_user_mstate(tbase, tsize);
+ set_segment_flags(&m->seg, IS_MMAPPED_BIT);
+ set_lock(m, locked);
+ }
+ }
+ return (mspace)m;
+}
+
+mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
+ mstate m = 0;
+ size_t msize = pad_request(sizeof(struct malloc_state));
+ init_mparams(); /* Ensure pagesize etc initialized */
+
+ if (capacity > msize + TOP_FOOT_SIZE &&
+ capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
+ m = init_user_mstate((char*)base, capacity);
+ set_segment_flags(&m->seg, EXTERN_BIT);
+ set_lock(m, locked);
+ }
+ return (mspace)m;
+}
+
+size_t destroy_mspace(mspace msp) {
+ size_t freed = 0;
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ msegmentptr sp = &ms->seg;
+ while (sp != 0) {
+ char* base = sp->base;
+ size_t size = sp->size;
+ flag_t flag = get_segment_flags(sp);
+ sp = sp->next;
+ if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
+ CALL_MUNMAP(base, size) == 0)
+ freed += size;
+ }
+ }
+ else {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+ return freed;
+}
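+
+/*
+ A usage sketch for the mspace API: create an independent space,
+ allocate from it, then tear the whole space down at once
+ (destroy_mspace reclaims everything without per-object frees):
+
+ mspace msp = create_mspace(0, 0); // default capacity, no locking
+ if (msp != 0) {
+   void* p = mspace_malloc(msp, 128);
+   void* q = mspace_calloc(msp, 4, 64);
+   mspace_free(msp, p); // optional; destroy reclaims it all
+   destroy_mspace(msp);
+ }
+*/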
+
+/*
+ mspace versions of routines are near-clones of the global
+ versions. This is not so nice but better than the alternatives.
+*/
+
+
+void* mspace_malloc(mspace msp, size_t bytes) {
+ mstate ms = (mstate)msp;
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ return 0;
+ }
+ if (!PREACTION(ms)) {
+ void* mem;
+ size_t nb;
+ if (bytes <= MAX_SMALL_REQUEST) {
+ bindex_t idx;
+ binmap_t smallbits;
+ nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
+ idx = small_index(nb);
+ smallbits = ms->smallmap >> idx;
+
+ if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
+ mchunkptr b, p;
+ idx += ~smallbits & 1; /* Uses next bin if idx empty */
+ b = smallbin_at(ms, idx);
+ p = b->fd;
+ assert(chunksize(p) == small_index2size(idx));
+ unlink_first_small_chunk(ms, b, p, idx);
+ set_inuse_and_pinuse(ms, p, small_index2size(idx));
+ mem = chunk2mem(p);
+ check_malloced_chunk(ms, mem, nb);
+ goto postaction;
+ }
+
+ else if (nb > ms->dvsize) {
+ if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
+ mchunkptr b, p, r;
+ size_t rsize;
+ bindex_t i;
+ binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
+ binmap_t leastbit = least_bit(leftbits);
+ compute_bit2idx(leastbit, i);
+ b = smallbin_at(ms, i);
+ p = b->fd;
+ assert(chunksize(p) == small_index2size(i));
+ unlink_first_small_chunk(ms, b, p, i);
+ rsize = small_index2size(i) - nb;
+ /* Fit here cannot be remainderless if 4-byte sizes */
+ if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
+ set_inuse_and_pinuse(ms, p, small_index2size(i));
+ else {
+ set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
+ r = chunk_plus_offset(p, nb);
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ replace_dv(ms, r, rsize);
+ }
+ mem = chunk2mem(p);
+ check_malloced_chunk(ms, mem, nb);
+ goto postaction;
+ }
+
+ else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
+ check_malloced_chunk(ms, mem, nb);
+ goto postaction;
+ }
+ }
+ }
+ else if (bytes >= MAX_REQUEST)
+ nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
+ else {
+ nb = pad_request(bytes);
+ if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
+ check_malloced_chunk(ms, mem, nb);
+ goto postaction;
+ }
+ }
+
+ if (nb <= ms->dvsize) {
+ size_t rsize = ms->dvsize - nb;
+ mchunkptr p = ms->dv;
+ if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
+ mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
+ ms->dvsize = rsize;
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
+ }
+ else { /* exhaust dv */
+ size_t dvs = ms->dvsize;
+ ms->dvsize = 0;
+ ms->dv = 0;
+ set_inuse_and_pinuse(ms, p, dvs);
+ }
+ mem = chunk2mem(p);
+ check_malloced_chunk(ms, mem, nb);
+ goto postaction;
+ }
+
+ else if (nb < ms->topsize) { /* Split top */
+ size_t rsize = ms->topsize -= nb;
+ mchunkptr p = ms->top;
+ mchunkptr r = ms->top = chunk_plus_offset(p, nb);
+ r->head = rsize | PINUSE_BIT;
+ set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
+ mem = chunk2mem(p);
+ check_top_chunk(ms, ms->top);
+ check_malloced_chunk(ms, mem, nb);
+ goto postaction;
+ }
+
+ mem = sys_alloc(ms, nb);
+
+ postaction:
+ POSTACTION(ms);
+ return mem;
+ }
+
+ return 0;
+}
+
+void mspace_free(mspace msp, void* mem) {
+ if (mem != 0) {
+ mchunkptr p = mem2chunk(mem);
+#if FOOTERS
+ mstate fm = get_mstate_for(p);
+#else /* FOOTERS */
+ mstate fm = (mstate)msp;
+#endif /* FOOTERS */
+ if (!ok_magic(fm)) {
+ USAGE_ERROR_ACTION(fm, p);
+ return;
+ }
+ if (!PREACTION(fm)) {
+ check_inuse_chunk(fm, p);
+ if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
+ size_t psize = chunksize(p);
+ mchunkptr next = chunk_plus_offset(p, psize);
+ if (!pinuse(p)) {
+ size_t prevsize = p->prev_foot;
+ if ((prevsize & IS_MMAPPED_BIT) != 0) {
+ prevsize &= ~IS_MMAPPED_BIT;
+ psize += prevsize + MMAP_FOOT_PAD;
+ if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
+ fm->footprint -= psize;
+ goto postaction;
+ }
+ else {
+ mchunkptr prev = chunk_minus_offset(p, prevsize);
+ psize += prevsize;
+ p = prev;
+ if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
+ if (p != fm->dv) {
+ unlink_chunk(fm, p, prevsize);
+ }
+ else if ((next->head & INUSE_BITS) == INUSE_BITS) {
+ fm->dvsize = psize;
+ set_free_with_pinuse(p, psize, next);
+ goto postaction;
+ }
+ }
+ else
+ goto erroraction;
+ }
+ }
+
+ if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
+ if (!cinuse(next)) { /* consolidate forward */
+ if (next == fm->top) {
+ size_t tsize = fm->topsize += psize;
+ fm->top = p;
+ p->head = tsize | PINUSE_BIT;
+ if (p == fm->dv) {
+ fm->dv = 0;
+ fm->dvsize = 0;
+ }
+ if (should_trim(fm, tsize))
+ sys_trim(fm, 0);
+ goto postaction;
+ }
+ else if (next == fm->dv) {
+ size_t dsize = fm->dvsize += psize;
+ fm->dv = p;
+ set_size_and_pinuse_of_free_chunk(p, dsize);
+ goto postaction;
+ }
+ else {
+ size_t nsize = chunksize(next);
+ psize += nsize;
+ unlink_chunk(fm, next, nsize);
+ set_size_and_pinuse_of_free_chunk(p, psize);
+ if (p == fm->dv) {
+ fm->dvsize = psize;
+ goto postaction;
+ }
+ }
+ }
+ else
+ set_free_with_pinuse(p, psize, next);
+ insert_chunk(fm, p, psize);
+ check_free_chunk(fm, p);
+ goto postaction;
+ }
+ }
+ erroraction:
+ USAGE_ERROR_ACTION(fm, p);
+ postaction:
+ POSTACTION(fm);
+ }
+ }
+}
+
+void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
+ void* mem;
+ size_t req = 0;
+ mstate ms = (mstate)msp;
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ return 0;
+ }
+ if (n_elements != 0) {
+ req = n_elements * elem_size;
+ if (((n_elements | elem_size) & ~(size_t)0xffff) &&
+ (req / n_elements != elem_size))
+ req = MAX_SIZE_T; /* force downstream failure on overflow */
+ }
+ mem = internal_malloc(ms, req);
+ if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
+ memset(mem, 0, req);
+ return mem;
+}
+
+void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
+ if (oldmem == 0)
+ return mspace_malloc(msp, bytes);
+#ifdef REALLOC_ZERO_BYTES_FREES
+ if (bytes == 0) {
+ mspace_free(msp, oldmem);
+ return 0;
+ }
+#endif /* REALLOC_ZERO_BYTES_FREES */
+ else {
+#if FOOTERS
+ mchunkptr p = mem2chunk(oldmem);
+ mstate ms = get_mstate_for(p);
+#else /* FOOTERS */
+ mstate ms = (mstate)msp;
+#endif /* FOOTERS */
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ return 0;
+ }
+ return internal_realloc(ms, oldmem, bytes);
+ }
+}
+
+void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
+ mstate ms = (mstate)msp;
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ return 0;
+ }
+ return internal_memalign(ms, alignment, bytes);
+}
+
+void** mspace_independent_calloc(mspace msp, size_t n_elements,
+ size_t elem_size, void* chunks[]) {
+ size_t sz = elem_size; /* serves as 1-element array */
+ mstate ms = (mstate)msp;
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ return 0;
+ }
+ return ialloc(ms, n_elements, &sz, 3, chunks);
+}
+
+void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+ size_t sizes[], void* chunks[]) {
+ mstate ms = (mstate)msp;
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ return 0;
+ }
+ return ialloc(ms, n_elements, sizes, 0, chunks);
+}
+
+int mspace_trim(mspace msp, size_t pad) {
+ int result = 0;
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ if (!PREACTION(ms)) {
+ result = sys_trim(ms, pad);
+ POSTACTION(ms);
+ }
+ }
+ else {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+ return result;
+}
+
+void mspace_malloc_stats(mspace msp) {
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ internal_malloc_stats(ms);
+ }
+ else {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+}
+
+size_t mspace_footprint(mspace msp) {
+ size_t result = 0;
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ result = ms->footprint;
+ }
+ else {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+ return result;
+}
+
+
+size_t mspace_max_footprint(mspace msp) {
+ size_t result = 0;
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ result = ms->max_footprint;
+ }
+ else {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+ return result;
+}
+
+
+#if !NO_MALLINFO
+struct mallinfo mspace_mallinfo(mspace msp) {
+ mstate ms = (mstate)msp;
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+ return internal_mallinfo(ms);
+}
+#endif /* NO_MALLINFO */
+
+int mspace_mallopt(int param_number, int value) {
+ return change_mparam(param_number, value);
+}
+
+#endif /* MSPACES */
+
+/* -------------------- Alternative MORECORE functions ------------------- */
+
+/*
+ Guidelines for creating a custom version of MORECORE:
+
+ * For best performance, MORECORE should allocate in multiples of pagesize.
+ * MORECORE may allocate more memory than requested. (Or even less,
+ but this will usually result in a malloc failure.)
+ * MORECORE must not allocate memory when given argument zero, but
+ instead return one past the end address of memory from previous
+ nonzero call.
+ * For best performance, consecutive calls to MORECORE with positive
+ arguments should return increasing addresses, indicating that
+ space has been contiguously extended.
+ * Even though consecutive calls to MORECORE need not return contiguous
+ addresses, it must be OK for malloc'ed chunks to span multiple
+ regions in those cases where they do happen to be contiguous.
+ * MORECORE need not handle negative arguments -- it may instead
+ just return MFAIL when given negative arguments.
+ Negative arguments are always multiples of pagesize. MORECORE
+ must not misinterpret negative args as large positive unsigned
+ args. You can suppress all such calls from even occurring by defining
+ MORECORE_CANNOT_TRIM.
+
+ As an example alternative MORECORE, here is a custom allocator
+ kindly contributed for pre-OSX macOS. It uses virtually but not
+ necessarily physically contiguous non-paged memory (locked in,
+ present and won't get swapped out). You can use it by uncommenting
+ this section, adding some #includes, and setting up the appropriate
+ defines above:
+
+ #define MORECORE osMoreCore
+
+ There is also a shutdown routine that should somehow be called for
+ cleanup upon program exit.
+
+ #define MAX_POOL_ENTRIES 100
+ #define MINIMUM_MORECORE_SIZE (64 * 1024U)
+ static int next_os_pool;
+ void *our_os_pools[MAX_POOL_ENTRIES];
+
+ void *osMoreCore(int size)
+ {
+ void *ptr = 0;
+ static void *sbrk_top = 0;
+
+ if (size > 0)
+ {
+ if (size < MINIMUM_MORECORE_SIZE)
+ size = MINIMUM_MORECORE_SIZE;
+ if (CurrentExecutionLevel() == kTaskLevel)
+ ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
+ if (ptr == 0)
+ {
+ return (void *) MFAIL;
+ }
+ // save ptrs so they can be freed during cleanup
+ our_os_pools[next_os_pool] = ptr;
+ next_os_pool++;
+ ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
+ sbrk_top = (char *) ptr + size;
+ return ptr;
+ }
+ else if (size < 0)
+ {
+ // we don't currently support shrink behavior
+ return (void *) MFAIL;
+ }
+ else
+ {
+ return sbrk_top;
+ }
+ }
+
+ // cleanup any allocated memory pools
+ // called as last thing before shutting down driver
+
+ void osCleanupMem(void)
+ {
+ void **ptr;
+
+ for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
+ if (*ptr)
+ {
+ PoolDeallocate(*ptr);
+ *ptr = 0;
+ }
+ }
+
+*/
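+
+/*
+ A second, deliberately minimal MORECORE sketch that follows the
+ guidelines above: a bump pointer over a static arena. The names and
+ the arena size are illustrative, not part of this file:
+
+ #define ARENA_SIZE (1 << 20)
+ static char arena[ARENA_SIZE];
+ static size_t arena_used = 0;
+
+ void *arenaMoreCore(int size)
+ {
+   if (size > 0)
+   {
+     if ((size_t)size > ARENA_SIZE - arena_used)
+       return (void *) MFAIL; // arena exhausted
+     void *ptr = arena + arena_used;
+     arena_used += (size_t)size;
+     return ptr;
+   }
+   else if (size < 0)
+   {
+     return (void *) MFAIL; // trimming not supported
+   }
+   else
+   {
+     return arena + arena_used; // one past the current end
+   }
+ }
+*/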
+
+
+/* -----------------------------------------------------------------------
+History:
+ V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
+ * Add max_footprint functions
+ * Ensure all appropriate literals are size_t
+ * Fix conditional compilation problem for some #define settings
+ * Avoid concatenating segments with the one provided
+ in create_mspace_with_base
+ * Rename some variables to avoid compiler shadowing warnings
+ * Use explicit lock initialization.
+ * Better handling of sbrk interference.
+ * Simplify and fix segment insertion, trimming and mspace_destroy
+ * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
+ * Thanks especially to Dennis Flanagan for help on these.
+
+ V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
+ * Fix memalign brace error.
+
+ V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
+ * Fix improper #endif nesting in C++
+ * Add explicit casts needed for C++
+
+ V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
+ * Use trees for large bins
+ * Support mspaces
+ * Use segments to unify sbrk-based and mmap-based system allocation,
+ removing need for emulation on most platforms without sbrk.
+ * Default safety checks
+ * Optional footer checks. Thanks to William Robertson for the idea.
+ * Internal code refactoring
+ * Incorporate suggestions and platform-specific changes.
+ Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
+ Aaron Bachmann, Emery Berger, and others.
+ * Speed up non-fastbin processing enough to remove fastbins.
+ * Remove useless cfree() to avoid conflicts with other apps.
+ * Remove internal memcpy, memset. Compilers handle builtins better.
+ * Remove some options that no one ever used and rename others.
+
+ V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
+ * Fix malloc_state bitmap array misdeclaration
+
+ V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
+ * Allow tuning of FIRST_SORTED_BIN_SIZE
+ * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
+ * Better detection and support for non-contiguousness of MORECORE.
+ Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
+ * Bypass most of malloc if no frees. Thanks To Emery Berger.
+ * Fix freeing of old top non-contiguous chunk in sysmalloc.
+ * Raised default trim and map thresholds to 256K.
+ * Fix mmap-related #defines. Thanks to Lubos Lunak.
+ * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
+ * Branch-free bin calculation
+ * Default trim and mmap thresholds now 256K.
+
+ V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
+ * Introduce independent_comalloc and independent_calloc.
+ Thanks to Michael Pachos for motivation and help.
+ * Make optional .h file available
+ * Allow > 2GB requests on 32bit systems.
+ * new WIN32 sbrk, mmap, munmap, lock code from .
+ Thanks also to Andreas Mueller ,
+ and Anonymous.
+ * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
+ helping test this.)
+ * memalign: check alignment arg
+ * realloc: don't try to shift chunks backwards, since this
+ leads to more fragmentation in some programs and doesn't
+ seem to help in any others.
+ * Collect all cases in malloc requiring system memory into sysmalloc
+ * Use mmap as backup to sbrk
+ * Place all internal state in malloc_state
+ * Introduce fastbins (although similar to 2.5.1)
+ * Many minor tunings and cosmetic improvements
+ * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
+ * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
+ Thanks to Tony E. Bennett and others.
+ * Include errno.h to support default failure action.
+
+ V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
+ * return null for negative arguments
+ * Added Several WIN32 cleanups from Martin C. Fong
+ * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
+ (e.g. WIN32 platforms)
+ * Cleanup header file inclusion for WIN32 platforms
+ * Cleanup code to avoid Microsoft Visual C++ compiler complaints
+ * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
+ memory allocation routines
+ * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
+ * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
+ usage of 'assert' in non-WIN32 code
+ * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
+ avoid infinite loop
+ * Always call 'fREe()' rather than 'free()'
+
+ V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
+ * Fixed ordering problem with boundary-stamping
+
+ V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
+ * Added pvalloc, as recommended by H.J. Liu
+ * Added 64bit pointer support mainly from Wolfram Gloger
+ * Added anonymously donated WIN32 sbrk emulation
+ * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
+ * malloc_extend_top: fix mask error that caused wastage after
+ foreign sbrks
+ * Add linux mremap support code from HJ Liu
+
+ V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
+ * Integrated most documentation with the code.
+ * Add support for mmap, with help from
+ Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
+ * Use last_remainder in more cases.
+ * Pack bins using idea from colin@nyx10.cs.du.edu
+ * Use ordered bins instead of best-fit threshold
+ * Eliminate block-local decls to simplify tracing and debugging.
+ * Support another case of realloc via move into top
+ * Fix error occurring when initial sbrk_base not word-aligned.
+ * Rely on page size for units instead of SBRK_UNIT to
+ avoid surprises about sbrk alignment conventions.
+ * Add mallinfo, mallopt. Thanks to Raymond Nijssen
+ (raymond@es.ele.tue.nl) for the suggestion.
+ * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
+ * More precautions for cases where other routines call sbrk,
+ courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
+ * Added macros etc., allowing use in linux libc from
+ H.J. Lu (hjl@gnu.ai.mit.edu)
+ * Inverted this history list
+
+ V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
+ * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
+ * Removed all preallocation code since under current scheme
+ the work required to undo bad preallocations exceeds
+ the work saved in good cases for most test programs.
+ * No longer use return list or unconsolidated bins since
+ no scheme using them consistently outperforms those that don't
+ given above changes.
+ * Use best fit for very large chunks to prevent some worst-cases.
+ * Added some support for debugging
+
+ V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
+ * Removed footers when chunks are in use. Thanks to
+ Paul Wilson (wilson@cs.texas.edu) for the suggestion.
+
+ V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
+ * Added malloc_trim, with help from Wolfram Gloger
+ (wmglo@Dent.MED.Uni-Muenchen.DE).
+
+ V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
+
+ V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
+ * realloc: try to expand in both directions
+ * malloc: swap order of clean-bin strategy;
+ * realloc: only conditionally expand backwards
+ * Try not to scavenge used bins
+ * Use bin counts as a guide to preallocation
+ * Occasionally bin return list chunks in first scan
+ * Add a few optimizations from colin@nyx10.cs.du.edu
+
+ V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
+ * faster bin computation & slightly different binning
+ * merged all consolidations to one part of malloc proper
+ (eliminating old malloc_find_space & malloc_clean_bin)
+ * Scan 2 returns chunks (not just 1)
+ * Propagate failure in realloc if malloc returns 0
+ * Add stuff to allow compilation on non-ANSI compilers
+ from kpv@research.att.com
+
+ V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
+ * removed potential for odd address access in prev_chunk
+ * removed dependency on getpagesize.h
+ * misc cosmetics and a bit more internal documentation
+ * anticosmetics: mangled names in macros to evade debugger strangeness
+ * tested on sparc, hp-700, dec-mips, rs6000
+ with gcc & native cc (hp, dec only) allowing
+ Detlefs & Zorn comparison study (in SIGPLAN Notices.)
+
+ Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
+ * Based loosely on libg++-1.2X malloc. (It retains some of the overall
+ structure of old version, but most details differ.)
+
+*/
diff -Nru orig/Modules/_ctypes/libffi_ios/include/ffi.h modified/Modules/_ctypes/libffi_ios/include/ffi.h
--- orig/Modules/_ctypes/libffi_ios/include/ffi.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/ffi.h 2015-03-12 21:32:24.000000000 +0800
@@ -0,0 +1,24 @@
+#ifdef __arm64__
+
+#include <ffi_arm64.h>
+
+
+#endif
+#ifdef __i386__
+
+#include <ffi_i386.h>
+
+
+#endif
+#ifdef __arm__
+
+#include <ffi_armv7.h>
+
+
+#endif
+#ifdef __x86_64__
+
+#include <ffi_x86_64.h>
+
+
+#endif
diff -Nru orig/Modules/_ctypes/libffi_ios/include/ffi_arm64.h modified/Modules/_ctypes/libffi_ios/include/ffi_arm64.h
--- orig/Modules/_ctypes/libffi_ios/include/ffi_arm64.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/ffi_arm64.h 2015-03-12 21:32:31.000000000 +0800
@@ -0,0 +1,508 @@
+#ifdef __arm64__
+
+/* -----------------------------------------------------------------*-C-*-
+ libffi 3.99999 - Copyright (c) 2011, 2014 Anthony Green
+ - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc.
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the ``Software''), to deal in the Software without
+ restriction, including without limitation the rights to use, copy,
+ modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+ ----------------------------------------------------------------------- */
+
+/* -------------------------------------------------------------------
+ The basic API is described in the README file.
+
+ The raw API is designed to bypass some of the argument packing
+ and unpacking on architectures for which it can be avoided.
+
+ The closure API allows interpreted functions to be packaged up
+ inside a C function pointer, so that they can be called as C functions,
+ with no understanding on the client side that they are interpreted.
+ It can also be used in other cases in which it is necessary to package
+ up a user specified parameter and a function pointer as a single
+ function pointer.
+
+ The closure API must be implemented in order to get its functionality,
+ e.g. for use by gij. Routines are provided to emulate the raw API
+ if the underlying platform doesn't allow faster implementation.
+
+ More details on the raw and closure API can be found in:
+
+ http://gcc.gnu.org/ml/java/1999-q3/msg00138.html
+
+ and
+
+ http://gcc.gnu.org/ml/java/1999-q3/msg00174.html
+ -------------------------------------------------------------------- */
+
+#ifndef LIBFFI_H
+#define LIBFFI_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Specify which architecture libffi is configured for. */
+#ifndef AARCH64
+#define AARCH64
+#endif
+
+/* ---- System configuration information --------------------------------- */
+
+#include <ffitarget.h>
+
+#ifndef LIBFFI_ASM
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define __attribute__(X)
+#endif
+
+#include <stddef.h>
+#include <limits.h>
+
+/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example).
+ But we can find it either under the correct ANSI name, or under GNU
+ C's internal name. */
+
+#define FFI_64_BIT_MAX 9223372036854775807
+
+#ifdef LONG_LONG_MAX
+# define FFI_LONG_LONG_MAX LONG_LONG_MAX
+#else
+# ifdef LLONG_MAX
+# define FFI_LONG_LONG_MAX LLONG_MAX
+# ifdef _AIX52 /* or newer has C99 LLONG_MAX */
+# undef FFI_64_BIT_MAX
+# define FFI_64_BIT_MAX 9223372036854775807LL
+# endif /* _AIX52 or newer */
+# else
+# ifdef __GNUC__
+# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__
+# endif
+# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */
+# ifndef __PPC64__
+# if defined (__IBMC__) || defined (__IBMCPP__)
+# define FFI_LONG_LONG_MAX LONGLONG_MAX
+# endif
+# endif /* __PPC64__ */
+# undef FFI_64_BIT_MAX
+# define FFI_64_BIT_MAX 9223372036854775807LL
+# endif
+# endif
+#endif
+
+/* The closure code assumes that this works on pointers, i.e. a size_t */
+/* can hold a pointer. */
+
+typedef struct _ffi_type
+{
+ size_t size;
+ unsigned short alignment;
+ unsigned short type;
+ struct _ffi_type **elements;
+} ffi_type;
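+
+/*
+ Aggregate types are described by chaining ffi_type objects through
+ the elements array (NULL-terminated), with size and alignment left
+ zero so the library computes them during cif preparation. A sketch
+ for struct point { double x; double y; }; ffi_type_double and
+ FFI_TYPE_STRUCT are declared later in this header:
+
+ ffi_type* point_elements[3] = { &ffi_type_double, &ffi_type_double, NULL };
+ ffi_type point_type = { 0, 0, FFI_TYPE_STRUCT, point_elements };
+*/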
+
+#ifndef LIBFFI_HIDE_BASIC_TYPES
+#if SCHAR_MAX == 127
+# define ffi_type_uchar ffi_type_uint8
+# define ffi_type_schar ffi_type_sint8
+#else
+ #error "char size not supported"
+#endif
+
+#if SHRT_MAX == 32767
+# define ffi_type_ushort ffi_type_uint16
+# define ffi_type_sshort ffi_type_sint16
+#elif SHRT_MAX == 2147483647
+# define ffi_type_ushort ffi_type_uint32
+# define ffi_type_sshort ffi_type_sint32
+#else
+ #error "short size not supported"
+#endif
+
+#if INT_MAX == 32767
+# define ffi_type_uint ffi_type_uint16
+# define ffi_type_sint ffi_type_sint16
+#elif INT_MAX == 2147483647
+# define ffi_type_uint ffi_type_uint32
+# define ffi_type_sint ffi_type_sint32
+#elif INT_MAX == 9223372036854775807
+# define ffi_type_uint ffi_type_uint64
+# define ffi_type_sint ffi_type_sint64
+#else
+ #error "int size not supported"
+#endif
+
+#if LONG_MAX == 2147483647
+# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX
+ #error "no 64-bit data type supported"
+# endif
+#elif LONG_MAX != FFI_64_BIT_MAX
+ #error "long size not supported"
+#endif
+
+#if LONG_MAX == 2147483647
+# define ffi_type_ulong ffi_type_uint32
+# define ffi_type_slong ffi_type_sint32
+#elif LONG_MAX == FFI_64_BIT_MAX
+# define ffi_type_ulong ffi_type_uint64
+# define ffi_type_slong ffi_type_sint64
+#else
+ #error "long size not supported"
+#endif
+
+/* Need minimal decorations for DLLs to work on Windows. */
+/* GCC has autoimport and autoexport. Rely on Libtool to */
+/* help MSVC export from a DLL, but always declare data */
+/* to be imported for MSVC clients. This costs an extra */
+/* indirection for MSVC clients using the static version */
+/* of the library, but don't worry about that. Besides, */
+/* as a workaround, they can define FFI_BUILDING if they */
+/* *know* they are going to link with the static library. */
+#if defined _MSC_VER && !defined FFI_BUILDING
+#define FFI_EXTERN extern __declspec(dllimport)
+#else
+#define FFI_EXTERN extern
+#endif
+
+/* These are defined in types.c */
+FFI_EXTERN ffi_type ffi_type_void;
+FFI_EXTERN ffi_type ffi_type_uint8;
+FFI_EXTERN ffi_type ffi_type_sint8;
+FFI_EXTERN ffi_type ffi_type_uint16;
+FFI_EXTERN ffi_type ffi_type_sint16;
+FFI_EXTERN ffi_type ffi_type_uint32;
+FFI_EXTERN ffi_type ffi_type_sint32;
+FFI_EXTERN ffi_type ffi_type_uint64;
+FFI_EXTERN ffi_type ffi_type_sint64;
+FFI_EXTERN ffi_type ffi_type_float;
+FFI_EXTERN ffi_type ffi_type_double;
+FFI_EXTERN ffi_type ffi_type_pointer;
+
+#if 0
+FFI_EXTERN ffi_type ffi_type_longdouble;
+#else
+#define ffi_type_longdouble ffi_type_double
+#endif
+
+#ifdef FFI_TARGET_HAS_COMPLEX_TYPE
+FFI_EXTERN ffi_type ffi_type_complex_float;
+FFI_EXTERN ffi_type ffi_type_complex_double;
+#if 0
+FFI_EXTERN ffi_type ffi_type_complex_longdouble;
+#else
+#define ffi_type_complex_longdouble ffi_type_complex_double
+#endif
+#endif
+#endif /* LIBFFI_HIDE_BASIC_TYPES */
+
+typedef enum {
+ FFI_OK = 0,
+ FFI_BAD_TYPEDEF,
+ FFI_BAD_ABI
+} ffi_status;
+
+typedef unsigned FFI_TYPE;
+
+typedef struct {
+ ffi_abi abi;
+ unsigned nargs;
+ ffi_type **arg_types;
+ ffi_type *rtype;
+ unsigned bytes;
+ unsigned flags;
+#ifdef FFI_EXTRA_CIF_FIELDS
+ FFI_EXTRA_CIF_FIELDS;
+#endif
+} ffi_cif;
+
+#if 0
+/* Used to adjust size/alignment of ffi types. */
+void ffi_prep_types (ffi_abi abi);
+#endif
+
+/* Used internally, but overridden by some architectures */
+ffi_status ffi_prep_cif_core(ffi_cif *cif,
+ ffi_abi abi,
+ unsigned int isvariadic,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs,
+ ffi_type *rtype,
+ ffi_type **atypes);
+
+/* ---- Definitions for the raw API -------------------------------------- */
+
+#ifndef FFI_SIZEOF_ARG
+# if LONG_MAX == 2147483647
+# define FFI_SIZEOF_ARG 4
+# elif LONG_MAX == FFI_64_BIT_MAX
+# define FFI_SIZEOF_ARG 8
+# endif
+#endif
+
+#ifndef FFI_SIZEOF_JAVA_RAW
+# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG
+#endif
+
+typedef union {
+ ffi_sarg sint;
+ ffi_arg uint;
+ float flt;
+ char data[FFI_SIZEOF_ARG];
+ void* ptr;
+} ffi_raw;
+
+#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8
+/* This is a special case for mips64/n32 ABI (and perhaps others) where
+ sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. */
+typedef union {
+ signed int sint;
+ unsigned int uint;
+ float flt;
+ char data[FFI_SIZEOF_JAVA_RAW];
+ void* ptr;
+} ffi_java_raw;
+#else
+typedef ffi_raw ffi_java_raw;
+#endif
+
+
+void ffi_raw_call (ffi_cif *cif,
+ void (*fn)(void),
+ void *rvalue,
+ ffi_raw *avalue);
+
+void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw);
+void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args);
+size_t ffi_raw_size (ffi_cif *cif);
+
+/* This is analogous to the raw API, except it uses Java parameter */
+/* packing, even on 64-bit machines. I.e. on 64-bit machines */
+/* longs and doubles are followed by an empty 64-bit word. */
+
+void ffi_java_raw_call (ffi_cif *cif,
+ void (*fn)(void),
+ void *rvalue,
+ ffi_java_raw *avalue);
+
+void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw);
+void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args);
+size_t ffi_java_raw_size (ffi_cif *cif);
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#if FFI_CLOSURES
+
+#ifdef _MSC_VER
+__declspec(align(8))
+#endif
+typedef struct {
+#if 1
+ void *trampoline_table;
+ void *trampoline_table_entry;
+#else
+ char tramp[FFI_TRAMPOLINE_SIZE];
+#endif
+ ffi_cif *cif;
+ void (*fun)(ffi_cif*,void*,void**,void*);
+ void *user_data;
+#ifdef __GNUC__
+} ffi_closure __attribute__((aligned (8)));
+#else
+} ffi_closure;
+# ifdef __sgi
+# pragma pack 0
+# endif
+#endif
+
+void *ffi_closure_alloc (size_t size, void **code);
+void ffi_closure_free (void *);
+
+ffi_status
+ffi_prep_closure (ffi_closure*,
+ ffi_cif *,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data);
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure*,
+ ffi_cif *,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data,
+ void*codeloc);
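+
+/*
+ A hedged sketch of the closure workflow: allocate a closure together
+ with an executable code address, bind a handler and user data to a
+ prepared cif, call through the code pointer, then free. The handler
+ name and the int(int) signature are illustrative:
+
+ static void handler(ffi_cif* cif, void* ret, void** args, void* user_data)
+ {
+   *(ffi_arg*)ret = *(int*)args[0] + 1;
+ }
+
+ // assumes cif was already prepared for int(int) via ffi_prep_cif
+ void* code;
+ ffi_closure* closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
+ if (closure != NULL &&
+     ffi_prep_closure_loc(closure, &cif, handler, NULL, code) == FFI_OK)
+ {
+   int r = ((int (*)(int))code)(41); // r == 42
+ }
+ ffi_closure_free(closure);
+*/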
+
+#ifdef __sgi
+# pragma pack 8
+#endif
+typedef struct {
+#if 1
+ void *trampoline_table;
+ void *trampoline_table_entry;
+#else
+ char tramp[FFI_TRAMPOLINE_SIZE];
+#endif
+ ffi_cif *cif;
+
+#if !FFI_NATIVE_RAW_API
+
+ /* if this is enabled, then a raw closure has the same layout
+ as a regular closure. We use this to install an intermediate
+ handler to do the translation, void** -> ffi_raw*. */
+
+ void (*translate_args)(ffi_cif*,void*,void**,void*);
+ void *this_closure;
+
+#endif
+
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*);
+ void *user_data;
+
+} ffi_raw_closure;
+
+typedef struct {
+#if 1
+ void *trampoline_table;
+ void *trampoline_table_entry;
+#else
+ char tramp[FFI_TRAMPOLINE_SIZE];
+#endif
+
+ ffi_cif *cif;
+
+#if !FFI_NATIVE_RAW_API
+
+ /* if this is enabled, then a raw closure has the same layout
+ as a regular closure. We use this to install an intermediate
+ handler to do the translation, void** -> ffi_raw*. */
+
+ void (*translate_args)(ffi_cif*,void*,void**,void*);
+ void *this_closure;
+
+#endif
+
+ void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*);
+ void *user_data;
+
+} ffi_java_raw_closure;
+
+ffi_status
+ffi_prep_raw_closure (ffi_raw_closure*,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*),
+ void *user_data);
+
+ffi_status
+ffi_prep_raw_closure_loc (ffi_raw_closure*,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*),
+ void *user_data,
+ void *codeloc);
+
+ffi_status
+ffi_prep_java_raw_closure (ffi_java_raw_closure*,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*),
+ void *user_data);
+
+ffi_status
+ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*),
+ void *user_data,
+ void *codeloc);
+
+#endif /* FFI_CLOSURES */
+
+#if FFI_GO_CLOSURES
+
+typedef struct {
+ void *tramp;
+ ffi_cif *cif;
+ void (*fun)(ffi_cif*,void*,void**,void*);
+} ffi_go_closure;
+
+ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *,
+ void (*fun)(ffi_cif*,void*,void**,void*));
+
+void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue,
+ void **avalue, void *closure);
+
+#endif /* FFI_GO_CLOSURES */
+
+/* ---- Public interface definition -------------------------------------- */
+
+ffi_status ffi_prep_cif(ffi_cif *cif,
+ ffi_abi abi,
+ unsigned int nargs,
+ ffi_type *rtype,
+ ffi_type **atypes);
+
+ffi_status ffi_prep_cif_var(ffi_cif *cif,
+ ffi_abi abi,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs,
+ ffi_type *rtype,
+ ffi_type **atypes);
+
+void ffi_call(ffi_cif *cif,
+ void (*fn)(void),
+ void *rvalue,
+ void **avalue);
+
+/* Useful for eliminating compiler warnings */
+#define FFI_FN(f) ((void (*)(void))f)
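+
+/*
+ The canonical call sequence: prepare a cif describing the signature,
+ then invoke through ffi_call. FFI_DEFAULT_ABI and ffi_arg come from
+ ffitarget.h; here we call puts(const char*):
+
+ ffi_cif cif;
+ ffi_type* args[1] = { &ffi_type_pointer };
+ void* values[1];
+ char* s = "Hello, libffi";
+ ffi_arg rc;
+
+ if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, args) == FFI_OK) {
+   values[0] = &s;
+   ffi_call(&cif, FFI_FN(puts), &rc, values);
+ }
+*/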
+
+/* ---- Definitions shared with assembly code ---------------------------- */
+
+#endif
+
+/* If these change, update src/mips/ffitarget.h. */
+#define FFI_TYPE_VOID 0
+#define FFI_TYPE_INT 1
+#define FFI_TYPE_FLOAT 2
+#define FFI_TYPE_DOUBLE 3
+#if 0
+#define FFI_TYPE_LONGDOUBLE 4
+#else
+#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE
+#endif
+#define FFI_TYPE_UINT8 5
+#define FFI_TYPE_SINT8 6
+#define FFI_TYPE_UINT16 7
+#define FFI_TYPE_SINT16 8
+#define FFI_TYPE_UINT32 9
+#define FFI_TYPE_SINT32 10
+#define FFI_TYPE_UINT64 11
+#define FFI_TYPE_SINT64 12
+#define FFI_TYPE_STRUCT 13
+#define FFI_TYPE_POINTER 14
+#define FFI_TYPE_COMPLEX 15
+
+/* This should always refer to the last type code (for sanity checks) */
+#define FFI_TYPE_LAST FFI_TYPE_COMPLEX
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+
+#endif
\ No newline at end of file
diff -Nru orig/Modules/_ctypes/libffi_ios/include/ffi_armv7.h modified/Modules/_ctypes/libffi_ios/include/ffi_armv7.h
--- orig/Modules/_ctypes/libffi_ios/include/ffi_armv7.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/ffi_armv7.h 2015-03-12 21:32:31.000000000 +0800
@@ -0,0 +1,508 @@
+#ifdef __arm__
+
+/* -----------------------------------------------------------------*-C-*-
+ libffi 3.99999 - Copyright (c) 2011, 2014 Anthony Green
+ - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc.
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the ``Software''), to deal in the Software without
+ restriction, including without limitation the rights to use, copy,
+ modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+ ----------------------------------------------------------------------- */
+
+/* -------------------------------------------------------------------
+ The basic API is described in the README file.
+
+ The raw API is designed to bypass some of the argument packing
+ and unpacking on architectures for which it can be avoided.
+
+ The closure API allows interpreted functions to be packaged up
+ inside a C function pointer, so that they can be called as C functions,
+ with no understanding on the client side that they are interpreted.
+ It can also be used in other cases in which it is necessary to package
+ up a user specified parameter and a function pointer as a single
+ function pointer.
+
+ The closure API must be implemented in order to get its functionality,
+ e.g. for use by gij. Routines are provided to emulate the raw API
+ if the underlying platform doesn't allow faster implementation.
+
+ More details on the raw and closure API can be found in:
+
+ http://gcc.gnu.org/ml/java/1999-q3/msg00138.html
+
+ and
+
+ http://gcc.gnu.org/ml/java/1999-q3/msg00174.html
+ -------------------------------------------------------------------- */
+
+#ifndef LIBFFI_H
+#define LIBFFI_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Specify which architecture libffi is configured for. */
+#ifndef ARM
+#define ARM
+#endif
+
+/* ---- System configuration information --------------------------------- */
+
+#include <ffitarget.h>
+
+#ifndef LIBFFI_ASM
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define __attribute__(X)
+#endif
+
+#include <stddef.h>
+#include <limits.h>
+
+/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example).
+ But we can find it either under the correct ANSI name, or under GNU
+ C's internal name. */
+
+#define FFI_64_BIT_MAX 9223372036854775807
+
+#ifdef LONG_LONG_MAX
+# define FFI_LONG_LONG_MAX LONG_LONG_MAX
+#else
+# ifdef LLONG_MAX
+# define FFI_LONG_LONG_MAX LLONG_MAX
+# ifdef _AIX52 /* or newer has C99 LLONG_MAX */
+# undef FFI_64_BIT_MAX
+# define FFI_64_BIT_MAX 9223372036854775807LL
+# endif /* _AIX52 or newer */
+# else
+# ifdef __GNUC__
+# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__
+# endif
+# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */
+# ifndef __PPC64__
+# if defined (__IBMC__) || defined (__IBMCPP__)
+# define FFI_LONG_LONG_MAX LONGLONG_MAX
+# endif
+# endif /* __PPC64__ */
+# undef FFI_64_BIT_MAX
+# define FFI_64_BIT_MAX 9223372036854775807LL
+# endif
+# endif
+#endif
+
+/* The closure code assumes that this works on pointers, i.e. a size_t */
+/* can hold a pointer. */
+
+typedef struct _ffi_type
+{
+ size_t size;
+ unsigned short alignment;
+ unsigned short type;
+ struct _ffi_type **elements;
+} ffi_type;
+
+#ifndef LIBFFI_HIDE_BASIC_TYPES
+#if SCHAR_MAX == 127
+# define ffi_type_uchar ffi_type_uint8
+# define ffi_type_schar ffi_type_sint8
+#else
+ #error "char size not supported"
+#endif
+
+#if SHRT_MAX == 32767
+# define ffi_type_ushort ffi_type_uint16
+# define ffi_type_sshort ffi_type_sint16
+#elif SHRT_MAX == 2147483647
+# define ffi_type_ushort ffi_type_uint32
+# define ffi_type_sshort ffi_type_sint32
+#else
+ #error "short size not supported"
+#endif
+
+#if INT_MAX == 32767
+# define ffi_type_uint ffi_type_uint16
+# define ffi_type_sint ffi_type_sint16
+#elif INT_MAX == 2147483647
+# define ffi_type_uint ffi_type_uint32
+# define ffi_type_sint ffi_type_sint32
+#elif INT_MAX == 9223372036854775807
+# define ffi_type_uint ffi_type_uint64
+# define ffi_type_sint ffi_type_sint64
+#else
+ #error "int size not supported"
+#endif
+
+#if LONG_MAX == 2147483647
+# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX
+ #error "no 64-bit data type supported"
+# endif
+#elif LONG_MAX != FFI_64_BIT_MAX
+ #error "long size not supported"
+#endif
+
+#if LONG_MAX == 2147483647
+# define ffi_type_ulong ffi_type_uint32
+# define ffi_type_slong ffi_type_sint32
+#elif LONG_MAX == FFI_64_BIT_MAX
+# define ffi_type_ulong ffi_type_uint64
+# define ffi_type_slong ffi_type_sint64
+#else
+ #error "long size not supported"
+#endif
+
+/* Need minimal decorations for DLLs to work on Windows. */
+/* GCC has autoimport and autoexport. Rely on Libtool to */
+/* help MSVC export from a DLL, but always declare data */
+/* to be imported for MSVC clients. This costs an extra */
+/* indirection for MSVC clients using the static version */
+/* of the library, but don't worry about that. Besides, */
+/* as a workaround, they can define FFI_BUILDING if they */
+/* *know* they are going to link with the static library. */
+#if defined _MSC_VER && !defined FFI_BUILDING
+#define FFI_EXTERN extern __declspec(dllimport)
+#else
+#define FFI_EXTERN extern
+#endif
+
+/* These are defined in types.c */
+FFI_EXTERN ffi_type ffi_type_void;
+FFI_EXTERN ffi_type ffi_type_uint8;
+FFI_EXTERN ffi_type ffi_type_sint8;
+FFI_EXTERN ffi_type ffi_type_uint16;
+FFI_EXTERN ffi_type ffi_type_sint16;
+FFI_EXTERN ffi_type ffi_type_uint32;
+FFI_EXTERN ffi_type ffi_type_sint32;
+FFI_EXTERN ffi_type ffi_type_uint64;
+FFI_EXTERN ffi_type ffi_type_sint64;
+FFI_EXTERN ffi_type ffi_type_float;
+FFI_EXTERN ffi_type ffi_type_double;
+FFI_EXTERN ffi_type ffi_type_pointer;
+
+#if 0
+FFI_EXTERN ffi_type ffi_type_longdouble;
+#else
+#define ffi_type_longdouble ffi_type_double
+#endif
+
+#ifdef FFI_TARGET_HAS_COMPLEX_TYPE
+FFI_EXTERN ffi_type ffi_type_complex_float;
+FFI_EXTERN ffi_type ffi_type_complex_double;
+#if 0
+FFI_EXTERN ffi_type ffi_type_complex_longdouble;
+#else
+#define ffi_type_complex_longdouble ffi_type_complex_double
+#endif
+#endif
+#endif /* LIBFFI_HIDE_BASIC_TYPES */
+
+typedef enum {
+ FFI_OK = 0,
+ FFI_BAD_TYPEDEF,
+ FFI_BAD_ABI
+} ffi_status;
+
+typedef unsigned FFI_TYPE;
+
+typedef struct {
+ ffi_abi abi;
+ unsigned nargs;
+ ffi_type **arg_types;
+ ffi_type *rtype;
+ unsigned bytes;
+ unsigned flags;
+#ifdef FFI_EXTRA_CIF_FIELDS
+ FFI_EXTRA_CIF_FIELDS;
+#endif
+} ffi_cif;
+
+#if 0
+/* Used to adjust size/alignment of ffi types. */
+void ffi_prep_types (ffi_abi abi);
+#endif
+
+/* Used internally, but overridden by some architectures */
+ffi_status ffi_prep_cif_core(ffi_cif *cif,
+ ffi_abi abi,
+ unsigned int isvariadic,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs,
+ ffi_type *rtype,
+ ffi_type **atypes);
+
+/* ---- Definitions for the raw API -------------------------------------- */
+
+#ifndef FFI_SIZEOF_ARG
+# if LONG_MAX == 2147483647
+# define FFI_SIZEOF_ARG 4
+# elif LONG_MAX == FFI_64_BIT_MAX
+# define FFI_SIZEOF_ARG 8
+# endif
+#endif
+
+#ifndef FFI_SIZEOF_JAVA_RAW
+# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG
+#endif
+
+typedef union {
+ ffi_sarg sint;
+ ffi_arg uint;
+ float flt;
+ char data[FFI_SIZEOF_ARG];
+ void* ptr;
+} ffi_raw;
+
+#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8
+/* This is a special case for mips64/n32 ABI (and perhaps others) where
+ sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. */
+typedef union {
+ signed int sint;
+ unsigned int uint;
+ float flt;
+ char data[FFI_SIZEOF_JAVA_RAW];
+ void* ptr;
+} ffi_java_raw;
+#else
+typedef ffi_raw ffi_java_raw;
+#endif
+
+
+void ffi_raw_call (ffi_cif *cif,
+ void (*fn)(void),
+ void *rvalue,
+ ffi_raw *avalue);
+
+void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw);
+void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args);
+size_t ffi_raw_size (ffi_cif *cif);
+
+/* This is analogous to the raw API, except it uses Java parameter */
+/* packing, even on 64-bit machines. I.e. on 64-bit machines */
+/* longs and doubles are followed by an empty 64-bit word. */
+
+void ffi_java_raw_call (ffi_cif *cif,
+ void (*fn)(void),
+ void *rvalue,
+ ffi_java_raw *avalue);
+
+void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw);
+void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args);
+size_t ffi_java_raw_size (ffi_cif *cif);
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#if FFI_CLOSURES
+
+#ifdef _MSC_VER
+__declspec(align(8))
+#endif
+typedef struct {
+#if 1
+ void *trampoline_table;
+ void *trampoline_table_entry;
+#else
+ char tramp[FFI_TRAMPOLINE_SIZE];
+#endif
+ ffi_cif *cif;
+ void (*fun)(ffi_cif*,void*,void**,void*);
+ void *user_data;
+#ifdef __GNUC__
+} ffi_closure __attribute__((aligned (8)));
+#else
+} ffi_closure;
+# ifdef __sgi
+# pragma pack 0
+# endif
+#endif
+
+void *ffi_closure_alloc (size_t size, void **code);
+void ffi_closure_free (void *);
+
+ffi_status
+ffi_prep_closure (ffi_closure*,
+ ffi_cif *,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data);
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure*,
+ ffi_cif *,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data,
+ void*codeloc);
+
+#ifdef __sgi
+# pragma pack 8
+#endif
+typedef struct {
+#if 1
+ void *trampoline_table;
+ void *trampoline_table_entry;
+#else
+ char tramp[FFI_TRAMPOLINE_SIZE];
+#endif
+ ffi_cif *cif;
+
+#if !FFI_NATIVE_RAW_API
+
+ /* if this is enabled, then a raw closure has the same layout
+ as a regular closure. We use this to install an intermediate
+ handler to do the translation, void** -> ffi_raw*. */
+
+ void (*translate_args)(ffi_cif*,void*,void**,void*);
+ void *this_closure;
+
+#endif
+
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*);
+ void *user_data;
+
+} ffi_raw_closure;
+
+typedef struct {
+#if 1
+ void *trampoline_table;
+ void *trampoline_table_entry;
+#else
+ char tramp[FFI_TRAMPOLINE_SIZE];
+#endif
+
+ ffi_cif *cif;
+
+#if !FFI_NATIVE_RAW_API
+
+ /* if this is enabled, then a raw closure has the same layout
+ as a regular closure. We use this to install an intermediate
+ handler to do the translation, void** -> ffi_raw*. */
+
+ void (*translate_args)(ffi_cif*,void*,void**,void*);
+ void *this_closure;
+
+#endif
+
+ void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*);
+ void *user_data;
+
+} ffi_java_raw_closure;
+
+ffi_status
+ffi_prep_raw_closure (ffi_raw_closure*,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*),
+ void *user_data);
+
+ffi_status
+ffi_prep_raw_closure_loc (ffi_raw_closure*,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*),
+ void *user_data,
+ void *codeloc);
+
+ffi_status
+ffi_prep_java_raw_closure (ffi_java_raw_closure*,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*),
+ void *user_data);
+
+ffi_status
+ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*),
+ void *user_data,
+ void *codeloc);
+
+#endif /* FFI_CLOSURES */
+
+#if FFI_GO_CLOSURES
+
+typedef struct {
+ void *tramp;
+ ffi_cif *cif;
+ void (*fun)(ffi_cif*,void*,void**,void*);
+} ffi_go_closure;
+
+ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *,
+ void (*fun)(ffi_cif*,void*,void**,void*));
+
+void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue,
+ void **avalue, void *closure);
+
+#endif /* FFI_GO_CLOSURES */
+
+/* ---- Public interface definition -------------------------------------- */
+
+ffi_status ffi_prep_cif(ffi_cif *cif,
+ ffi_abi abi,
+ unsigned int nargs,
+ ffi_type *rtype,
+ ffi_type **atypes);
+
+ffi_status ffi_prep_cif_var(ffi_cif *cif,
+ ffi_abi abi,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs,
+ ffi_type *rtype,
+ ffi_type **atypes);
+
+void ffi_call(ffi_cif *cif,
+ void (*fn)(void),
+ void *rvalue,
+ void **avalue);
+
+/* Useful for eliminating compiler warnings */
+#define FFI_FN(f) ((void (*)(void))f)
+
+/* ---- Definitions shared with assembly code ---------------------------- */
+
+#endif
+
+/* If these change, update src/mips/ffitarget.h. */
+#define FFI_TYPE_VOID 0
+#define FFI_TYPE_INT 1
+#define FFI_TYPE_FLOAT 2
+#define FFI_TYPE_DOUBLE 3
+#if 0
+#define FFI_TYPE_LONGDOUBLE 4
+#else
+#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE
+#endif
+#define FFI_TYPE_UINT8 5
+#define FFI_TYPE_SINT8 6
+#define FFI_TYPE_UINT16 7
+#define FFI_TYPE_SINT16 8
+#define FFI_TYPE_UINT32 9
+#define FFI_TYPE_SINT32 10
+#define FFI_TYPE_UINT64 11
+#define FFI_TYPE_SINT64 12
+#define FFI_TYPE_STRUCT 13
+#define FFI_TYPE_POINTER 14
+#define FFI_TYPE_COMPLEX 15
+
+/* This should always refer to the last type code (for sanity checks) */
+#define FFI_TYPE_LAST FFI_TYPE_COMPLEX
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+
+#endif
\ No newline at end of file
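The public interface declared in this header reduces to a two-step protocol: describe the callee's signature once with ffi_prep_cif(), then invoke through it with ffi_call(), passing a return slot and an array of pointers to the argument values. A minimal sketch in C (not part of the patch), assuming a normal libffi build where <ffi.h> resolves to a header like the one above and the target header supplies FFI_DEFAULT_ABI and ffi_arg; the message text is illustrative:

#include <ffi.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *args[1] = { &ffi_type_pointer };  /* puts() takes one pointer */
    const char *msg = "hello from ffi_call";
    void *values[1] = { &msg };                 /* addresses of the argument values */
    ffi_arg rc;                                 /* integer-sized return slot */

    /* Describe int puts(const char *) once... */
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
                     &ffi_type_sint, args) == FFI_OK)
        /* ...then call through the cif; rc receives the int result. */
        ffi_call(&cif, FFI_FN(puts), &rc, values);
    return 0;
}

Note that values holds the addresses of the arguments, not the arguments themselves; libffi loads each value through its corresponding ffi_type.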
diff -Nru orig/Modules/_ctypes/libffi_ios/include/ffi_cfi.h modified/Modules/_ctypes/libffi_ios/include/ffi_cfi.h
--- orig/Modules/_ctypes/libffi_ios/include/ffi_cfi.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/ffi_cfi.h 2015-03-12 21:32:24.000000000 +0800
@@ -0,0 +1,55 @@
+/* -----------------------------------------------------------------------
+ ffi_cfi.h - Copyright (c) 2014 Red Hat, Inc.
+
+ Conditionally assemble cfi directives. Only necessary for building libffi.
+ ----------------------------------------------------------------------- */
+
+#ifndef FFI_CFI_H
+#define FFI_CFI_H
+
+#ifdef HAVE_AS_CFI_PSEUDO_OP
+
+# define cfi_startproc .cfi_startproc
+# define cfi_endproc .cfi_endproc
+# define cfi_def_cfa(reg, off) .cfi_def_cfa reg, off
+# define cfi_def_cfa_register(reg) .cfi_def_cfa_register reg
+# define cfi_def_cfa_offset(off) .cfi_def_cfa_offset off
+# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off
+# define cfi_offset(reg, off) .cfi_offset reg, off
+# define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off
+# define cfi_register(r1, r2) .cfi_register r1, r2
+# define cfi_return_column(reg) .cfi_return_column reg
+# define cfi_restore(reg) .cfi_restore reg
+# define cfi_same_value(reg) .cfi_same_value reg
+# define cfi_undefined(reg) .cfi_undefined reg
+# define cfi_remember_state .cfi_remember_state
+# define cfi_restore_state .cfi_restore_state
+# define cfi_window_save .cfi_window_save
+# define cfi_personality(enc, exp) .cfi_personality enc, exp
+# define cfi_lsda(enc, exp) .cfi_lsda enc, exp
+# define cfi_escape(...) .cfi_escape __VA_ARGS__
+
+#else
+
+# define cfi_startproc
+# define cfi_endproc
+# define cfi_def_cfa(reg, off)
+# define cfi_def_cfa_register(reg)
+# define cfi_def_cfa_offset(off)
+# define cfi_adjust_cfa_offset(off)
+# define cfi_offset(reg, off)
+# define cfi_rel_offset(reg, off)
+# define cfi_register(r1, r2)
+# define cfi_return_column(reg)
+# define cfi_restore(reg)
+# define cfi_same_value(reg)
+# define cfi_undefined(reg)
+# define cfi_remember_state
+# define cfi_restore_state
+# define cfi_window_save
+# define cfi_personality(enc, exp)
+# define cfi_lsda(enc, exp)
+# define cfi_escape(...)
+
+#endif /* HAVE_AS_CFI_PSEUDO_OP */
+#endif /* FFI_CFI_H */
diff -Nru orig/Modules/_ctypes/libffi_ios/include/ffi_common.h modified/Modules/_ctypes/libffi_ios/include/ffi_common.h
--- orig/Modules/_ctypes/libffi_ios/include/ffi_common.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/ffi_common.h 2015-03-12 21:32:24.000000000 +0800
@@ -0,0 +1,132 @@
+/* -----------------------------------------------------------------------
+ ffi_common.h - Copyright (C) 2011, 2012, 2013 Anthony Green
+ Copyright (C) 2007 Free Software Foundation, Inc
+ Copyright (c) 1996 Red Hat, Inc.
+
+ Common internal definitions and macros. Only necessary for building
+ libffi.
+ ----------------------------------------------------------------------- */
+
+#ifndef FFI_COMMON_H
+#define FFI_COMMON_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <fficonfig.h>
+
+/* Do not move this. Some versions of AIX are very picky about where
+ this is positioned. */
+#ifdef __GNUC__
+# if HAVE_ALLOCA_H
+# include <alloca.h>
+# else
+ /* mingw64 defines this already in malloc.h. */
+# ifndef alloca
+# define alloca __builtin_alloca
+# endif
+# endif
+# define MAYBE_UNUSED __attribute__((__unused__))
+#else
+# define MAYBE_UNUSED
+# if HAVE_ALLOCA_H
+# include <alloca.h>
+# else
+# ifdef _AIX
+# pragma alloca
+# else
+# ifndef alloca /* predefined by HP cc +Olibcalls */
+# ifdef _MSC_VER
+# define alloca _alloca
+# else
+char *alloca ();
+# endif
+# endif
+# endif
+# endif
+#endif
+
+/* Check for the existence of memcpy. */
+#if STDC_HEADERS
+# include <string.h>
+#else
+# ifndef HAVE_MEMCPY
+# define memcpy(d, s, n) bcopy ((s), (d), (n))
+# endif
+#endif
+
+#if defined(FFI_DEBUG)
+#include <stdio.h>
+#endif
+
+#ifdef FFI_DEBUG
+void ffi_assert(char *expr, char *file, int line);
+void ffi_stop_here(void);
+void ffi_type_test(ffi_type *a, char *file, int line);
+
+#define FFI_ASSERT(x) ((x) ? (void)0 : ffi_assert(#x, __FILE__,__LINE__))
+#define FFI_ASSERT_AT(x, f, l) ((x) ? 0 : ffi_assert(#x, (f), (l)))
+#define FFI_ASSERT_VALID_TYPE(x) ffi_type_test (x, __FILE__, __LINE__)
+#else
+#define FFI_ASSERT(x)
+#define FFI_ASSERT_AT(x, f, l)
+#define FFI_ASSERT_VALID_TYPE(x)
+#endif
+
+#define ALIGN(v, a) (((((size_t) (v))-1) | ((a)-1))+1)
+#define ALIGN_DOWN(v, a) (((size_t) (v)) & -a)
+
+/* Perform machine dependent cif processing */
+ffi_status ffi_prep_cif_machdep(ffi_cif *cif);
+ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif,
+ unsigned int nfixedargs, unsigned int ntotalargs);
+
+/* Extended cif, used in callback from assembly routine */
+typedef struct
+{
+ ffi_cif *cif;
+ void *rvalue;
+ void **avalue;
+} extended_cif;
+
+/* Terse sized type definitions. */
+#if defined(_MSC_VER) || defined(__sgi) || defined(__SUNPRO_C)
+typedef unsigned char UINT8;
+typedef signed char SINT8;
+typedef unsigned short UINT16;
+typedef signed short SINT16;
+typedef unsigned int UINT32;
+typedef signed int SINT32;
+# ifdef _MSC_VER
+typedef unsigned __int64 UINT64;
+typedef signed __int64 SINT64;
+# else
+# include <inttypes.h>
+typedef uint64_t UINT64;
+typedef int64_t SINT64;
+# endif
+#else
+typedef unsigned int UINT8 __attribute__((__mode__(__QI__)));
+typedef signed int SINT8 __attribute__((__mode__(__QI__)));
+typedef unsigned int UINT16 __attribute__((__mode__(__HI__)));
+typedef signed int SINT16 __attribute__((__mode__(__HI__)));
+typedef unsigned int UINT32 __attribute__((__mode__(__SI__)));
+typedef signed int SINT32 __attribute__((__mode__(__SI__)));
+typedef unsigned int UINT64 __attribute__((__mode__(__DI__)));
+typedef signed int SINT64 __attribute__((__mode__(__DI__)));
+#endif
+
+typedef float FLOAT32;
+
+#ifndef __GNUC__
+#define __builtin_expect(x, expected_value) (x)
+#endif
+#define LIKELY(x) __builtin_expect(!!(x),1)
+#define UNLIKELY(x) __builtin_expect((x)!=0,0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
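The ALIGN()/ALIGN_DOWN() macros above are the power-of-two rounding used when sizing argument areas. A standalone sketch (not part of the patch) of what they compute; the macro bodies are copied from the header and the numeric cases are illustrative:

#include <assert.h>
#include <stddef.h>

#define ALIGN(v, a)      (((((size_t) (v))-1) | ((a)-1))+1)
#define ALIGN_DOWN(v, a) (((size_t) (v)) & -a)

int main(void)
{
    assert(ALIGN(13, 8)      == 16);  /* round up to the next 8-byte boundary */
    assert(ALIGN(16, 8)      == 16);  /* already-aligned values are unchanged */
    assert(ALIGN_DOWN(13, 8) ==  8);  /* round down to the previous boundary */
    return 0;
}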
diff -Nru orig/Modules/_ctypes/libffi_ios/include/ffi_i386.h modified/Modules/_ctypes/libffi_ios/include/ffi_i386.h
--- orig/Modules/_ctypes/libffi_ios/include/ffi_i386.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/ffi_i386.h 2015-03-12 21:32:31.000000000 +0800
@@ -0,0 +1,508 @@
+#ifdef __i386__
+
+/* -----------------------------------------------------------------*-C-*-
+ libffi 3.99999 - Copyright (c) 2011, 2014 Anthony Green
+ - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc.
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the ``Software''), to deal in the Software without
+ restriction, including without limitation the rights to use, copy,
+ modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+ ----------------------------------------------------------------------- */
+
+/* -------------------------------------------------------------------
+ The basic API is described in the README file.
+
+ The raw API is designed to bypass some of the argument packing
+ and unpacking on architectures for which it can be avoided.
+
+ The closure API allows interpreted functions to be packaged up
+ inside a C function pointer, so that they can be called as C functions,
+ with no understanding on the client side that they are interpreted.
+ It can also be used in other cases in which it is necessary to package
+ up a user specified parameter and a function pointer as a single
+ function pointer.
+
+ The closure API must be implemented in order to get its functionality,
+ e.g. for use by gij. Routines are provided to emulate the raw API
+ if the underlying platform doesn't allow faster implementation.
+
+ More details on the raw and closure API can be found in:
+
+ http://gcc.gnu.org/ml/java/1999-q3/msg00138.html
+
+ and
+
+ http://gcc.gnu.org/ml/java/1999-q3/msg00174.html
+ -------------------------------------------------------------------- */
+
+#ifndef LIBFFI_H
+#define LIBFFI_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Specify which architecture libffi is configured for. */
+#ifndef X86_DARWIN
+#define X86_DARWIN
+#endif
+
+/* ---- System configuration information --------------------------------- */
+
+#include <ffitarget.h>
+
+#ifndef LIBFFI_ASM
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define __attribute__(X)
+#endif
+
+#include <stddef.h>
+#include <limits.h>
+
+/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example).
+ But we can find it either under the correct ANSI name, or under GNU
+ C's internal name. */
+
+#define FFI_64_BIT_MAX 9223372036854775807
+
+#ifdef LONG_LONG_MAX
+# define FFI_LONG_LONG_MAX LONG_LONG_MAX
+#else
+# ifdef LLONG_MAX
+# define FFI_LONG_LONG_MAX LLONG_MAX
+# ifdef _AIX52 /* or newer has C99 LLONG_MAX */
+# undef FFI_64_BIT_MAX
+# define FFI_64_BIT_MAX 9223372036854775807LL
+# endif /* _AIX52 or newer */
+# else
+# ifdef __GNUC__
+# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__
+# endif
+# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */
+# ifndef __PPC64__
+# if defined (__IBMC__) || defined (__IBMCPP__)
+# define FFI_LONG_LONG_MAX LONGLONG_MAX
+# endif
+# endif /* __PPC64__ */
+# undef FFI_64_BIT_MAX
+# define FFI_64_BIT_MAX 9223372036854775807LL
+# endif
+# endif
+#endif
+
+/* The closure code assumes that this works on pointers, i.e. a size_t */
+/* can hold a pointer. */
+
+typedef struct _ffi_type
+{
+ size_t size;
+ unsigned short alignment;
+ unsigned short type;
+ struct _ffi_type **elements;
+} ffi_type;
+
+#ifndef LIBFFI_HIDE_BASIC_TYPES
+#if SCHAR_MAX == 127
+# define ffi_type_uchar ffi_type_uint8
+# define ffi_type_schar ffi_type_sint8
+#else
+ #error "char size not supported"
+#endif
+
+#if SHRT_MAX == 32767
+# define ffi_type_ushort ffi_type_uint16
+# define ffi_type_sshort ffi_type_sint16
+#elif SHRT_MAX == 2147483647
+# define ffi_type_ushort ffi_type_uint32
+# define ffi_type_sshort ffi_type_sint32
+#else
+ #error "short size not supported"
+#endif
+
+#if INT_MAX == 32767
+# define ffi_type_uint ffi_type_uint16
+# define ffi_type_sint ffi_type_sint16
+#elif INT_MAX == 2147483647
+# define ffi_type_uint ffi_type_uint32
+# define ffi_type_sint ffi_type_sint32
+#elif INT_MAX == 9223372036854775807
+# define ffi_type_uint ffi_type_uint64
+# define ffi_type_sint ffi_type_sint64
+#else
+ #error "int size not supported"
+#endif
+
+#if LONG_MAX == 2147483647
+# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX
+ #error "no 64-bit data type supported"
+# endif
+#elif LONG_MAX != FFI_64_BIT_MAX
+ #error "long size not supported"
+#endif
+
+#if LONG_MAX == 2147483647
+# define ffi_type_ulong ffi_type_uint32
+# define ffi_type_slong ffi_type_sint32
+#elif LONG_MAX == FFI_64_BIT_MAX
+# define ffi_type_ulong ffi_type_uint64
+# define ffi_type_slong ffi_type_sint64
+#else
+ #error "long size not supported"
+#endif
+
+/* Need minimal decorations for DLLs to work on Windows. */
+/* GCC has autoimport and autoexport. Rely on Libtool to */
+/* help MSVC export from a DLL, but always declare data */
+/* to be imported for MSVC clients. This costs an extra */
+/* indirection for MSVC clients using the static version */
+/* of the library, but don't worry about that. Besides, */
+/* as a workaround, they can define FFI_BUILDING if they */
+/* *know* they are going to link with the static library. */
+#if defined _MSC_VER && !defined FFI_BUILDING
+#define FFI_EXTERN extern __declspec(dllimport)
+#else
+#define FFI_EXTERN extern
+#endif
+
+/* These are defined in types.c */
+FFI_EXTERN ffi_type ffi_type_void;
+FFI_EXTERN ffi_type ffi_type_uint8;
+FFI_EXTERN ffi_type ffi_type_sint8;
+FFI_EXTERN ffi_type ffi_type_uint16;
+FFI_EXTERN ffi_type ffi_type_sint16;
+FFI_EXTERN ffi_type ffi_type_uint32;
+FFI_EXTERN ffi_type ffi_type_sint32;
+FFI_EXTERN ffi_type ffi_type_uint64;
+FFI_EXTERN ffi_type ffi_type_sint64;
+FFI_EXTERN ffi_type ffi_type_float;
+FFI_EXTERN ffi_type ffi_type_double;
+FFI_EXTERN ffi_type ffi_type_pointer;
+
+#if 1
+FFI_EXTERN ffi_type ffi_type_longdouble;
+#else
+#define ffi_type_longdouble ffi_type_double
+#endif
+
+#ifdef FFI_TARGET_HAS_COMPLEX_TYPE
+FFI_EXTERN ffi_type ffi_type_complex_float;
+FFI_EXTERN ffi_type ffi_type_complex_double;
+#if 1
+FFI_EXTERN ffi_type ffi_type_complex_longdouble;
+#else
+#define ffi_type_complex_longdouble ffi_type_complex_double
+#endif
+#endif
+#endif /* LIBFFI_HIDE_BASIC_TYPES */
+
+typedef enum {
+ FFI_OK = 0,
+ FFI_BAD_TYPEDEF,
+ FFI_BAD_ABI
+} ffi_status;
+
+typedef unsigned FFI_TYPE;
+
+typedef struct {
+ ffi_abi abi;
+ unsigned nargs;
+ ffi_type **arg_types;
+ ffi_type *rtype;
+ unsigned bytes;
+ unsigned flags;
+#ifdef FFI_EXTRA_CIF_FIELDS
+ FFI_EXTRA_CIF_FIELDS;
+#endif
+} ffi_cif;
+
+#if 0
+/* Used to adjust size/alignment of ffi types. */
+void ffi_prep_types (ffi_abi abi);
+#endif
+
+/* Used internally, but overridden by some architectures */
+ffi_status ffi_prep_cif_core(ffi_cif *cif,
+ ffi_abi abi,
+ unsigned int isvariadic,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs,
+ ffi_type *rtype,
+ ffi_type **atypes);
+
+/* ---- Definitions for the raw API -------------------------------------- */
+
+#ifndef FFI_SIZEOF_ARG
+# if LONG_MAX == 2147483647
+# define FFI_SIZEOF_ARG 4
+# elif LONG_MAX == FFI_64_BIT_MAX
+# define FFI_SIZEOF_ARG 8
+# endif
+#endif
+
+#ifndef FFI_SIZEOF_JAVA_RAW
+# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG
+#endif
+
+typedef union {
+ ffi_sarg sint;
+ ffi_arg uint;
+ float flt;
+ char data[FFI_SIZEOF_ARG];
+ void* ptr;
+} ffi_raw;
+
+#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8
+/* This is a special case for mips64/n32 ABI (and perhaps others) where
+ sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. */
+typedef union {
+ signed int sint;
+ unsigned int uint;
+ float flt;
+ char data[FFI_SIZEOF_JAVA_RAW];
+ void* ptr;
+} ffi_java_raw;
+#else
+typedef ffi_raw ffi_java_raw;
+#endif
+
+
+void ffi_raw_call (ffi_cif *cif,
+ void (*fn)(void),
+ void *rvalue,
+ ffi_raw *avalue);
+
+void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw);
+void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args);
+size_t ffi_raw_size (ffi_cif *cif);
+
+/* This is analogous to the raw API, except it uses Java parameter */
+/* packing, even on 64-bit machines. I.e. on 64-bit machines */
+/* longs and doubles are followed by an empty 64-bit word. */
+
+void ffi_java_raw_call (ffi_cif *cif,
+ void (*fn)(void),
+ void *rvalue,
+ ffi_java_raw *avalue);
+
+void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw);
+void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args);
+size_t ffi_java_raw_size (ffi_cif *cif);
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#if FFI_CLOSURES
+
+#ifdef _MSC_VER
+__declspec(align(8))
+#endif
+typedef struct {
+#if 0
+ void *trampoline_table;
+ void *trampoline_table_entry;
+#else
+ char tramp[FFI_TRAMPOLINE_SIZE];
+#endif
+ ffi_cif *cif;
+ void (*fun)(ffi_cif*,void*,void**,void*);
+ void *user_data;
+#ifdef __GNUC__
+} ffi_closure __attribute__((aligned (8)));
+#else
+} ffi_closure;
+# ifdef __sgi
+# pragma pack 0
+# endif
+#endif
+
+void *ffi_closure_alloc (size_t size, void **code);
+void ffi_closure_free (void *);
+
+ffi_status
+ffi_prep_closure (ffi_closure*,
+ ffi_cif *,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data);
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure*,
+ ffi_cif *,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data,
+ void*codeloc);
+
+#ifdef __sgi
+# pragma pack 8
+#endif
+typedef struct {
+#if 0
+ void *trampoline_table;
+ void *trampoline_table_entry;
+#else
+ char tramp[FFI_TRAMPOLINE_SIZE];
+#endif
+ ffi_cif *cif;
+
+#if !FFI_NATIVE_RAW_API
+
+ /* if this is enabled, then a raw closure has the same layout
+ as a regular closure. We use this to install an intermediate
+ handler to do the translation, void** -> ffi_raw*. */
+
+ void (*translate_args)(ffi_cif*,void*,void**,void*);
+ void *this_closure;
+
+#endif
+
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*);
+ void *user_data;
+
+} ffi_raw_closure;
+
+typedef struct {
+#if 0
+ void *trampoline_table;
+ void *trampoline_table_entry;
+#else
+ char tramp[FFI_TRAMPOLINE_SIZE];
+#endif
+
+ ffi_cif *cif;
+
+#if !FFI_NATIVE_RAW_API
+
+ /* if this is enabled, then a raw closure has the same layout
+ as a regular closure. We use this to install an intermediate
+ handler to do the translation, void** -> ffi_raw*. */
+
+ void (*translate_args)(ffi_cif*,void*,void**,void*);
+ void *this_closure;
+
+#endif
+
+ void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*);
+ void *user_data;
+
+} ffi_java_raw_closure;
+
+ffi_status
+ffi_prep_raw_closure (ffi_raw_closure*,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*),
+ void *user_data);
+
+ffi_status
+ffi_prep_raw_closure_loc (ffi_raw_closure*,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*),
+ void *user_data,
+ void *codeloc);
+
+ffi_status
+ffi_prep_java_raw_closure (ffi_java_raw_closure*,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*),
+ void *user_data);
+
+ffi_status
+ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*),
+ void *user_data,
+ void *codeloc);
+
+#endif /* FFI_CLOSURES */
+
+#if FFI_GO_CLOSURES
+
+typedef struct {
+ void *tramp;
+ ffi_cif *cif;
+ void (*fun)(ffi_cif*,void*,void**,void*);
+} ffi_go_closure;
+
+ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *,
+ void (*fun)(ffi_cif*,void*,void**,void*));
+
+void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue,
+ void **avalue, void *closure);
+
+#endif /* FFI_GO_CLOSURES */
+
+/* ---- Public interface definition -------------------------------------- */
+
+ffi_status ffi_prep_cif(ffi_cif *cif,
+ ffi_abi abi,
+ unsigned int nargs,
+ ffi_type *rtype,
+ ffi_type **atypes);
+
+ffi_status ffi_prep_cif_var(ffi_cif *cif,
+ ffi_abi abi,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs,
+ ffi_type *rtype,
+ ffi_type **atypes);
+
+void ffi_call(ffi_cif *cif,
+ void (*fn)(void),
+ void *rvalue,
+ void **avalue);
+
+/* Useful for eliminating compiler warnings */
+#define FFI_FN(f) ((void (*)(void))f)
+
+/* ---- Definitions shared with assembly code ---------------------------- */
+
+#endif
+
+/* If these change, update src/mips/ffitarget.h. */
+#define FFI_TYPE_VOID 0
+#define FFI_TYPE_INT 1
+#define FFI_TYPE_FLOAT 2
+#define FFI_TYPE_DOUBLE 3
+#if 1
+#define FFI_TYPE_LONGDOUBLE 4
+#else
+#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE
+#endif
+#define FFI_TYPE_UINT8 5
+#define FFI_TYPE_SINT8 6
+#define FFI_TYPE_UINT16 7
+#define FFI_TYPE_SINT16 8
+#define FFI_TYPE_UINT32 9
+#define FFI_TYPE_SINT32 10
+#define FFI_TYPE_UINT64 11
+#define FFI_TYPE_SINT64 12
+#define FFI_TYPE_STRUCT 13
+#define FFI_TYPE_POINTER 14
+#define FFI_TYPE_COMPLEX 15
+
+/* This should always refer to the last type code (for sanity checks) */
+#define FFI_TYPE_LAST FFI_TYPE_COMPLEX
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+
+#endif
\ No newline at end of file
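The closure declarations repeated in these per-architecture headers are used in three steps: ffi_closure_alloc() returns a writable closure plus a separately mapped executable entry point (on the ARM variants the closure carries trampoline_table pointers instead of inline trampoline bytes, since iOS forbids writable executable pages), ffi_prep_cif() describes the signature, and ffi_prep_closure_loc() binds the handler. A minimal sketch (not part of the patch), assuming a target where FFI_CLOSURES is nonzero; the handler name adder and its printout are illustrative:

#include <ffi.h>
#include <stdio.h>

/* Handler with the exact shape of the fun member above: cif, return
   slot, argument slots, user_data. */
static void adder(ffi_cif *cif, void *ret, void **args, void *user_data)
{
    (void) cif; (void) user_data;
    *(ffi_arg *) ret = *(int *) args[0] + *(int *) args[1];
}

int main(void)
{
    ffi_cif cif;
    ffi_type *params[2] = { &ffi_type_sint, &ffi_type_sint };
    void *code;                                 /* executable entry point */
    ffi_closure *cl = ffi_closure_alloc(sizeof(ffi_closure), &code);

    if (cl
        && ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2,
                        &ffi_type_sint, params) == FFI_OK
        && ffi_prep_closure_loc(cl, &cif, adder, NULL, code) == FFI_OK) {
        int (*fn)(int, int) = (int (*)(int, int)) code;  /* callable as plain C */
        printf("%d\n", fn(2, 3));                        /* prints 5 */
    }
    if (cl)
        ffi_closure_free(cl);
    return 0;
}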
diff -Nru orig/Modules/_ctypes/libffi_ios/include/ffi_x86_64.h modified/Modules/_ctypes/libffi_ios/include/ffi_x86_64.h
--- orig/Modules/_ctypes/libffi_ios/include/ffi_x86_64.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/ffi_x86_64.h 2015-03-12 21:32:31.000000000 +0800
@@ -0,0 +1,508 @@
+#ifdef __x86_64__
+
+/* -----------------------------------------------------------------*-C-*-
+ libffi 3.99999 - Copyright (c) 2011, 2014 Anthony Green
+ - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc.
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the ``Software''), to deal in the Software without
+ restriction, including without limitation the rights to use, copy,
+ modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+ ----------------------------------------------------------------------- */
+
+/* -------------------------------------------------------------------
+ The basic API is described in the README file.
+
+ The raw API is designed to bypass some of the argument packing
+ and unpacking on architectures for which it can be avoided.
+
+ The closure API allows interpreted functions to be packaged up
+ inside a C function pointer, so that they can be called as C functions,
+ with no understanding on the client side that they are interpreted.
+ It can also be used in other cases in which it is necessary to package
+ up a user specified parameter and a function pointer as a single
+ function pointer.
+
+ The closure API must be implemented in order to get its functionality,
+ e.g. for use by gij. Routines are provided to emulate the raw API
+ if the underlying platform doesn't allow faster implementation.
+
+ More details on the raw and closure API can be found in:
+
+ http://gcc.gnu.org/ml/java/1999-q3/msg00138.html
+
+ and
+
+ http://gcc.gnu.org/ml/java/1999-q3/msg00174.html
+ -------------------------------------------------------------------- */
+
+#ifndef LIBFFI_H
+#define LIBFFI_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Specify which architecture libffi is configured for. */
+#ifndef X86_64
+#define X86_64
+#endif
+
+/* ---- System configuration information --------------------------------- */
+
+#include <ffitarget.h>
+
+#ifndef LIBFFI_ASM
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define __attribute__(X)
+#endif
+
+#include <stddef.h>
+#include <limits.h>
+
+/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example).
+ But we can find it either under the correct ANSI name, or under GNU
+ C's internal name. */
+
+#define FFI_64_BIT_MAX 9223372036854775807
+
+#ifdef LONG_LONG_MAX
+# define FFI_LONG_LONG_MAX LONG_LONG_MAX
+#else
+# ifdef LLONG_MAX
+# define FFI_LONG_LONG_MAX LLONG_MAX
+# ifdef _AIX52 /* or newer has C99 LLONG_MAX */
+# undef FFI_64_BIT_MAX
+# define FFI_64_BIT_MAX 9223372036854775807LL
+# endif /* _AIX52 or newer */
+# else
+# ifdef __GNUC__
+# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__
+# endif
+# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */
+# ifndef __PPC64__
+# if defined (__IBMC__) || defined (__IBMCPP__)
+# define FFI_LONG_LONG_MAX LONGLONG_MAX
+# endif
+# endif /* __PPC64__ */
+# undef FFI_64_BIT_MAX
+# define FFI_64_BIT_MAX 9223372036854775807LL
+# endif
+# endif
+#endif
+
+/* The closure code assumes that this works on pointers, i.e. a size_t */
+/* can hold a pointer. */
+
+typedef struct _ffi_type
+{
+ size_t size;
+ unsigned short alignment;
+ unsigned short type;
+ struct _ffi_type **elements;
+} ffi_type;
+
+#ifndef LIBFFI_HIDE_BASIC_TYPES
+#if SCHAR_MAX == 127
+# define ffi_type_uchar ffi_type_uint8
+# define ffi_type_schar ffi_type_sint8
+#else
+ #error "char size not supported"
+#endif
+
+#if SHRT_MAX == 32767
+# define ffi_type_ushort ffi_type_uint16
+# define ffi_type_sshort ffi_type_sint16
+#elif SHRT_MAX == 2147483647
+# define ffi_type_ushort ffi_type_uint32
+# define ffi_type_sshort ffi_type_sint32
+#else
+ #error "short size not supported"
+#endif
+
+#if INT_MAX == 32767
+# define ffi_type_uint ffi_type_uint16
+# define ffi_type_sint ffi_type_sint16
+#elif INT_MAX == 2147483647
+# define ffi_type_uint ffi_type_uint32
+# define ffi_type_sint ffi_type_sint32
+#elif INT_MAX == 9223372036854775807
+# define ffi_type_uint ffi_type_uint64
+# define ffi_type_sint ffi_type_sint64
+#else
+ #error "int size not supported"
+#endif
+
+#if LONG_MAX == 2147483647
+# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX
+ #error "no 64-bit data type supported"
+# endif
+#elif LONG_MAX != FFI_64_BIT_MAX
+ #error "long size not supported"
+#endif
+
+#if LONG_MAX == 2147483647
+# define ffi_type_ulong ffi_type_uint32
+# define ffi_type_slong ffi_type_sint32
+#elif LONG_MAX == FFI_64_BIT_MAX
+# define ffi_type_ulong ffi_type_uint64
+# define ffi_type_slong ffi_type_sint64
+#else
+ #error "long size not supported"
+#endif
+
+/* Need minimal decorations for DLLs to work on Windows. */
+/* GCC has autoimport and autoexport. Rely on Libtool to */
+/* help MSVC export from a DLL, but always declare data */
+/* to be imported for MSVC clients. This costs an extra */
+/* indirection for MSVC clients using the static version */
+/* of the library, but don't worry about that. Besides, */
+/* as a workaround, they can define FFI_BUILDING if they */
+/* *know* they are going to link with the static library. */
+#if defined _MSC_VER && !defined FFI_BUILDING
+#define FFI_EXTERN extern __declspec(dllimport)
+#else
+#define FFI_EXTERN extern
+#endif
+
+/* These are defined in types.c */
+FFI_EXTERN ffi_type ffi_type_void;
+FFI_EXTERN ffi_type ffi_type_uint8;
+FFI_EXTERN ffi_type ffi_type_sint8;
+FFI_EXTERN ffi_type ffi_type_uint16;
+FFI_EXTERN ffi_type ffi_type_sint16;
+FFI_EXTERN ffi_type ffi_type_uint32;
+FFI_EXTERN ffi_type ffi_type_sint32;
+FFI_EXTERN ffi_type ffi_type_uint64;
+FFI_EXTERN ffi_type ffi_type_sint64;
+FFI_EXTERN ffi_type ffi_type_float;
+FFI_EXTERN ffi_type ffi_type_double;
+FFI_EXTERN ffi_type ffi_type_pointer;
+
+#if 1
+FFI_EXTERN ffi_type ffi_type_longdouble;
+#else
+#define ffi_type_longdouble ffi_type_double
+#endif
+
+#ifdef FFI_TARGET_HAS_COMPLEX_TYPE
+FFI_EXTERN ffi_type ffi_type_complex_float;
+FFI_EXTERN ffi_type ffi_type_complex_double;
+#if 1
+FFI_EXTERN ffi_type ffi_type_complex_longdouble;
+#else
+#define ffi_type_complex_longdouble ffi_type_complex_double
+#endif
+#endif
+#endif /* LIBFFI_HIDE_BASIC_TYPES */
+
+typedef enum {
+ FFI_OK = 0,
+ FFI_BAD_TYPEDEF,
+ FFI_BAD_ABI
+} ffi_status;
+
+typedef unsigned FFI_TYPE;
+
+typedef struct {
+ ffi_abi abi;
+ unsigned nargs;
+ ffi_type **arg_types;
+ ffi_type *rtype;
+ unsigned bytes;
+ unsigned flags;
+#ifdef FFI_EXTRA_CIF_FIELDS
+ FFI_EXTRA_CIF_FIELDS;
+#endif
+} ffi_cif;
+
+#if 0
+/* Used to adjust size/alignment of ffi types. */
+void ffi_prep_types (ffi_abi abi);
+#endif
+
+/* Used internally, but overridden by some architectures */
+ffi_status ffi_prep_cif_core(ffi_cif *cif,
+ ffi_abi abi,
+ unsigned int isvariadic,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs,
+ ffi_type *rtype,
+ ffi_type **atypes);
+
+/* ---- Definitions for the raw API -------------------------------------- */
+
+#ifndef FFI_SIZEOF_ARG
+# if LONG_MAX == 2147483647
+# define FFI_SIZEOF_ARG 4
+# elif LONG_MAX == FFI_64_BIT_MAX
+# define FFI_SIZEOF_ARG 8
+# endif
+#endif
+
+#ifndef FFI_SIZEOF_JAVA_RAW
+# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG
+#endif
+
+typedef union {
+ ffi_sarg sint;
+ ffi_arg uint;
+ float flt;
+ char data[FFI_SIZEOF_ARG];
+ void* ptr;
+} ffi_raw;
+
+#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8
+/* This is a special case for mips64/n32 ABI (and perhaps others) where
+ sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. */
+typedef union {
+ signed int sint;
+ unsigned int uint;
+ float flt;
+ char data[FFI_SIZEOF_JAVA_RAW];
+ void* ptr;
+} ffi_java_raw;
+#else
+typedef ffi_raw ffi_java_raw;
+#endif
+
+
+void ffi_raw_call (ffi_cif *cif,
+ void (*fn)(void),
+ void *rvalue,
+ ffi_raw *avalue);
+
+void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw);
+void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args);
+size_t ffi_raw_size (ffi_cif *cif);
+
+/* This is analogous to the raw API, except it uses Java parameter */
+/* packing, even on 64-bit machines. I.e. on 64-bit machines */
+/* longs and doubles are followed by an empty 64-bit word. */
+
+void ffi_java_raw_call (ffi_cif *cif,
+ void (*fn)(void),
+ void *rvalue,
+ ffi_java_raw *avalue);
+
+void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw);
+void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args);
+size_t ffi_java_raw_size (ffi_cif *cif);
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#if FFI_CLOSURES
+
+#ifdef _MSC_VER
+__declspec(align(8))
+#endif
+typedef struct {
+#if 0
+ void *trampoline_table;
+ void *trampoline_table_entry;
+#else
+ char tramp[FFI_TRAMPOLINE_SIZE];
+#endif
+ ffi_cif *cif;
+ void (*fun)(ffi_cif*,void*,void**,void*);
+ void *user_data;
+#ifdef __GNUC__
+} ffi_closure __attribute__((aligned (8)));
+#else
+} ffi_closure;
+# ifdef __sgi
+# pragma pack 0
+# endif
+#endif
+
+void *ffi_closure_alloc (size_t size, void **code);
+void ffi_closure_free (void *);
+
+ffi_status
+ffi_prep_closure (ffi_closure*,
+ ffi_cif *,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data);
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure*,
+ ffi_cif *,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data,
+ void*codeloc);
+
+#ifdef __sgi
+# pragma pack 8
+#endif
+typedef struct {
+#if 0
+ void *trampoline_table;
+ void *trampoline_table_entry;
+#else
+ char tramp[FFI_TRAMPOLINE_SIZE];
+#endif
+ ffi_cif *cif;
+
+#if !FFI_NATIVE_RAW_API
+
+ /* if this is enabled, then a raw closure has the same layout
+ as a regular closure. We use this to install an intermediate
+ handler to do the translation, void** -> ffi_raw*. */
+
+ void (*translate_args)(ffi_cif*,void*,void**,void*);
+ void *this_closure;
+
+#endif
+
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*);
+ void *user_data;
+
+} ffi_raw_closure;
+
+typedef struct {
+#if 0
+ void *trampoline_table;
+ void *trampoline_table_entry;
+#else
+ char tramp[FFI_TRAMPOLINE_SIZE];
+#endif
+
+ ffi_cif *cif;
+
+#if !FFI_NATIVE_RAW_API
+
+ /* if this is enabled, then a raw closure has the same layout
+ as a regular closure. We use this to install an intermediate
+ handler to do the translation, void** -> ffi_raw*. */
+
+ void (*translate_args)(ffi_cif*,void*,void**,void*);
+ void *this_closure;
+
+#endif
+
+ void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*);
+ void *user_data;
+
+} ffi_java_raw_closure;
+
+ffi_status
+ffi_prep_raw_closure (ffi_raw_closure*,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*),
+ void *user_data);
+
+ffi_status
+ffi_prep_raw_closure_loc (ffi_raw_closure*,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*),
+ void *user_data,
+ void *codeloc);
+
+ffi_status
+ffi_prep_java_raw_closure (ffi_java_raw_closure*,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*),
+ void *user_data);
+
+ffi_status
+ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*),
+ void *user_data,
+ void *codeloc);
+
+#endif /* FFI_CLOSURES */
+
+#if FFI_GO_CLOSURES
+
+typedef struct {
+ void *tramp;
+ ffi_cif *cif;
+ void (*fun)(ffi_cif*,void*,void**,void*);
+} ffi_go_closure;
+
+ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *,
+ void (*fun)(ffi_cif*,void*,void**,void*));
+
+void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue,
+ void **avalue, void *closure);
+
+#endif /* FFI_GO_CLOSURES */
+
+/* ---- Public interface definition -------------------------------------- */
+
+ffi_status ffi_prep_cif(ffi_cif *cif,
+ ffi_abi abi,
+ unsigned int nargs,
+ ffi_type *rtype,
+ ffi_type **atypes);
+
+ffi_status ffi_prep_cif_var(ffi_cif *cif,
+ ffi_abi abi,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs,
+ ffi_type *rtype,
+ ffi_type **atypes);
+
+void ffi_call(ffi_cif *cif,
+ void (*fn)(void),
+ void *rvalue,
+ void **avalue);
+
+/* Useful for eliminating compiler warnings */
+#define FFI_FN(f) ((void (*)(void))f)
+
+/* ---- Definitions shared with assembly code ---------------------------- */
+
+#endif
+
+/* If these change, update src/mips/ffitarget.h. */
+#define FFI_TYPE_VOID 0
+#define FFI_TYPE_INT 1
+#define FFI_TYPE_FLOAT 2
+#define FFI_TYPE_DOUBLE 3
+#if 1
+#define FFI_TYPE_LONGDOUBLE 4
+#else
+#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE
+#endif
+#define FFI_TYPE_UINT8 5
+#define FFI_TYPE_SINT8 6
+#define FFI_TYPE_UINT16 7
+#define FFI_TYPE_SINT16 8
+#define FFI_TYPE_UINT32 9
+#define FFI_TYPE_SINT32 10
+#define FFI_TYPE_UINT64 11
+#define FFI_TYPE_SINT64 12
+#define FFI_TYPE_STRUCT 13
+#define FFI_TYPE_POINTER 14
+#define FFI_TYPE_COMPLEX 15
+
+/* This should always refer to the last type code (for sanity checks) */
+#define FFI_TYPE_LAST FFI_TYPE_COMPLEX
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+
+#endif
\ No newline at end of file
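The raw API declared in these headers replaces the void** argument array with a single packed ffi_raw block, sized by ffi_raw_size() and filled by ffi_ptrarray_to_raw(). A sketch of the round trip (not part of the patch), under the same <ffi.h> assumptions as the earlier ffi_call example and a build without FFI_NO_RAW_API; add() is an illustrative callee:

#include <ffi.h>
#include <stdio.h>
#include <stdlib.h>

static int add(int a, int b) { return a + b; }

int main(void)
{
    ffi_cif cif;
    ffi_type *params[2] = { &ffi_type_sint, &ffi_type_sint };
    int a = 2, b = 3;
    void *argv[2] = { &a, &b };
    ffi_arg rc;

    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2,
                     &ffi_type_sint, params) == FFI_OK) {
        ffi_raw *raw = malloc(ffi_raw_size(&cif));  /* packed argument block */
        if (raw) {
            ffi_ptrarray_to_raw(&cif, argv, raw);   /* flatten void** -> ffi_raw* */
            ffi_raw_call(&cif, FFI_FN(add), &rc, raw);
            printf("%ld\n", (long) rc);             /* prints 5 */
            free(raw);
        }
    }
    return 0;
}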
diff -Nru orig/Modules/_ctypes/libffi_ios/include/fficonfig.h modified/Modules/_ctypes/libffi_ios/include/fficonfig.h
--- orig/Modules/_ctypes/libffi_ios/include/fficonfig.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/fficonfig.h 2015-03-12 21:32:24.000000000 +0800
@@ -0,0 +1,24 @@
+#ifdef __arm64__
+
+#include <fficonfig_arm64.h>
+
+
+#endif
+#ifdef __i386__
+
+#include <fficonfig_i386.h>
+
+
+#endif
+#ifdef __arm__
+
+#include <fficonfig_armv7.h>
+
+
+#endif
+#ifdef __x86_64__
+
+#include <fficonfig_x86_64.h>
+
+
+#endif
diff -Nru orig/Modules/_ctypes/libffi_ios/include/fficonfig_arm64.h modified/Modules/_ctypes/libffi_ios/include/fficonfig_arm64.h
--- orig/Modules/_ctypes/libffi_ios/include/fficonfig_arm64.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/fficonfig_arm64.h 2015-03-12 21:32:31.000000000 +0800
@@ -0,0 +1,214 @@
+#ifdef __arm64__
+
+/* fficonfig.h. Generated from fficonfig.h.in by configure. */
+/* fficonfig.h.in. Generated from configure.ac by autoheader. */
+
+/* Define if building universal (internal helper macro) */
+/* #undef AC_APPLE_UNIVERSAL_BUILD */
+
+/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP
+ systems. This function is required for `alloca.c' support on those systems.
+ */
+/* #undef CRAY_STACKSEG_END */
+
+/* Define to 1 if using `alloca.c'. */
+/* #undef C_ALLOCA */
+
+/* Define to the flags needed for the .section .eh_frame directive. */
+#define EH_FRAME_FLAGS "aw"
+
+/* Define this if you want extra debugging. */
+/* #undef FFI_DEBUG */
+
+/* Cannot use PROT_EXEC on this target, so, we revert to alternative means */
+#define FFI_EXEC_TRAMPOLINE_TABLE 1
+
+/* Define this if you want to enable pax emulated trampolines */
+/* #undef FFI_MMAP_EXEC_EMUTRAMP_PAX */
+
+/* Cannot use malloc on this target, so, we revert to alternative means */
+/* #undef FFI_MMAP_EXEC_WRIT */
+
+/* Define this if you do not want support for the raw API. */
+/* #undef FFI_NO_RAW_API */
+
+/* Define this if you do not want support for aggregate types. */
+/* #undef FFI_NO_STRUCTS */
+
+/* Define to 1 if you have `alloca', as a function or macro. */
+#define HAVE_ALLOCA 1
+
+/* Define to 1 if you have <alloca.h> and it should be used (not on Ultrix).
+ */
+#define HAVE_ALLOCA_H 1
+
+/* Define if your assembler supports .cfi_* directives. */
+#define HAVE_AS_CFI_PSEUDO_OP 1
+
+/* Define if your assembler supports .register. */
+/* #undef HAVE_AS_REGISTER_PSEUDO_OP */
+
+/* Define if the compiler uses zarch features. */
+/* #undef HAVE_AS_S390_ZARCH */
+
+/* Define if your assembler and linker support unaligned PC relative relocs.
+ */
+/* #undef HAVE_AS_SPARC_UA_PCREL */
+
+/* Define if your assembler supports unwind section type. */
+/* #undef HAVE_AS_X86_64_UNWIND_SECTION_TYPE */
+
+/* Define if your assembler supports PC relative relocs. */
+/* #undef HAVE_AS_X86_PCREL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define if __attribute__((visibility("hidden"))) is supported. */
+/* #undef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define if you have the long double type and it is bigger than a double */
+/* #undef HAVE_LONG_DOUBLE */
+
+/* Define if you support more than one size of the long double type */
+/* #undef HAVE_LONG_DOUBLE_VARIANT */
+
+/* Define to 1 if you have the `memcpy' function. */
+#define HAVE_MEMCPY 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `mkostemp' function. */
+/* #undef HAVE_MKOSTEMP */
+
+/* Define to 1 if you have the `mmap' function. */
+#define HAVE_MMAP 1
+
+/* Define if mmap with MAP_ANON(YMOUS) works. */
+#define HAVE_MMAP_ANON 1
+
+/* Define if mmap of /dev/zero works. */
+/* #undef HAVE_MMAP_DEV_ZERO */
+
+/* Define if read-only mmap of a plain file works. */
+#define HAVE_MMAP_FILE 1
+
+/* Define if .eh_frame sections should be read-only. */
+/* #undef HAVE_RO_EH_FRAME */
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+#define HAVE_SYS_MMAN_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+ */
+#define LT_OBJDIR ".libs/"
+
+/* Name of package */
+#define PACKAGE "libffi"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "http://github.com/atgreen/libffi/issues"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "libffi"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "libffi 3.99999"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "libffi"
+
+/* Define to the home page for this package. */
+#define PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "3.99999"
+
+/* The size of `double', as computed by sizeof. */
+#define SIZEOF_DOUBLE 8
+
+/* The size of `long double', as computed by sizeof. */
+#define SIZEOF_LONG_DOUBLE 8
+
+/* The size of `size_t', as computed by sizeof. */
+#define SIZEOF_SIZE_T 8
+
+/* If using the C implementation of alloca, define if you know the
+ direction of stack growth for your system; otherwise it will be
+ automatically deduced at runtime.
+ STACK_DIRECTION > 0 => grows toward higher addresses
+ STACK_DIRECTION < 0 => grows toward lower addresses
+ STACK_DIRECTION = 0 => direction of growth unknown */
+/* #undef STACK_DIRECTION */
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define if symbols are underscored. */
+#define SYMBOL_UNDERSCORE 1
+
+/* Define this if you are using Purify and want to suppress spurious messages.
+ */
+/* #undef USING_PURIFY */
+
+/* Version number of package */
+#define VERSION "3.99999"
+
+/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
+ significant byte first (like Motorola and SPARC, unlike Intel). */
+#if defined AC_APPLE_UNIVERSAL_BUILD
+# if defined __BIG_ENDIAN__
+# define WORDS_BIGENDIAN 1
+# endif
+#else
+# ifndef WORDS_BIGENDIAN
+/* # undef WORDS_BIGENDIAN */
+# endif
+#endif
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef size_t */
+
+
+#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE
+#ifdef LIBFFI_ASM
+#define FFI_HIDDEN(name) .hidden name
+#else
+#define FFI_HIDDEN __attribute__ ((visibility ("hidden")))
+#endif
+#else
+#ifdef LIBFFI_ASM
+#define FFI_HIDDEN(name)
+#else
+#define FFI_HIDDEN
+#endif
+#endif
+
+
+
+#endif
\ No newline at end of file
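FFI_HIDDEN, defined at the bottom of each of these config headers, keeps internal symbols out of the shared library's export table: in C it expands to the visibility attribute when the toolchain supports it (empty here, since HAVE_HIDDEN_VISIBILITY_ATTRIBUTE is undefined), and in assembly to a .hidden directive. A sketch of the C-side usage (not part of the patch; the attribute case is hard-coded and ffi_internal_helper is a hypothetical name):

#define FFI_HIDDEN __attribute__ ((visibility ("hidden")))  /* C case from above */

/* Callable between libffi's own translation units, but not
   exported from the shared library. */
FFI_HIDDEN int ffi_internal_helper(int x)
{
    return x + 1;
}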
diff -Nru orig/Modules/_ctypes/libffi_ios/include/fficonfig_armv7.h modified/Modules/_ctypes/libffi_ios/include/fficonfig_armv7.h
--- orig/Modules/_ctypes/libffi_ios/include/fficonfig_armv7.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/fficonfig_armv7.h 2015-03-12 21:32:31.000000000 +0800
@@ -0,0 +1,214 @@
+#ifdef __arm__
+
+/* fficonfig.h. Generated from fficonfig.h.in by configure. */
+/* fficonfig.h.in. Generated from configure.ac by autoheader. */
+
+/* Define if building universal (internal helper macro) */
+/* #undef AC_APPLE_UNIVERSAL_BUILD */
+
+/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP
+ systems. This function is required for `alloca.c' support on those systems.
+ */
+/* #undef CRAY_STACKSEG_END */
+
+/* Define to 1 if using `alloca.c'. */
+/* #undef C_ALLOCA */
+
+/* Define to the flags needed for the .section .eh_frame directive. */
+#define EH_FRAME_FLAGS "aw"
+
+/* Define this if you want extra debugging. */
+/* #undef FFI_DEBUG */
+
+/* Cannot use PROT_EXEC on this target, so, we revert to alternative means */
+#define FFI_EXEC_TRAMPOLINE_TABLE 1
+
+/* Define this if you want to enable pax emulated trampolines */
+/* #undef FFI_MMAP_EXEC_EMUTRAMP_PAX */
+
+/* Cannot use malloc on this target, so, we revert to alternative means */
+/* #undef FFI_MMAP_EXEC_WRIT */
+
+/* Define this if you do not want support for the raw API. */
+/* #undef FFI_NO_RAW_API */
+
+/* Define this if you do not want support for aggregate types. */
+/* #undef FFI_NO_STRUCTS */
+
+/* Define to 1 if you have `alloca', as a function or macro. */
+#define HAVE_ALLOCA 1
+
+/* Define to 1 if you have <alloca.h> and it should be used (not on Ultrix).
+ */
+#define HAVE_ALLOCA_H 1
+
+/* Define if your assembler supports .cfi_* directives. */
+/* #undef HAVE_AS_CFI_PSEUDO_OP */
+
+/* Define if your assembler supports .register. */
+/* #undef HAVE_AS_REGISTER_PSEUDO_OP */
+
+/* Define if the compiler uses zarch features. */
+/* #undef HAVE_AS_S390_ZARCH */
+
+/* Define if your assembler and linker support unaligned PC relative relocs.
+ */
+/* #undef HAVE_AS_SPARC_UA_PCREL */
+
+/* Define if your assembler supports unwind section type. */
+/* #undef HAVE_AS_X86_64_UNWIND_SECTION_TYPE */
+
+/* Define if your assembler supports PC relative relocs. */
+/* #undef HAVE_AS_X86_PCREL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define if __attribute__((visibility("hidden"))) is supported. */
+/* #undef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define if you have the long double type and it is bigger than a double */
+/* #undef HAVE_LONG_DOUBLE */
+
+/* Define if you support more than one size of the long double type */
+/* #undef HAVE_LONG_DOUBLE_VARIANT */
+
+/* Define to 1 if you have the `memcpy' function. */
+#define HAVE_MEMCPY 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `mkostemp' function. */
+/* #undef HAVE_MKOSTEMP */
+
+/* Define to 1 if you have the `mmap' function. */
+#define HAVE_MMAP 1
+
+/* Define if mmap with MAP_ANON(YMOUS) works. */
+#define HAVE_MMAP_ANON 1
+
+/* Define if mmap of /dev/zero works. */
+/* #undef HAVE_MMAP_DEV_ZERO */
+
+/* Define if read-only mmap of a plain file works. */
+#define HAVE_MMAP_FILE 1
+
+/* Define if .eh_frame sections should be read-only. */
+/* #undef HAVE_RO_EH_FRAME */
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+#define HAVE_SYS_MMAN_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+ */
+#define LT_OBJDIR ".libs/"
+
+/* Name of package */
+#define PACKAGE "libffi"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "http://github.com/atgreen/libffi/issues"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "libffi"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "libffi 3.99999"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "libffi"
+
+/* Define to the home page for this package. */
+#define PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "3.99999"
+
+/* The size of `double', as computed by sizeof. */
+#define SIZEOF_DOUBLE 8
+
+/* The size of `long double', as computed by sizeof. */
+#define SIZEOF_LONG_DOUBLE 8
+
+/* The size of `size_t', as computed by sizeof. */
+#define SIZEOF_SIZE_T 4
+
+/* If using the C implementation of alloca, define if you know the
+ direction of stack growth for your system; otherwise it will be
+ automatically deduced at runtime.
+ STACK_DIRECTION > 0 => grows toward higher addresses
+ STACK_DIRECTION < 0 => grows toward lower addresses
+ STACK_DIRECTION = 0 => direction of growth unknown */
+/* #undef STACK_DIRECTION */
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define if symbols are underscored. */
+#define SYMBOL_UNDERSCORE 1
+
+/* Define this if you are using Purify and want to suppress spurious messages.
+ */
+/* #undef USING_PURIFY */
+
+/* Version number of package */
+#define VERSION "3.99999"
+
+/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
+ significant byte first (like Motorola and SPARC, unlike Intel). */
+#if defined AC_APPLE_UNIVERSAL_BUILD
+# if defined __BIG_ENDIAN__
+# define WORDS_BIGENDIAN 1
+# endif
+#else
+# ifndef WORDS_BIGENDIAN
+/* # undef WORDS_BIGENDIAN */
+# endif
+#endif
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef size_t */
+
+
+#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE
+#ifdef LIBFFI_ASM
+#define FFI_HIDDEN(name) .hidden name
+#else
+#define FFI_HIDDEN __attribute__ ((visibility ("hidden")))
+#endif
+#else
+#ifdef LIBFFI_ASM
+#define FFI_HIDDEN(name)
+#else
+#define FFI_HIDDEN
+#endif
+#endif
+
+
+
+#endif
\ No newline at end of file
diff -Nru orig/Modules/_ctypes/libffi_ios/include/fficonfig_i386.h modified/Modules/_ctypes/libffi_ios/include/fficonfig_i386.h
--- orig/Modules/_ctypes/libffi_ios/include/fficonfig_i386.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/fficonfig_i386.h 2015-03-12 21:32:31.000000000 +0800
@@ -0,0 +1,214 @@
+#ifdef __i386__
+
+/* fficonfig.h. Generated from fficonfig.h.in by configure. */
+/* fficonfig.h.in. Generated from configure.ac by autoheader. */
+
+/* Define if building universal (internal helper macro) */
+/* #undef AC_APPLE_UNIVERSAL_BUILD */
+
+/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP
+ systems. This function is required for `alloca.c' support on those systems.
+ */
+/* #undef CRAY_STACKSEG_END */
+
+/* Define to 1 if using `alloca.c'. */
+/* #undef C_ALLOCA */
+
+/* Define to the flags needed for the .section .eh_frame directive. */
+#define EH_FRAME_FLAGS "aw"
+
+/* Define this if you want extra debugging. */
+/* #undef FFI_DEBUG */
+
+/* Cannot use PROT_EXEC on this target, so, we revert to alternative means */
+/* #undef FFI_EXEC_TRAMPOLINE_TABLE */
+
+/* Define this if you want to enable pax emulated trampolines */
+/* #undef FFI_MMAP_EXEC_EMUTRAMP_PAX */
+
+/* Cannot use malloc on this target, so we revert to alternative means */
+#define FFI_MMAP_EXEC_WRIT 1
+
+/* Define this if you do not want support for the raw API. */
+/* #undef FFI_NO_RAW_API */
+
+/* Define this if you do not want support for aggregate types. */
+/* #undef FFI_NO_STRUCTS */
+
+/* Define to 1 if you have `alloca', as a function or macro. */
+#define HAVE_ALLOCA 1
+
+/* Define to 1 if you have <alloca.h> and it should be used (not on Ultrix).
+ */
+#define HAVE_ALLOCA_H 1
+
+/* Define if your assembler supports .cfi_* directives. */
+/* #undef HAVE_AS_CFI_PSEUDO_OP */
+
+/* Define if your assembler supports .register. */
+/* #undef HAVE_AS_REGISTER_PSEUDO_OP */
+
+/* Define if the compiler uses zarch features. */
+/* #undef HAVE_AS_S390_ZARCH */
+
+/* Define if your assembler and linker support unaligned PC relative relocs.
+ */
+/* #undef HAVE_AS_SPARC_UA_PCREL */
+
+/* Define if your assembler supports unwind section type. */
+/* #undef HAVE_AS_X86_64_UNWIND_SECTION_TYPE */
+
+/* Define if your assembler supports PC relative relocs. */
+#define HAVE_AS_X86_PCREL 1
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define if __attribute__((visibility("hidden"))) is supported. */
+/* #undef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define if you have the long double type and it is bigger than a double */
+#define HAVE_LONG_DOUBLE 1
+
+/* Define if you support more than one size of the long double type */
+/* #undef HAVE_LONG_DOUBLE_VARIANT */
+
+/* Define to 1 if you have the `memcpy' function. */
+#define HAVE_MEMCPY 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `mkostemp' function. */
+/* #undef HAVE_MKOSTEMP */
+
+/* Define to 1 if you have the `mmap' function. */
+#define HAVE_MMAP 1
+
+/* Define if mmap with MAP_ANON(YMOUS) works. */
+#define HAVE_MMAP_ANON 1
+
+/* Define if mmap of /dev/zero works. */
+/* #undef HAVE_MMAP_DEV_ZERO */
+
+/* Define if read-only mmap of a plain file works. */
+#define HAVE_MMAP_FILE 1
+
+/* Define if .eh_frame sections should be read-only. */
+/* #undef HAVE_RO_EH_FRAME */
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+#define HAVE_SYS_MMAN_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+ */
+#define LT_OBJDIR ".libs/"
+
+/* Name of package */
+#define PACKAGE "libffi"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "http://github.com/atgreen/libffi/issues"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "libffi"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "libffi 3.99999"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "libffi"
+
+/* Define to the home page for this package. */
+#define PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "3.99999"
+
+/* The size of `double', as computed by sizeof. */
+#define SIZEOF_DOUBLE 8
+
+/* The size of `long double', as computed by sizeof. */
+#define SIZEOF_LONG_DOUBLE 16
+
+/* The size of `size_t', as computed by sizeof. */
+#define SIZEOF_SIZE_T 4
+
+/* If using the C implementation of alloca, define if you know the
+ direction of stack growth for your system; otherwise it will be
+ automatically deduced at runtime.
+ STACK_DIRECTION > 0 => grows toward higher addresses
+ STACK_DIRECTION < 0 => grows toward lower addresses
+ STACK_DIRECTION = 0 => direction of growth unknown */
+/* #undef STACK_DIRECTION */
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define if symbols are underscored. */
+#define SYMBOL_UNDERSCORE 1
+
+/* Define this if you are using Purify and want to suppress spurious messages.
+ */
+/* #undef USING_PURIFY */
+
+/* Version number of package */
+#define VERSION "3.99999"
+
+/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
+ significant byte first (like Motorola and SPARC, unlike Intel). */
+#if defined AC_APPLE_UNIVERSAL_BUILD
+# if defined __BIG_ENDIAN__
+# define WORDS_BIGENDIAN 1
+# endif
+#else
+# ifndef WORDS_BIGENDIAN
+/* # undef WORDS_BIGENDIAN */
+# endif
+#endif
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef size_t */
+
+
+#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE
+#ifdef LIBFFI_ASM
+#define FFI_HIDDEN(name) .hidden name
+#else
+#define FFI_HIDDEN __attribute__ ((visibility ("hidden")))
+#endif
+#else
+#ifdef LIBFFI_ASM
+#define FFI_HIDDEN(name)
+#else
+#define FFI_HIDDEN
+#endif
+#endif
+
+
+
+#endif
\ No newline at end of file
diff -Nru orig/Modules/_ctypes/libffi_ios/include/fficonfig_x86_64.h modified/Modules/_ctypes/libffi_ios/include/fficonfig_x86_64.h
--- orig/Modules/_ctypes/libffi_ios/include/fficonfig_x86_64.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/fficonfig_x86_64.h 2015-03-12 21:32:31.000000000 +0800
@@ -0,0 +1,214 @@
+#ifdef __x86_64__
+
+/* fficonfig.h. Generated from fficonfig.h.in by configure. */
+/* fficonfig.h.in. Generated from configure.ac by autoheader. */
+
+/* Define if building universal (internal helper macro) */
+/* #undef AC_APPLE_UNIVERSAL_BUILD */
+
+/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP
+ systems. This function is required for `alloca.c' support on those systems.
+ */
+/* #undef CRAY_STACKSEG_END */
+
+/* Define to 1 if using `alloca.c'. */
+/* #undef C_ALLOCA */
+
+/* Define to the flags needed for the .section .eh_frame directive. */
+#define EH_FRAME_FLAGS "aw"
+
+/* Define this if you want extra debugging. */
+/* #undef FFI_DEBUG */
+
+/* Cannot use PROT_EXEC on this target, so we revert to alternative means */
+/* #undef FFI_EXEC_TRAMPOLINE_TABLE */
+
+/* Define this if you want to enable pax emulated trampolines */
+/* #undef FFI_MMAP_EXEC_EMUTRAMP_PAX */
+
+/* Cannot use malloc on this target, so we revert to alternative means */
+#define FFI_MMAP_EXEC_WRIT 1
+
+/* Define this if you do not want support for the raw API. */
+/* #undef FFI_NO_RAW_API */
+
+/* Define this if you do not want support for aggregate types. */
+/* #undef FFI_NO_STRUCTS */
+
+/* Define to 1 if you have `alloca', as a function or macro. */
+#define HAVE_ALLOCA 1
+
+/* Define to 1 if you have <alloca.h> and it should be used (not on Ultrix).
+ */
+#define HAVE_ALLOCA_H 1
+
+/* Define if your assembler supports .cfi_* directives. */
+/* #undef HAVE_AS_CFI_PSEUDO_OP */
+
+/* Define if your assembler supports .register. */
+/* #undef HAVE_AS_REGISTER_PSEUDO_OP */
+
+/* Define if the compiler uses zarch features. */
+/* #undef HAVE_AS_S390_ZARCH */
+
+/* Define if your assembler and linker support unaligned PC relative relocs.
+ */
+/* #undef HAVE_AS_SPARC_UA_PCREL */
+
+/* Define if your assembler supports unwind section type. */
+/* #undef HAVE_AS_X86_64_UNWIND_SECTION_TYPE */
+
+/* Define if your assembler supports PC relative relocs. */
+#define HAVE_AS_X86_PCREL 1
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define if __attribute__((visibility("hidden"))) is supported. */
+/* #undef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define if you have the long double type and it is bigger than a double */
+#define HAVE_LONG_DOUBLE 1
+
+/* Define if you support more than one size of the long double type */
+/* #undef HAVE_LONG_DOUBLE_VARIANT */
+
+/* Define to 1 if you have the `memcpy' function. */
+#define HAVE_MEMCPY 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `mkostemp' function. */
+/* #undef HAVE_MKOSTEMP */
+
+/* Define to 1 if you have the `mmap' function. */
+#define HAVE_MMAP 1
+
+/* Define if mmap with MAP_ANON(YMOUS) works. */
+#define HAVE_MMAP_ANON 1
+
+/* Define if mmap of /dev/zero works. */
+/* #undef HAVE_MMAP_DEV_ZERO */
+
+/* Define if read-only mmap of a plain file works. */
+#define HAVE_MMAP_FILE 1
+
+/* Define if .eh_frame sections should be read-only. */
+/* #undef HAVE_RO_EH_FRAME */
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+#define HAVE_SYS_MMAN_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+ */
+#define LT_OBJDIR ".libs/"
+
+/* Name of package */
+#define PACKAGE "libffi"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "http://github.com/atgreen/libffi/issues"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "libffi"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "libffi 3.99999"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "libffi"
+
+/* Define to the home page for this package. */
+#define PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "3.99999"
+
+/* The size of `double', as computed by sizeof. */
+#define SIZEOF_DOUBLE 8
+
+/* The size of `long double', as computed by sizeof. */
+#define SIZEOF_LONG_DOUBLE 16
+
+/* The size of `size_t', as computed by sizeof. */
+#define SIZEOF_SIZE_T 8
+
+/* If using the C implementation of alloca, define if you know the
+ direction of stack growth for your system; otherwise it will be
+ automatically deduced at runtime.
+ STACK_DIRECTION > 0 => grows toward higher addresses
+ STACK_DIRECTION < 0 => grows toward lower addresses
+ STACK_DIRECTION = 0 => direction of growth unknown */
+/* #undef STACK_DIRECTION */
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define if symbols are underscored. */
+#define SYMBOL_UNDERSCORE 1
+
+/* Define this if you are using Purify and want to suppress spurious messages.
+ */
+/* #undef USING_PURIFY */
+
+/* Version number of package */
+#define VERSION "3.99999"
+
+/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
+ significant byte first (like Motorola and SPARC, unlike Intel). */
+#if defined AC_APPLE_UNIVERSAL_BUILD
+# if defined __BIG_ENDIAN__
+# define WORDS_BIGENDIAN 1
+# endif
+#else
+# ifndef WORDS_BIGENDIAN
+/* # undef WORDS_BIGENDIAN */
+# endif
+#endif
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef size_t */
+
+
+#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE
+#ifdef LIBFFI_ASM
+#define FFI_HIDDEN(name) .hidden name
+#else
+#define FFI_HIDDEN __attribute__ ((visibility ("hidden")))
+#endif
+#else
+#ifdef LIBFFI_ASM
+#define FFI_HIDDEN(name)
+#else
+#define FFI_HIDDEN
+#endif
+#endif
+
+
+
+#endif
\ No newline at end of file
diff -Nru orig/Modules/_ctypes/libffi_ios/include/ffitarget.h modified/Modules/_ctypes/libffi_ios/include/ffitarget.h
--- orig/Modules/_ctypes/libffi_ios/include/ffitarget.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/ffitarget.h 2015-03-12 21:32:24.000000000 +0800
@@ -0,0 +1,24 @@
+#ifdef __arm64__
+
+#include <ffitarget_arm64.h>
+
+
+#endif
+#ifdef __i386__
+
+#include <ffitarget_i386.h>
+
+
+#endif
+#ifdef __arm__
+
+#include <ffitarget_armv7.h>
+
+
+#endif
+#ifdef __x86_64__
+
+#include <ffitarget_x86_64.h>
+
+
+#endif
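+/* Each architecture-specific variant above is wrapped in its compiler
+   predefined guard (__arm64__, __i386__, ...), so this single wrapper
+   header can be included unchanged by every slice of a fat (universal)
+   iOS build. */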
diff -Nru orig/Modules/_ctypes/libffi_ios/include/ffitarget_arm64.h modified/Modules/_ctypes/libffi_ios/include/ffitarget_arm64.h
--- orig/Modules/_ctypes/libffi_ios/include/ffitarget_arm64.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/ffitarget_arm64.h 2015-03-12 21:32:31.000000000 +0800
@@ -0,0 +1,78 @@
+#ifdef __arm64__
+
+/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+``Software''), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
+#ifndef LIBFFI_ASM
+#ifdef __ILP32__
+#define FFI_SIZEOF_ARG 8
+typedef unsigned long long ffi_arg;
+typedef signed long long ffi_sarg;
+#else
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+#endif
+
+typedef enum ffi_abi
+ {
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
+ } ffi_abi;
+#endif
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
+#if defined (__APPLE__)
+#define FFI_TRAMPOLINE_SIZE 20
+#define FFI_TRAMPOLINE_CLOSURE_OFFSET 16
+#else
+#define FFI_TRAMPOLINE_SIZE 24
+#define FFI_TRAMPOLINE_CLOSURE_OFFSET FFI_TRAMPOLINE_SIZE
+#endif
+#define FFI_NATIVE_RAW_API 0
+
+/* ---- Internal ---- */
+
+#if defined (__APPLE__)
+#define FFI_TARGET_SPECIFIC_VARIADIC
+#define FFI_EXTRA_CIF_FIELDS unsigned aarch64_nfixedargs
+#else
+/* iOS reserves x18 for the system. Disable Go closures until
+ a new static chain is chosen. */
+#define FFI_GO_CLOSURES 1
+#endif
+
+#define FFI_TARGET_HAS_COMPLEX_TYPE
+
+#endif
+
+
+#endif
\ No newline at end of file
diff -Nru orig/Modules/_ctypes/libffi_ios/include/ffitarget_armv7.h modified/Modules/_ctypes/libffi_ios/include/ffitarget_armv7.h
--- orig/Modules/_ctypes/libffi_ios/include/ffitarget_armv7.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/ffitarget_armv7.h 2015-03-12 21:32:31.000000000 +0800
@@ -0,0 +1,74 @@
+#ifdef __arm__
+
+/* -----------------------------------------------------------------*-C-*-
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 2010 CodeSourcery
+ Copyright (c) 1996-2003 Red Hat, Inc.
+
+ Target configuration macros for ARM.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+ ----------------------------------------------------------------------- */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
+#ifndef LIBFFI_ASM
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+
+typedef enum ffi_abi {
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV,
+ FFI_VFP,
+ FFI_LAST_ABI,
+#ifdef __ARM_PCS_VFP
+ FFI_DEFAULT_ABI = FFI_VFP,
+#else
+ FFI_DEFAULT_ABI = FFI_SYSV,
+#endif
+} ffi_abi;
+#endif
+
+#define FFI_EXTRA_CIF_FIELDS \
+ int vfp_used; \
+ unsigned short vfp_reg_free, vfp_nargs; \
+ signed char vfp_args[16] \
+
+#define FFI_TARGET_SPECIFIC_VARIADIC
+#define FFI_TARGET_HAS_COMPLEX_TYPE
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
+#define FFI_GO_CLOSURES 1
+#define FFI_TRAMPOLINE_SIZE 12
+#define FFI_NATIVE_RAW_API 0
+
+#endif
+
+
+#endif
\ No newline at end of file
diff -Nru orig/Modules/_ctypes/libffi_ios/include/ffitarget_i386.h modified/Modules/_ctypes/libffi_ios/include/ffitarget_i386.h
--- orig/Modules/_ctypes/libffi_ios/include/ffitarget_i386.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/ffitarget_i386.h 2015-03-12 21:32:31.000000000 +0800
@@ -0,0 +1,144 @@
+#ifdef __i386__
+
+/* -----------------------------------------------------------------*-C-*-
+ ffitarget.h - Copyright (c) 2012, 2014 Anthony Green
+ Copyright (c) 1996-2003, 2010 Red Hat, Inc.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+
+ Target configuration macros for x86 and x86-64.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+ ----------------------------------------------------------------------- */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
+/* ---- System specific configurations ----------------------------------- */
+
+/* For code common to all platforms on x86 and x86_64. */
+#define X86_ANY
+
+#if defined (X86_64) && defined (__i386__)
+#undef X86_64
+#define X86
+#endif
+
+#ifdef X86_WIN64
+#define FFI_SIZEOF_ARG 8
+#define USE_BUILTIN_FFS 0 /* not yet implemented in mingw-64 */
+#endif
+
+#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION
+#ifndef _MSC_VER
+#define FFI_TARGET_HAS_COMPLEX_TYPE
+#endif
+
+/* ---- Generic type definitions ----------------------------------------- */
+
+#ifndef LIBFFI_ASM
+#ifdef X86_WIN64
+#ifdef _MSC_VER
+typedef unsigned __int64 ffi_arg;
+typedef __int64 ffi_sarg;
+#else
+typedef unsigned long long ffi_arg;
+typedef long long ffi_sarg;
+#endif
+#else
+#if defined __x86_64__ && defined __ILP32__
+#define FFI_SIZEOF_ARG 8
+#define FFI_SIZEOF_JAVA_RAW 4
+typedef unsigned long long ffi_arg;
+typedef long long ffi_sarg;
+#else
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+#endif
+#endif
+
+typedef enum ffi_abi {
+#if defined(X86_WIN64)
+ FFI_FIRST_ABI = 0,
+ FFI_WIN64,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_WIN64
+
+#elif defined(X86_64) || (defined (__x86_64__) && defined (X86_DARWIN))
+ FFI_FIRST_ABI = 1,
+ FFI_UNIX64,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_UNIX64
+
+#elif defined(X86_WIN32)
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV = 1,
+ FFI_STDCALL = 2,
+ FFI_THISCALL = 3,
+ FFI_FASTCALL = 4,
+ FFI_MS_CDECL = 5,
+ FFI_PASCAL = 6,
+ FFI_REGISTER = 7,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_MS_CDECL
+#else
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV = 1,
+ FFI_THISCALL = 3,
+ FFI_FASTCALL = 4,
+ FFI_STDCALL = 5,
+ FFI_PASCAL = 6,
+ FFI_REGISTER = 7,
+ FFI_MS_CDECL = 8,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
+#endif
+} ffi_abi;
+#endif
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
+#define FFI_GO_CLOSURES 1
+
+#define FFI_TYPE_SMALL_STRUCT_1B (FFI_TYPE_LAST + 1)
+#define FFI_TYPE_SMALL_STRUCT_2B (FFI_TYPE_LAST + 2)
+#define FFI_TYPE_SMALL_STRUCT_4B (FFI_TYPE_LAST + 3)
+#define FFI_TYPE_MS_STRUCT (FFI_TYPE_LAST + 4)
+
+#if defined (X86_64) || defined(X86_WIN64) \
+ || (defined (__x86_64__) && defined (X86_DARWIN))
+# define FFI_TRAMPOLINE_SIZE 24
+# define FFI_NATIVE_RAW_API 0
+#else
+# define FFI_TRAMPOLINE_SIZE 12
+# define FFI_NATIVE_RAW_API 1 /* x86 has native raw api support */
+#endif
+
+#endif
+
+
+
+#endif
\ No newline at end of file
diff -Nru orig/Modules/_ctypes/libffi_ios/include/ffitarget_x86_64.h modified/Modules/_ctypes/libffi_ios/include/ffitarget_x86_64.h
--- orig/Modules/_ctypes/libffi_ios/include/ffitarget_x86_64.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/include/ffitarget_x86_64.h 2015-03-12 21:32:31.000000000 +0800
@@ -0,0 +1,144 @@
+#ifdef __x86_64__
+
+/* -----------------------------------------------------------------*-C-*-
+ ffitarget.h - Copyright (c) 2012, 2014 Anthony Green
+ Copyright (c) 1996-2003, 2010 Red Hat, Inc.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+
+ Target configuration macros for x86 and x86-64.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+ ----------------------------------------------------------------------- */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
+/* ---- System specific configurations ----------------------------------- */
+
+/* For code common to all platforms on x86 and x86_64. */
+#define X86_ANY
+
+#if defined (X86_64) && defined (__i386__)
+#undef X86_64
+#define X86
+#endif
+
+#ifdef X86_WIN64
+#define FFI_SIZEOF_ARG 8
+#define USE_BUILTIN_FFS 0 /* not yet implemented in mingw-64 */
+#endif
+
+#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION
+#ifndef _MSC_VER
+#define FFI_TARGET_HAS_COMPLEX_TYPE
+#endif
+
+/* ---- Generic type definitions ----------------------------------------- */
+
+#ifndef LIBFFI_ASM
+#ifdef X86_WIN64
+#ifdef _MSC_VER
+typedef unsigned __int64 ffi_arg;
+typedef __int64 ffi_sarg;
+#else
+typedef unsigned long long ffi_arg;
+typedef long long ffi_sarg;
+#endif
+#else
+#if defined __x86_64__ && defined __ILP32__
+#define FFI_SIZEOF_ARG 8
+#define FFI_SIZEOF_JAVA_RAW 4
+typedef unsigned long long ffi_arg;
+typedef long long ffi_sarg;
+#else
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+#endif
+#endif
+
+typedef enum ffi_abi {
+#if defined(X86_WIN64)
+ FFI_FIRST_ABI = 0,
+ FFI_WIN64,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_WIN64
+
+#elif defined(X86_64) || (defined (__x86_64__) && defined (X86_DARWIN))
+ FFI_FIRST_ABI = 1,
+ FFI_UNIX64,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_UNIX64
+
+#elif defined(X86_WIN32)
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV = 1,
+ FFI_STDCALL = 2,
+ FFI_THISCALL = 3,
+ FFI_FASTCALL = 4,
+ FFI_MS_CDECL = 5,
+ FFI_PASCAL = 6,
+ FFI_REGISTER = 7,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_MS_CDECL
+#else
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV = 1,
+ FFI_THISCALL = 3,
+ FFI_FASTCALL = 4,
+ FFI_STDCALL = 5,
+ FFI_PASCAL = 6,
+ FFI_REGISTER = 7,
+ FFI_MS_CDECL = 8,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
+#endif
+} ffi_abi;
+#endif
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
+#define FFI_GO_CLOSURES 1
+
+#define FFI_TYPE_SMALL_STRUCT_1B (FFI_TYPE_LAST + 1)
+#define FFI_TYPE_SMALL_STRUCT_2B (FFI_TYPE_LAST + 2)
+#define FFI_TYPE_SMALL_STRUCT_4B (FFI_TYPE_LAST + 3)
+#define FFI_TYPE_MS_STRUCT (FFI_TYPE_LAST + 4)
+
+#if defined (X86_64) || defined(X86_WIN64) \
+ || (defined (__x86_64__) && defined (X86_DARWIN))
+# define FFI_TRAMPOLINE_SIZE 24
+# define FFI_NATIVE_RAW_API 0
+#else
+# define FFI_TRAMPOLINE_SIZE 12
+# define FFI_NATIVE_RAW_API 1 /* x86 has native raw api support */
+#endif
+
+#endif
+
+
+
+#endif
\ No newline at end of file
diff -Nru orig/Modules/_ctypes/libffi_ios/java_raw_api.c modified/Modules/_ctypes/libffi_ios/java_raw_api.c
--- orig/Modules/_ctypes/libffi_ios/java_raw_api.c 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/java_raw_api.c 2015-03-12 21:34:00.000000000 +0800
@@ -0,0 +1,374 @@
+/* -----------------------------------------------------------------------
+ java_raw_api.c - Copyright (c) 1999, 2007, 2008 Red Hat, Inc.
+
+ Cloned from raw_api.c
+
+ Raw_api.c author: Kresten Krab Thorup
+ Java_raw_api.c author: Hans-J. Boehm
+
+ $Id $
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+/* This defines a Java- and 64-bit specific variant of the raw API. */
+/* It assumes that "raw" argument blocks look like Java stacks on a */
+/* 64-bit machine.  Arguments that take two Java stack slots    */
+/* (longs, doubles) occupy 128 bits, but only the first         */
+/* 64 bits are actually used. */
+
+#include <ffi.h>
+#include <ffi_common.h>
+#include <stdlib.h>
+
+#if !defined(NO_JAVA_RAW_API)
+
+size_t
+ffi_java_raw_size (ffi_cif *cif)
+{
+ size_t result = 0;
+ int i;
+
+ ffi_type **at = cif->arg_types;
+
+ for (i = cif->nargs-1; i >= 0; i--, at++)
+ {
+ switch((*at) -> type) {
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_DOUBLE:
+ result += 2 * FFI_SIZEOF_JAVA_RAW;
+ break;
+ case FFI_TYPE_STRUCT:
+ /* No structure parameters in Java. */
+ abort();
+ case FFI_TYPE_COMPLEX:
+ /* Not supported yet. */
+ abort();
+ default:
+ result += FFI_SIZEOF_JAVA_RAW;
+ }
+ }
+
+ return result;
+}
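+
+/* Worked example of the sizing rule above, assuming FFI_SIZEOF_JAVA_RAW
+   is 4 (the usual 32-bit case): for an (int, long long, double) argument
+   list the raw block is 4 + 2*4 + 2*4 = 20 bytes -- one slot for the int
+   and two slots each for the 64-bit values. */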
+
+
+void
+ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args)
+{
+ unsigned i;
+ ffi_type **tp = cif->arg_types;
+
+#if WORDS_BIGENDIAN
+
+ for (i = 0; i < cif->nargs; i++, tp++, args++)
+ {
+ switch ((*tp)->type)
+ {
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ *args = (void*) ((char*)(raw++) + 3);
+ break;
+
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ *args = (void*) ((char*)(raw++) + 2);
+ break;
+
+#if FFI_SIZEOF_JAVA_RAW == 8
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_DOUBLE:
+ *args = (void *)raw;
+ raw += 2;
+ break;
+#endif
+
+ case FFI_TYPE_POINTER:
+ *args = (void*) &(raw++)->ptr;
+ break;
+
+ case FFI_TYPE_COMPLEX:
+ /* Not supported yet. */
+ abort();
+
+ default:
+ *args = raw;
+ raw +=
+ ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw);
+ }
+ }
+
+#else /* WORDS_BIGENDIAN */
+
+#if !PDP
+
+ /* then assume little endian */
+ for (i = 0; i < cif->nargs; i++, tp++, args++)
+ {
+#if FFI_SIZEOF_JAVA_RAW == 8
+ switch((*tp)->type) {
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_DOUBLE:
+ *args = (void*) raw;
+ raw += 2;
+ break;
+ case FFI_TYPE_COMPLEX:
+ /* Not supported yet. */
+ abort();
+ default:
+ *args = (void*) raw++;
+ }
+#else /* FFI_SIZEOF_JAVA_RAW != 8 */
+ *args = (void*) raw;
+ raw +=
+ ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw);
+#endif /* FFI_SIZEOF_JAVA_RAW == 8 */
+ }
+
+#else
+#error "pdp endian not supported"
+#endif /* ! PDP */
+
+#endif /* WORDS_BIGENDIAN */
+}
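+
+/* The big-endian offsets above make the returned pointer address the
+   low-order bytes of each 4-byte raw slot.  A minimal sketch, assuming
+   FFI_SIZEOF_JAVA_RAW == 4 on a big-endian machine: after
+
+       ffi_java_raw slot;
+       slot.uint = 42;
+
+   the byte holding 42 sits at offset 3, so *((UINT8 *)&slot + 3) == 42,
+   which is exactly the address this function stores into *args for a
+   UINT8 argument. */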
+
+void
+ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw)
+{
+ unsigned i;
+ ffi_type **tp = cif->arg_types;
+
+ for (i = 0; i < cif->nargs; i++, tp++, args++)
+ {
+ switch ((*tp)->type)
+ {
+ case FFI_TYPE_UINT8:
+#if WORDS_BIGENDIAN
+ *(UINT32*)(raw++) = *(UINT8*) (*args);
+#else
+ (raw++)->uint = *(UINT8*) (*args);
+#endif
+ break;
+
+ case FFI_TYPE_SINT8:
+#if WORDS_BIGENDIAN
+ *(SINT32*)(raw++) = *(SINT8*) (*args);
+#else
+ (raw++)->sint = *(SINT8*) (*args);
+#endif
+ break;
+
+ case FFI_TYPE_UINT16:
+#if WORDS_BIGENDIAN
+ *(UINT32*)(raw++) = *(UINT16*) (*args);
+#else
+ (raw++)->uint = *(UINT16*) (*args);
+#endif
+ break;
+
+ case FFI_TYPE_SINT16:
+#if WORDS_BIGENDIAN
+ *(SINT32*)(raw++) = *(SINT16*) (*args);
+#else
+ (raw++)->sint = *(SINT16*) (*args);
+#endif
+ break;
+
+ case FFI_TYPE_UINT32:
+#if WORDS_BIGENDIAN
+ *(UINT32*)(raw++) = *(UINT32*) (*args);
+#else
+ (raw++)->uint = *(UINT32*) (*args);
+#endif
+ break;
+
+ case FFI_TYPE_SINT32:
+#if WORDS_BIGENDIAN
+ *(SINT32*)(raw++) = *(SINT32*) (*args);
+#else
+ (raw++)->sint = *(SINT32*) (*args);
+#endif
+ break;
+
+ case FFI_TYPE_FLOAT:
+ (raw++)->flt = *(FLOAT32*) (*args);
+ break;
+
+#if FFI_SIZEOF_JAVA_RAW == 8
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_DOUBLE:
+ raw->uint = *(UINT64*) (*args);
+ raw += 2;
+ break;
+#endif
+
+ case FFI_TYPE_POINTER:
+ (raw++)->ptr = **(void***) args;
+ break;
+
+ default:
+#if FFI_SIZEOF_JAVA_RAW == 8
+ FFI_ASSERT(0); /* Should have covered all cases */
+#else
+ memcpy ((void*) raw->data, (void*)*args, (*tp)->size);
+ raw +=
+ ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw);
+#endif
+ }
+ }
+}
+
+#if !FFI_NATIVE_RAW_API
+
+static void
+ffi_java_rvalue_to_raw (ffi_cif *cif, void *rvalue)
+{
+#if WORDS_BIGENDIAN && FFI_SIZEOF_ARG == 8
+ switch (cif->rtype->type)
+ {
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_UINT32:
+ *(UINT64 *)rvalue <<= 32;
+ break;
+
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_INT:
+#if FFI_SIZEOF_JAVA_RAW == 4
+ case FFI_TYPE_POINTER:
+#endif
+ *(SINT64 *)rvalue <<= 32;
+ break;
+
+ case FFI_TYPE_COMPLEX:
+ /* Not supported yet. */
+ abort();
+
+ default:
+ break;
+ }
+#endif
+}
+
+static void
+ffi_java_raw_to_rvalue (ffi_cif *cif, void *rvalue)
+{
+#if WORDS_BIGENDIAN && FFI_SIZEOF_ARG == 8
+ switch (cif->rtype->type)
+ {
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_UINT32:
+ *(UINT64 *)rvalue >>= 32;
+ break;
+
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_INT:
+ *(SINT64 *)rvalue >>= 32;
+ break;
+
+ case FFI_TYPE_COMPLEX:
+ /* Not supported yet. */
+ abort();
+
+ default:
+ break;
+ }
+#endif
+}
+
+/* This is a generic definition of ffi_raw_call, to be used if the
+ * native system does not provide a machine-specific implementation.
+ * Having this allows code to be written for the raw API without the
+ * need for system-specific code to handle input in that format; the
+ * following functions handle the translation back and forth
+ * automatically. */
+
+void ffi_java_raw_call (ffi_cif *cif, void (*fn)(void), void *rvalue,
+ ffi_java_raw *raw)
+{
+ void **avalue = (void**) alloca (cif->nargs * sizeof (void*));
+ ffi_java_raw_to_ptrarray (cif, raw, avalue);
+ ffi_call (cif, fn, rvalue, avalue);
+ ffi_java_rvalue_to_raw (cif, rvalue);
+}
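+
+/* Usage sketch, assuming cif has already been prepared (via ffi_prep_cif)
+   for an "int f(int, int)" signature and raw points to a block of at
+   least ffi_java_raw_size (&cif) bytes:
+
+       int a = 2, b = 3;
+       void *argv[2] = { &a, &b };
+       ffi_arg rc;
+
+       ffi_java_ptrarray_to_raw (&cif, argv, raw);
+       ffi_java_raw_call (&cif, FFI_FN (f), &rc, raw);
+
+   rc then holds f(2, 3) widened to an ffi_arg. */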
+
+#if FFI_CLOSURES /* base system provides closures */
+
+static void
+ffi_java_translate_args (ffi_cif *cif, void *rvalue,
+ void **avalue, void *user_data)
+{
+ ffi_java_raw *raw = (ffi_java_raw*)alloca (ffi_java_raw_size (cif));
+ ffi_raw_closure *cl = (ffi_raw_closure*)user_data;
+
+ ffi_java_ptrarray_to_raw (cif, avalue, raw);
+ (*cl->fun) (cif, rvalue, (ffi_raw*)raw, cl->user_data);
+ ffi_java_raw_to_rvalue (cif, rvalue);
+}
+
+ffi_status
+ffi_prep_java_raw_closure_loc (ffi_java_raw_closure* cl,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*),
+ void *user_data,
+ void *codeloc)
+{
+ ffi_status status;
+
+ status = ffi_prep_closure_loc ((ffi_closure*) cl,
+ cif,
+ &ffi_java_translate_args,
+ codeloc,
+ codeloc);
+ if (status == FFI_OK)
+ {
+ cl->fun = fun;
+ cl->user_data = user_data;
+ }
+
+ return status;
+}
+
+/* Again, here is the generic version of ffi_prep_java_raw_closure, which
+ * will install an intermediate "hub" for translating arguments from
+ * the pointer-array format to the raw format. */
+
+ffi_status
+ffi_prep_java_raw_closure (ffi_java_raw_closure* cl,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*),
+ void *user_data)
+{
+ return ffi_prep_java_raw_closure_loc (cl, cif, fun, user_data, cl);
+}
+
+#endif /* FFI_CLOSURES */
+#endif /* !FFI_NATIVE_RAW_API */
+#endif /* !NO_JAVA_RAW_API */
diff -Nru orig/Modules/_ctypes/libffi_ios/prep_cif.c modified/Modules/_ctypes/libffi_ios/prep_cif.c
--- orig/Modules/_ctypes/libffi_ios/prep_cif.c 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/prep_cif.c 2015-03-12 21:34:00.000000000 +0800
@@ -0,0 +1,242 @@
+/* -----------------------------------------------------------------------
+ prep_cif.c - Copyright (c) 2011, 2012 Anthony Green
+ Copyright (c) 1996, 1998, 2007 Red Hat, Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#include <ffi.h>
+#include <ffi_common.h>
+#include <stdlib.h>
+
+/* Round up to FFI_SIZEOF_ARG. */
+
+#define STACK_ARG_SIZE(x) ALIGN(x, FFI_SIZEOF_ARG)
+
+/* Perform machine independent initialization of aggregate type
+ specifications. */
+
+static ffi_status initialize_aggregate(ffi_type *arg)
+{
+ ffi_type **ptr;
+
+ if (UNLIKELY(arg == NULL || arg->elements == NULL))
+ return FFI_BAD_TYPEDEF;
+
+ arg->size = 0;
+ arg->alignment = 0;
+
+ ptr = &(arg->elements[0]);
+
+ if (UNLIKELY(ptr == 0))
+ return FFI_BAD_TYPEDEF;
+
+ while ((*ptr) != NULL)
+ {
+ if (UNLIKELY(((*ptr)->size == 0)
+ && (initialize_aggregate((*ptr)) != FFI_OK)))
+ return FFI_BAD_TYPEDEF;
+
+ /* Perform a sanity check on the argument type */
+ FFI_ASSERT_VALID_TYPE(*ptr);
+
+ arg->size = ALIGN(arg->size, (*ptr)->alignment);
+ arg->size += (*ptr)->size;
+
+ arg->alignment = (arg->alignment > (*ptr)->alignment) ?
+ arg->alignment : (*ptr)->alignment;
+
+ ptr++;
+ }
+
+ /* Structure size includes tail padding. This is important for
+ structures that fit in one register on ABIs like the PowerPC64
+ Linux ABI that right justify small structs in a register.
+ It's also needed for nested structure layout, for example
+ struct A { long a; char b; }; struct B { struct A x; char y; };
+ should find y at an offset of 2*sizeof(long) and result in a
+ total size of 3*sizeof(long). */
+ arg->size = ALIGN (arg->size, arg->alignment);
+
+ /* On some targets, the ABI defines that structures have an additional
+ alignment beyond the "natural" one based on their elements. */
+#ifdef FFI_AGGREGATE_ALIGNMENT
+ if (FFI_AGGREGATE_ALIGNMENT > arg->alignment)
+ arg->alignment = FFI_AGGREGATE_ALIGNMENT;
+#endif
+
+ if (arg->size == 0)
+ return FFI_BAD_TYPEDEF;
+ else
+ return FFI_OK;
+}
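+
+/* Worked example of the layout rules above: for a type describing
+   struct { char c; int x; } with elements { &ffi_type_schar,
+   &ffi_type_sint32, NULL }, the loop computes size = ALIGN(1, 4) + 4 = 8
+   and alignment = 4, and the final ALIGN leaves size at 8 -- the same
+   layout a C compiler produces on these iOS targets. */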
+
+#ifndef __CRIS__
+/* The CRIS ABI specifies structure elements to have byte
+ alignment only, so it completely overrides this function,
+ which assumes "natural" alignment and padding. */
+
+/* Perform machine independent ffi_cif preparation, then call
+ machine dependent routine. */
+
+/* For non-variadic functions, isvariadic should be 0 and
+   nfixedargs == ntotalargs.
+
+   For variadic calls, isvariadic should be 1 and nfixedargs
+   and ntotalargs set as appropriate.  nfixedargs must always be >= 1. */
+
+
+ffi_status FFI_HIDDEN ffi_prep_cif_core(ffi_cif *cif, ffi_abi abi,
+ unsigned int isvariadic,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs,
+ ffi_type *rtype, ffi_type **atypes)
+{
+ unsigned bytes = 0;
+ unsigned int i;
+ ffi_type **ptr;
+
+ FFI_ASSERT(cif != NULL);
+ FFI_ASSERT((!isvariadic) || (nfixedargs >= 1));
+ FFI_ASSERT(nfixedargs <= ntotalargs);
+
+ if (! (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI))
+ return FFI_BAD_ABI;
+
+ cif->abi = abi;
+ cif->arg_types = atypes;
+ cif->nargs = ntotalargs;
+ cif->rtype = rtype;
+
+ cif->flags = 0;
+
+#if HAVE_LONG_DOUBLE_VARIANT
+ ffi_prep_types (abi);
+#endif
+
+ /* Initialize the return type if necessary */
+ if ((cif->rtype->size == 0) && (initialize_aggregate(cif->rtype) != FFI_OK))
+ return FFI_BAD_TYPEDEF;
+
+#ifndef FFI_TARGET_HAS_COMPLEX_TYPE
+ if (rtype->type == FFI_TYPE_COMPLEX)
+ abort();
+#endif
+ /* Perform a sanity check on the return type */
+ FFI_ASSERT_VALID_TYPE(cif->rtype);
+
+ /* x86, x86-64 and s390 stack space allocation is handled in prep_machdep. */
+#if !defined FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION
+ /* Make space for the return structure pointer */
+ if (cif->rtype->type == FFI_TYPE_STRUCT
+#ifdef TILE
+ && (cif->rtype->size > 10 * FFI_SIZEOF_ARG)
+#endif
+#ifdef XTENSA
+ && (cif->rtype->size > 16)
+#endif
+#ifdef NIOS2
+ && (cif->rtype->size > 8)
+#endif
+ )
+ bytes = STACK_ARG_SIZE(sizeof(void*));
+#endif
+
+ for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++)
+ {
+
+ /* Initialize any uninitialized aggregate type definitions */
+ if (((*ptr)->size == 0) && (initialize_aggregate((*ptr)) != FFI_OK))
+ return FFI_BAD_TYPEDEF;
+
+#ifndef FFI_TARGET_HAS_COMPLEX_TYPE
+ if ((*ptr)->type == FFI_TYPE_COMPLEX)
+ abort();
+#endif
+ /* Perform a sanity check on the argument type, do this
+ check after the initialization. */
+ FFI_ASSERT_VALID_TYPE(*ptr);
+
+#if !defined FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION
+ {
+ /* Add any padding if necessary */
+ if (((*ptr)->alignment - 1) & bytes)
+ bytes = (unsigned)ALIGN(bytes, (*ptr)->alignment);
+
+#ifdef TILE
+ if (bytes < 10 * FFI_SIZEOF_ARG &&
+ bytes + STACK_ARG_SIZE((*ptr)->size) > 10 * FFI_SIZEOF_ARG)
+ {
+ /* An argument is never split between the 10 parameter
+ registers and the stack. */
+ bytes = 10 * FFI_SIZEOF_ARG;
+ }
+#endif
+#ifdef XTENSA
+ if (bytes <= 6*4 && bytes + STACK_ARG_SIZE((*ptr)->size) > 6*4)
+ bytes = 6*4;
+#endif
+
+ bytes += STACK_ARG_SIZE((*ptr)->size);
+ }
+#endif
+ }
+
+ cif->bytes = bytes;
+
+ /* Perform machine dependent cif processing */
+#ifdef FFI_TARGET_SPECIFIC_VARIADIC
+ if (isvariadic)
+ return ffi_prep_cif_machdep_var(cif, nfixedargs, ntotalargs);
+#endif
+
+ return ffi_prep_cif_machdep(cif);
+}
+#endif /* not __CRIS__ */
+
+ffi_status ffi_prep_cif(ffi_cif *cif, ffi_abi abi, unsigned int nargs,
+ ffi_type *rtype, ffi_type **atypes)
+{
+ return ffi_prep_cif_core(cif, abi, 0, nargs, nargs, rtype, atypes);
+}
+
+ffi_status ffi_prep_cif_var(ffi_cif *cif,
+ ffi_abi abi,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs,
+ ffi_type *rtype,
+ ffi_type **atypes)
+{
+ return ffi_prep_cif_core(cif, abi, 1, nfixedargs, ntotalargs, rtype, atypes);
+}
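+
+/* Usage sketch for the variadic entry point: describing a call to
+   printf ("%d\n", 7), i.e. one fixed argument out of two total:
+
+       ffi_cif cif;
+       ffi_type *at[2] = { &ffi_type_pointer, &ffi_type_sint };
+       char *fmt = "%d\n";
+       int n = 7;
+       void *argv[2] = { &fmt, &n };
+       ffi_arg rc;
+
+       if (ffi_prep_cif_var (&cif, FFI_DEFAULT_ABI, 1, 2,
+                             &ffi_type_sint, at) == FFI_OK)
+         ffi_call (&cif, FFI_FN (printf), &rc, argv);
+*/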
+
+#if FFI_CLOSURES
+
+ffi_status
+ffi_prep_closure (ffi_closure* closure,
+ ffi_cif* cif,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data)
+{
+ return ffi_prep_closure_loc (closure, cif, fun, user_data, closure);
+}
+
+#endif
diff -Nru orig/Modules/_ctypes/libffi_ios/raw_api.c modified/Modules/_ctypes/libffi_ios/raw_api.c
--- orig/Modules/_ctypes/libffi_ios/raw_api.c 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/raw_api.c 2015-03-12 21:34:00.000000000 +0800
@@ -0,0 +1,267 @@
+/* -----------------------------------------------------------------------
+ raw_api.c - Copyright (c) 1999, 2008 Red Hat, Inc.
+
+ Author: Kresten Krab Thorup
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+/* This file defines generic functions for use with the raw api. */
+
+#include <ffi.h>
+#include <ffi_common.h>
+
+#if !FFI_NO_RAW_API
+
+size_t
+ffi_raw_size (ffi_cif *cif)
+{
+ size_t result = 0;
+ int i;
+
+ ffi_type **at = cif->arg_types;
+
+ for (i = cif->nargs-1; i >= 0; i--, at++)
+ {
+#if !FFI_NO_STRUCTS
+ if ((*at)->type == FFI_TYPE_STRUCT)
+ result += ALIGN (sizeof (void*), FFI_SIZEOF_ARG);
+ else
+#endif
+ result += ALIGN ((*at)->size, FFI_SIZEOF_ARG);
+ }
+
+ return result;
+}
+
+
+void
+ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args)
+{
+ unsigned i;
+ ffi_type **tp = cif->arg_types;
+
+#if WORDS_BIGENDIAN
+
+ for (i = 0; i < cif->nargs; i++, tp++, args++)
+ {
+ switch ((*tp)->type)
+ {
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 1);
+ break;
+
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 2);
+ break;
+
+#if FFI_SIZEOF_ARG >= 4
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 4);
+ break;
+#endif
+
+#if !FFI_NO_STRUCTS
+ case FFI_TYPE_STRUCT:
+ *args = (raw++)->ptr;
+ break;
+#endif
+
+ case FFI_TYPE_COMPLEX:
+ *args = (raw++)->ptr;
+ break;
+
+ case FFI_TYPE_POINTER:
+ *args = (void*) &(raw++)->ptr;
+ break;
+
+ default:
+ *args = raw;
+ raw += ALIGN ((*tp)->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG;
+ }
+ }
+
+#else /* WORDS_BIGENDIAN */
+
+#if !PDP
+
+ /* then assume little endian */
+ for (i = 0; i < cif->nargs; i++, tp++, args++)
+ {
+#if !FFI_NO_STRUCTS
+ if ((*tp)->type == FFI_TYPE_STRUCT)
+ {
+ *args = (raw++)->ptr;
+ }
+ else
+#endif
+ if ((*tp)->type == FFI_TYPE_COMPLEX)
+ {
+ *args = (raw++)->ptr;
+ }
+ else
+ {
+ *args = (void*) raw;
+ raw += ALIGN ((*tp)->size, sizeof (void*)) / sizeof (void*);
+ }
+ }
+
+#else
+#error "pdp endian not supported"
+#endif /* ! PDP */
+
+#endif /* WORDS_BIGENDIAN */
+}
+
+void
+ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw)
+{
+ unsigned i;
+ ffi_type **tp = cif->arg_types;
+
+ for (i = 0; i < cif->nargs; i++, tp++, args++)
+ {
+ switch ((*tp)->type)
+ {
+ case FFI_TYPE_UINT8:
+ (raw++)->uint = *(UINT8*) (*args);
+ break;
+
+ case FFI_TYPE_SINT8:
+ (raw++)->sint = *(SINT8*) (*args);
+ break;
+
+ case FFI_TYPE_UINT16:
+ (raw++)->uint = *(UINT16*) (*args);
+ break;
+
+ case FFI_TYPE_SINT16:
+ (raw++)->sint = *(SINT16*) (*args);
+ break;
+
+#if FFI_SIZEOF_ARG >= 4
+ case FFI_TYPE_UINT32:
+ (raw++)->uint = *(UINT32*) (*args);
+ break;
+
+ case FFI_TYPE_SINT32:
+ (raw++)->sint = *(SINT32*) (*args);
+ break;
+#endif
+
+#if !FFI_NO_STRUCTS
+ case FFI_TYPE_STRUCT:
+ (raw++)->ptr = *args;
+ break;
+#endif
+
+ case FFI_TYPE_COMPLEX:
+ (raw++)->ptr = *args;
+ break;
+
+ case FFI_TYPE_POINTER:
+ (raw++)->ptr = **(void***) args;
+ break;
+
+ default:
+ memcpy ((void*) raw->data, (void*)*args, (*tp)->size);
+ raw += ALIGN ((*tp)->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG;
+ }
+ }
+}
+
+#if !FFI_NATIVE_RAW_API
+
+
+/* This is a generic definition of ffi_raw_call, to be used if the
+ * native system does not provide a machine-specific implementation.
+ * Having this allows code to be written for the raw API without the
+ * need for system-specific code to handle input in that format; the
+ * following functions handle the translation back and forth
+ * automatically. */
+
+void ffi_raw_call (ffi_cif *cif, void (*fn)(void), void *rvalue, ffi_raw *raw)
+{
+ void **avalue = (void**) alloca (cif->nargs * sizeof (void*));
+ ffi_raw_to_ptrarray (cif, raw, avalue);
+ ffi_call (cif, fn, rvalue, avalue);
+}
+
+#if FFI_CLOSURES /* base system provides closures */
+
+static void
+ffi_translate_args (ffi_cif *cif, void *rvalue,
+ void **avalue, void *user_data)
+{
+ ffi_raw *raw = (ffi_raw*)alloca (ffi_raw_size (cif));
+ ffi_raw_closure *cl = (ffi_raw_closure*)user_data;
+
+ ffi_ptrarray_to_raw (cif, avalue, raw);
+ (*cl->fun) (cif, rvalue, raw, cl->user_data);
+}
+
+ffi_status
+ffi_prep_raw_closure_loc (ffi_raw_closure* cl,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*),
+ void *user_data,
+ void *codeloc)
+{
+ ffi_status status;
+
+ status = ffi_prep_closure_loc ((ffi_closure*) cl,
+ cif,
+ &ffi_translate_args,
+ codeloc,
+ codeloc);
+ if (status == FFI_OK)
+ {
+ cl->fun = fun;
+ cl->user_data = user_data;
+ }
+
+ return status;
+}
+
+#endif /* FFI_CLOSURES */
+#endif /* !FFI_NATIVE_RAW_API */
+
+#if FFI_CLOSURES
+
+/* Again, here is the generic version of ffi_prep_raw_closure, which
+ * will install an intermediate "hub" for translating arguments from
+ * the pointer-array format to the raw format. */
+
+ffi_status
+ffi_prep_raw_closure (ffi_raw_closure* cl,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*),
+ void *user_data)
+{
+ return ffi_prep_raw_closure_loc (cl, cif, fun, user_data, cl);
+}
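+
+/* Usage sketch (hypothetical handler; the closure and code location
+   would normally come from ffi_closure_alloc):
+
+       static void
+       add_handler (ffi_cif *cif, void *rvalue, ffi_raw *raw, void *udata)
+       {
+         *(ffi_arg *)rvalue = raw[0].sint + raw[1].sint;
+       }
+
+   Once ffi_prep_raw_closure_loc (cl, &cif, add_handler, NULL, codeloc)
+   returns FFI_OK, codeloc is callable as an "int (*)(int, int)". */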
+
+#endif /* FFI_CLOSURES */
+
+#endif /* !FFI_NO_RAW_API */
diff -Nru orig/Modules/_ctypes/libffi_ios/types.c modified/Modules/_ctypes/libffi_ios/types.c
--- orig/Modules/_ctypes/libffi_ios/types.c 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/types.c 2015-03-12 21:34:00.000000000 +0800
@@ -0,0 +1,106 @@
+/* -----------------------------------------------------------------------
+ types.c - Copyright (c) 1996, 1998 Red Hat, Inc.
+
+ Predefined ffi_types needed by libffi.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+/* Hide the basic type definitions from the header file, so that we
+ can redefine them here as "const". */
+#define LIBFFI_HIDE_BASIC_TYPES
+
+#include <ffi.h>
+#include <ffi_common.h>
+
+/* Type definitions */
+
+#define FFI_TYPEDEF(name, type, id, maybe_const)\
+struct struct_align_##name { \
+ char c; \
+ type x; \
+}; \
+maybe_const ffi_type ffi_type_##name = { \
+ sizeof(type), \
+ offsetof(struct struct_align_##name, x), \
+ id, NULL \
+}
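+
+/* For reference, FFI_TYPEDEF(sint32, SINT32, FFI_TYPE_SINT32, const)
+   expands to:
+
+       struct struct_align_sint32 { char c; SINT32 x; };
+       const ffi_type ffi_type_sint32 = {
+         sizeof(SINT32),
+         offsetof(struct struct_align_sint32, x),
+         FFI_TYPE_SINT32, NULL
+       };
+
+   i.e. each type's alignment is measured portably with the classic
+   "char followed by T" offsetof trick. */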
+
+#define FFI_COMPLEX_TYPEDEF(name, type, maybe_const) \
+static ffi_type *ffi_elements_complex_##name [2] = { \
+ (ffi_type *)(&ffi_type_##name), NULL \
+}; \
+struct struct_align_complex_##name { \
+ char c; \
+ _Complex type x; \
+}; \
+maybe_const ffi_type ffi_type_complex_##name = { \
+ sizeof(_Complex type), \
+ offsetof(struct struct_align_complex_##name, x), \
+ FFI_TYPE_COMPLEX, \
+ (ffi_type **)ffi_elements_complex_##name \
+}
+
+/* Size and alignment are fake here. They must not be 0. */
+const ffi_type ffi_type_void = {
+ 1, 1, FFI_TYPE_VOID, NULL
+};
+
+FFI_TYPEDEF(uint8, UINT8, FFI_TYPE_UINT8, const);
+FFI_TYPEDEF(sint8, SINT8, FFI_TYPE_SINT8, const);
+FFI_TYPEDEF(uint16, UINT16, FFI_TYPE_UINT16, const);
+FFI_TYPEDEF(sint16, SINT16, FFI_TYPE_SINT16, const);
+FFI_TYPEDEF(uint32, UINT32, FFI_TYPE_UINT32, const);
+FFI_TYPEDEF(sint32, SINT32, FFI_TYPE_SINT32, const);
+FFI_TYPEDEF(uint64, UINT64, FFI_TYPE_UINT64, const);
+FFI_TYPEDEF(sint64, SINT64, FFI_TYPE_SINT64, const);
+
+FFI_TYPEDEF(pointer, void*, FFI_TYPE_POINTER, const);
+
+FFI_TYPEDEF(float, float, FFI_TYPE_FLOAT, const);
+FFI_TYPEDEF(double, double, FFI_TYPE_DOUBLE, const);
+
+#if !defined HAVE_LONG_DOUBLE_VARIANT || defined __alpha__
+#define FFI_LDBL_CONST const
+#else
+#define FFI_LDBL_CONST
+#endif
+
+#ifdef __alpha__
+/* Even if we're not configured to default to 128-bit long double,
+ maintain binary compatibility, as -mlong-double-128 can be used
+ at any time. */
+/* Validate the hard-coded number below. */
+# if defined(__LONG_DOUBLE_128__) && FFI_TYPE_LONGDOUBLE != 4
+# error FFI_TYPE_LONGDOUBLE out of date
+# endif
+const ffi_type ffi_type_longdouble = { 16, 16, 4, NULL };
+#elif FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
+FFI_TYPEDEF(longdouble, long double, FFI_TYPE_LONGDOUBLE, FFI_LDBL_CONST);
+#endif
+
+#ifdef FFI_TARGET_HAS_COMPLEX_TYPE
+FFI_COMPLEX_TYPEDEF(float, float, const);
+FFI_COMPLEX_TYPEDEF(double, double, const);
+#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
+FFI_COMPLEX_TYPEDEF(longdouble, long double, FFI_LDBL_CONST);
+#endif
+#endif
diff -Nru orig/Modules/_ctypes/libffi_ios/x86/ffi.c modified/Modules/_ctypes/libffi_ios/x86/ffi.c
--- orig/Modules/_ctypes/libffi_ios/x86/ffi.c 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/x86/ffi.c 2015-03-12 21:33:31.000000000 +0800
@@ -0,0 +1,729 @@
+/* -----------------------------------------------------------------------
+ ffi.c - Copyright (c) 1996, 1998, 1999, 2001, 2007, 2008 Red Hat, Inc.
+ Copyright (c) 2002 Ranjit Mathew
+ Copyright (c) 2002 Bo Thorsen
+ Copyright (c) 2002 Roger Sayle
+ Copyright (C) 2008, 2010 Free Software Foundation, Inc.
+
+ x86 Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#ifndef __x86_64__
+#include <ffi.h>
+#include <ffi_common.h>
+#include <stdint.h>
+#include "internal.h"
+
+/* Force FFI_TYPE_LONGDOUBLE to be different from FFI_TYPE_DOUBLE;
+ all further uses in this file will refer to the 80-bit type. */
+#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
+# if FFI_TYPE_LONGDOUBLE != 4
+# error FFI_TYPE_LONGDOUBLE out of date
+# endif
+#else
+# undef FFI_TYPE_LONGDOUBLE
+# define FFI_TYPE_LONGDOUBLE 4
+#endif
+
+#if defined(__GNUC__) && !defined(__declspec)
+# define __declspec(x) __attribute__((x))
+#endif
+
+/* Perform machine dependent cif processing. */
+ffi_status FFI_HIDDEN
+ffi_prep_cif_machdep(ffi_cif *cif)
+{
+ size_t bytes = 0;
+ int i, n, flags, cabi = cif->abi;
+
+ switch (cabi)
+ {
+ case FFI_SYSV:
+ case FFI_STDCALL:
+ case FFI_THISCALL:
+ case FFI_FASTCALL:
+ case FFI_MS_CDECL:
+ case FFI_PASCAL:
+ case FFI_REGISTER:
+ break;
+ default:
+ return FFI_BAD_ABI;
+ }
+
+ switch (cif->rtype->type)
+ {
+ case FFI_TYPE_VOID:
+ flags = X86_RET_VOID;
+ break;
+ case FFI_TYPE_FLOAT:
+ flags = X86_RET_FLOAT;
+ break;
+ case FFI_TYPE_DOUBLE:
+ flags = X86_RET_DOUBLE;
+ break;
+ case FFI_TYPE_LONGDOUBLE:
+ flags = X86_RET_LDOUBLE;
+ break;
+ case FFI_TYPE_UINT8:
+ flags = X86_RET_UINT8;
+ break;
+ case FFI_TYPE_UINT16:
+ flags = X86_RET_UINT16;
+ break;
+ case FFI_TYPE_SINT8:
+ flags = X86_RET_SINT8;
+ break;
+ case FFI_TYPE_SINT16:
+ flags = X86_RET_SINT16;
+ break;
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_POINTER:
+ flags = X86_RET_INT32;
+ break;
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_UINT64:
+ flags = X86_RET_INT64;
+ break;
+ case FFI_TYPE_STRUCT:
+#ifndef X86
+ /* ??? This should be a different ABI rather than an ifdef. */
+ if (cif->rtype->size == 1)
+ flags = X86_RET_STRUCT_1B;
+ else if (cif->rtype->size == 2)
+ flags = X86_RET_STRUCT_2B;
+ else if (cif->rtype->size == 4)
+ flags = X86_RET_INT32;
+ else if (cif->rtype->size == 8)
+ flags = X86_RET_INT64;
+ else
+#endif
+ {
+ do_struct:
+ switch (cabi)
+ {
+ case FFI_THISCALL:
+ case FFI_FASTCALL:
+ case FFI_STDCALL:
+ case FFI_MS_CDECL:
+ flags = X86_RET_STRUCTARG;
+ break;
+ default:
+ flags = X86_RET_STRUCTPOP;
+ break;
+ }
+ /* Allocate space for return value pointer. */
+ bytes += ALIGN (sizeof(void*), FFI_SIZEOF_ARG);
+ }
+ break;
+ case FFI_TYPE_COMPLEX:
+ switch (cif->rtype->elements[0]->type)
+ {
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_UINT64:
+ goto do_struct;
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT32:
+ flags = X86_RET_INT64;
+ break;
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT16:
+ flags = X86_RET_INT32;
+ break;
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT8:
+ flags = X86_RET_STRUCT_2B;
+ break;
+ default:
+ return FFI_BAD_TYPEDEF;
+ }
+ break;
+ default:
+ return FFI_BAD_TYPEDEF;
+ }
+ cif->flags = flags;
+
+ for (i = 0, n = cif->nargs; i < n; i++)
+ {
+ ffi_type *t = cif->arg_types[i];
+
+ bytes = ALIGN (bytes, t->alignment);
+ bytes += ALIGN (t->size, FFI_SIZEOF_ARG);
+ }
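+ /* Round the outgoing argument area up to 16 bytes; keeping the stack
+    16-byte aligned at call sites is required on Darwin/iOS. */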
+ cif->bytes = ALIGN (bytes, 16);
+
+ return FFI_OK;
+}
+
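+/* Widen a small scalar argument to a full register word so that it can
+   be stored into a 4-byte stack slot or parameter register. */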
+static ffi_arg
+extend_basic_type(void *arg, int type)
+{
+ switch (type)
+ {
+ case FFI_TYPE_SINT8:
+ return *(SINT8 *)arg;
+ case FFI_TYPE_UINT8:
+ return *(UINT8 *)arg;
+ case FFI_TYPE_SINT16:
+ return *(SINT16 *)arg;
+ case FFI_TYPE_UINT16:
+ return *(UINT16 *)arg;
+
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_FLOAT:
+ return *(UINT32 *)arg;
+
+ default:
+ abort();
+ }
+}
+
+struct call_frame
+{
+ void *ebp; /* 0 */
+ void *retaddr; /* 4 */
+ void (*fn)(void); /* 8 */
+ int flags; /* 12 */
+ void *rvalue; /* 16 */
+ unsigned regs[3]; /* 20-28 */
+};
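+
+/* The numbers above are byte offsets from %ebp.  ffi_call_i386 in sysv.S
+   addresses this frame with those fixed offsets (return type code at 12,
+   result address at 16, register parameters at 20+R_*x4), so the two
+   must stay in sync. */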
+
+struct abi_params
+{
+ int dir; /* parameter growth direction */
+ int static_chain; /* the static chain register used by gcc */
+ int nregs; /* number of register parameters */
+ int regs[3];
+};
+
+static const struct abi_params abi_params[FFI_LAST_ABI] = {
+ [FFI_SYSV] = { 1, R_ECX, 0 },
+ [FFI_THISCALL] = { 1, R_EAX, 1, { R_ECX } },
+ [FFI_FASTCALL] = { 1, R_EAX, 2, { R_ECX, R_EDX } },
+ [FFI_STDCALL] = { 1, R_ECX, 0 },
+ [FFI_PASCAL] = { -1, R_ECX, 0 },
+ /* ??? No defined static chain; gcc does not support REGISTER. */
+ [FFI_REGISTER] = { -1, R_ECX, 3, { R_EAX, R_EDX, R_ECX } },
+ [FFI_MS_CDECL] = { 1, R_ECX, 0 }
+};
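+
+/* dir = -1 (FFI_PASCAL and FFI_REGISTER) fills the argument area from
+   the top down; all other ABIs fill it bottom up.  static_chain names
+   the register that receives the closure pointer in ffi_call_go. */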
+
+extern void ffi_call_i386(struct call_frame *, char *)
+#if HAVE_FASTCALL
+ __declspec(fastcall)
+#endif
+ FFI_HIDDEN;
+
+static void
+ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue,
+ void **avalue, void *closure)
+{
+ size_t rsize, bytes;
+ struct call_frame *frame;
+ char *stack, *argp;
+ ffi_type **arg_types;
+ int flags, cabi, i, n, dir, narg_reg;
+ const struct abi_params *pabi;
+
+ flags = cif->flags;
+ cabi = cif->abi;
+ pabi = &abi_params[cabi];
+ dir = pabi->dir;
+
+ rsize = 0;
+ if (rvalue == NULL)
+ {
+ switch (flags)
+ {
+ case X86_RET_FLOAT:
+ case X86_RET_DOUBLE:
+ case X86_RET_LDOUBLE:
+ case X86_RET_STRUCTPOP:
+ case X86_RET_STRUCTARG:
+ /* The float cases need to pop the 387 stack.
+ The struct cases need to pass a valid pointer to the callee. */
+ rsize = cif->rtype->size;
+ break;
+ default:
+ /* We can pretend that the callee returns nothing. */
+ flags = X86_RET_VOID;
+ break;
+ }
+ }
+
+ bytes = cif->bytes;
+ stack = alloca(bytes + sizeof(*frame) + rsize);
+ argp = (dir < 0 ? stack + bytes : stack);
+ frame = (struct call_frame *)(stack + bytes);
+ if (rsize)
+ rvalue = frame + 1;
+
+ frame->fn = fn;
+ frame->flags = flags;
+ frame->rvalue = rvalue;
+ frame->regs[pabi->static_chain] = (unsigned)closure;
+
+ narg_reg = 0;
+ switch (flags)
+ {
+ case X86_RET_STRUCTARG:
+ /* The pointer is passed as the first argument. */
+ if (pabi->nregs > 0)
+ {
+ frame->regs[pabi->regs[0]] = (unsigned)rvalue;
+ narg_reg = 1;
+ break;
+ }
+ /* fallthru */
+ case X86_RET_STRUCTPOP:
+ *(void **)argp = rvalue;
+ argp += sizeof(void *);
+ break;
+ }
+
+ arg_types = cif->arg_types;
+ for (i = 0, n = cif->nargs; i < n; i++)
+ {
+ ffi_type *ty = arg_types[i];
+ void *valp = avalue[i];
+ size_t z = ty->size;
+ int t = ty->type;
+
+ if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT)
+ {
+ ffi_arg val = extend_basic_type (valp, t);
+
+ if (t != FFI_TYPE_FLOAT && narg_reg < pabi->nregs)
+ frame->regs[pabi->regs[narg_reg++]] = val;
+ else if (dir < 0)
+ {
+ argp -= 4;
+ *(ffi_arg *)argp = val;
+ }
+ else
+ {
+ *(ffi_arg *)argp = val;
+ argp += 4;
+ }
+ }
+ else
+ {
+ size_t za = ALIGN (z, FFI_SIZEOF_ARG);
+ size_t align = FFI_SIZEOF_ARG;
+
+ /* Alignment rules for arguments are quite complex. Vectors and
+ structures with 16 byte alignment get it. Note that long double
+ on Darwin does have 16 byte alignment, and does not get this
+ alignment if passed directly; a structure with a long double
+ inside, however, would get 16 byte alignment. Since libffi does
+ not support vectors, we need not concern ourselves with other
+ cases. */
+ if (t == FFI_TYPE_STRUCT && ty->alignment >= 16)
+ align = 16;
+
+ if (dir < 0)
+ {
+ /* ??? These reverse argument ABIs are probably too old
+ to have cared about alignment. Someone should check. */
+ argp -= za;
+ memcpy (argp, valp, z);
+ }
+ else
+ {
+ argp = (char *)ALIGN (argp, align);
+ memcpy (argp, valp, z);
+ argp += za;
+ }
+ }
+ }
+ FFI_ASSERT (dir > 0 || argp == stack);
+
+ ffi_call_i386 (frame, stack);
+}
+
+void
+ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
+{
+ ffi_call_int (cif, fn, rvalue, avalue, NULL);
+}
+
+void
+ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue,
+ void **avalue, void *closure)
+{
+ ffi_call_int (cif, fn, rvalue, avalue, closure);
+}
+
+/** private members **/
+
+void FFI_HIDDEN ffi_closure_i386(void);
+void FFI_HIDDEN ffi_closure_STDCALL(void);
+void FFI_HIDDEN ffi_closure_REGISTER(void);
+
+struct closure_frame
+{
+ unsigned rettemp[4]; /* 0 */
+ unsigned regs[3]; /* 16-24 */
+ ffi_cif *cif; /* 28 */
+ void (*fun)(ffi_cif*,void*,void**,void*); /* 32 */
+ void *user_data; /* 36 */
+};
+
+int FFI_HIDDEN
+#if HAVE_FASTCALL
+__declspec(fastcall)
+#endif
+ffi_closure_inner (struct closure_frame *frame, char *stack)
+{
+ ffi_cif *cif = frame->cif;
+ int cabi, i, n, flags, dir, narg_reg;
+ const struct abi_params *pabi;
+ ffi_type **arg_types;
+ char *argp;
+ void *rvalue;
+ void **avalue;
+
+ cabi = cif->abi;
+ flags = cif->flags;
+ narg_reg = 0;
+ rvalue = frame->rettemp;
+ pabi = &abi_params[cabi];
+ dir = pabi->dir;
+ argp = (dir < 0 ? stack + cif->bytes : stack);
+
+ switch (flags)
+ {
+ case X86_RET_STRUCTARG:
+ if (pabi->nregs > 0)
+ {
+ rvalue = (void *)frame->regs[pabi->regs[0]];
+ narg_reg = 1;
+ frame->rettemp[0] = (unsigned)rvalue;
+ break;
+ }
+ /* fallthru */
+ case X86_RET_STRUCTPOP:
+ rvalue = *(void **)argp;
+ argp += sizeof(void *);
+ frame->rettemp[0] = (unsigned)rvalue;
+ break;
+ }
+
+ n = cif->nargs;
+ avalue = alloca(sizeof(void *) * n);
+
+ arg_types = cif->arg_types;
+ for (i = 0; i < n; ++i)
+ {
+ ffi_type *ty = arg_types[i];
+ size_t z = ty->size;
+ int t = ty->type;
+ void *valp;
+
+ if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT)
+ {
+ if (t != FFI_TYPE_FLOAT && narg_reg < pabi->nregs)
+ valp = &frame->regs[pabi->regs[narg_reg++]];
+ else if (dir < 0)
+ {
+ argp -= 4;
+ valp = argp;
+ }
+ else
+ {
+ valp = argp;
+ argp += 4;
+ }
+ }
+ else
+ {
+ size_t za = ALIGN (z, FFI_SIZEOF_ARG);
+ size_t align = FFI_SIZEOF_ARG;
+
+ /* See the comment in ffi_call_int. */
+ if (t == FFI_TYPE_STRUCT && ty->alignment >= 16)
+ align = 16;
+
+ if (dir < 0)
+ {
+ /* ??? These reverse argument ABIs are probably too old
+ to have cared about alignment. Someone should check. */
+ argp -= za;
+ valp = argp;
+ }
+ else
+ {
+ argp = (char *)ALIGN (argp, align);
+ valp = argp;
+ argp += za;
+ }
+ }
+
+ avalue[i] = valp;
+ }
+
+ frame->fun (cif, rvalue, avalue, frame->user_data);
+
+ if (cabi == FFI_STDCALL)
+ return flags + (cif->bytes << X86_RET_POP_SHIFT);
+ else
+ return flags;
+}
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure* closure,
+ ffi_cif* cif,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data,
+ void *codeloc)
+{
+ char *tramp = closure->tramp;
+ void (*dest)(void);
+ int op = 0xb8; /* movl imm, %eax */
+
+ switch (cif->abi)
+ {
+ case FFI_SYSV:
+ case FFI_THISCALL:
+ case FFI_FASTCALL:
+ case FFI_MS_CDECL:
+ dest = ffi_closure_i386;
+ break;
+ case FFI_STDCALL:
+ case FFI_PASCAL:
+ dest = ffi_closure_STDCALL;
+ break;
+ case FFI_REGISTER:
+ dest = ffi_closure_REGISTER;
+ op = 0x68; /* pushl imm */
+ break;
+ default:
+ return FFI_BAD_ABI;
+ }
+
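+ /* The generated trampoline is 10 bytes: one opcode byte (movl %eax for
+    most ABIs, pushl for FFI_REGISTER) with a 4-byte immediate holding
+    codeloc, then a 5-byte jmp whose rel32 displacement is measured from
+    the end of the trampoline (codeloc + 10). */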
+ /* movl or pushl immediate. */
+ tramp[0] = op;
+ *(void **)(tramp + 1) = codeloc;
+
+ /* jmp dest */
+ tramp[5] = 0xe9;
+ *(unsigned *)(tramp + 6) = (unsigned)dest - ((unsigned)codeloc + 10);
+
+ closure->cif = cif;
+ closure->fun = fun;
+ closure->user_data = user_data;
+
+ return FFI_OK;
+}
+
+void FFI_HIDDEN ffi_go_closure_EAX(void);
+void FFI_HIDDEN ffi_go_closure_ECX(void);
+void FFI_HIDDEN ffi_go_closure_STDCALL(void);
+
+ffi_status
+ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif,
+ void (*fun)(ffi_cif*,void*,void**,void*))
+{
+ void (*dest)(void);
+
+ switch (cif->abi)
+ {
+ case FFI_SYSV:
+ case FFI_MS_CDECL:
+ dest = ffi_go_closure_ECX;
+ break;
+ case FFI_THISCALL:
+ case FFI_FASTCALL:
+ dest = ffi_go_closure_EAX;
+ break;
+ case FFI_STDCALL:
+ case FFI_PASCAL:
+ dest = ffi_go_closure_STDCALL;
+ break;
+ case FFI_REGISTER:
+ default:
+ return FFI_BAD_ABI;
+ }
+
+ closure->tramp = dest;
+ closure->cif = cif;
+ closure->fun = fun;
+
+ return FFI_OK;
+}
+
+/* ------- Native raw API support -------------------------------- */
+
+#if !FFI_NO_RAW_API
+
+void FFI_HIDDEN ffi_closure_raw_SYSV(void);
+void FFI_HIDDEN ffi_closure_raw_THISCALL(void);
+
+ffi_status
+ffi_prep_raw_closure_loc (ffi_raw_closure *closure,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*),
+ void *user_data,
+ void *codeloc)
+{
+ char *tramp = closure->tramp;
+ void (*dest)(void);
+ int i;
+
+ /* We currently don't support certain kinds of arguments for raw
+ closures. This should be implemented by a separate assembly
+ language routine, since it would require argument processing,
+ something we don't do now for performance. */
+ for (i = cif->nargs-1; i >= 0; i--)
+ switch (cif->arg_types[i]->type)
+ {
+ case FFI_TYPE_STRUCT:
+ case FFI_TYPE_LONGDOUBLE:
+ return FFI_BAD_TYPEDEF;
+ }
+
+ switch (cif->abi)
+ {
+ case FFI_THISCALL:
+ dest = ffi_closure_raw_THISCALL;
+ break;
+ case FFI_SYSV:
+ dest = ffi_closure_raw_SYSV;
+ break;
+ default:
+ return FFI_BAD_ABI;
+ }
+
+ /* movl imm, %eax. */
+ tramp[0] = 0xb8;
+ *(void **)(tramp + 1) = codeloc;
+
+ /* jmp dest */
+ tramp[5] = 0xe9;
+ *(unsigned *)(tramp + 6) = (unsigned)dest - ((unsigned)codeloc + 10);
+
+ closure->cif = cif;
+ closure->fun = fun;
+ closure->user_data = user_data;
+
+ return FFI_OK;
+}
+
+void
+ffi_raw_call(ffi_cif *cif, void (*fn)(void), void *rvalue, ffi_raw *avalue)
+{
+ size_t rsize, bytes;
+ struct call_frame *frame;
+ char *stack, *argp;
+ ffi_type **arg_types;
+ int flags, cabi, i, n, narg_reg;
+ const struct abi_params *pabi;
+
+ flags = cif->flags;
+ cabi = cif->abi;
+ pabi = &abi_params[cabi];
+
+ rsize = 0;
+ if (rvalue == NULL)
+ {
+ switch (flags)
+ {
+ case X86_RET_FLOAT:
+ case X86_RET_DOUBLE:
+ case X86_RET_LDOUBLE:
+ case X86_RET_STRUCTPOP:
+ case X86_RET_STRUCTARG:
+ /* The float cases need to pop the 387 stack.
+ The struct cases need to pass a valid pointer to the callee. */
+ rsize = cif->rtype->size;
+ break;
+ default:
+ /* We can pretend that the callee returns nothing. */
+ flags = X86_RET_VOID;
+ break;
+ }
+ }
+
+ bytes = cif->bytes;
+ argp = stack = alloca(bytes + sizeof(*frame) + rsize);
+ frame = (struct call_frame *)(stack + bytes);
+ if (rsize)
+ rvalue = frame + 1;
+
+ frame->fn = fn;
+ frame->flags = flags;
+ frame->rvalue = rvalue;
+
+ narg_reg = 0;
+ switch (flags)
+ {
+ case X86_RET_STRUCTARG:
+ /* The pointer is passed as the first argument. */
+ if (pabi->nregs > 0)
+ {
+ frame->regs[pabi->regs[0]] = (unsigned)rvalue;
+ narg_reg = 1;
+ break;
+ }
+ /* fallthru */
+ case X86_RET_STRUCTPOP:
+ *(void **)argp = rvalue;
+ argp += sizeof(void *);
+ bytes -= sizeof(void *);
+ break;
+ }
+
+ arg_types = cif->arg_types;
+ for (i = 0, n = cif->nargs; narg_reg < pabi->nregs && i < n; i++)
+ {
+ ffi_type *ty = arg_types[i];
+ size_t z = ty->size;
+ int t = ty->type;
+
+ if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT && t != FFI_TYPE_FLOAT)
+ {
+ ffi_arg val = extend_basic_type (avalue, t);
+ frame->regs[pabi->regs[narg_reg++]] = val;
+ z = FFI_SIZEOF_ARG;
+ }
+ else
+ {
+ memcpy (argp, avalue, z);
+ z = ALIGN (z, FFI_SIZEOF_ARG);
+ argp += z;
+ }
+ avalue += z;
+ bytes -= z;
+ }
+ if (i < n)
+ memcpy (argp, avalue, bytes);
+
+ ffi_call_i386 (frame, stack);
+}
+#endif /* !FFI_NO_RAW_API */
+#endif /* !__x86_64__ */
diff -Nru orig/Modules/_ctypes/libffi_ios/x86/ffi64.c modified/Modules/_ctypes/libffi_ios/x86/ffi64.c
--- orig/Modules/_ctypes/libffi_ios/x86/ffi64.c 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/x86/ffi64.c 2015-03-12 21:33:31.000000000 +0800
@@ -0,0 +1,824 @@
+/* -----------------------------------------------------------------------
+ ffi64.c - Copyright (c) 2013 The Written Word, Inc.
+ Copyright (c) 2011 Anthony Green
+ Copyright (c) 2008, 2010 Red Hat, Inc.
+ Copyright (c) 2002, 2007 Bo Thorsen
+
+ x86-64 Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#include <ffi.h>
+#include <ffi_common.h>
+
+#include <stdlib.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include "internal64.h"
+
+#ifdef __x86_64__
+
+#define MAX_GPR_REGS 6
+#define MAX_SSE_REGS 8
+
+#if defined(__INTEL_COMPILER)
+#include "xmmintrin.h"
+#define UINT128 __m128
+#else
+#if defined(__SUNPRO_C)
+#include <sunmedia_types.h>
+#define UINT128 __m128i
+#else
+#define UINT128 __int128_t
+#endif
+#endif
+
+union big_int_union
+{
+ UINT32 i32;
+ UINT64 i64;
+ UINT128 i128;
+};
+
+struct register_args
+{
+ /* Registers for argument passing. */
+ UINT64 gpr[MAX_GPR_REGS];
+ union big_int_union sse[MAX_SSE_REGS];
+ UINT64 rax; /* ssecount */
+ UINT64 r10; /* static chain */
+};
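+
+/* This layout must match the assembly in unix64.S: the integer registers
+   are loaded in the order %rdi, %rsi, %rdx, %rcx, %r8, %r9, followed by
+   the eight SSE registers.  %rax carries the number of SSE registers
+   used, which the psABI requires for varargs calls. */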
+
+extern void ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags,
+ void *raddr, void (*fnaddr)(void)) FFI_HIDDEN;
+
+/* All references to register classes here are identical to the code in
+ gcc/config/i386/i386.c. Do *not* change one without the other. */
+
+/* Register class used for passing given 64bit part of the argument.
+ These represent classes as documented by the PS ABI, with the
+ exception of SSESF, SSEDF classes, that are basically SSE class,
+ just gcc will use SF or DFmode move instead of DImode to avoid
+ reformatting penalties.
+
+ Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
+ whenever possible (upper half does contain padding). */
+enum x86_64_reg_class
+ {
+ X86_64_NO_CLASS,
+ X86_64_INTEGER_CLASS,
+ X86_64_INTEGERSI_CLASS,
+ X86_64_SSE_CLASS,
+ X86_64_SSESF_CLASS,
+ X86_64_SSEDF_CLASS,
+ X86_64_SSEUP_CLASS,
+ X86_64_X87_CLASS,
+ X86_64_X87UP_CLASS,
+ X86_64_COMPLEX_X87_CLASS,
+ X86_64_MEMORY_CLASS
+ };
+
+#define MAX_CLASSES 4
+
+#define SSE_CLASS_P(X) ((X) >= X86_64_SSE_CLASS && (X) <= X86_64_SSEUP_CLASS)
+
+/* x86-64 register passing implementation. See x86-64 ABI for details. Goal
+ of this code is to classify each 8bytes of incoming argument by the register
+ class and assign registers accordingly. */
+
+/* Return the union class of CLASS1 and CLASS2.
+ See the x86-64 PS ABI for details. */
+
+static enum x86_64_reg_class
+merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
+{
+ /* Rule #1: If both classes are equal, this is the resulting class. */
+ if (class1 == class2)
+ return class1;
+
+ /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
+ the other class. */
+ if (class1 == X86_64_NO_CLASS)
+ return class2;
+ if (class2 == X86_64_NO_CLASS)
+ return class1;
+
+ /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
+ if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
+ return X86_64_MEMORY_CLASS;
+
+ /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
+ if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
+ || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
+ return X86_64_INTEGERSI_CLASS;
+ if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
+ || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
+ return X86_64_INTEGER_CLASS;
+
+ /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
+ MEMORY is used. */
+ if (class1 == X86_64_X87_CLASS
+ || class1 == X86_64_X87UP_CLASS
+ || class1 == X86_64_COMPLEX_X87_CLASS
+ || class2 == X86_64_X87_CLASS
+ || class2 == X86_64_X87UP_CLASS
+ || class2 == X86_64_COMPLEX_X87_CLASS)
+ return X86_64_MEMORY_CLASS;
+
+ /* Rule #6: Otherwise class SSE is used. */
+ return X86_64_SSE_CLASS;
+}
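+
+/* Worked example: struct { double d; int i; } spans two eightbytes which
+   classify as SSEDF and INTEGERSI, so it is passed in one XMM register
+   and one GPR.  Swapping the double for a long double would merge the
+   whole struct to MEMORY (rule #5) and force it onto the stack. */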
+
+/* Classify the argument of type TYPE and mode MODE.
+ CLASSES will be filled by the register class used to pass each word
+ of the operand. The number of words is returned. In case the parameter
+ should be passed in memory, 0 is returned. As a special case for zero
+ sized containers, classes[0] will be NO_CLASS and 1 is returned.
+
+ See the x86-64 PS ABI for details.
+*/
+static size_t
+classify_argument (ffi_type *type, enum x86_64_reg_class classes[],
+ size_t byte_offset)
+{
+ switch (type->type)
+ {
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_POINTER:
+ do_integer:
+ {
+ size_t size = byte_offset + type->size;
+
+ if (size <= 4)
+ {
+ classes[0] = X86_64_INTEGERSI_CLASS;
+ return 1;
+ }
+ else if (size <= 8)
+ {
+ classes[0] = X86_64_INTEGER_CLASS;
+ return 1;
+ }
+ else if (size <= 12)
+ {
+ classes[0] = X86_64_INTEGER_CLASS;
+ classes[1] = X86_64_INTEGERSI_CLASS;
+ return 2;
+ }
+ else if (size <= 16)
+ {
+ classes[0] = classes[1] = X86_64_INTEGER_CLASS;
+ return 2;
+ }
+ else
+ FFI_ASSERT (0);
+ }
+ case FFI_TYPE_FLOAT:
+ if (!(byte_offset % 8))
+ classes[0] = X86_64_SSESF_CLASS;
+ else
+ classes[0] = X86_64_SSE_CLASS;
+ return 1;
+ case FFI_TYPE_DOUBLE:
+ classes[0] = X86_64_SSEDF_CLASS;
+ return 1;
+#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
+ case FFI_TYPE_LONGDOUBLE:
+ classes[0] = X86_64_X87_CLASS;
+ classes[1] = X86_64_X87UP_CLASS;
+ return 2;
+#endif
+ case FFI_TYPE_STRUCT:
+ {
+ const size_t UNITS_PER_WORD = 8;
+ size_t words = (type->size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+ ffi_type **ptr;
+ int i;
+ enum x86_64_reg_class subclasses[MAX_CLASSES];
+
+ /* If the struct is larger than 32 bytes, pass it on the stack. */
+ if (type->size > 32)
+ return 0;
+
+ for (i = 0; i < words; i++)
+ classes[i] = X86_64_NO_CLASS;
+
+ /* Zero sized arrays or structures are NO_CLASS. We return 0 to
+ signal the memory class, so handle this as a special case. */
+ if (!words)
+ {
+ case FFI_TYPE_VOID:
+ classes[0] = X86_64_NO_CLASS;
+ return 1;
+ }
+
+ /* Merge the fields of structure. */
+ for (ptr = type->elements; *ptr != NULL; ptr++)
+ {
+ size_t num;
+
+ byte_offset = ALIGN (byte_offset, (*ptr)->alignment);
+
+ num = classify_argument (*ptr, subclasses, byte_offset % 8);
+ if (num == 0)
+ return 0;
+ for (i = 0; i < num; i++)
+ {
+ size_t pos = byte_offset / 8;
+ classes[i + pos] =
+ merge_classes (subclasses[i], classes[i + pos]);
+ }
+
+ byte_offset += (*ptr)->size;
+ }
+
+ if (words > 2)
+ {
+ /* When the size exceeds 16 bytes, everything is passed in
+ memory unless the first eightbyte is X86_64_SSE_CLASS and
+ all the remaining ones are X86_64_SSEUP_CLASS. */
+ if (classes[0] != X86_64_SSE_CLASS)
+ return 0;
+
+ for (i = 1; i < words; i++)
+ if (classes[i] != X86_64_SSEUP_CLASS)
+ return 0;
+ }
+
+ /* Final merger cleanup. */
+ for (i = 0; i < words; i++)
+ {
+ /* If one class is MEMORY, everything should be passed in
+ memory. */
+ if (classes[i] == X86_64_MEMORY_CLASS)
+ return 0;
+
+ /* The X86_64_SSEUP_CLASS should be always preceded by
+ X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
+ if (classes[i] == X86_64_SSEUP_CLASS
+ && classes[i - 1] != X86_64_SSE_CLASS
+ && classes[i - 1] != X86_64_SSEUP_CLASS)
+ {
+ /* The first one should never be X86_64_SSEUP_CLASS. */
+ FFI_ASSERT (i != 0);
+ classes[i] = X86_64_SSE_CLASS;
+ }
+
+ /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
+ everything should be passed in memory. */
+ if (classes[i] == X86_64_X87UP_CLASS
+ && (classes[i - 1] != X86_64_X87_CLASS))
+ {
+ /* The first one should never be X86_64_X87UP_CLASS. */
+ FFI_ASSERT (i != 0);
+ return 0;
+ }
+ }
+ return words;
+ }
+ case FFI_TYPE_COMPLEX:
+ {
+ ffi_type *inner = type->elements[0];
+ switch (inner->type)
+ {
+ case FFI_TYPE_INT:
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ goto do_integer;
+
+ case FFI_TYPE_FLOAT:
+ classes[0] = X86_64_SSE_CLASS;
+ if (byte_offset % 8)
+ {
+ classes[1] = X86_64_SSESF_CLASS;
+ return 2;
+ }
+ return 1;
+ case FFI_TYPE_DOUBLE:
+ classes[0] = classes[1] = X86_64_SSEDF_CLASS;
+ return 2;
+#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
+ case FFI_TYPE_LONGDOUBLE:
+ classes[0] = X86_64_COMPLEX_X87_CLASS;
+ return 1;
+#endif
+ }
+ }
+ }
+ abort();
+}
+
+/* Examine the argument and set the number of registers required in each
+ class. Return zero iff the parameter should be passed in memory;
+ otherwise return the number of eightbyte words it occupies. */
+
+static size_t
+examine_argument (ffi_type *type, enum x86_64_reg_class classes[MAX_CLASSES],
+ _Bool in_return, int *pngpr, int *pnsse)
+{
+ size_t n;
+ int i, ngpr, nsse;
+
+ n = classify_argument (type, classes, 0);
+ if (n == 0)
+ return 0;
+
+ ngpr = nsse = 0;
+ for (i = 0; i < n; ++i)
+ switch (classes[i])
+ {
+ case X86_64_INTEGER_CLASS:
+ case X86_64_INTEGERSI_CLASS:
+ ngpr++;
+ break;
+ case X86_64_SSE_CLASS:
+ case X86_64_SSESF_CLASS:
+ case X86_64_SSEDF_CLASS:
+ nsse++;
+ break;
+ case X86_64_NO_CLASS:
+ case X86_64_SSEUP_CLASS:
+ break;
+ case X86_64_X87_CLASS:
+ case X86_64_X87UP_CLASS:
+ case X86_64_COMPLEX_X87_CLASS:
+ return in_return != 0;
+ default:
+ abort ();
+ }
+
+ *pngpr = ngpr;
+ *pnsse = nsse;
+
+ return n;
+}
+
+/* Perform machine dependent cif processing. */
+
+ffi_status
+ffi_prep_cif_machdep (ffi_cif *cif)
+{
+ int gprcount, ssecount, i, avn, ngpr, nsse, flags;
+ enum x86_64_reg_class classes[MAX_CLASSES];
+ size_t bytes, n, rtype_size;
+ ffi_type *rtype;
+
+ if (cif->abi != FFI_UNIX64)
+ return FFI_BAD_ABI;
+
+ gprcount = ssecount = 0;
+
+ rtype = cif->rtype;
+ rtype_size = rtype->size;
+ switch (rtype->type)
+ {
+ case FFI_TYPE_VOID:
+ flags = UNIX64_RET_VOID;
+ break;
+ case FFI_TYPE_UINT8:
+ flags = UNIX64_RET_UINT8;
+ break;
+ case FFI_TYPE_SINT8:
+ flags = UNIX64_RET_SINT8;
+ break;
+ case FFI_TYPE_UINT16:
+ flags = UNIX64_RET_UINT16;
+ break;
+ case FFI_TYPE_SINT16:
+ flags = UNIX64_RET_SINT16;
+ break;
+ case FFI_TYPE_UINT32:
+ flags = UNIX64_RET_UINT32;
+ break;
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ flags = UNIX64_RET_SINT32;
+ break;
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ flags = UNIX64_RET_INT64;
+ break;
+ case FFI_TYPE_POINTER:
+ flags = (sizeof(void *) == 4 ? UNIX64_RET_UINT32 : UNIX64_RET_INT64);
+ break;
+ case FFI_TYPE_FLOAT:
+ flags = UNIX64_RET_XMM32;
+ break;
+ case FFI_TYPE_DOUBLE:
+ flags = UNIX64_RET_XMM64;
+ break;
+ case FFI_TYPE_LONGDOUBLE:
+ flags = UNIX64_RET_X87;
+ break;
+ case FFI_TYPE_STRUCT:
+ n = examine_argument (cif->rtype, classes, 1, &ngpr, &nsse);
+ if (n == 0)
+ {
+ /* The return value is passed in memory. A pointer to that
+ memory is the first argument. Allocate a register for it. */
+ gprcount++;
+ /* We don't have to do anything in asm for the return. */
+ flags = UNIX64_RET_VOID | UNIX64_FLAG_RET_IN_MEM;
+ }
+ else
+ {
+ _Bool sse0 = SSE_CLASS_P (classes[0]);
+
+ if (rtype_size == 4 && sse0)
+ flags = UNIX64_RET_XMM32;
+ else if (rtype_size == 8)
+ flags = sse0 ? UNIX64_RET_XMM64 : UNIX64_RET_INT64;
+ else
+ {
+ _Bool sse1 = n == 2 && SSE_CLASS_P (classes[1]);
+ if (sse0 && sse1)
+ flags = UNIX64_RET_ST_XMM0_XMM1;
+ else if (sse0)
+ flags = UNIX64_RET_ST_XMM0_RAX;
+ else if (sse1)
+ flags = UNIX64_RET_ST_RAX_XMM0;
+ else
+ flags = UNIX64_RET_ST_RAX_RDX;
+ flags |= rtype_size << UNIX64_SIZE_SHIFT;
+ }
+ }
+ break;
+ case FFI_TYPE_COMPLEX:
+ switch (rtype->elements[0]->type)
+ {
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ flags = UNIX64_RET_ST_RAX_RDX | (rtype_size << UNIX64_SIZE_SHIFT);
+ break;
+ case FFI_TYPE_FLOAT:
+ flags = UNIX64_RET_XMM64;
+ break;
+ case FFI_TYPE_DOUBLE:
+ flags = UNIX64_RET_ST_XMM0_XMM1 | (16 << UNIX64_SIZE_SHIFT);
+ break;
+#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
+ case FFI_TYPE_LONGDOUBLE:
+ flags = UNIX64_RET_X87_2;
+ break;
+#endif
+ default:
+ return FFI_BAD_TYPEDEF;
+ }
+ break;
+ default:
+ return FFI_BAD_TYPEDEF;
+ }
+
+ /* Go over all arguments and determine the way they should be passed.
+ If it fits in a register and there is space for it, let that be so. If
+ not, add its size to the stack byte count. */
+ for (bytes = 0, i = 0, avn = cif->nargs; i < avn; i++)
+ {
+ if (examine_argument (cif->arg_types[i], classes, 0, &ngpr, &nsse) == 0
+ || gprcount + ngpr > MAX_GPR_REGS
+ || ssecount + nsse > MAX_SSE_REGS)
+ {
+ long align = cif->arg_types[i]->alignment;
+
+ if (align < 8)
+ align = 8;
+
+ bytes = ALIGN (bytes, align);
+ bytes += cif->arg_types[i]->size;
+ }
+ else
+ {
+ gprcount += ngpr;
+ ssecount += nsse;
+ }
+ }
+ if (ssecount)
+ flags |= UNIX64_FLAG_XMM_ARGS;
+
+ cif->flags = flags;
+ cif->bytes = ALIGN (bytes, 8);
+
+ return FFI_OK;
+}
+
+static void
+ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue,
+ void **avalue, void *closure)
+{
+ enum x86_64_reg_class classes[MAX_CLASSES];
+ char *stack, *argp;
+ ffi_type **arg_types;
+ int gprcount, ssecount, ngpr, nsse, i, avn, flags;
+ struct register_args *reg_args;
+
+ /* Can't call 32-bit mode from 64-bit mode. */
+ FFI_ASSERT (cif->abi == FFI_UNIX64);
+
+ /* If the return value is a struct and we don't have a return value
+ address then we need to make one. Otherwise we can ignore it. */
+ flags = cif->flags;
+ if (rvalue == NULL)
+ {
+ if (flags & UNIX64_FLAG_RET_IN_MEM)
+ rvalue = alloca (cif->rtype->size);
+ else
+ flags = UNIX64_RET_VOID;
+ }
+
+ /* Allocate the space for the arguments, plus 4 words of temp space. */
+ stack = alloca (sizeof (struct register_args) + cif->bytes + 4*8);
+ reg_args = (struct register_args *) stack;
+ argp = stack + sizeof (struct register_args);
+
+ reg_args->r10 = (uintptr_t) closure;
+
+ gprcount = ssecount = 0;
+
+ /* If the return value is passed in memory, add the pointer as the
+ first integer argument. */
+ if (flags & UNIX64_FLAG_RET_IN_MEM)
+ reg_args->gpr[gprcount++] = (unsigned long) rvalue;
+
+ avn = cif->nargs;
+ arg_types = cif->arg_types;
+
+ for (i = 0; i < avn; ++i)
+ {
+ size_t n, size = arg_types[i]->size;
+
+ n = examine_argument (arg_types[i], classes, 0, &ngpr, &nsse);
+ if (n == 0
+ || gprcount + ngpr > MAX_GPR_REGS
+ || ssecount + nsse > MAX_SSE_REGS)
+ {
+ long align = arg_types[i]->alignment;
+
+ /* Stack arguments are *always* at least 8 byte aligned. */
+ if (align < 8)
+ align = 8;
+
+ /* Pass this argument in memory. */
+ argp = (void *) ALIGN (argp, align);
+ memcpy (argp, avalue[i], size);
+ argp += size;
+ }
+ else
+ {
+ /* The argument is passed entirely in registers. */
+ char *a = (char *) avalue[i];
+ int j;
+
+ for (j = 0; j < n; j++, a += 8, size -= 8)
+ {
+ switch (classes[j])
+ {
+ case X86_64_NO_CLASS:
+ case X86_64_SSEUP_CLASS:
+ break;
+ case X86_64_INTEGER_CLASS:
+ case X86_64_INTEGERSI_CLASS:
+ /* Sign-extend integer arguments passed in general
+ purpose registers, to cope with the fact that
+ LLVM incorrectly assumes that this will be done
+ (the x86-64 PS ABI does not specify this). */
+ switch (arg_types[i]->type)
+ {
+ case FFI_TYPE_SINT8:
+ reg_args->gpr[gprcount] = (SINT64) *((SINT8 *) a);
+ break;
+ case FFI_TYPE_SINT16:
+ reg_args->gpr[gprcount] = (SINT64) *((SINT16 *) a);
+ break;
+ case FFI_TYPE_SINT32:
+ reg_args->gpr[gprcount] = (SINT64) *((SINT32 *) a);
+ break;
+ default:
+ reg_args->gpr[gprcount] = 0;
+ memcpy (&reg_args->gpr[gprcount], a, size);
+ }
+ gprcount++;
+ break;
+ case X86_64_SSE_CLASS:
+ case X86_64_SSEDF_CLASS:
+ reg_args->sse[ssecount++].i64 = *(UINT64 *) a;
+ break;
+ case X86_64_SSESF_CLASS:
+ reg_args->sse[ssecount++].i32 = *(UINT32 *) a;
+ break;
+ default:
+ abort();
+ }
+ }
+ }
+ }
+ reg_args->rax = ssecount;
+
+ ffi_call_unix64 (stack, cif->bytes + sizeof (struct register_args),
+ flags, rvalue, fn);
+}
+
+void
+ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
+{
+ ffi_call_int (cif, fn, rvalue, avalue, NULL);
+}
+
+void
+ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue,
+ void **avalue, void *closure)
+{
+ ffi_call_int (cif, fn, rvalue, avalue, closure);
+}
+
+extern void ffi_closure_unix64(void) FFI_HIDDEN;
+extern void ffi_closure_unix64_sse(void) FFI_HIDDEN;
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure* closure,
+ ffi_cif* cif,
+ void (*fun)(ffi_cif*, void*, void**, void*),
+ void *user_data,
+ void *codeloc)
+{
+ static const unsigned char trampoline[16] = {
+ /* leaq -0x7(%rip),%r10 # 0x0 */
+ 0x4c, 0x8d, 0x15, 0xf9, 0xff, 0xff, 0xff,
+ /* jmpq *0x3(%rip) # 0x10 */
+ 0xff, 0x25, 0x03, 0x00, 0x00, 0x00,
+ /* nopl (%rax) */
+ 0x0f, 0x1f, 0x00
+ };
+ void (*dest)(void);
+ char *tramp = closure->tramp;
+
+ if (cif->abi != FFI_UNIX64)
+ return FFI_BAD_ABI;
+
+ if (cif->flags & UNIX64_FLAG_XMM_ARGS)
+ dest = ffi_closure_unix64_sse;
+ else
+ dest = ffi_closure_unix64;
+
+ memcpy (tramp, trampoline, sizeof(trampoline));
+ *(UINT64 *)(tramp + 16) = (uintptr_t)dest;
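+ /* The 16 code bytes are followed by the 8-byte target address loaded
+    by the jmpq *0x3(%rip) above, 24 bytes in all, matching
+    FFI_TRAMPOLINE_SIZE in ffitarget.h. */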
+
+ closure->cif = cif;
+ closure->fun = fun;
+ closure->user_data = user_data;
+
+ return FFI_OK;
+}
+
+int FFI_HIDDEN
+ffi_closure_unix64_inner(ffi_cif *cif,
+ void (*fun)(ffi_cif*, void*, void**, void*),
+ void *user_data,
+ void *rvalue,
+ struct register_args *reg_args,
+ char *argp)
+{
+ void **avalue;
+ ffi_type **arg_types;
+ long i, avn;
+ int gprcount, ssecount, ngpr, nsse;
+ int flags;
+
+ avn = cif->nargs;
+ flags = cif->flags;
+ avalue = alloca(avn * sizeof(void *));
+ gprcount = ssecount = 0;
+
+ if (flags & UNIX64_FLAG_RET_IN_MEM)
+ {
+ /* On return, %rax will contain the address that was passed
+ by the caller in %rdi. */
+ void *r = (void *)(uintptr_t)reg_args->gpr[gprcount++];
+ *(void **)rvalue = r;
+ rvalue = r;
+ flags = (sizeof(void *) == 4 ? UNIX64_RET_UINT32 : UNIX64_RET_INT64);
+ }
+
+ arg_types = cif->arg_types;
+ for (i = 0; i < avn; ++i)
+ {
+ enum x86_64_reg_class classes[MAX_CLASSES];
+ size_t n;
+
+ n = examine_argument (arg_types[i], classes, 0, &ngpr, &nsse);
+ if (n == 0
+ || gprcount + ngpr > MAX_GPR_REGS
+ || ssecount + nsse > MAX_SSE_REGS)
+ {
+ long align = arg_types[i]->alignment;
+
+ /* Stack arguments are *always* at least 8 byte aligned. */
+ if (align < 8)
+ align = 8;
+
+ /* Pass this argument in memory. */
+ argp = (void *) ALIGN (argp, align);
+ avalue[i] = argp;
+ argp += arg_types[i]->size;
+ }
+ /* If the argument is in a single register, or two consecutive
+ integer registers, then we can use that address directly. */
+ else if (n == 1
+ || (n == 2 && !(SSE_CLASS_P (classes[0])
+ || SSE_CLASS_P (classes[1]))))
+ {
+ /* The argument is in a single register. */
+ if (SSE_CLASS_P (classes[0]))
+ {
+ avalue[i] = &reg_args->sse[ssecount];
+ ssecount += n;
+ }
+ else
+ {
+ avalue[i] = &reg_args->gpr[gprcount];
+ gprcount += n;
+ }
+ }
+ /* Otherwise, allocate space to make them consecutive. */
+ else
+ {
+ char *a = alloca (16);
+ int j;
+
+ avalue[i] = a;
+ for (j = 0; j < n; j++, a += 8)
+ {
+ if (SSE_CLASS_P (classes[j]))
+ memcpy (a, &reg_args->sse[ssecount++], 8);
+ else
+ memcpy (a, &reg_args->gpr[gprcount++], 8);
+ }
+ }
+ }
+
+ /* Invoke the closure. */
+ fun (cif, rvalue, avalue, user_data);
+
+ /* Tell assembly how to perform return type promotions. */
+ return flags;
+}
+
+extern void ffi_go_closure_unix64(void) FFI_HIDDEN;
+extern void ffi_go_closure_unix64_sse(void) FFI_HIDDEN;
+
+ffi_status
+ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif,
+ void (*fun)(ffi_cif*, void*, void**, void*))
+{
+ if (cif->abi != FFI_UNIX64)
+ return FFI_BAD_ABI;
+
+ closure->tramp = (cif->flags & UNIX64_FLAG_XMM_ARGS
+ ? ffi_go_closure_unix64_sse
+ : ffi_go_closure_unix64);
+ closure->cif = cif;
+ closure->fun = fun;
+
+ return FFI_OK;
+}
+
+#endif /* __x86_64__ */
diff -Nru orig/Modules/_ctypes/libffi_ios/x86/ffitarget.h modified/Modules/_ctypes/libffi_ios/x86/ffitarget.h
--- orig/Modules/_ctypes/libffi_ios/x86/ffitarget.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/x86/ffitarget.h 2015-03-12 21:33:31.000000000 +0800
@@ -0,0 +1,139 @@
+/* -----------------------------------------------------------------*-C-*-
+ ffitarget.h - Copyright (c) 2012, 2014 Anthony Green
+ Copyright (c) 1996-2003, 2010 Red Hat, Inc.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+
+ Target configuration macros for x86 and x86-64.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+ ----------------------------------------------------------------------- */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
+/* ---- System specific configurations ----------------------------------- */
+
+/* For code common to all platforms on x86 and x86_64. */
+#define X86_ANY
+
+#if defined (X86_64) && defined (__i386__)
+#undef X86_64
+#define X86
+#endif
+
+#ifdef X86_WIN64
+#define FFI_SIZEOF_ARG 8
+#define USE_BUILTIN_FFS 0 /* not yet implemented in mingw-64 */
+#endif
+
+#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION
+#ifndef _MSC_VER
+#define FFI_TARGET_HAS_COMPLEX_TYPE
+#endif
+
+/* ---- Generic type definitions ----------------------------------------- */
+
+#ifndef LIBFFI_ASM
+#ifdef X86_WIN64
+#ifdef _MSC_VER
+typedef unsigned __int64 ffi_arg;
+typedef __int64 ffi_sarg;
+#else
+typedef unsigned long long ffi_arg;
+typedef long long ffi_sarg;
+#endif
+#else
+#if defined __x86_64__ && defined __ILP32__
+#define FFI_SIZEOF_ARG 8
+#define FFI_SIZEOF_JAVA_RAW 4
+typedef unsigned long long ffi_arg;
+typedef long long ffi_sarg;
+#else
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+#endif
+#endif
+
+typedef enum ffi_abi {
+#if defined(X86_WIN64)
+ FFI_FIRST_ABI = 0,
+ FFI_WIN64,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_WIN64
+
+#elif defined(X86_64) || (defined (__x86_64__) && defined (X86_DARWIN))
+ FFI_FIRST_ABI = 1,
+ FFI_UNIX64,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_UNIX64
+
+#elif defined(X86_WIN32)
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV = 1,
+ FFI_STDCALL = 2,
+ FFI_THISCALL = 3,
+ FFI_FASTCALL = 4,
+ FFI_MS_CDECL = 5,
+ FFI_PASCAL = 6,
+ FFI_REGISTER = 7,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_MS_CDECL
+#else
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV = 1,
+ FFI_THISCALL = 3,
+ FFI_FASTCALL = 4,
+ FFI_STDCALL = 5,
+ FFI_PASCAL = 6,
+ FFI_REGISTER = 7,
+ FFI_MS_CDECL = 8,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
+#endif
+} ffi_abi;
+#endif
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
+#define FFI_GO_CLOSURES 1
+
+#define FFI_TYPE_SMALL_STRUCT_1B (FFI_TYPE_LAST + 1)
+#define FFI_TYPE_SMALL_STRUCT_2B (FFI_TYPE_LAST + 2)
+#define FFI_TYPE_SMALL_STRUCT_4B (FFI_TYPE_LAST + 3)
+#define FFI_TYPE_MS_STRUCT (FFI_TYPE_LAST + 4)
+
+#if defined (X86_64) || defined(X86_WIN64) \
+ || (defined (__x86_64__) && defined (X86_DARWIN))
+# define FFI_TRAMPOLINE_SIZE 24
+# define FFI_NATIVE_RAW_API 0
+#else
+# define FFI_TRAMPOLINE_SIZE 12
+# define FFI_NATIVE_RAW_API 1 /* x86 has native raw api support */
+#endif
+
+#endif
+
diff -Nru orig/Modules/_ctypes/libffi_ios/x86/ffiw64.c modified/Modules/_ctypes/libffi_ios/x86/ffiw64.c
--- orig/Modules/_ctypes/libffi_ios/x86/ffiw64.c 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/x86/ffiw64.c 2015-03-12 21:33:31.000000000 +0800
@@ -0,0 +1,281 @@
+/* -----------------------------------------------------------------------
+ ffiw64.c - Copyright (c) 2014 Red Hat, Inc.
+
+ x86 win64 Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#include <ffi.h>
+#include <ffi_common.h>
+#include <stdlib.h>
+#include <stdint.h>
+
+#ifdef X86_WIN64
+
+struct win64_call_frame
+{
+ UINT64 rbp; /* 0 */
+ UINT64 retaddr; /* 8 */
+ UINT64 fn; /* 16 */
+ UINT64 flags; /* 24 */
+ UINT64 rvalue; /* 32 */
+};
+
+extern void ffi_call_win64 (void *stack, struct win64_call_frame *,
+ void *closure) FFI_HIDDEN;
+
+ffi_status
+ffi_prep_cif_machdep (ffi_cif *cif)
+{
+ int flags, n;
+
+ if (cif->abi != FFI_WIN64)
+ return FFI_BAD_ABI;
+
+ flags = cif->rtype->type;
+ switch (flags)
+ {
+ default:
+ break;
+ case FFI_TYPE_LONGDOUBLE:
+ flags = FFI_TYPE_STRUCT;
+ break;
+ case FFI_TYPE_COMPLEX:
+ flags = FFI_TYPE_STRUCT;
+ /* FALLTHRU */
+ case FFI_TYPE_STRUCT:
+ switch (cif->rtype->size)
+ {
+ case 8:
+ flags = FFI_TYPE_UINT64;
+ break;
+ case 4:
+ flags = FFI_TYPE_SMALL_STRUCT_4B;
+ break;
+ case 2:
+ flags = FFI_TYPE_SMALL_STRUCT_2B;
+ break;
+ case 1:
+ flags = FFI_TYPE_SMALL_STRUCT_1B;
+ break;
+ }
+ break;
+ }
+ cif->flags = flags;
+
+ /* Each argument either fits in a register, an 8 byte slot, or is
+ passed by reference with the pointer in the 8 byte slot. */
+ n = cif->nargs;
+ n += (flags == FFI_TYPE_STRUCT);
+ if (n < 4)
+ n = 4;
+ cif->bytes = n * 8;
+
+ return FFI_OK;
+}
+
+static void
+ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue,
+ void **avalue, void *closure)
+{
+ int i, j, n, flags;
+ UINT64 *stack;
+ size_t rsize;
+ struct win64_call_frame *frame;
+
+ FFI_ASSERT(cif->abi == FFI_WIN64);
+
+ flags = cif->flags;
+ rsize = 0;
+
+ /* If we have no return value for a structure, we need to create one.
+ Otherwise we can ignore the return type entirely. */
+ if (rvalue == NULL)
+ {
+ if (flags == FFI_TYPE_STRUCT)
+ rsize = cif->rtype->size;
+ else
+ flags = FFI_TYPE_VOID;
+ }
+
+ stack = alloca(cif->bytes + sizeof(struct win64_call_frame) + rsize);
+ frame = (struct win64_call_frame *)((char *)stack + cif->bytes);
+ if (rsize)
+ rvalue = frame + 1;
+
+ frame->fn = (uintptr_t)fn;
+ frame->flags = flags;
+ frame->rvalue = (uintptr_t)rvalue;
+
+ j = 0;
+ if (flags == FFI_TYPE_STRUCT)
+ {
+ stack[0] = (uintptr_t)rvalue;
+ j = 1;
+ }
+
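+ /* Win64 assigns every argument exactly one 8-byte slot: integers
+    smaller than 8 bytes are widened into the slot, and any size that
+    is not 1, 2, 4 or 8 bytes is passed by reference, with the pointer
+    occupying the slot. */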
+ for (i = 0, n = cif->nargs; i < n; ++i, ++j)
+ {
+ switch (cif->arg_types[i]->size)
+ {
+ case 8:
+ stack[j] = *(UINT64 *)avalue[i];
+ break;
+ case 4:
+ stack[j] = *(UINT32 *)avalue[i];
+ break;
+ case 2:
+ stack[j] = *(UINT16 *)avalue[i];
+ break;
+ case 1:
+ stack[j] = *(UINT8 *)avalue[i];
+ break;
+ default:
+ stack[j] = (uintptr_t)avalue[i];
+ break;
+ }
+ }
+
+ ffi_call_win64 (stack, frame, closure);
+}
+
+void
+ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
+{
+ ffi_call_int (cif, fn, rvalue, avalue, NULL);
+}
+
+void
+ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue,
+ void **avalue, void *closure)
+{
+ ffi_call_int (cif, fn, rvalue, avalue, closure);
+}
+
+
+extern void ffi_closure_win64(void) FFI_HIDDEN;
+extern void ffi_go_closure_win64(void) FFI_HIDDEN;
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure* closure,
+ ffi_cif* cif,
+ void (*fun)(ffi_cif*, void*, void**, void*),
+ void *user_data,
+ void *codeloc)
+{
+ static const unsigned char trampoline[16] = {
+ /* leaq -0x7(%rip),%r10 # 0x0 */
+ 0x4c, 0x8d, 0x15, 0xf9, 0xff, 0xff, 0xff,
+ /* jmpq *0x3(%rip) # 0x10 */
+ 0xff, 0x25, 0x03, 0x00, 0x00, 0x00,
+ /* nopl (%rax) */
+ 0x0f, 0x1f, 0x00
+ };
+ unsigned char *tramp = closure->tramp;
+
+ if (cif->abi != FFI_WIN64)
+ return FFI_BAD_ABI;
+
+ memcpy (tramp, trampoline, sizeof(trampoline));
+ *(UINT64 *)(tramp + 16) = (uintptr_t)ffi_closure_win64;
+
+ closure->cif = cif;
+ closure->fun = fun;
+ closure->user_data = user_data;
+
+ return FFI_OK;
+}
+
+ffi_status
+ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif,
+ void (*fun)(ffi_cif*, void*, void**, void*))
+{
+ if (cif->abi != FFI_WIN64)
+ return FFI_BAD_ABI;
+
+ closure->tramp = ffi_go_closure_win64;
+ closure->cif = cif;
+ closure->fun = fun;
+
+ return FFI_OK;
+}
+
+struct win64_closure_frame
+{
+ UINT64 rvalue[2];
+ UINT64 fargs[4];
+ UINT64 retaddr;
+ UINT64 args[];
+};
+
+int FFI_HIDDEN
+ffi_closure_win64_inner(ffi_cif *cif,
+ void (*fun)(ffi_cif*, void*, void**, void*),
+ void *user_data,
+ struct win64_closure_frame *frame)
+{
+ void **avalue;
+ void *rvalue;
+ int i, n, nreg, flags;
+
+ avalue = alloca(cif->nargs * sizeof(void *));
+ rvalue = frame->rvalue;
+ nreg = 0;
+
+ /* When returning a structure, the address is in the first argument.
+ We must also be prepared to return the same address in %rax, so
+ install that address in the frame and pretend we return a pointer. */
+ flags = cif->flags;
+ if (flags == FFI_TYPE_STRUCT)
+ {
+ rvalue = (void *)(uintptr_t)frame->args[0];
+ frame->rvalue[0] = frame->args[0];
+ nreg = 1;
+ }
+
+ for (i = 0, n = cif->nargs; i < n; ++i, ++nreg)
+ {
+ size_t size = cif->arg_types[i]->size;
+ size_t type = cif->arg_types[i]->type;
+ void *a;
+
+ if (type == FFI_TYPE_DOUBLE || type == FFI_TYPE_FLOAT)
+ {
+ if (nreg < 4)
+ a = &frame->fargs[nreg];
+ else
+ a = &frame->args[nreg];
+ }
+ else if (size == 1 || size == 2 || size == 4 || size == 8)
+ a = &frame->args[nreg];
+ else
+ a = (void *)(uintptr_t)frame->args[nreg];
+
+ avalue[i] = a;
+ }
+
+ /* Invoke the closure. */
+ fun (cif, rvalue, avalue, user_data);
+ return flags;
+}
+
+#endif /* X86_WIN64 */
diff -Nru orig/Modules/_ctypes/libffi_ios/x86/internal.h modified/Modules/_ctypes/libffi_ios/x86/internal.h
--- orig/Modules/_ctypes/libffi_ios/x86/internal.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/x86/internal.h 2015-03-12 21:33:31.000000000 +0800
@@ -0,0 +1,29 @@
+#define X86_RET_FLOAT 0
+#define X86_RET_DOUBLE 1
+#define X86_RET_LDOUBLE 2
+#define X86_RET_SINT8 3
+#define X86_RET_SINT16 4
+#define X86_RET_UINT8 5
+#define X86_RET_UINT16 6
+#define X86_RET_INT64 7
+#define X86_RET_INT32 8
+#define X86_RET_VOID 9
+#define X86_RET_STRUCTPOP 10
+#define X86_RET_STRUCTARG 11
+#define X86_RET_STRUCT_1B 12
+#define X86_RET_STRUCT_2B 13
+#define X86_RET_UNUSED14 14
+#define X86_RET_UNUSED15 15
+
+#define X86_RET_TYPE_MASK 15
+#define X86_RET_POP_SHIFT 4
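+
+/* The assembly return paths receive a single flags word: the low four
+   bits (X86_RET_TYPE_MASK) index the store/load jump tables by return
+   type, and the bits at X86_RET_POP_SHIFT and above hold the byte count
+   that stdcall-style closures must pop on return. */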
+
+#define R_EAX 0
+#define R_EDX 1
+#define R_ECX 2
+
+#ifdef __PCC__
+# define HAVE_FASTCALL 0
+#else
+# define HAVE_FASTCALL 1
+#endif
diff -Nru orig/Modules/_ctypes/libffi_ios/x86/internal64.h modified/Modules/_ctypes/libffi_ios/x86/internal64.h
--- orig/Modules/_ctypes/libffi_ios/x86/internal64.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/x86/internal64.h 2015-03-12 21:33:31.000000000 +0800
@@ -0,0 +1,22 @@
+#define UNIX64_RET_VOID 0
+#define UNIX64_RET_UINT8 1
+#define UNIX64_RET_UINT16 2
+#define UNIX64_RET_UINT32 3
+#define UNIX64_RET_SINT8 4
+#define UNIX64_RET_SINT16 5
+#define UNIX64_RET_SINT32 6
+#define UNIX64_RET_INT64 7
+#define UNIX64_RET_XMM32 8
+#define UNIX64_RET_XMM64 9
+#define UNIX64_RET_X87 10
+#define UNIX64_RET_X87_2 11
+#define UNIX64_RET_ST_XMM0_RAX 12
+#define UNIX64_RET_ST_RAX_XMM0 13
+#define UNIX64_RET_ST_XMM0_XMM1 14
+#define UNIX64_RET_ST_RAX_RDX 15
+
+#define UNIX64_RET_LAST 15
+
+#define UNIX64_FLAG_RET_IN_MEM (1 << 10)
+#define UNIX64_FLAG_XMM_ARGS (1 << 11)
+#define UNIX64_SIZE_SHIFT 12
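+
+/* cif->flags packs several fields: the low bits hold a UNIX64_RET_* code,
+   bit 10 marks a return through memory, bit 11 marks SSE register
+   arguments, and the bits from UNIX64_SIZE_SHIFT up carry the size of a
+   small structure return. */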
diff -Nru orig/Modules/_ctypes/libffi_ios/x86/sysv.S modified/Modules/_ctypes/libffi_ios/x86/sysv.S
--- orig/Modules/_ctypes/libffi_ios/x86/sysv.S 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/x86/sysv.S 2015-03-12 21:33:31.000000000 +0800
@@ -0,0 +1,1040 @@
+/* -----------------------------------------------------------------------
+ sysv.S - Copyright (c) 2013 The Written Word, Inc.
+ - Copyright (c) 1996,1998,2001-2003,2005,2008,2010 Red Hat, Inc.
+
+ X86 Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#ifndef __x86_64__
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+#include "internal.h"
+
+#define C2(X, Y) X ## Y
+#define C1(X, Y) C2(X, Y)
+#ifdef __USER_LABEL_PREFIX__
+# define C(X) C1(__USER_LABEL_PREFIX__, X)
+#else
+# define C(X) X
+#endif
+
+#ifdef X86_DARWIN
+# define L(X) C1(L, X)
+#else
+# define L(X) C1(.L, X)
+#endif
+
+#ifdef __ELF__
+# define ENDF(X) .type X,@function; .size X, . - X
+#else
+# define ENDF(X)
+#endif
+
+/* Handle win32 fastcall name mangling. */
+#ifdef X86_WIN32
+# define ffi_call_i386 @ffi_call_i386@8
+# define ffi_closure_inner @ffi_closure_inner@8
+#else
+# define ffi_call_i386 C(ffi_call_i386)
+# define ffi_closure_inner C(ffi_closure_inner)
+#endif
+
+/* This macro allows the safe creation of jump tables without an
+ actual table. The entry points into the table are all 8 bytes.
+ The use of ORG asserts that we're at the correct location. */
+/* ??? The clang assembler doesn't handle .org with symbolic expressions. */
+#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__))
+# define E(BASE, X) .balign 8
+#else
+# define E(BASE, X) .balign 8; .org BASE + X * 8
+#endif
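+
+/* Dispatch is then a single computed jump: the 4-bit return-type code
+   indexes the table at 8 bytes per entry, as in
+   leal L(store_table)(,%ecx,8), %ebx; jmp *%ebx. */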
+
+ .text
+ .balign 16
+ .globl ffi_call_i386
+ FFI_HIDDEN(ffi_call_i386)
+
+/* This is declared as
+
+ void ffi_call_i386(struct call_frame *frame, char *argp)
+ __attribute__((fastcall));
+
+ Thus the arguments are present in
+
+ ecx: frame
+ edx: argp
+*/
+
+ffi_call_i386:
+L(UW0):
+ # cfi_startproc
+#if !HAVE_FASTCALL
+ movl 4(%esp), %ecx
+ movl 8(%esp), %edx
+#endif
+ movl (%esp), %eax /* move the return address */
+ movl %ebp, (%ecx) /* store %ebp into local frame */
+ movl %eax, 4(%ecx) /* store retaddr into local frame */
+
+ /* New stack frame based off ebp. This is an itty bit of unwind
+ trickery in that the CFA *has* changed. There is no easy way
+ to describe it correctly on entry to the function. Fortunately,
+ it doesn't matter too much since at all points we can correctly
+ unwind back to ffi_call. Note that the location to which we
+ moved the return address is (the new) CFA-4, so from the
+ perspective of the unwind info, it hasn't moved. */
+ movl %ecx, %ebp
+L(UW1):
+ # cfi_def_cfa(%ebp, 8)
+ # cfi_rel_offset(%ebp, 0)
+
+ movl %edx, %esp /* set outgoing argument stack */
+ movl 20+R_EAX*4(%ebp), %eax /* set register arguments */
+ movl 20+R_EDX*4(%ebp), %edx
+ movl 20+R_ECX*4(%ebp), %ecx
+
+ call *8(%ebp)
+
+ movl 12(%ebp), %ecx /* load return type code */
+ movl %ebx, 8(%ebp) /* preserve %ebx */
+L(UW2):
+ # cfi_rel_offset(%ebx, 8)
+
+ andl $X86_RET_TYPE_MASK, %ecx
+#ifdef __PIC__
+ call C(__x86.get_pc_thunk.bx)
+L(pc1):
+ leal L(store_table)-L(pc1)(%ebx, %ecx, 8), %ebx
+#else
+ leal L(store_table)(,%ecx, 8), %ebx
+#endif
+ movl 16(%ebp), %ecx /* load result address */
+ jmp *%ebx
+
+ .balign 8
+L(store_table):
+E(L(store_table), X86_RET_FLOAT)
+ fstps (%ecx)
+ jmp L(e1)
+E(L(store_table), X86_RET_DOUBLE)
+ fstpl (%ecx)
+ jmp L(e1)
+E(L(store_table), X86_RET_LDOUBLE)
+ fstpt (%ecx)
+ jmp L(e1)
+E(L(store_table), X86_RET_SINT8)
+ movsbl %al, %eax
+ mov %eax, (%ecx)
+ jmp L(e1)
+E(L(store_table), X86_RET_SINT16)
+ movswl %ax, %eax
+ mov %eax, (%ecx)
+ jmp L(e1)
+E(L(store_table), X86_RET_UINT8)
+ movzbl %al, %eax
+ mov %eax, (%ecx)
+ jmp L(e1)
+E(L(store_table), X86_RET_UINT16)
+ movzwl %ax, %eax
+ mov %eax, (%ecx)
+ jmp L(e1)
+E(L(store_table), X86_RET_INT64)
+ movl %edx, 4(%ecx)
+ /* fallthru */
+E(L(store_table), X86_RET_INT32)
+ movl %eax, (%ecx)
+ /* fallthru */
+E(L(store_table), X86_RET_VOID)
+L(e1):
+ movl 8(%ebp), %ebx
+ movl %ebp, %esp
+ popl %ebp
+L(UW3):
+ # cfi_remember_state
+ # cfi_def_cfa(%esp, 4)
+ # cfi_restore(%ebx)
+ # cfi_restore(%ebp)
+ ret
+L(UW4):
+ # cfi_restore_state
+
+E(L(store_table), X86_RET_STRUCTPOP)
+ jmp L(e1)
+E(L(store_table), X86_RET_STRUCTARG)
+ jmp L(e1)
+E(L(store_table), X86_RET_STRUCT_1B)
+ movb %al, (%ecx)
+ jmp L(e1)
+E(L(store_table), X86_RET_STRUCT_2B)
+ movw %ax, (%ecx)
+ jmp L(e1)
+
+ /* Fill out the table so that bad values are predictable. */
+E(L(store_table), X86_RET_UNUSED14)
+ ud2
+E(L(store_table), X86_RET_UNUSED15)
+ ud2
+
+L(UW5):
+ # cfi_endproc
+ENDF(ffi_call_i386)
+
+/* The inner helper is declared as
+
+ void ffi_closure_inner(struct closure_frame *frame, char *argp)
+ __attribute__((fastcall))
+
+ Thus the arguments are placed in
+
+ ecx: frame
+ edx: argp
+*/
+
+/* Macros to help setting up the closure_data structure. */
+
+#if HAVE_FASTCALL
+# define closure_FS (40 + 4)
+# define closure_CF 0
+#else
+# define closure_FS (8 + 40 + 12)
+# define closure_CF 8
+#endif
+
+#define FFI_CLOSURE_SAVE_REGS \
+ movl %eax, closure_CF+16+R_EAX*4(%esp); \
+ movl %edx, closure_CF+16+R_EDX*4(%esp); \
+ movl %ecx, closure_CF+16+R_ECX*4(%esp)
+
+#define FFI_CLOSURE_COPY_TRAMP_DATA \
+ movl FFI_TRAMPOLINE_SIZE(%eax), %edx; /* copy cif */ \
+ movl FFI_TRAMPOLINE_SIZE+4(%eax), %ecx; /* copy fun */ \
+ movl FFI_TRAMPOLINE_SIZE+8(%eax), %eax; /* copy user_data */ \
+ movl %edx, closure_CF+28(%esp); \
+ movl %ecx, closure_CF+32(%esp); \
+ movl %eax, closure_CF+36(%esp)
+
+#if HAVE_FASTCALL
+# define FFI_CLOSURE_PREP_CALL \
+ movl %esp, %ecx; /* load closure_data */ \
+ leal closure_FS+4(%esp), %edx; /* load incoming stack */
+#else
+# define FFI_CLOSURE_PREP_CALL \
+ leal closure_CF(%esp), %ecx; /* load closure_data */ \
+ leal closure_FS+4(%esp), %edx; /* load incoming stack */ \
+ movl %ecx, (%esp); \
+ movl %edx, 4(%esp)
+#endif
+
+#define FFI_CLOSURE_CALL_INNER(UWN) \
+ call ffi_closure_inner
+
+#define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \
+ andl $X86_RET_TYPE_MASK, %eax; \
+ leal L(C1(load_table,N))(, %eax, 8), %edx; \
+ movl closure_CF(%esp), %eax; /* optimistic load */ \
+ jmp *%edx
+
+#ifdef __PIC__
+# if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE
+# undef FFI_CLOSURE_MASK_AND_JUMP
+# define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \
+ andl $X86_RET_TYPE_MASK, %eax; \
+ call C(__x86.get_pc_thunk.dx); \
+L(C1(pc,N)): \
+ leal L(C1(load_table,N))-L(C1(pc,N))(%edx, %eax, 8), %edx; \
+ movl closure_CF(%esp), %eax; /* optimistic load */ \
+ jmp *%edx
+# else
+# define FFI_CLOSURE_CALL_INNER_SAVE_EBX
+# undef FFI_CLOSURE_CALL_INNER
+# define FFI_CLOSURE_CALL_INNER(UWN) \
+ movl %ebx, 40(%esp); /* save ebx */ \
+L(C1(UW,UWN)): \
+ # cfi_rel_offset(%ebx, 40); \
+ call C(__x86.get_pc_thunk.bx); /* load got register */ \
+ addl $C(_GLOBAL_OFFSET_TABLE_), %ebx; \
+ call ffi_closure_inner@PLT
+# undef FFI_CLOSURE_MASK_AND_JUMP
+# define FFI_CLOSURE_MASK_AND_JUMP(N, UWN) \
+ andl $X86_RET_TYPE_MASK, %eax; \
+ leal L(C1(load_table,N))@GOTOFF(%ebx, %eax, 8), %edx; \
+ movl 40(%esp), %ebx; /* restore ebx */ \
+L(C1(UW,UWN)): \
+ # cfi_restore(%ebx); \
+ movl closure_CF(%esp), %eax; /* optimistic load */ \
+ jmp *%edx
+# endif /* DARWIN || HIDDEN */
+#endif /* __PIC__ */
+
+ .balign 16
+ .globl C(ffi_go_closure_EAX)
+ FFI_HIDDEN(C(ffi_go_closure_EAX))
+C(ffi_go_closure_EAX):
+L(UW6):
+ # cfi_startproc
+ subl $closure_FS, %esp
+L(UW7):
+ # cfi_def_cfa_offset(closure_FS + 4)
+ FFI_CLOSURE_SAVE_REGS
+ movl 4(%eax), %edx /* copy cif */
+ movl 8(%eax), %ecx /* copy fun */
+ movl %edx, closure_CF+28(%esp)
+ movl %ecx, closure_CF+32(%esp)
+ movl %eax, closure_CF+36(%esp) /* closure is user_data */
+ jmp L(do_closure_i386)
+L(UW8):
+ # cfi_endproc
+ENDF(C(ffi_go_closure_EAX))
+
+ .balign 16
+ .globl C(ffi_go_closure_ECX)
+ FFI_HIDDEN(C(ffi_go_closure_ECX))
+C(ffi_go_closure_ECX):
+L(UW9):
+ # cfi_startproc
+ subl $closure_FS, %esp
+L(UW10):
+ # cfi_def_cfa_offset(closure_FS + 4)
+ FFI_CLOSURE_SAVE_REGS
+ movl 4(%ecx), %edx /* copy cif */
+ movl 8(%ecx), %eax /* copy fun */
+ movl %edx, closure_CF+28(%esp)
+ movl %eax, closure_CF+32(%esp)
+ movl %ecx, closure_CF+36(%esp) /* closure is user_data */
+ jmp L(do_closure_i386)
+L(UW11):
+ # cfi_endproc
+ENDF(C(ffi_go_closure_ECX))
+
+/* The closure entry points are reached from the ffi_closure trampoline.
+ On entry, %eax contains the address of the ffi_closure. */
+
+ .balign 16
+ .globl C(ffi_closure_i386)
+ FFI_HIDDEN(C(ffi_closure_i386))
+
+C(ffi_closure_i386):
+L(UW12):
+ # cfi_startproc
+ subl $closure_FS, %esp
+L(UW13):
+ # cfi_def_cfa_offset(closure_FS + 4)
+
+ FFI_CLOSURE_SAVE_REGS
+ FFI_CLOSURE_COPY_TRAMP_DATA
+
+ /* Entry point from preceding Go closures. */
+L(do_closure_i386):
+
+ FFI_CLOSURE_PREP_CALL
+ FFI_CLOSURE_CALL_INNER(14)
+ FFI_CLOSURE_MASK_AND_JUMP(2, 15)
+
+ .balign 8
+L(load_table2):
+E(L(load_table2), X86_RET_FLOAT)
+ flds closure_CF(%esp)
+ jmp L(e2)
+E(L(load_table2), X86_RET_DOUBLE)
+ fldl closure_CF(%esp)
+ jmp L(e2)
+E(L(load_table2), X86_RET_LDOUBLE)
+ fldt closure_CF(%esp)
+ jmp L(e2)
+E(L(load_table2), X86_RET_SINT8)
+ movsbl %al, %eax
+ jmp L(e2)
+E(L(load_table2), X86_RET_SINT16)
+ movswl %ax, %eax
+ jmp L(e2)
+E(L(load_table2), X86_RET_UINT8)
+ movzbl %al, %eax
+ jmp L(e2)
+E(L(load_table2), X86_RET_UINT16)
+ movzwl %ax, %eax
+ jmp L(e2)
+E(L(load_table2), X86_RET_INT64)
+ movl closure_CF+4(%esp), %edx
+ jmp L(e2)
+E(L(load_table2), X86_RET_INT32)
+ nop
+ /* fallthru */
+E(L(load_table2), X86_RET_VOID)
+L(e2):
+ addl $closure_FS, %esp
+L(UW16):
+ # cfi_adjust_cfa_offset(-closure_FS)
+ ret
+L(UW17):
+ # cfi_adjust_cfa_offset(closure_FS)
+E(L(load_table2), X86_RET_STRUCTPOP)
+ addl $closure_FS, %esp
+L(UW18):
+ # cfi_adjust_cfa_offset(-closure_FS)
+ ret $4
+L(UW19):
+ # cfi_adjust_cfa_offset(closure_FS)
+E(L(load_table2), X86_RET_STRUCTARG)
+ jmp L(e2)
+E(L(load_table2), X86_RET_STRUCT_1B)
+ movzbl %al, %eax
+ jmp L(e2)
+E(L(load_table2), X86_RET_STRUCT_2B)
+ movzwl %ax, %eax
+ jmp L(e2)
+
+ /* Fill out the table so that bad values are predictable. */
+E(L(load_table2), X86_RET_UNUSED14)
+ ud2
+E(L(load_table2), X86_RET_UNUSED15)
+ ud2
+
+L(UW20):
+ # cfi_endproc
+ENDF(C(ffi_closure_i386))
+
+ .balign 16
+ .globl C(ffi_go_closure_STDCALL)
+ FFI_HIDDEN(C(ffi_go_closure_STDCALL))
+C(ffi_go_closure_STDCALL):
+L(UW21):
+ # cfi_startproc
+ subl $closure_FS, %esp
+L(UW22):
+ # cfi_def_cfa_offset(closure_FS + 4)
+ FFI_CLOSURE_SAVE_REGS
+ movl 4(%ecx), %edx /* copy cif */
+ movl 8(%ecx), %eax /* copy fun */
+ movl %edx, closure_CF+28(%esp)
+ movl %eax, closure_CF+32(%esp)
+ movl %ecx, closure_CF+36(%esp) /* closure is user_data */
+ jmp L(do_closure_STDCALL)
+L(UW23):
+ # cfi_endproc
+ENDF(C(ffi_go_closure_STDCALL))
+
+/* For REGISTER, we have no available parameter registers, and so we
+ enter here having pushed the closure onto the stack. */
+
+ .balign 16
+ .globl C(ffi_closure_REGISTER)
+ FFI_HIDDEN(C(ffi_closure_REGISTER))
+C(ffi_closure_REGISTER):
+L(UW24):
+ # cfi_startproc
+ # cfi_def_cfa(%esp, 8)
+ # cfi_offset(%eip, -8)
+ subl $closure_FS-4, %esp
+L(UW25):
+ # cfi_def_cfa_offset(closure_FS + 4)
+ FFI_CLOSURE_SAVE_REGS
+ movl closure_FS-4(%esp), %ecx /* load retaddr */
+ movl closure_FS(%esp), %eax /* load closure */
+ movl %ecx, closure_FS(%esp) /* move retaddr */
+ jmp L(do_closure_REGISTER)
+L(UW26):
+ # cfi_endproc
+ENDF(C(ffi_closure_REGISTER))
+
+/* For STDCALL (and others), we need to pop N bytes of arguments off
+ the stack following the closure. The amount needing to be popped
+ is returned to us from ffi_closure_inner. */
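+/* As consumed below, the value ffi_closure_inner returns in %eax packs the
+   X86_RET_* type code in its low bits and the byte count to pop at
+   X86_RET_POP_SHIFT and above.  */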
+
+ .balign 16
+ .globl C(ffi_closure_STDCALL)
+ FFI_HIDDEN(C(ffi_closure_STDCALL))
+C(ffi_closure_STDCALL):
+L(UW27):
+ # cfi_startproc
+ subl $closure_FS, %esp
+L(UW28):
+ # cfi_def_cfa_offset(closure_FS + 4)
+
+ FFI_CLOSURE_SAVE_REGS
+
+ /* Entry point from ffi_closure_REGISTER. */
+L(do_closure_REGISTER):
+
+ FFI_CLOSURE_COPY_TRAMP_DATA
+
+	/* Entry point from preceding Go closure.  */
+L(do_closure_STDCALL):
+
+ FFI_CLOSURE_PREP_CALL
+ FFI_CLOSURE_CALL_INNER(29)
+
+ movl %eax, %ecx
+ shrl $X86_RET_POP_SHIFT, %ecx /* isolate pop count */
+ leal closure_FS(%esp, %ecx), %ecx /* compute popped esp */
+ movl closure_FS(%esp), %edx /* move return address */
+ movl %edx, (%ecx)
+
+ /* From this point on, the value of %esp upon return is %ecx+4,
+ and we've copied the return address to %ecx to make return easy.
+ There's no point in representing this in the unwind info, as
+ there is always a window between the mov and the ret which
+ will be wrong from one point of view or another. */
+
+ FFI_CLOSURE_MASK_AND_JUMP(3, 30)
+
+ .balign 8
+L(load_table3):
+E(L(load_table3), X86_RET_FLOAT)
+ flds closure_CF(%esp)
+ movl %ecx, %esp
+ ret
+E(L(load_table3), X86_RET_DOUBLE)
+ fldl closure_CF(%esp)
+ movl %ecx, %esp
+ ret
+E(L(load_table3), X86_RET_LDOUBLE)
+ fldt closure_CF(%esp)
+ movl %ecx, %esp
+ ret
+E(L(load_table3), X86_RET_SINT8)
+ movsbl %al, %eax
+ movl %ecx, %esp
+ ret
+E(L(load_table3), X86_RET_SINT16)
+ movswl %ax, %eax
+ movl %ecx, %esp
+ ret
+E(L(load_table3), X86_RET_UINT8)
+ movzbl %al, %eax
+ movl %ecx, %esp
+ ret
+E(L(load_table3), X86_RET_UINT16)
+ movzwl %ax, %eax
+ movl %ecx, %esp
+ ret
+E(L(load_table3), X86_RET_INT64)
+ movl closure_CF+4(%esp), %edx
+ movl %ecx, %esp
+ ret
+E(L(load_table3), X86_RET_INT32)
+ movl %ecx, %esp
+ ret
+E(L(load_table3), X86_RET_VOID)
+ movl %ecx, %esp
+ ret
+E(L(load_table3), X86_RET_STRUCTPOP)
+ movl %ecx, %esp
+ ret
+E(L(load_table3), X86_RET_STRUCTARG)
+ movl %ecx, %esp
+ ret
+E(L(load_table3), X86_RET_STRUCT_1B)
+ movzbl %al, %eax
+ movl %ecx, %esp
+ ret
+E(L(load_table3), X86_RET_STRUCT_2B)
+ movzwl %ax, %eax
+ movl %ecx, %esp
+ ret
+
+ /* Fill out the table so that bad values are predictable. */
+E(L(load_table3), X86_RET_UNUSED14)
+ ud2
+E(L(load_table3), X86_RET_UNUSED15)
+ ud2
+
+L(UW31):
+ # cfi_endproc
+ENDF(C(ffi_closure_STDCALL))
+
+#if !FFI_NO_RAW_API
+
+#define raw_closure_S_FS (16+16+12)
+
+ .balign 16
+ .globl C(ffi_closure_raw_SYSV)
+ FFI_HIDDEN(C(ffi_closure_raw_SYSV))
+C(ffi_closure_raw_SYSV):
+L(UW32):
+ # cfi_startproc
+ subl $raw_closure_S_FS, %esp
+L(UW33):
+ # cfi_def_cfa_offset(raw_closure_S_FS + 4)
+ movl %ebx, raw_closure_S_FS-4(%esp)
+L(UW34):
+ # cfi_rel_offset(%ebx, raw_closure_S_FS-4)
+
+ movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */
+ movl %edx, 12(%esp)
+ leal raw_closure_S_FS+4(%esp), %edx /* load raw_args */
+ movl %edx, 8(%esp)
+ leal 16(%esp), %edx /* load &res */
+ movl %edx, 4(%esp)
+ movl FFI_TRAMPOLINE_SIZE(%eax), %ebx /* load cl->cif */
+ movl %ebx, (%esp)
+ call *FFI_TRAMPOLINE_SIZE+4(%eax) /* call cl->fun */
+
+ movl 20(%ebx), %eax /* load cif->flags */
+ andl $X86_RET_TYPE_MASK, %eax
+#ifdef __PIC__
+ call C(__x86.get_pc_thunk.bx)
+L(pc4):
+ leal L(load_table4)-L(pc4)(%ebx, %eax, 8), %ecx
+#else
+ leal L(load_table4)(,%eax, 8), %ecx
+#endif
+ movl raw_closure_S_FS-4(%esp), %ebx
+L(UW35):
+ # cfi_restore(%ebx)
+ movl 16(%esp), %eax /* Optimistic load */
+ jmp *%ecx
+
+ .balign 8
+L(load_table4):
+E(L(load_table4), X86_RET_FLOAT)
+ flds 16(%esp)
+ jmp L(e4)
+E(L(load_table4), X86_RET_DOUBLE)
+ fldl 16(%esp)
+ jmp L(e4)
+E(L(load_table4), X86_RET_LDOUBLE)
+ fldt 16(%esp)
+ jmp L(e4)
+E(L(load_table4), X86_RET_SINT8)
+ movsbl %al, %eax
+ jmp L(e4)
+E(L(load_table4), X86_RET_SINT16)
+ movswl %ax, %eax
+ jmp L(e4)
+E(L(load_table4), X86_RET_UINT8)
+ movzbl %al, %eax
+ jmp L(e4)
+E(L(load_table4), X86_RET_UINT16)
+ movzwl %ax, %eax
+ jmp L(e4)
+E(L(load_table4), X86_RET_INT64)
+ movl 16+4(%esp), %edx
+ jmp L(e4)
+E(L(load_table4), X86_RET_INT32)
+ nop
+ /* fallthru */
+E(L(load_table4), X86_RET_VOID)
+L(e4):
+ addl $raw_closure_S_FS, %esp
+L(UW36):
+ # cfi_adjust_cfa_offset(-raw_closure_S_FS)
+ ret
+L(UW37):
+ # cfi_adjust_cfa_offset(raw_closure_S_FS)
+E(L(load_table4), X86_RET_STRUCTPOP)
+ addl $raw_closure_S_FS, %esp
+L(UW38):
+ # cfi_adjust_cfa_offset(-raw_closure_S_FS)
+ ret $4
+L(UW39):
+ # cfi_adjust_cfa_offset(raw_closure_S_FS)
+E(L(load_table4), X86_RET_STRUCTARG)
+ jmp L(e4)
+E(L(load_table4), X86_RET_STRUCT_1B)
+ movzbl %al, %eax
+ jmp L(e4)
+E(L(load_table4), X86_RET_STRUCT_2B)
+ movzwl %ax, %eax
+ jmp L(e4)
+
+ /* Fill out the table so that bad values are predictable. */
+E(L(load_table4), X86_RET_UNUSED14)
+ ud2
+E(L(load_table4), X86_RET_UNUSED15)
+ ud2
+
+L(UW40):
+ # cfi_endproc
+ENDF(C(ffi_closure_raw_SYSV))
+
+#define raw_closure_T_FS (16+16+8)
+
+ .balign 16
+ .globl C(ffi_closure_raw_THISCALL)
+ FFI_HIDDEN(C(ffi_closure_raw_THISCALL))
+C(ffi_closure_raw_THISCALL):
+L(UW41):
+ # cfi_startproc
+ /* Rearrange the stack such that %ecx is the first argument.
+ This means moving the return address. */
+ popl %edx
+L(UW42):
+ # cfi_def_cfa_offset(0)
+ # cfi_register(%eip, %edx)
+ pushl %ecx
+L(UW43):
+ # cfi_adjust_cfa_offset(4)
+ pushl %edx
+L(UW44):
+ # cfi_adjust_cfa_offset(4)
+ # cfi_rel_offset(%eip, 0)
+ subl $raw_closure_T_FS, %esp
+L(UW45):
+ # cfi_adjust_cfa_offset(raw_closure_T_FS)
+ movl %ebx, raw_closure_T_FS-4(%esp)
+L(UW46):
+ # cfi_rel_offset(%ebx, raw_closure_T_FS-4)
+
+ movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */
+ movl %edx, 12(%esp)
+ leal raw_closure_T_FS+4(%esp), %edx /* load raw_args */
+ movl %edx, 8(%esp)
+ leal 16(%esp), %edx /* load &res */
+ movl %edx, 4(%esp)
+ movl FFI_TRAMPOLINE_SIZE(%eax), %ebx /* load cl->cif */
+ movl %ebx, (%esp)
+ call *FFI_TRAMPOLINE_SIZE+4(%eax) /* call cl->fun */
+
+ movl 20(%ebx), %eax /* load cif->flags */
+ andl $X86_RET_TYPE_MASK, %eax
+#ifdef __PIC__
+ call C(__x86.get_pc_thunk.bx)
+L(pc5):
+ leal L(load_table5)-L(pc5)(%ebx, %eax, 8), %ecx
+#else
+ leal L(load_table5)(,%eax, 8), %ecx
+#endif
+ movl raw_closure_T_FS-4(%esp), %ebx
+L(UW47):
+ # cfi_restore(%ebx)
+ movl 16(%esp), %eax /* Optimistic load */
+ jmp *%ecx
+
+ .balign 8
+L(load_table5):
+E(L(load_table5), X86_RET_FLOAT)
+ flds 16(%esp)
+ jmp L(e5)
+E(L(load_table5), X86_RET_DOUBLE)
+ fldl 16(%esp)
+ jmp L(e5)
+E(L(load_table5), X86_RET_LDOUBLE)
+ fldt 16(%esp)
+ jmp L(e5)
+E(L(load_table5), X86_RET_SINT8)
+ movsbl %al, %eax
+ jmp L(e5)
+E(L(load_table5), X86_RET_SINT16)
+ movswl %ax, %eax
+ jmp L(e5)
+E(L(load_table5), X86_RET_UINT8)
+ movzbl %al, %eax
+ jmp L(e5)
+E(L(load_table5), X86_RET_UINT16)
+ movzwl %ax, %eax
+ jmp L(e5)
+E(L(load_table5), X86_RET_INT64)
+ movl 16+4(%esp), %edx
+ jmp L(e5)
+E(L(load_table5), X86_RET_INT32)
+ nop
+ /* fallthru */
+E(L(load_table5), X86_RET_VOID)
+L(e5):
+ addl $raw_closure_T_FS, %esp
+L(UW48):
+ # cfi_adjust_cfa_offset(-raw_closure_T_FS)
+ /* Remove the extra %ecx argument we pushed. */
+ ret $4
+L(UW49):
+ # cfi_adjust_cfa_offset(raw_closure_T_FS)
+E(L(load_table5), X86_RET_STRUCTPOP)
+ addl $raw_closure_T_FS, %esp
+L(UW50):
+ # cfi_adjust_cfa_offset(-raw_closure_T_FS)
+ ret $8
+L(UW51):
+ # cfi_adjust_cfa_offset(raw_closure_T_FS)
+E(L(load_table5), X86_RET_STRUCTARG)
+ jmp L(e5)
+E(L(load_table5), X86_RET_STRUCT_1B)
+ movzbl %al, %eax
+ jmp L(e5)
+E(L(load_table5), X86_RET_STRUCT_2B)
+ movzwl %ax, %eax
+ jmp L(e5)
+
+ /* Fill out the table so that bad values are predictable. */
+E(L(load_table5), X86_RET_UNUSED14)
+ ud2
+E(L(load_table5), X86_RET_UNUSED15)
+ ud2
+
+L(UW52):
+ # cfi_endproc
+ENDF(C(ffi_closure_raw_THISCALL))
+
+#endif /* !FFI_NO_RAW_API */
+
+#ifdef X86_DARWIN
+# define COMDAT(X) \
+ .section __TEXT,__textcoal_nt,coalesced,pure_instructions; \
+ .weak_definition X; \
+ .private_extern X
+#elif defined __ELF__ && !(defined(__sun__) && defined(__svr4__))
+# define COMDAT(X) \
+ .section .text.X,"axG",@progbits,X,comdat; \
+ .globl X; \
+ FFI_HIDDEN(X)
+#else
+# define COMDAT(X)
+#endif
+
+#if defined(__PIC__)
+ COMDAT(C(__x86.get_pc_thunk.bx))
+C(__x86.get_pc_thunk.bx):
+ movl (%esp), %ebx
+ ret
+ENDF(C(__x86.get_pc_thunk.bx))
+# if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE
+ COMDAT(C(__x86.get_pc_thunk.dx))
+C(__x86.get_pc_thunk.dx):
+ movl (%esp), %edx
+ ret
+ENDF(C(__x86.get_pc_thunk.dx))
+#endif /* DARWIN || HIDDEN */
+#endif /* __PIC__ */
+
+/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. */
+
+#ifdef __APPLE__
+.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
+EHFrame0:
+#elif defined(X86_WIN32)
+.section .eh_frame,"r"
+#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE)
+.section .eh_frame,EH_FRAME_FLAGS,@unwind
+#else
+.section .eh_frame,EH_FRAME_FLAGS,@progbits
+#endif
+
+#ifdef HAVE_AS_X86_PCREL
+# define PCREL(X) X - .
+#else
+# define PCREL(X) X@rel
+#endif
+
+/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. */
+#define ADV(N, P) .byte 2, L(N)-L(P)
+
+ .balign 4
+L(CIE):
+ .set L(set0),L(ECIE)-L(SCIE)
+ .long L(set0) /* CIE Length */
+L(SCIE):
+ .long 0 /* CIE Identifier Tag */
+ .byte 1 /* CIE Version */
+ .ascii "zR\0" /* CIE Augmentation */
+ .byte 1 /* CIE Code Alignment Factor */
+ .byte 0x7c /* CIE Data Alignment Factor */
+ .byte 0x8 /* CIE RA Column */
+ .byte 1 /* Augmentation size */
+ .byte 0x1b /* FDE Encoding (pcrel sdata4) */
+ .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp offset 4 */
+ .byte 0x80+8, 1 /* DW_CFA_offset, %eip offset 1*-4 */
+ .balign 4
+L(ECIE):
+
+ .set L(set1),L(EFDE1)-L(SFDE1)
+ .long L(set1) /* FDE Length */
+L(SFDE1):
+ .long L(SFDE1)-L(CIE) /* FDE CIE offset */
+ .long PCREL(L(UW0)) /* Initial location */
+ .long L(UW5)-L(UW0) /* Address range */
+ .byte 0 /* Augmentation size */
+ ADV(UW1, UW0)
+ .byte 0xc, 5, 8 /* DW_CFA_def_cfa, %ebp 8 */
+ .byte 0x80+5, 2 /* DW_CFA_offset, %ebp 2*-4 */
+ ADV(UW2, UW1)
+ .byte 0x80+3, 0 /* DW_CFA_offset, %ebx 0*-4 */
+ ADV(UW3, UW2)
+ .byte 0xa /* DW_CFA_remember_state */
+ .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp 4 */
+ .byte 0xc0+3 /* DW_CFA_restore, %ebx */
+ .byte 0xc0+5 /* DW_CFA_restore, %ebp */
+ ADV(UW4, UW3)
+ .byte 0xb /* DW_CFA_restore_state */
+ .balign 4
+L(EFDE1):
+
+ .set L(set2),L(EFDE2)-L(SFDE2)
+ .long L(set2) /* FDE Length */
+L(SFDE2):
+ .long L(SFDE2)-L(CIE) /* FDE CIE offset */
+ .long PCREL(L(UW6)) /* Initial location */
+ .long L(UW8)-L(UW6) /* Address range */
+ .byte 0 /* Augmentation size */
+ ADV(UW7, UW6)
+ .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
+ .balign 4
+L(EFDE2):
+
+ .set L(set3),L(EFDE3)-L(SFDE3)
+ .long L(set3) /* FDE Length */
+L(SFDE3):
+ .long L(SFDE3)-L(CIE) /* FDE CIE offset */
+ .long PCREL(L(UW9)) /* Initial location */
+ .long L(UW11)-L(UW9) /* Address range */
+ .byte 0 /* Augmentation size */
+ ADV(UW10, UW9)
+ .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
+ .balign 4
+L(EFDE3):
+
+ .set L(set4),L(EFDE4)-L(SFDE4)
+ .long L(set4) /* FDE Length */
+L(SFDE4):
+ .long L(SFDE4)-L(CIE) /* FDE CIE offset */
+ .long PCREL(L(UW12)) /* Initial location */
+ .long L(UW20)-L(UW12) /* Address range */
+ .byte 0 /* Augmentation size */
+ ADV(UW13, UW12)
+ .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
+#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX
+ ADV(UW14, UW13)
+ .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */
+ ADV(UW15, UW14)
+ .byte 0xc0+3 /* DW_CFA_restore %ebx */
+ ADV(UW16, UW15)
+#else
+ ADV(UW16, UW13)
+#endif
+ .byte 0xe, 4 /* DW_CFA_def_cfa_offset */
+ ADV(UW17, UW16)
+ .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
+ ADV(UW18, UW17)
+ .byte 0xe, 4 /* DW_CFA_def_cfa_offset */
+ ADV(UW19, UW18)
+ .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
+ .balign 4
+L(EFDE4):
+
+ .set L(set5),L(EFDE5)-L(SFDE5)
+ .long L(set5) /* FDE Length */
+L(SFDE5):
+ .long L(SFDE5)-L(CIE) /* FDE CIE offset */
+ .long PCREL(L(UW21)) /* Initial location */
+ .long L(UW23)-L(UW21) /* Address range */
+ .byte 0 /* Augmentation size */
+ ADV(UW22, UW21)
+ .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
+ .balign 4
+L(EFDE5):
+
+ .set L(set6),L(EFDE6)-L(SFDE6)
+ .long L(set6) /* FDE Length */
+L(SFDE6):
+ .long L(SFDE6)-L(CIE) /* FDE CIE offset */
+ .long PCREL(L(UW24)) /* Initial location */
+ .long L(UW26)-L(UW24) /* Address range */
+ .byte 0 /* Augmentation size */
+ .byte 0xe, 8 /* DW_CFA_def_cfa_offset */
+ .byte 0x80+8, 2 /* DW_CFA_offset %eip, 2*-4 */
+ ADV(UW25, UW24)
+ .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
+ .balign 4
+L(EFDE6):
+
+ .set L(set7),L(EFDE7)-L(SFDE7)
+ .long L(set7) /* FDE Length */
+L(SFDE7):
+ .long L(SFDE7)-L(CIE) /* FDE CIE offset */
+ .long PCREL(L(UW27)) /* Initial location */
+ .long L(UW31)-L(UW27) /* Address range */
+ .byte 0 /* Augmentation size */
+ ADV(UW28, UW27)
+ .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
+#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX
+ ADV(UW29, UW28)
+ .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */
+ ADV(UW30, UW29)
+ .byte 0xc0+3 /* DW_CFA_restore %ebx */
+#endif
+ .balign 4
+L(EFDE7):
+
+#if !FFI_NO_RAW_API
+ .set L(set8),L(EFDE8)-L(SFDE8)
+ .long L(set8) /* FDE Length */
+L(SFDE8):
+ .long L(SFDE8)-L(CIE) /* FDE CIE offset */
+ .long PCREL(L(UW32)) /* Initial location */
+ .long L(UW40)-L(UW32) /* Address range */
+ .byte 0 /* Augmentation size */
+ ADV(UW33, UW32)
+ .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */
+ ADV(UW34, UW33)
+ .byte 0x80+3, 2 /* DW_CFA_offset %ebx 2*-4 */
+ ADV(UW35, UW34)
+ .byte 0xc0+3 /* DW_CFA_restore %ebx */
+ ADV(UW36, UW35)
+ .byte 0xe, 4 /* DW_CFA_def_cfa_offset */
+ ADV(UW37, UW36)
+ .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */
+ ADV(UW38, UW37)
+ .byte 0xe, 4 /* DW_CFA_def_cfa_offset */
+ ADV(UW39, UW38)
+ .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */
+ .balign 4
+L(EFDE8):
+
+ .set L(set9),L(EFDE9)-L(SFDE9)
+ .long L(set9) /* FDE Length */
+L(SFDE9):
+ .long L(SFDE9)-L(CIE) /* FDE CIE offset */
+ .long PCREL(L(UW41)) /* Initial location */
+ .long L(UW52)-L(UW41) /* Address range */
+ .byte 0 /* Augmentation size */
+ ADV(UW42, UW41)
+ .byte 0xe, 0 /* DW_CFA_def_cfa_offset */
+ .byte 0x9, 8, 2 /* DW_CFA_register %eip, %edx */
+ ADV(UW43, UW42)
+ .byte 0xe, 4 /* DW_CFA_def_cfa_offset */
+ ADV(UW44, UW43)
+ .byte 0xe, 8 /* DW_CFA_def_cfa_offset */
+ .byte 0x80+8, 2 /* DW_CFA_offset %eip 2*-4 */
+ ADV(UW45, UW44)
+ .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */
+ ADV(UW46, UW45)
+ .byte 0x80+3, 3 /* DW_CFA_offset %ebx 3*-4 */
+ ADV(UW47, UW46)
+ .byte 0xc0+3 /* DW_CFA_restore %ebx */
+ ADV(UW48, UW47)
+ .byte 0xe, 8 /* DW_CFA_def_cfa_offset */
+ ADV(UW49, UW48)
+ .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */
+ ADV(UW50, UW49)
+ .byte 0xe, 8 /* DW_CFA_def_cfa_offset */
+ ADV(UW51, UW50)
+ .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */
+ .balign 4
+L(EFDE9):
+#endif /* !FFI_NO_RAW_API */
+
+#endif /* ifndef __x86_64__ */
+
+#if defined __ELF__ && defined __linux__
+ .section .note.GNU-stack,"",@progbits
+#endif
diff -Nru orig/Modules/_ctypes/libffi_ios/x86/unix64.S modified/Modules/_ctypes/libffi_ios/x86/unix64.S
--- orig/Modules/_ctypes/libffi_ios/x86/unix64.S 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/x86/unix64.S 2015-03-12 21:33:31.000000000 +0800
@@ -0,0 +1,546 @@
+/* -----------------------------------------------------------------------
+ unix64.S - Copyright (c) 2013 The Written Word, Inc.
+ - Copyright (c) 2008 Red Hat, Inc
+ - Copyright (c) 2002 Bo Thorsen
+
+ x86-64 Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#ifdef __x86_64__
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+#include "internal64.h"
+
+ .text
+
+#define C2(X, Y) X ## Y
+#define C1(X, Y) C2(X, Y)
+#ifdef __USER_LABEL_PREFIX__
+# define C(X) C1(__USER_LABEL_PREFIX__, X)
+#else
+# define C(X) X
+#endif
+
+#ifdef __APPLE__
+# define L(X) C1(L, X)
+#else
+# define L(X) C1(.L, X)
+#endif
+
+#ifdef __ELF__
+# define PLT(X) X@PLT
+# define ENDF(X) .type X,@function; .size X, . - X
+#else
+# define PLT(X) X
+# define ENDF(X)
+#endif
+
+/* This macro allows the safe creation of jump tables without an
+ actual table. The entry points into the table are all 8 bytes.
+ The use of ORG asserts that we're at the correct location. */
+/* ??? The clang assembler doesn't handle .org with symbolic expressions. */
+#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__))
+# define E(BASE, X) .balign 8
+#else
+# define E(BASE, X) .balign 8; .org BASE + X * 8
+#endif
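+
+/* As an illustration: with the .org form,
+   E(L(store_table), UNIX64_RET_UINT8) pins that entry to exactly
+   UNIX64_RET_UINT8 * 8 bytes past L(store_table), which is what lets the
+   dispatch code below compute an entry address as table + type * 8.  */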
+
+/* ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags,
+ void *raddr, void (*fnaddr)(void));
+
+ Bit o trickiness here -- ARGS+BYTES is the base of the stack frame
+ for this function. This has been allocated by ffi_call. We also
+ deallocate some of the stack that has been alloca'd. */
+
+ .balign 8
+ .globl C(ffi_call_unix64)
+ FFI_HIDDEN(C(ffi_call_unix64))
+
+C(ffi_call_unix64):
+L(UW0):
+ movq (%rsp), %r10 /* Load return address. */
+ leaq (%rdi, %rsi), %rax /* Find local stack base. */
+ movq %rdx, (%rax) /* Save flags. */
+ movq %rcx, 8(%rax) /* Save raddr. */
+ movq %rbp, 16(%rax) /* Save old frame pointer. */
+ movq %r10, 24(%rax) /* Relocate return address. */
+ movq %rax, %rbp /* Finalize local stack frame. */
+
+	/* New stack frame based off rbp.  This is an itty bit of unwind
+ trickery in that the CFA *has* changed. There is no easy way
+ to describe it correctly on entry to the function. Fortunately,
+ it doesn't matter too much since at all points we can correctly
+ unwind back to ffi_call. Note that the location to which we
+ moved the return address is (the new) CFA-8, so from the
+ perspective of the unwind info, it hasn't moved. */
+L(UW1):
+ /* cfi_def_cfa(%rbp, 32) */
+ /* cfi_rel_offset(%rbp, 16) */
+
+ movq %rdi, %r10 /* Save a copy of the register area. */
+ movq %r8, %r11 /* Save a copy of the target fn. */
+ movl %r9d, %eax /* Set number of SSE registers. */
+
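+	/* Layout of the register area at %r10, as consumed below: the six
+	   integer argument registers at 0x00-0x28, the eight SSE registers
+	   in 16-byte slots at 0x30-0xa0, the SSE-in-use flag at 0xb0, and
+	   the value popped back into %r10 at 0xb8, after which the
+	   stack-passed arguments begin.  */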
+ /* Load up all argument registers. */
+ movq (%r10), %rdi
+ movq 0x08(%r10), %rsi
+ movq 0x10(%r10), %rdx
+ movq 0x18(%r10), %rcx
+ movq 0x20(%r10), %r8
+ movq 0x28(%r10), %r9
+ movl 0xb0(%r10), %eax
+ testl %eax, %eax
+ jnz L(load_sse)
+L(ret_from_load_sse):
+
+ /* Deallocate the reg arg area, except for r10, then load via pop. */
+ leaq 0xb8(%r10), %rsp
+ popq %r10
+
+ /* Call the user function. */
+ call *%r11
+
+ /* Deallocate stack arg area; local stack frame in redzone. */
+ leaq 24(%rbp), %rsp
+
+ movq 0(%rbp), %rcx /* Reload flags. */
+ movq 8(%rbp), %rdi /* Reload raddr. */
+ movq 16(%rbp), %rbp /* Reload old frame pointer. */
+L(UW2):
+ /* cfi_remember_state */
+ /* cfi_def_cfa(%rsp, 8) */
+ /* cfi_restore(%rbp) */
+
+ /* The first byte of the flags contains the FFI_TYPE. */
+ cmpb $UNIX64_RET_LAST, %cl
+ movzbl %cl, %r10d
+ leaq L(store_table)(%rip), %r11
+ ja L(sa)
+ leaq (%r11, %r10, 8), %r10
+
+ /* Prep for the structure cases: scratch area in redzone. */
+ leaq -20(%rsp), %rsi
+ jmp *%r10
+
+ .balign 8
+L(store_table):
+E(L(store_table), UNIX64_RET_VOID)
+ ret
+E(L(store_table), UNIX64_RET_UINT8)
+ movzbl %al, %eax
+ movq %rax, (%rdi)
+ ret
+E(L(store_table), UNIX64_RET_UINT16)
+ movzwl %ax, %eax
+ movq %rax, (%rdi)
+ ret
+E(L(store_table), UNIX64_RET_UINT32)
+ movl %eax, %eax
+ movq %rax, (%rdi)
+ ret
+E(L(store_table), UNIX64_RET_SINT8)
+ movsbq %al, %rax
+ movq %rax, (%rdi)
+ ret
+E(L(store_table), UNIX64_RET_SINT16)
+ movswq %ax, %rax
+ movq %rax, (%rdi)
+ ret
+E(L(store_table), UNIX64_RET_SINT32)
+ cltq
+ movq %rax, (%rdi)
+ ret
+E(L(store_table), UNIX64_RET_INT64)
+ movq %rax, (%rdi)
+ ret
+E(L(store_table), UNIX64_RET_XMM32)
+ movd %xmm0, (%rdi)
+ ret
+E(L(store_table), UNIX64_RET_XMM64)
+ movq %xmm0, (%rdi)
+ ret
+E(L(store_table), UNIX64_RET_X87)
+ fstpt (%rdi)
+ ret
+E(L(store_table), UNIX64_RET_X87_2)
+ fstpt (%rdi)
+ fstpt 16(%rdi)
+ ret
+E(L(store_table), UNIX64_RET_ST_XMM0_RAX)
+ movq %rax, 8(%rsi)
+ jmp L(s3)
+E(L(store_table), UNIX64_RET_ST_RAX_XMM0)
+ movq %xmm0, 8(%rsi)
+ jmp L(s2)
+E(L(store_table), UNIX64_RET_ST_XMM0_XMM1)
+ movq %xmm1, 8(%rsi)
+ jmp L(s3)
+E(L(store_table), UNIX64_RET_ST_RAX_RDX)
+ movq %rdx, 8(%rsi)
+L(s2):
+ movq %rax, (%rsi)
+ shrl $UNIX64_SIZE_SHIFT, %ecx
+ rep movsb
+ ret
+ .balign 8
+L(s3):
+ movq %xmm0, (%rsi)
+ shrl $UNIX64_SIZE_SHIFT, %ecx
+ rep movsb
+ ret
+
+L(sa): call PLT(C(abort))
+
+ /* Many times we can avoid loading any SSE registers at all.
+ It's not worth an indirect jump to load the exact set of
+ SSE registers needed; zero or all is a good compromise. */
+ .balign 2
+L(UW3):
+ /* cfi_restore_state */
+L(load_sse):
+ movdqa 0x30(%r10), %xmm0
+ movdqa 0x40(%r10), %xmm1
+ movdqa 0x50(%r10), %xmm2
+ movdqa 0x60(%r10), %xmm3
+ movdqa 0x70(%r10), %xmm4
+ movdqa 0x80(%r10), %xmm5
+ movdqa 0x90(%r10), %xmm6
+ movdqa 0xa0(%r10), %xmm7
+ jmp L(ret_from_load_sse)
+
+L(UW4):
+ENDF(C(ffi_call_unix64))
+
+/* 6 general registers, 8 vector registers,
+ 32 bytes of rvalue, 8 bytes of alignment. */
+#define ffi_closure_OFS_G 0
+#define ffi_closure_OFS_V (6*8)
+#define ffi_closure_OFS_RVALUE (ffi_closure_OFS_V + 8*16)
+#define ffi_closure_FS (ffi_closure_OFS_RVALUE + 32 + 8)
+
+/* The location of rvalue within the red zone after deallocating the frame. */
+#define ffi_closure_RED_RVALUE (ffi_closure_OFS_RVALUE - ffi_closure_FS)
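+
+/* With the values above: ffi_closure_OFS_V = 48, ffi_closure_OFS_RVALUE =
+   176 and ffi_closure_FS = 216, so the frame plus the 8-byte return address
+   totals 224 bytes, keeping %rsp 16-byte aligned at the call to
+   ffi_closure_unix64_inner.  */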
+
+ .balign 2
+ .globl C(ffi_closure_unix64_sse)
+ FFI_HIDDEN(C(ffi_closure_unix64_sse))
+
+C(ffi_closure_unix64_sse):
+L(UW5):
+ subq $ffi_closure_FS, %rsp
+L(UW6):
+ /* cfi_adjust_cfa_offset(ffi_closure_FS) */
+
+ movdqa %xmm0, ffi_closure_OFS_V+0x00(%rsp)
+ movdqa %xmm1, ffi_closure_OFS_V+0x10(%rsp)
+ movdqa %xmm2, ffi_closure_OFS_V+0x20(%rsp)
+ movdqa %xmm3, ffi_closure_OFS_V+0x30(%rsp)
+ movdqa %xmm4, ffi_closure_OFS_V+0x40(%rsp)
+ movdqa %xmm5, ffi_closure_OFS_V+0x50(%rsp)
+ movdqa %xmm6, ffi_closure_OFS_V+0x60(%rsp)
+ movdqa %xmm7, ffi_closure_OFS_V+0x70(%rsp)
+ jmp L(sse_entry1)
+
+L(UW7):
+ENDF(C(ffi_closure_unix64_sse))
+
+ .balign 2
+ .globl C(ffi_closure_unix64)
+ FFI_HIDDEN(C(ffi_closure_unix64))
+
+C(ffi_closure_unix64):
+L(UW8):
+ subq $ffi_closure_FS, %rsp
+L(UW9):
+ /* cfi_adjust_cfa_offset(ffi_closure_FS) */
+L(sse_entry1):
+ movq %rdi, ffi_closure_OFS_G+0x00(%rsp)
+ movq %rsi, ffi_closure_OFS_G+0x08(%rsp)
+ movq %rdx, ffi_closure_OFS_G+0x10(%rsp)
+ movq %rcx, ffi_closure_OFS_G+0x18(%rsp)
+ movq %r8, ffi_closure_OFS_G+0x20(%rsp)
+ movq %r9, ffi_closure_OFS_G+0x28(%rsp)
+
+#ifdef __ILP32__
+ movl FFI_TRAMPOLINE_SIZE(%r10), %edi /* Load cif */
+ movl FFI_TRAMPOLINE_SIZE+4(%r10), %esi /* Load fun */
+ movl FFI_TRAMPOLINE_SIZE+8(%r10), %edx /* Load user_data */
+#else
+ movq FFI_TRAMPOLINE_SIZE(%r10), %rdi /* Load cif */
+ movq FFI_TRAMPOLINE_SIZE+8(%r10), %rsi /* Load fun */
+ movq FFI_TRAMPOLINE_SIZE+16(%r10), %rdx /* Load user_data */
+#endif
+L(do_closure):
+ leaq ffi_closure_OFS_RVALUE(%rsp), %rcx /* Load rvalue */
+ movq %rsp, %r8 /* Load reg_args */
+ leaq ffi_closure_FS+8(%rsp), %r9 /* Load argp */
+ call C(ffi_closure_unix64_inner)
+
+ /* Deallocate stack frame early; return value is now in redzone. */
+ addq $ffi_closure_FS, %rsp
+L(UW10):
+ /* cfi_adjust_cfa_offset(-ffi_closure_FS) */
+
+ /* The first byte of the return value contains the FFI_TYPE. */
+ cmpb $UNIX64_RET_LAST, %al
+ movzbl %al, %r10d
+ leaq L(load_table)(%rip), %r11
+ ja L(la)
+ leaq (%r11, %r10, 8), %r10
+ leaq ffi_closure_RED_RVALUE(%rsp), %rsi
+ jmp *%r10
+
+ .balign 8
+L(load_table):
+E(L(load_table), UNIX64_RET_VOID)
+ ret
+E(L(load_table), UNIX64_RET_UINT8)
+ movzbl (%rsi), %eax
+ ret
+E(L(load_table), UNIX64_RET_UINT16)
+ movzwl (%rsi), %eax
+ ret
+E(L(load_table), UNIX64_RET_UINT32)
+ movl (%rsi), %eax
+ ret
+E(L(load_table), UNIX64_RET_SINT8)
+ movsbl (%rsi), %eax
+ ret
+E(L(load_table), UNIX64_RET_SINT16)
+ movswl (%rsi), %eax
+ ret
+E(L(load_table), UNIX64_RET_SINT32)
+ movl (%rsi), %eax
+ ret
+E(L(load_table), UNIX64_RET_INT64)
+ movq (%rsi), %rax
+ ret
+E(L(load_table), UNIX64_RET_XMM32)
+ movd (%rsi), %xmm0
+ ret
+E(L(load_table), UNIX64_RET_XMM64)
+ movq (%rsi), %xmm0
+ ret
+E(L(load_table), UNIX64_RET_X87)
+ fldt (%rsi)
+ ret
+E(L(load_table), UNIX64_RET_X87_2)
+ fldt 16(%rsi)
+ fldt (%rsi)
+ ret
+E(L(load_table), UNIX64_RET_ST_XMM0_RAX)
+ movq 8(%rsi), %rax
+ jmp L(l3)
+E(L(load_table), UNIX64_RET_ST_RAX_XMM0)
+ movq 8(%rsi), %xmm0
+ jmp L(l2)
+E(L(load_table), UNIX64_RET_ST_XMM0_XMM1)
+ movq 8(%rsi), %xmm1
+ jmp L(l3)
+E(L(load_table), UNIX64_RET_ST_RAX_RDX)
+ movq 8(%rsi), %rdx
+L(l2):
+ movq (%rsi), %rax
+ ret
+ .balign 8
+L(l3):
+ movq (%rsi), %xmm0
+ ret
+
+L(la): call PLT(C(abort))
+
+L(UW11):
+ENDF(C(ffi_closure_unix64))
+
+ .balign 2
+ .globl C(ffi_go_closure_unix64_sse)
+ FFI_HIDDEN(C(ffi_go_closure_unix64_sse))
+
+C(ffi_go_closure_unix64_sse):
+L(UW12):
+ subq $ffi_closure_FS, %rsp
+L(UW13):
+ /* cfi_adjust_cfa_offset(ffi_closure_FS) */
+
+ movdqa %xmm0, ffi_closure_OFS_V+0x00(%rsp)
+ movdqa %xmm1, ffi_closure_OFS_V+0x10(%rsp)
+ movdqa %xmm2, ffi_closure_OFS_V+0x20(%rsp)
+ movdqa %xmm3, ffi_closure_OFS_V+0x30(%rsp)
+ movdqa %xmm4, ffi_closure_OFS_V+0x40(%rsp)
+ movdqa %xmm5, ffi_closure_OFS_V+0x50(%rsp)
+ movdqa %xmm6, ffi_closure_OFS_V+0x60(%rsp)
+ movdqa %xmm7, ffi_closure_OFS_V+0x70(%rsp)
+ jmp L(sse_entry2)
+
+L(UW14):
+ENDF(C(ffi_go_closure_unix64_sse))
+
+ .balign 2
+ .globl C(ffi_go_closure_unix64)
+ FFI_HIDDEN(C(ffi_go_closure_unix64))
+
+C(ffi_go_closure_unix64):
+L(UW15):
+ subq $ffi_closure_FS, %rsp
+L(UW16):
+ /* cfi_adjust_cfa_offset(ffi_closure_FS) */
+L(sse_entry2):
+ movq %rdi, ffi_closure_OFS_G+0x00(%rsp)
+ movq %rsi, ffi_closure_OFS_G+0x08(%rsp)
+ movq %rdx, ffi_closure_OFS_G+0x10(%rsp)
+ movq %rcx, ffi_closure_OFS_G+0x18(%rsp)
+ movq %r8, ffi_closure_OFS_G+0x20(%rsp)
+ movq %r9, ffi_closure_OFS_G+0x28(%rsp)
+
+#ifdef __ILP32__
+ movl 4(%r10), %edi /* Load cif */
+ movl 8(%r10), %esi /* Load fun */
+ movl %r10d, %edx /* Load closure (user_data) */
+#else
+ movq 8(%r10), %rdi /* Load cif */
+ movq 16(%r10), %rsi /* Load fun */
+ movq %r10, %rdx /* Load closure (user_data) */
+#endif
+ jmp L(do_closure)
+
+L(UW17):
+ENDF(C(ffi_go_closure_unix64))
+
+/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. */
+
+#ifdef __APPLE__
+.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
+EHFrame0:
+#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE)
+.section .eh_frame,"a",@unwind
+#else
+.section .eh_frame,"a",@progbits
+#endif
+
+#ifdef HAVE_AS_X86_PCREL
+# define PCREL(X) X - .
+#else
+# define PCREL(X) X@rel
+#endif
+
+/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. */
+#define ADV(N, P) .byte 2, L(N)-L(P)
+
+ .balign 8
+L(CIE):
+ .set L(set0),L(ECIE)-L(SCIE)
+ .long L(set0) /* CIE Length */
+L(SCIE):
+ .long 0 /* CIE Identifier Tag */
+ .byte 1 /* CIE Version */
+ .ascii "zR\0" /* CIE Augmentation */
+ .byte 1 /* CIE Code Alignment Factor */
+ .byte 0x78 /* CIE Data Alignment Factor */
+ .byte 0x10 /* CIE RA Column */
+ .byte 1 /* Augmentation size */
+ .byte 0x1b /* FDE Encoding (pcrel sdata4) */
+ .byte 0xc, 7, 8 /* DW_CFA_def_cfa, %rsp offset 8 */
+ .byte 0x80+16, 1 /* DW_CFA_offset, %rip offset 1*-8 */
+ .balign 8
+L(ECIE):
+
+ .set L(set1),L(EFDE1)-L(SFDE1)
+ .long L(set1) /* FDE Length */
+L(SFDE1):
+ .long L(SFDE1)-L(CIE) /* FDE CIE offset */
+ .long PCREL(L(UW0)) /* Initial location */
+ .long L(UW4)-L(UW0) /* Address range */
+ .byte 0 /* Augmentation size */
+ ADV(UW1, UW0)
+ .byte 0xc, 6, 32 /* DW_CFA_def_cfa, %rbp 32 */
+ .byte 0x80+6, 2 /* DW_CFA_offset, %rbp 2*-8 */
+ ADV(UW2, UW1)
+ .byte 0xa /* DW_CFA_remember_state */
+ .byte 0xc, 7, 8 /* DW_CFA_def_cfa, %rsp 8 */
+ .byte 0xc0+6 /* DW_CFA_restore, %rbp */
+ ADV(UW3, UW2)
+ .byte 0xb /* DW_CFA_restore_state */
+ .balign 8
+L(EFDE1):
+
+ .set L(set2),L(EFDE2)-L(SFDE2)
+ .long L(set2) /* FDE Length */
+L(SFDE2):
+ .long L(SFDE2)-L(CIE) /* FDE CIE offset */
+ .long PCREL(L(UW5)) /* Initial location */
+ .long L(UW7)-L(UW5) /* Address range */
+ .byte 0 /* Augmentation size */
+ ADV(UW6, UW5)
+ .byte 0xe /* DW_CFA_def_cfa_offset */
+ .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */
+ .balign 8
+L(EFDE2):
+
+ .set L(set3),L(EFDE3)-L(SFDE3)
+ .long L(set3) /* FDE Length */
+L(SFDE3):
+ .long L(SFDE3)-L(CIE) /* FDE CIE offset */
+ .long PCREL(L(UW8)) /* Initial location */
+ .long L(UW11)-L(UW8) /* Address range */
+ .byte 0 /* Augmentation size */
+ ADV(UW9, UW8)
+ .byte 0xe /* DW_CFA_def_cfa_offset */
+ .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */
+ ADV(UW10, UW9)
+ .byte 0xe, 8 /* DW_CFA_def_cfa_offset 8 */
+L(EFDE3):
+
+ .set L(set4),L(EFDE4)-L(SFDE4)
+ .long L(set4) /* FDE Length */
+L(SFDE4):
+ .long L(SFDE4)-L(CIE) /* FDE CIE offset */
+ .long PCREL(L(UW12)) /* Initial location */
+ .long L(UW14)-L(UW12) /* Address range */
+ .byte 0 /* Augmentation size */
+ ADV(UW13, UW12)
+ .byte 0xe /* DW_CFA_def_cfa_offset */
+ .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */
+ .balign 8
+L(EFDE4):
+
+ .set L(set5),L(EFDE5)-L(SFDE5)
+ .long L(set5) /* FDE Length */
+L(SFDE5):
+ .long L(SFDE5)-L(CIE) /* FDE CIE offset */
+ .long PCREL(L(UW15)) /* Initial location */
+ .long L(UW17)-L(UW15) /* Address range */
+ .byte 0 /* Augmentation size */
+ ADV(UW16, UW15)
+ .byte 0xe /* DW_CFA_def_cfa_offset */
+ .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */
+ .balign 8
+L(EFDE5):
+#ifdef __APPLE__
+ .subsections_via_symbols
+#endif
+
+#endif /* __x86_64__ */
+#if defined __ELF__ && defined __linux__
+ .section .note.GNU-stack,"",@progbits
+#endif
diff -Nru orig/Modules/_ctypes/libffi_ios/x86/win64.S modified/Modules/_ctypes/libffi_ios/x86/win64.S
--- orig/Modules/_ctypes/libffi_ios/x86/win64.S 1970-01-01 08:00:00.000000000 +0800
+++ modified/Modules/_ctypes/libffi_ios/x86/win64.S 2015-03-12 21:33:31.000000000 +0800
@@ -0,0 +1,219 @@
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+#include <ffi_cfi.h>
+
+#if defined(HAVE_AS_CFI_PSEUDO_OP)
+ .cfi_sections .debug_frame
+#endif
+
+#define arg0 %rcx
+#define arg1 %rdx
+#define arg2 %r8
+#define arg3 %r9
+
+#ifdef SYMBOL_UNDERSCORE
+#define SYMBOL_NAME(name) _##name
+#else
+#define SYMBOL_NAME(name) name
+#endif
+
+.macro E which
+ .align 8
+ .org 0b + \which * 8
+.endm
+
+ .text
+
+/* ffi_call_win64 (void *stack, struct win64_call_frame *frame, void *r10)
+
+ Bit o trickiness here -- FRAME is the base of the stack frame
+ for this function. This has been allocated by ffi_call. We also
+ deallocate some of the stack that has been alloca'd. */
+
+ .align 8
+ .globl ffi_call_win64
+
+ .seh_proc ffi_call_win64
+ffi_call_win64:
+ cfi_startproc
+ /* Set up the local stack frame and install it in rbp/rsp. */
+ movq (%rsp), %rax
+ movq %rbp, (arg1)
+ movq %rax, 8(arg1)
+ movq arg1, %rbp
+ cfi_def_cfa(%rbp, 16)
+ cfi_rel_offset(%rbp, 0)
+ .seh_pushreg %rbp
+ .seh_setframe %rbp, 0
+ .seh_endprologue
+ movq arg0, %rsp
+
+ movq arg2, %r10
+
+ /* Load all slots into both general and xmm registers. */
+ movq (%rsp), %rcx
+ movsd (%rsp), %xmm0
+ movq 8(%rsp), %rdx
+ movsd 8(%rsp), %xmm1
+ movq 16(%rsp), %r8
+ movsd 16(%rsp), %xmm2
+ movq 24(%rsp), %r9
+ movsd 24(%rsp), %xmm3
+
+ call *16(%rbp)
+
+ movl 24(%rbp), %ecx
+ movq 32(%rbp), %r8
+ leaq 0f(%rip), %r10
+ cmpl $FFI_TYPE_SMALL_STRUCT_4B, %ecx
+ leaq (%r10, %rcx, 8), %r10
+ ja 99f
+ jmp *%r10
+
+/* Below, we're space constrained most of the time. Thus we eschew the
+ modern "mov, pop, ret" sequence (5 bytes) for "leave, ret" (2 bytes). */
+.macro epilogue
+ leaveq
+ cfi_remember_state
+ cfi_def_cfa(%rsp, 8)
+ cfi_restore(%rbp)
+ ret
+ cfi_restore_state
+.endm
+
+ .align 8
+0:
+E FFI_TYPE_VOID
+ epilogue
+E FFI_TYPE_INT
+ movslq %eax, %rax
+ movq %rax, (%r8)
+ epilogue
+E FFI_TYPE_FLOAT
+ movss %xmm0, (%r8)
+ epilogue
+E FFI_TYPE_DOUBLE
+ movsd %xmm0, (%r8)
+ epilogue
+E FFI_TYPE_LONGDOUBLE
+ call abort
+E FFI_TYPE_UINT8
+ movzbl %al, %eax
+ movq %rax, (%r8)
+ epilogue
+E FFI_TYPE_SINT8
+ movsbq %al, %rax
+ jmp 98f
+E FFI_TYPE_UINT16
+ movzwl %ax, %eax
+ movq %rax, (%r8)
+ epilogue
+E FFI_TYPE_SINT16
+ movswq %ax, %rax
+ jmp 98f
+E FFI_TYPE_UINT32
+ movl %eax, %eax
+ movq %rax, (%r8)
+ epilogue
+E FFI_TYPE_SINT32
+ movslq %eax, %rax
+ movq %rax, (%r8)
+ epilogue
+E FFI_TYPE_UINT64
+98: movq %rax, (%r8)
+ epilogue
+E FFI_TYPE_SINT64
+ movq %rax, (%r8)
+ epilogue
+E FFI_TYPE_STRUCT
+ epilogue
+E FFI_TYPE_POINTER
+ movq %rax, (%r8)
+ epilogue
+E FFI_TYPE_COMPLEX
+ call abort
+E FFI_TYPE_SMALL_STRUCT_1B
+ movb %al, (%r8)
+ epilogue
+E FFI_TYPE_SMALL_STRUCT_2B
+ movw %ax, (%r8)
+ epilogue
+E FFI_TYPE_SMALL_STRUCT_4B
+ movl %eax, (%r8)
+ epilogue
+
+ .align 8
+99: call abort
+
+.purgem epilogue
+
+ cfi_endproc
+ .seh_endproc
+
+
+/* 32 bytes of outgoing register stack space, 8 bytes of alignment,
+ 16 bytes of result, 32 bytes of xmm registers. */
+#define ffi_clo_FS (32+8+16+32)
+#define ffi_clo_OFF_R (32+8)
+#define ffi_clo_OFF_X (32+8+16)
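+
+/* With the values above, ffi_clo_FS = 88, so the frame plus the 8-byte
+   return address totals 96 bytes, keeping %rsp 16-byte aligned at the
+   call to ffi_closure_win64_inner.  */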
+
+ .align 8
+ .globl ffi_go_closure_win64
+
+ .seh_proc ffi_go_closure_win64
+ffi_go_closure_win64:
+ cfi_startproc
+ /* Save all integer arguments into the incoming reg stack space. */
+ movq arg0, 8(%rsp)
+ movq arg1, 16(%rsp)
+ movq arg2, 24(%rsp)
+ movq arg3, 32(%rsp)
+
+ movq 8(%r10), arg0 /* load cif */
+ movq 16(%r10), arg1 /* load fun */
+ movq %r10, arg2 /* closure is user_data */
+ jmp 0f
+ cfi_endproc
+ .seh_endproc
+
+ .align 8
+ .globl ffi_closure_win64
+
+ .seh_proc ffi_closure_win64
+ffi_closure_win64:
+ cfi_startproc
+ /* Save all integer arguments into the incoming reg stack space. */
+ movq arg0, 8(%rsp)
+ movq arg1, 16(%rsp)
+ movq arg2, 24(%rsp)
+ movq arg3, 32(%rsp)
+
+ movq FFI_TRAMPOLINE_SIZE(%r10), arg0 /* load cif */
+ movq FFI_TRAMPOLINE_SIZE+8(%r10), arg1 /* load fun */
+ movq FFI_TRAMPOLINE_SIZE+16(%r10), arg2 /* load user_data */
+0:
+ subq $ffi_clo_FS, %rsp
+ cfi_adjust_cfa_offset(ffi_clo_FS)
+ .seh_stackalloc ffi_clo_FS
+ .seh_endprologue
+
+ /* Save all sse arguments into the stack frame. */
+ movsd %xmm0, ffi_clo_OFF_X(%rsp)
+ movsd %xmm1, ffi_clo_OFF_X+8(%rsp)
+ movsd %xmm2, ffi_clo_OFF_X+16(%rsp)
+ movsd %xmm3, ffi_clo_OFF_X+24(%rsp)
+
+ leaq ffi_clo_OFF_R(%rsp), arg3
+ call ffi_closure_win64_inner
+
+ /* Load the result into both possible result registers. */
+ movq ffi_clo_OFF_R(%rsp), %rax
+ movsd ffi_clo_OFF_R(%rsp), %xmm0
+
+ addq $ffi_clo_FS, %rsp
+ cfi_adjust_cfa_offset(-ffi_clo_FS)
+ ret
+
+ cfi_endproc
+ .seh_endproc
diff -Nru orig/Modules/makesetup modified/Modules/makesetup
--- orig/Modules/makesetup 2015-02-03 19:49:04.000000000 +0800
+++ modified/Modules/makesetup 2015-02-28 13:35:58.000000000 +0800
@@ -176,6 +176,7 @@
*.c++) srcs="$srcs $arg";;
*.cxx) srcs="$srcs $arg";;
*.cpp) srcs="$srcs $arg";;
+ *.S) srcs="$srcs $arg";;
\$*) libs="$libs $arg"
cpps="$cpps $arg";;
*.*) echo 1>&2 "bad word $arg in $line"
@@ -205,6 +206,7 @@
*.C) obj=`basename $src .C`.o; cc='$(CXX)';;
*.cxx) obj=`basename $src .cxx`.o; cc='$(CXX)';;
*.cpp) obj=`basename $src .cpp`.o; cc='$(CXX)';;
+ *.S) obj=`basename $src .S`.o; cc='$(CC)';; # Assembly
*.m) obj=`basename $src .m`.o; cc='$(CC)';; # Obj-C
*) continue;;
esac
@@ -266,7 +268,7 @@
*) sed -e "
1i$NL/* Generated automatically from $config by makesetup. */
/MARKER 1/i$NL$EXTDECLS
-
+
/MARKER 2/i$NL$INITBITS
" $config >config.c
diff -Nru orig/config.sub modified/config.sub
--- orig/config.sub 2015-02-03 19:49:02.000000000 +0800
+++ modified/config.sub 2015-02-15 08:25:31.000000000 +0800
@@ -1512,6 +1512,8 @@
;;
-nacl*)
;;
+ -ios*)
+ ;;
-none)
;;
*)
diff -Nru orig/configure modified/configure
--- orig/configure 2015-02-03 19:49:02.000000000 +0800
+++ modified/configure 2015-03-14 23:11:10.000000000 +0800
@@ -3290,6 +3290,9 @@
*-*-cygwin*)
ac_sys_system=Cygwin
;;
+ *-apple-ios)
+ ac_sys_system=iOS
+ ;;
*)
# for now, limit cross builds to known configurations
MACHDEP="unknown"
@@ -3332,6 +3335,15 @@
_host_cpu=$host_cpu
esac
;;
+ *-apple-ios)
+ case "$host_cpu" in
+ arm*)
+ _host_cpu=arm
+ ;;
+ *)
+ _host_cpu=$host_cpu
+ esac
+ ;;
*-*-cygwin*)
_host_cpu=
;;
@@ -3411,6 +3423,9 @@
define_xopen_source=no;;
Darwin/1[0-9].*)
define_xopen_source=no;;
+ # On iOS, defining _POSIX_C_SOURCE also disables platform specific features.
+ iOS/*)
+ define_xopen_source=no;;
# On AIX 4 and 5.1, mbstate_t is defined only when _XOPEN_SOURCE == 500 but
# used in wcsnrtombs() and mbsnrtowcs() even if _XOPEN_SOURCE is not defined
# or has another value. By not (re)defining it, the defaults come in place.
@@ -5993,11 +6008,17 @@
fi
if test "$cross_compiling" = yes; then
- case "$READELF" in
- readelf|:)
- as_fn_error $? "readelf for the host is required for cross builds" "$LINENO" 5
- ;;
- esac
+ case "$host" in
+ *-apple-ios)
+ # readelf not required for iOS cross builds.
+ ;;
+ *)
+ case "$READELF" in
+ readelf|:)
+ as_fn_error $? "readelf for the host is required for cross builds" "$LINENO" 5
+ ;;
+ esac
+ esac
fi
@@ -16732,7 +16753,17 @@
echo "creating Modules/Setup.local" >&6
if test ! -f Modules/Setup.local
then
- echo "# Edit this file for local setup changes" >Modules/Setup.local
+ if test ! -f Modules/Setup.$_PYTHON_HOST_PLATFORM
+ then
+ echo "# Edit this file for local setup changes" >Modules/Setup.local
+ else
+ if test -f Modules/Setup.$_PYTHON_HOST_PLATFORM.local
+ then
+ cp Modules/Setup.$_PYTHON_HOST_PLATFORM.local Modules/Setup.local
+ else
+ cp Modules/Setup.$_PYTHON_HOST_PLATFORM Modules/Setup.local
+ fi
+ fi
fi
echo "creating Makefile" >&6
diff -Nru orig/configure.ac modified/configure.ac
--- orig/configure.ac 2015-02-25 21:21:37.000000000 +0800
+++ modified/configure.ac 2015-03-14 23:11:05.000000000 +0800
@@ -373,6 +373,9 @@
*-*-cygwin*)
ac_sys_system=Cygwin
;;
+ *-apple-ios)
+ ac_sys_system=iOS
+ ;;
*)
# for now, limit cross builds to known configurations
MACHDEP="unknown"
@@ -415,6 +418,15 @@
_host_cpu=$host_cpu
esac
;;
+ *-apple-ios)
+ case "$host_cpu" in
+ arm*)
+ _host_cpu=arm
+ ;;
+ *)
+ _host_cpu=$host_cpu
+ esac
+ ;;
*-*-cygwin*)
_host_cpu=
;;
@@ -491,6 +503,9 @@
define_xopen_source=no;;
Darwin/1@<:@0-9@:>@.*)
define_xopen_source=no;;
+ # On iOS, defining _POSIX_C_SOURCE also disables platform specific features.
+ iOS/*)
+ define_xopen_source=no;;
# On AIX 4 and 5.1, mbstate_t is defined only when _XOPEN_SOURCE == 500 but
# used in wcsnrtombs() and mbsnrtowcs() even if _XOPEN_SOURCE is not defined
# or has another value. By not (re)defining it, the defaults come in place.
@@ -1023,11 +1038,17 @@
AC_CHECK_TOOLS([READELF], [readelf], [:])
if test "$cross_compiling" = yes; then
- case "$READELF" in
- readelf|:)
- AC_MSG_ERROR([readelf for the host is required for cross builds])
- ;;
- esac
+ case "$host" in
+ *-apple-ios)
+ # readelf not required for iOS cross builds.
+ ;;
+ *)
+ case "$READELF" in
+ readelf|:)
+ AC_MSG_ERROR([readelf for the host is required for cross builds])
+ ;;
+ esac
+ esac
fi
AC_SUBST(READELF)
@@ -4826,7 +4847,17 @@
echo "creating Modules/Setup.local" >&AS_MESSAGE_FD
if test ! -f Modules/Setup.local
then
- echo "# Edit this file for local setup changes" >Modules/Setup.local
+ if test ! -f Modules/Setup.$_PYTHON_HOST_PLATFORM
+ then
+ echo "# Edit this file for local setup changes" >Modules/Setup.local
+ else
+ if test -f Modules/Setup.$_PYTHON_HOST_PLATFORM.local
+ then
+ cp Modules/Setup.$_PYTHON_HOST_PLATFORM.local Modules/Setup.local
+ else
+ cp Modules/Setup.$_PYTHON_HOST_PLATFORM Modules/Setup.local
+ fi
+ fi
fi
echo "creating Makefile" >&AS_MESSAGE_FD
diff -Nru orig/iOS/Makefile modified/iOS/Makefile
--- orig/iOS/Makefile 1970-01-01 08:00:00.000000000 +0800
+++ modified/iOS/Makefile 2015-03-14 23:01:05.000000000 +0800
@@ -0,0 +1,168 @@
+# This is a "Meta-makefile" for building an iOS-compatible Python.framework.
+# It invokes the base makefile multiple times, once for each hardware platform
+# that needs to be supported; then it combines the products into a single "fat"
+# binary.
+
+IOS_DIR=$(shell pwd)
+
+# Extract the Python version from the master configure.ac
+PYTHON_VERSION=$(shell grep "m4_define.PYTHON_VERSION," ../configure.ac | sed "s/m4_define(PYTHON_VERSION, \(.*\))/\1/")
+
+# Build identifier of the build OS.
+BUILD_OS_ID=x86_64-apple-darwin$(shell uname -r)
+
+# IOS ARMV7 build commands and flags
+IOS_ARMV7_SDK_ROOT=$(shell xcrun --sdk iphoneos --show-sdk-path)
+IOS_ARMV7_CC=$(shell xcrun -find -sdk iphoneos clang) -arch armv7 --sysroot=$(IOS_ARMV7_SDK_ROOT) -miphoneos-version-min=6.0
+IOS_ARMV7_LD=$(shell xcrun -find -sdk iphoneos ld) -arch armv7 --sysroot=$(IOS_ARMV7_SDK_ROOT) -miphoneos-version-min=6.0
+
+# IOS ARM64 build commands and flags
+IOS_ARM64_SDK_ROOT=$(shell xcrun --sdk iphoneos --show-sdk-path)
+IOS_ARM64_CC=$(shell xcrun -find -sdk iphoneos clang) -arch arm64 --sysroot=$(IOS_ARM64_SDK_ROOT) -miphoneos-version-min=6.0
+IOS_ARM64_LD=$(shell xcrun -find -sdk iphoneos ld) -arch arm64 --sysroot=$(IOS_ARM64_SDK_ROOT) -miphoneos-version-min=6.0
+
+# IOS_SIMULATOR_64 build commands and flags
+IOS_SIMULATOR_64_SDK_ROOT=$(shell xcrun --sdk iphonesimulator --show-sdk-path)
+IOS_SIMULATOR_64_CC=$(shell xcrun -find -sdk iphonesimulator clang) -arch x86_64 --sysroot=$(IOS_SIMULATOR_64_SDK_ROOT) -miphoneos-version-min=6.0
+IOS_SIMULATOR_64_LD=$(shell xcrun -find -sdk iphonesimulator ld) -arch x86_64 --sysroot=$(IOS_SIMULATOR_64_SDK_ROOT) -miphoneos-version-min=6.0
+
+# IOS_SIMULATOR (i386) build commands and flags
+# This is provided for completeness, but shouldn't be required any more
+IOS_SIMULATOR_SDK_ROOT=$(shell xcrun --sdk iphonesimulator --show-sdk-path)
+IOS_SIMULATOR_CC=$(shell xcrun -find -sdk iphonesimulator clang) -arch i386 --sysroot=$(IOS_SIMULATOR_SDK_ROOT) -miphoneos-version-min=6.0
+IOS_SIMULATOR_LD=$(shell xcrun -find -sdk iphonesimulator ld) -arch i386 --sysroot=$(IOS_SIMULATOR_SDK_ROOT) -miphoneos-version-min=6.0
+
+.PHONY: all clean
+
+all: ../Python.framework
+
+clean:
+ rm -rf build
+ rm -rf ../Python.framework
+ rm -rf XCode-sample/sample.xcodeproj/project.xcworkspace
+ rm -rf XCode-sample/sample.xcodeproj/xcuserdata
+ rm -rf XCode-sample/Python.framework
+
+build/host/bin/python$(PYTHON_VERSION):
+ # Configure and make the local build, providing compiled resources.
+ if test -f ../Makefile; then cd .. && make distclean; fi
+ cd .. && ./configure --prefix=$(IOS_DIR)/build/host --without-ensurepip
+ cd .. && make
+ cd .. && make install
+
+build/ios-simulator-x86_64.tar.gz: build/host/bin/python$(PYTHON_VERSION)
+ # Configure and build Simulator library
+ if test -f ../Makefile; then cd .. && make distclean; fi
+ cd .. && \
+ PATH=$(IOS_DIR)/build/host/bin:$(PATH) ./configure \
+ --host=x86_64-apple-ios --build=$(BUILD_OS_ID) \
+ CC="$(IOS_SIMULATOR_64_CC)" LD="$(IOS_SIMULATOR_64_LD)" \
+ --prefix=$(IOS_DIR)/build/ios-simulator-x86_64 \
+ --without-pymalloc --without-doc-strings --disable-ipv6 --without-ensurepip \
+ ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
+ cd .. && PATH=$(IOS_DIR)/build/host/bin:$(PATH) make
+ cd .. && PATH=$(IOS_DIR)/build/host/bin:$(PATH) make install
+ # Now pack away all the built artefacts so that the top level distclean doesn't touch them
+ cd build && tar zcvf ios-simulator-x86_64.tar.gz ios-simulator-x86_64 && rm -rf ios-simulator-x86_64
+
+build/ios-simulator-i386.tar.gz: build/host/bin/python$(PYTHON_VERSION)
+ # Configure and build Simulator library
+ if test -f ../Makefile; then cd .. && make distclean; fi
+ cd .. && \
+ PATH=$(IOS_DIR)/build/host/bin:$(PATH) ./configure \
+ --host=i386-apple-ios --build=$(BUILD_OS_ID) \
+ CC="$(IOS_SIMULATOR_CC)" LD="$(IOS_SIMULATOR_LD)" \
+ --prefix=$(IOS_DIR)/build/ios-simulator-i386 \
+ --without-pymalloc --without-doc-strings --disable-ipv6 --without-ensurepip \
+ ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
+ cd .. && PATH=$(IOS_DIR)/build/host/bin:$(PATH) make
+ cd .. && PATH=$(IOS_DIR)/build/host/bin:$(PATH) make install
+ # Now pack away all the built artefacts so that the top level distclean doesn't touch them
+ cd build && tar zcvf ios-simulator-i386.tar.gz ios-simulator-i386 && rm -rf ios-simulator-i386
+
+build/ios-armv7.tar.gz: build/host/bin/python$(PYTHON_VERSION)
+ # Configure and build ARMv7 library
+ if test -f ../Makefile; then cd .. && make distclean; fi
+ cd .. && \
+	PATH=$(IOS_DIR)/build/host/bin:$(PATH) ./configure \
+ --host=armv7-apple-ios --build=$(BUILD_OS_ID) \
+ CC="$(IOS_ARMV7_CC)" LD="$(IOS_ARMV7_LD)" \
+ --prefix=$(IOS_DIR)/build/ios-armv7 \
+ --without-pymalloc --without-doc-strings --disable-ipv6 --without-ensurepip \
+ ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
+ cd .. && PATH=$(IOS_DIR)/build/host/bin:$(PATH) make
+ cd .. && PATH=$(IOS_DIR)/build/host/bin:$(PATH) make install
+ # Now pack away all the built artefacts so that the top level distclean doesn't touch them
+ cd build && tar zcvf ios-armv7.tar.gz ios-armv7 && rm -rf ios-armv7
+
+build/ios-arm64.tar.gz: build/host/bin/python$(PYTHON_VERSION)
+ # Configure and build ARM64 library
+ if test -f ../Makefile; then cd .. && make distclean; fi
+ cd .. && \
+ PATH=$(IOS_DIR)/build/host/bin:$(PATH) ./configure \
+ --host=aarch64-apple-ios --build=$(BUILD_OS_ID) \
+ CC="$(IOS_ARM64_CC)" LD="$(IOS_ARM64_LD)" \
+ --prefix=$(IOS_DIR)/build/ios-arm64 \
+ --without-pymalloc --without-doc-strings --disable-ipv6 --without-ensurepip \
+ ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
+ cd .. && PATH=$(IOS_DIR)/build/host/bin:$(PATH) make
+ cd .. && PATH=$(IOS_DIR)/build/host/bin:$(PATH) make install
+ # Now pack away all the built artefacts so that the top level distclean doesn't touch them
+ cd build && tar zcvf ios-arm64.tar.gz ios-arm64 && rm -rf ios-arm64
+
+# FIXME - ARMv7 build is currently broken due to libffi problems.
+# ../Python.framework: build/ios-simulator-x86_64.tar.gz build/ios-armv7.tar.gz build/ios-arm64.tar.gz
+../Python.framework: build/ios-simulator-x86_64.tar.gz build/ios-arm64.tar.gz
+ # Unpack the archived install directories
+ cd build && tar zxvf ios-simulator-x86_64.tar.gz
+ # cd build && tar zxvf ios-armv7.tar.gz
+ cd build && tar zxvf ios-arm64.tar.gz
+
+ # Create the framework directory and set it as the current version
+ mkdir -p ../Python.framework/Versions/$(PYTHON_VERSION)/
+ cd ../Python.framework/Versions && ln -fs $(PYTHON_VERSION) Current
+
+	# Copy the headers. The headers are the same for every platform except pyconfig.h;
+	# the x86_64 simulator build is used as an arbitrary source.
+ cp -r build/ios-simulator-x86_64/include/python$(PYTHON_VERSION) ../Python.framework/Versions/$(PYTHON_VERSION)/Headers
+	# The only header that differs between platforms is pyconfig.h; copy each platform's version...
+ cp build/ios-simulator-x86_64/include/python$(PYTHON_VERSION)/pyconfig.h ../Python.framework/Versions/$(PYTHON_VERSION)/Headers/pyconfig-x86_64.h
+ cp build/ios-arm64/include/python$(PYTHON_VERSION)/pyconfig.h ../Python.framework/Versions/$(PYTHON_VERSION)/Headers/pyconfig-arm64.h
+ # cp build/ios-armv7/include/python$(PYTHON_VERSION)/pyconfig.h ../Python.framework/Versions/$(PYTHON_VERSION)/Headers/pyconfig-armv7.h
+ # ... and then copy in a master pyconfig.h to unify them all.
+ cp include/pyconfig.h ../Python.framework/Versions/$(PYTHON_VERSION)/Headers/pyconfig.h
+
+ # Link the current Headers to the top level
+ cd ../Python.framework && ln -fs Versions/Current/Headers
+
+	# Copy the standard library from the simulator build. Again, the
+	# pure Python standard library is the same on every platform; the
+	# simulator build is used as an arbitrary source.
+ mkdir -p ../Python.framework/Versions/$(PYTHON_VERSION)/Resources
+ cp -r build/ios-simulator-x86_64/lib ../Python.framework/Versions/$(PYTHON_VERSION)/Resources
+
+ # Remove the pieces of the resources directory that aren't needed:
+ # libpython.a isn't needed in the lib directory
+ rm -f ../Python.framework/Versions/$(PYTHON_VERSION)/Resources/lib/libpython$(PYTHON_VERSION).a
+ # pkgconfig isn't needed on the device
+ rm -rf ../Python.framework/Versions/$(PYTHON_VERSION)/Resources/lib/pkgconfig
+ # Remove all the modules we don't need, and compress the rest.
+ cd ../Python.framework/Versions/$(PYTHON_VERSION)/Resources/lib/python$(PYTHON_VERSION) && \
+ rm -rf *test* lib* bsddb curses ensurepip hotshot idlelib tkinter turtledemo wsgiref \
+		config-$(PYTHON_VERSION) ctypes/test distutils/tests site-packages sqlite3/test
+ cd ../Python.framework/Versions/$(PYTHON_VERSION)/Resources/lib/python$(PYTHON_VERSION) && \
+ zip -r ../python$(subst .,,$(PYTHON_VERSION)).zip *
+ cd ../Python.framework/Versions/$(PYTHON_VERSION)/Resources/lib/python$(PYTHON_VERSION) && rm -rf *
+
+ # Link the current Resources to the top level
+ cd ../Python.framework && ln -fs Versions/Current/Resources
+
+ # Create a fat binary for the libPython library
+ xcrun lipo -create -output \
+ ../Python.framework/Versions/$(PYTHON_VERSION)/Python \
+ build/ios-simulator-x86_64/lib/libpython$(PYTHON_VERSION).a \
+ build/ios-arm64/lib/libpython$(PYTHON_VERSION).a;
+ # build/python/ios-armv7/lib/libpython$(PYTHON_VERSION).a;
+
+ # Link the current Python library to the top level
+ cd ../Python.framework && ln -fs Versions/Current/Python
diff -Nru orig/iOS/README modified/iOS/README
--- orig/iOS/README 1970-01-01 08:00:00.000000000 +0800
+++ modified/iOS/README 2015-03-14 23:20:19.000000000 +0800
@@ -0,0 +1,131 @@
+====================
+Python on iOS README
+====================
+
+:Authors:
+ Russell Keith-Magee (2015)
+
+:Version: 3.4.2
+
+This document provides an overview of the eccentricities of building and
+using Python on iOS.
+
+Build instructions
+==================
+
+The iOS build must be run on a Mac with XCode installed. To build the iOS
+framework, unpack the Python sources, move into the iOS subdirectory, and
+run ``make``. There are no configuration options for this build process -
+it will use XCode utilities to identify the location of compilers,
+resource directories, and so on.
+
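+For example, assuming the source tarball was unpacked into ``Python-3.4.2``::
+
+    $ cd Python-3.4.2/iOS
+    $ make
+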
+The build process will configure and build Python 4 times, producing:
+
+ * A "host" version of Python
+ * A version of Python compiled for the x86-64 iOS Simulator
+ * A version of Python compiled for ARMv7 iOS devices
+ * A version of Python compiled for ARM64 iOS devices
+
+Build products will be "installed" into iOS/build. The built products will
+then be combined into a single "fat" ``Python.framework`` that can be added to
+an XCode project. The resulting framework will be located in the root
+directory of the Python source tree.
+
+A ``make clean`` target also exists to clean out all build products.
+
+iOS-specific details
+====================
+
+* ``import sys; sys.platform`` will report as "ios", regardless of whether you
+  are on a simulator or a real device.
+
+* ``import platform; platform.machine()`` will return the device identifier.
+  For example, an iPhone 5S will return "iPhone6,2".
+
+* The following modules are not currently supported:
+
+ - ``bsddb``
+ - ``bz2``
+ - ``curses``
+ - ``dbm``
+ - ``gdbm``
+ - ``hotshot``
+ - ``idlelib``
+ - ``lzma``
+ - ``nis``
+ - ``ossaudiodev``
+ - ``readline``
+ - ``spwd``
+ - ``sqlite3``
+ - ``ssl``
+ - ``tkinter``
+ - ``turtledemo``
+ - ``wsgiref``
+
+* Due to limitations in using dynamic loading on iOS, binary Python modules must be
+  statically linked into the executable. The framework package produced by the iOS
+ ``make install`` statically links all the supported standard library modules.
+ If you have a third-party Python binary module, you'll need to incorporate the
+ source files for that module into the sources for your own app.
+
+ If you want to add or remove a binary module from the set that is included in the
+ Python library, you can do so by providing module setup files for each platform.
+ There are three default module configuration files:
+
+ - ``Modules/Setup.ios-aarch64`` for ARM64 iOS builds
+ - ``Modules/Setup.ios-arm`` for ARMv7 iOS builds
+ - ``Modules/Setup.ios-x86_64`` for x86_64 iOS simulator builds
+
+ If you copy these files to a ``.local`` version (e.g.,
+ ``Modules/Setup.ios-aarch64.local``), the local version will override the
+  default. You can then modify the set of modules that will be included in the
+  iOS framework, and the flags passed to the compiler when compiling those
+  modules, as sketched below.
+
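+As an illustration, a hypothetical ``Modules/Setup.ios-aarch64.local`` entry
+that builds the ``_csv`` module into the framework as a static builtin would
+be a single line in the usual ``Modules/Setup`` format::
+
+    _csv _csv.c
+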
+Adding Python to an iOS project
+===============================
+
+The iOS subdirectory contains a sample XCode 6.1 project to demonstrate how
+Python can be added to an iOS project. After building the Python iOS framework,
+copy it into the ``iOS/XCode-sample`` directory. You should end up with a directory
+structure that looks like this::
+
+ XCode-sample/
+ Python.framework/ - Manually copied into the project
+ app/
+ sample/
+ __init__.py
+ main.py - The Python script to be executed
+ app_packages/ - A directory that will be added to the `PYTHONPATH` at runtime
+ sample
+ Images.xcassets
+ en.lproj
+ main.c - The main() definition for the iOS application
+ sample-Info.plist
+ sample-Prefix.pch
+ sample.xcodeproj - The XCode project file
+
+If you open this project file and run it, you should get output
+similar to the following::
+
+ 2015-03-14 22:15:19.595 sample[84454:22100187] PythonHome is: /Users/rkm/Library/Developer/CoreSimulator/Devices/19FE988F-E5C3-4A6C-8752-C12DE9BF079D/data/Containers/Bundle/Application/A949B323-FD20-4C76-B370-99AFF294E9D5/sample.app
+ 2015-03-14 22:15:19.597 sample[84454:22100187] Initializing Python runtime
+ 2015-03-14 22:15:19.758 sample[84454:22100187] Running /Users/rkm/Library/Developer/CoreSimulator/Devices/19FE988F-E5C3-4A6C-8752-C12DE9BF079D/data/Containers/Bundle/Application/A949B323-FD20-4C76-B370-99AFF294E9D5/sample.app/app/sample/main.py
+ Hello, World.
+ 2015-03-14 22:15:19.792 sample[84454:22100187] Leaving
+
+You can now modify the provided Python source code, import and use
+code from the Python standard library, and add third-party modules to
+``app_packages``.
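+
+For example, a modified ``app/sample/main.py`` that exercises the standard
+library might look like this::
+
+    import platform
+
+    if __name__ == '__main__':
+        print("Hello from %s." % platform.machine())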
+
+The sample app is a console-only app, so it isn't of much practical use by
+itself. Python can be embedded into any Objective-C project using the normal
+Python APIs for embedding; but if you want to write a full iOS app in
+Python, or you want to access iOS services from within embedded code, you'll
+need to bridge between the Objective-C environment and the Python
+environment. This bridging isn't something that Python does out of the box;
+you'll need to use a third-party library like `Rubicon ObjC`_, `Pyobjus`_
+or `PyObjC`_.
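+
+As an illustrative sketch only (this assumes Rubicon ObjC's documented
+``ObjCClass`` interface; consult each library's documentation for its
+actual API)::
+
+    from rubicon.objc import ObjCClass
+
+    # Look up an Objective-C class by name and invoke a class method on it.
+    NSURL = ObjCClass("NSURL")
+    base = NSURL.URLWithString("http://pybee.org/")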
+
+.. _Rubicon ObjC: http://pybee.org/rubicon
+.. _Pyobjus: http://pyobjus.readthedocs.org/
+.. _PyObjC: https://pythonhosted.org/pyobjc/
Binary files orig/iOS/XCode-sample/.DS_Store and modified/iOS/XCode-sample/.DS_Store differ
diff -Nru orig/iOS/XCode-sample/app/sample/main.py modified/iOS/XCode-sample/app/sample/main.py
--- orig/iOS/XCode-sample/app/sample/main.py 1970-01-01 08:00:00.000000000 +0800
+++ modified/iOS/XCode-sample/app/sample/main.py 2015-03-14 22:02:09.000000000 +0800
@@ -0,0 +1,3 @@
+
+if __name__ == '__main__':
+ print("Hello, World.")
diff -Nru orig/iOS/XCode-sample/app_packages/README modified/iOS/XCode-sample/app_packages/README
--- orig/iOS/XCode-sample/app_packages/README 1970-01-01 08:00:00.000000000 +0800
+++ modified/iOS/XCode-sample/app_packages/README 2015-03-14 21:57:52.000000000 +0800
@@ -0,0 +1 @@
+This directory exists so that 3rd party packages can be installed here.
\ No newline at end of file
diff -Nru orig/iOS/XCode-sample/sample/Images.xcassets/AppIcon.appiconset/Contents.json modified/iOS/XCode-sample/sample/Images.xcassets/AppIcon.appiconset/Contents.json
--- orig/iOS/XCode-sample/sample/Images.xcassets/AppIcon.appiconset/Contents.json 1970-01-01 08:00:00.000000000 +0800
+++ modified/iOS/XCode-sample/sample/Images.xcassets/AppIcon.appiconset/Contents.json 2015-03-14 21:57:52.000000000 +0800
@@ -0,0 +1,53 @@
+{
+ "images" : [
+ {
+ "idiom" : "iphone",
+ "size" : "29x29",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "iphone",
+ "size" : "40x40",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "iphone",
+ "size" : "60x60",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "29x29",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "29x29",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "40x40",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "40x40",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "76x76",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "76x76",
+ "scale" : "2x"
+ }
+ ],
+ "info" : {
+ "version" : 1,
+ "author" : "xcode"
+ }
+}
\ No newline at end of file
diff -Nru orig/iOS/XCode-sample/sample/Images.xcassets/LaunchImage.launchimage/Contents.json modified/iOS/XCode-sample/sample/Images.xcassets/LaunchImage.launchimage/Contents.json
--- orig/iOS/XCode-sample/sample/Images.xcassets/LaunchImage.launchimage/Contents.json 1970-01-01 08:00:00.000000000 +0800
+++ modified/iOS/XCode-sample/sample/Images.xcassets/LaunchImage.launchimage/Contents.json 2015-03-14 21:57:52.000000000 +0800
@@ -0,0 +1,51 @@
+{
+ "images" : [
+ {
+ "orientation" : "portrait",
+ "idiom" : "iphone",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "2x"
+ },
+ {
+ "orientation" : "portrait",
+ "idiom" : "iphone",
+ "subtype" : "retina4",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "2x"
+ },
+ {
+ "orientation" : "portrait",
+ "idiom" : "ipad",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "1x"
+ },
+ {
+ "orientation" : "landscape",
+ "idiom" : "ipad",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "1x"
+ },
+ {
+ "orientation" : "portrait",
+ "idiom" : "ipad",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "2x"
+ },
+ {
+ "orientation" : "landscape",
+ "idiom" : "ipad",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "2x"
+ }
+ ],
+ "info" : {
+ "version" : 1,
+ "author" : "xcode"
+ }
+}
\ No newline at end of file
diff -Nru orig/iOS/XCode-sample/sample/en.lproj/InfoPlist.strings modified/iOS/XCode-sample/sample/en.lproj/InfoPlist.strings
--- orig/iOS/XCode-sample/sample/en.lproj/InfoPlist.strings 1970-01-01 08:00:00.000000000 +0800
+++ modified/iOS/XCode-sample/sample/en.lproj/InfoPlist.strings 2015-03-14 21:57:52.000000000 +0800
@@ -0,0 +1 @@
+/* Localized versions of Info.plist keys */
diff -Nru orig/iOS/XCode-sample/sample/main.m modified/iOS/XCode-sample/sample/main.m
--- orig/iOS/XCode-sample/sample/main.m 1970-01-01 08:00:00.000000000 +0800
+++ modified/iOS/XCode-sample/sample/main.m 2015-03-14 22:34:08.000000000 +0800
@@ -0,0 +1,97 @@
+//
+// main.m
+// A main module for starting Python projects under iOS.
+//
+
+#import <Foundation/Foundation.h>
+#import <UIKit/UIKit.h>
+#include <Python.h>
+#include <dlfcn.h>
+
+int main(int argc, char *argv[]) {
+ int ret = 0;
+ unsigned int i;
+ wchar_t* python_home;
+ wchar_t** python_argv;
+ @autoreleasepool {
+
+ NSString * resourcePath = [[NSBundle mainBundle] resourcePath];
+
+        // Special environment to prefer .pyo files, and to skip writing
+        // bytecode for .py files, because the app bundle is not writable
+        // on the device.
+ putenv("PYTHONOPTIMIZE=2");
+ putenv("PYTHONDONTWRITEBYTECODE=1");
+ putenv("PYTHONNOUSERSITE=1");
+
+        // Add the bundled app code and third-party packages to PYTHONPATH.
+        NSString *python_path = [NSString stringWithFormat:@"PYTHONPATH=%@/app:%@/app_packages",
+                                 resourcePath, resourcePath];
+ putenv((char *)[python_path UTF8String]);
+ // putenv("PYTHONVERBOSE=1");
+
+ NSLog(@"PythonHome is: %s", [resourcePath UTF8String]);
+ python_home = _Py_char2wchar([resourcePath cStringUsingEncoding:NSUTF8StringEncoding], NULL);
+ Py_SetPythonHome(python_home);
+
+ NSLog(@"Initializing Python runtime");
+ Py_Initialize();
+
+ python_argv = PyMem_RawMalloc(sizeof(wchar_t*) * argc);
+ for (i = 0; i < argc; i++) {
+ python_argv[i] = _Py_char2wchar(argv[i], NULL);
+ }
+ PySys_SetArgv(argc, python_argv);
+
+        // If other modules use threads, the threading machinery must be
+        // initialized before they run.
+ PyEval_InitThreads();
+
+ // Search and start main.py
+ const char * prog = [
+ [[NSBundle mainBundle] pathForResource:@"app/sample/main"
+ ofType:@"py"] cStringUsingEncoding:NSUTF8StringEncoding];
+ NSLog(@"Running %s", prog);
+
+ @try {
+ FILE* fd = fopen(prog, "r");
+ if (fd == NULL) {
+ ret = 1;
+ NSLog(@"Unable to open main.py, abort.");
+ } else {
+ ret = PyRun_SimpleFileEx(fd, prog, 1);
+ if (ret != 0) {
+ NSLog(@"Application quit abnormally!");
+ } else {
+ // In a normal iOS application, the following line is what
+ // actually runs the application. It requires that the
+ // Objective-C runtime environment has a class named
+ // "PythonAppDelegate". This sample project doesn't define
+ // one, because Objective-C bridging isn't something
+ // Python does out of the box. You'll need to use
+ // a library like Rubicon-ObjC [1], Pyobjus [2] or
+ // PyObjC [3] if you want to run an *actual* iOS app.
+ // [1] http://pybee.org/rubicon
+ // [2] http://pyobjus.readthedocs.org/
+ // [3] https://pythonhosted.org/pyobjc/
+
+ // UIApplicationMain(argc, argv, nil, @"PythonAppDelegate");
+ }
+ }
+ }
+ @catch (NSException *exception) {
+ NSLog(@"Python runtime error: %@", [exception reason]);
+ }
+ @finally {
+ Py_Finalize();
+ }
+
+ if (python_argv) {
+ for (i = 0; i < argc; i++)
+ PyMem_RawFree(python_argv[i]);
+ PyMem_RawFree(python_argv);
+ }
+ NSLog(@"Leaving");
+ }
+
+ exit(ret);
+ return ret;
+}
\ No newline at end of file
diff -Nru orig/iOS/XCode-sample/sample/sample-Info.plist modified/iOS/XCode-sample/sample/sample-Info.plist
--- orig/iOS/XCode-sample/sample/sample-Info.plist 1970-01-01 08:00:00.000000000 +0800
+++ modified/iOS/XCode-sample/sample/sample-Info.plist 2015-03-14 21:57:52.000000000 +0800
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>CFBundleDevelopmentRegion</key>
+	<string>en</string>
+	<key>CFBundleDisplayName</key>
+	<string>${PRODUCT_NAME}</string>
+	<key>CFBundleExecutable</key>
+	<string>${EXECUTABLE_NAME}</string>
+	<key>CFBundleIdentifier</key>
+	<string>com.example.${PRODUCT_NAME:rfc1034identifier}</string>
+	<key>CFBundleInfoDictionaryVersion</key>
+	<string>6.0</string>
+	<key>CFBundleName</key>
+	<string>${PRODUCT_NAME}</string>
+	<key>CFBundlePackageType</key>
+	<string>APPL</string>
+	<key>CFBundleShortVersionString</key>
+	<string>1.0</string>
+	<key>CFBundleSignature</key>
+	<string>????</string>
+	<key>CFBundleVersion</key>
+	<string>1.0</string>
+	<key>LSRequiresIPhoneOS</key>
+	<true/>
+	<key>UIRequiredDeviceCapabilities</key>
+	<array>
+		<string>armv7</string>
+	</array>
+	<key>UISupportedInterfaceOrientations</key>
+	<array>
+		<string>UIInterfaceOrientationPortrait</string>
+		<string>UIInterfaceOrientationLandscapeLeft</string>
+		<string>UIInterfaceOrientationLandscapeRight</string>
+	</array>
+	<key>UISupportedInterfaceOrientations~ipad</key>
+	<array>
+		<string>UIInterfaceOrientationPortrait</string>
+		<string>UIInterfaceOrientationPortraitUpsideDown</string>
+		<string>UIInterfaceOrientationLandscapeLeft</string>
+		<string>UIInterfaceOrientationLandscapeRight</string>
+	</array>
+</dict>
+</plist>
\ No newline at end of file
diff -Nru orig/iOS/XCode-sample/sample/sample-Prefix.pch modified/iOS/XCode-sample/sample/sample-Prefix.pch
--- orig/iOS/XCode-sample/sample/sample-Prefix.pch 1970-01-01 08:00:00.000000000 +0800
+++ modified/iOS/XCode-sample/sample/sample-Prefix.pch 2015-03-14 21:57:52.000000000 +0800
@@ -0,0 +1,16 @@
+//
+// Prefix header
+//
+// The contents of this file are implicitly included at the beginning of every source file.
+//
+
+#import <Availability.h>
+
+#ifndef __IPHONE_3_0
+#warning "This project uses features only available in iOS SDK 3.0 and later."
+#endif
+
+#ifdef __OBJC__
+    #import <UIKit/UIKit.h>
+    #import <Foundation/Foundation.h>
+#endif
\ No newline at end of file
diff -Nru orig/iOS/XCode-sample/sample.xcodeproj/project.pbxproj modified/iOS/XCode-sample/sample.xcodeproj/project.pbxproj
--- orig/iOS/XCode-sample/sample.xcodeproj/project.pbxproj 1970-01-01 08:00:00.000000000 +0800
+++ modified/iOS/XCode-sample/sample.xcodeproj/project.pbxproj 2015-03-14 22:03:22.000000000 +0800
@@ -0,0 +1,373 @@
+// !$*UTF8*$!
+{
+ archiveVersion = 1;
+ classes = {
+ };
+ objectVersion = 46;
+ objects = {
+
+/* Begin PBXBuildFile section */
+ 605737FC1AB4772A00F05C6F /* lib in Resources */ = {isa = PBXBuildFile; fileRef = 60796F35191A76C400A9926B /* lib */; };
+ 60796EE619190F4100A9926B /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 60796EE519190F4100A9926B /* Foundation.framework */; };
+ 60796EE819190F4100A9926B /* CoreGraphics.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 60796EE719190F4100A9926B /* CoreGraphics.framework */; };
+ 60796EEA19190F4100A9926B /* UIKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 60796EE919190F4100A9926B /* UIKit.framework */; };
+ 60796EF019190F4100A9926B /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = 60796EEE19190F4100A9926B /* InfoPlist.strings */; };
+ 60796EF219190F4100A9926B /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 60796EF119190F4100A9926B /* main.m */; };
+ 60796EF819190F4100A9926B /* Images.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 60796EF719190F4100A9926B /* Images.xcassets */; };
+ 60796F1919190FBB00A9926B /* libz.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 60796F1819190FBB00A9926B /* libz.dylib */; };
+ 60796F201919174D00A9926B /* libsqlite3.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 60796F1F1919174D00A9926B /* libsqlite3.dylib */; };
+ 60796F2C1919C70800A9926B /* Python.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 60796F2B1919C70800A9926B /* Python.framework */; };
+ 60796F39191CDBBA00A9926B /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 60796F38191CDBBA00A9926B /* CoreFoundation.framework */; };
+ 60F0BABE191FC83F006EC268 /* app in Resources */ = {isa = PBXBuildFile; fileRef = 60F0BABD191FC83F006EC268 /* app */; };
+ 60F0BAC0191FC868006EC268 /* app_packages in Resources */ = {isa = PBXBuildFile; fileRef = 60F0BABF191FC868006EC268 /* app_packages */; };
+/* End PBXBuildFile section */
+
+/* Begin PBXFileReference section */
+ 60796EE219190F4100A9926B /* sample.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = sample.app; sourceTree = BUILT_PRODUCTS_DIR; };
+ 60796EE519190F4100A9926B /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = System/Library/Frameworks/Foundation.framework; sourceTree = SDKROOT; };
+ 60796EE719190F4100A9926B /* CoreGraphics.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreGraphics.framework; path = System/Library/Frameworks/CoreGraphics.framework; sourceTree = SDKROOT; };
+ 60796EE919190F4100A9926B /* UIKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = UIKit.framework; path = System/Library/Frameworks/UIKit.framework; sourceTree = SDKROOT; };
+		60796EED19190F4100A9926B /* sample-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "sample-Info.plist"; sourceTree = "<group>"; };
+		60796EEF19190F4100A9926B /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = "<group>"; };
+		60796EF119190F4100A9926B /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
+		60796EF319190F4100A9926B /* sample-Prefix.pch */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "sample-Prefix.pch"; sourceTree = "<group>"; };
+		60796EF719190F4100A9926B /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = "<group>"; };
+ 60796F1819190FBB00A9926B /* libz.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libz.dylib; path = usr/lib/libz.dylib; sourceTree = SDKROOT; };
+ 60796F1F1919174D00A9926B /* libsqlite3.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libsqlite3.dylib; path = usr/lib/libsqlite3.dylib; sourceTree = SDKROOT; };
+		60796F2B1919C70800A9926B /* Python.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; path = Python.framework; sourceTree = "<group>"; };
+		60796F35191A76C400A9926B /* lib */ = {isa = PBXFileReference; lastKnownFileType = folder; name = lib; path = Python.framework/Resources/lib; sourceTree = "<group>"; };
+ 60796F38191CDBBA00A9926B /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreFoundation.framework; path = System/Library/Frameworks/CoreFoundation.framework; sourceTree = SDKROOT; };
+ 60F0BABD191FC83F006EC268 /* app */ = {isa = PBXFileReference; lastKnownFileType = folder; path = app; sourceTree = SOURCE_ROOT; };
+ 60F0BABF191FC868006EC268 /* app_packages */ = {isa = PBXFileReference; lastKnownFileType = folder; path = app_packages; sourceTree = SOURCE_ROOT; };
+/* End PBXFileReference section */
+
+/* Begin PBXFrameworksBuildPhase section */
+ 60796EDF19190F4100A9926B /* Frameworks */ = {
+ isa = PBXFrameworksBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ 60796F39191CDBBA00A9926B /* CoreFoundation.framework in Frameworks */,
+ 60796F2C1919C70800A9926B /* Python.framework in Frameworks */,
+ 60796F201919174D00A9926B /* libsqlite3.dylib in Frameworks */,
+ 60796F1919190FBB00A9926B /* libz.dylib in Frameworks */,
+ 60796EE819190F4100A9926B /* CoreGraphics.framework in Frameworks */,
+ 60796EEA19190F4100A9926B /* UIKit.framework in Frameworks */,
+ 60796EE619190F4100A9926B /* Foundation.framework in Frameworks */,
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
+/* End PBXFrameworksBuildPhase section */
+
+/* Begin PBXGroup section */
+ 60796ED919190F4100A9926B = {
+ isa = PBXGroup;
+ children = (
+ 60796F1A19190FEB00A9926B /* Python */,
+ 60796EEB19190F4100A9926B /* sample */,
+ 60796EE419190F4100A9926B /* Frameworks */,
+ 60796EE319190F4100A9926B /* Products */,
+ );
+			sourceTree = "<group>";
+ };
+ 60796EE319190F4100A9926B /* Products */ = {
+ isa = PBXGroup;
+ children = (
+ 60796EE219190F4100A9926B /* sample.app */,
+ );
+ name = Products;
+			sourceTree = "<group>";
+ };
+ 60796EE419190F4100A9926B /* Frameworks */ = {
+ isa = PBXGroup;
+ children = (
+ 60796F1F1919174D00A9926B /* libsqlite3.dylib */,
+ 60796F1819190FBB00A9926B /* libz.dylib */,
+ 60796F2B1919C70800A9926B /* Python.framework */,
+ 60796EE519190F4100A9926B /* Foundation.framework */,
+ 60796F38191CDBBA00A9926B /* CoreFoundation.framework */,
+ 60796EE719190F4100A9926B /* CoreGraphics.framework */,
+ 60796EE919190F4100A9926B /* UIKit.framework */,
+ );
+ name = Frameworks;
+			sourceTree = "<group>";
+ };
+ 60796EEB19190F4100A9926B /* sample */ = {
+ isa = PBXGroup;
+ children = (
+ 60F0BABF191FC868006EC268 /* app_packages */,
+ 60F0BABD191FC83F006EC268 /* app */,
+ 60796EF719190F4100A9926B /* Images.xcassets */,
+ 60796EEC19190F4100A9926B /* Supporting Files */,
+ );
+ path = sample;
+			sourceTree = "<group>";
+ };
+ 60796EEC19190F4100A9926B /* Supporting Files */ = {
+ isa = PBXGroup;
+ children = (
+ 60796EED19190F4100A9926B /* sample-Info.plist */,
+ 60796EEE19190F4100A9926B /* InfoPlist.strings */,
+ 60796EF119190F4100A9926B /* main.m */,
+ 60796EF319190F4100A9926B /* sample-Prefix.pch */,
+ );
+ name = "Supporting Files";
+			sourceTree = "<group>";
+ };
+ 60796F1A19190FEB00A9926B /* Python */ = {
+ isa = PBXGroup;
+ children = (
+ 60796F35191A76C400A9926B /* lib */,
+ );
+ name = Python;
+			sourceTree = "<group>";
+ };
+/* End PBXGroup section */
+
+/* Begin PBXNativeTarget section */
+ 60796EE119190F4100A9926B /* sample */ = {
+ isa = PBXNativeTarget;
+ buildConfigurationList = 60796F0E19190F4100A9926B /* Build configuration list for PBXNativeTarget "sample" */;
+ buildPhases = (
+ 60796F2F1919C7E700A9926B /* Refresh Python source */,
+ 60796EDE19190F4100A9926B /* Sources */,
+ 60796EDF19190F4100A9926B /* Frameworks */,
+ 60796EE019190F4100A9926B /* Resources */,
+ );
+ buildRules = (
+ );
+ dependencies = (
+ );
+ name = sample;
+ productName = sample;
+ productReference = 60796EE219190F4100A9926B /* sample.app */;
+ productType = "com.apple.product-type.application";
+ };
+/* End PBXNativeTarget section */
+
+/* Begin PBXProject section */
+ 60796EDA19190F4100A9926B /* Project object */ = {
+ isa = PBXProject;
+ attributes = {
+ CLASSPREFIX = Py;
+ LastUpgradeCheck = 0510;
+ ORGANIZATIONNAME = "Example Corp";
+ };
+ buildConfigurationList = 60796EDD19190F4100A9926B /* Build configuration list for PBXProject "sample" */;
+ compatibilityVersion = "Xcode 3.2";
+ developmentRegion = English;
+ hasScannedForEncodings = 0;
+ knownRegions = (
+ en,
+ );
+ mainGroup = 60796ED919190F4100A9926B;
+ productRefGroup = 60796EE319190F4100A9926B /* Products */;
+ projectDirPath = "";
+ projectRoot = "";
+ targets = (
+ 60796EE119190F4100A9926B /* sample */,
+ );
+ };
+/* End PBXProject section */
+
+/* Begin PBXResourcesBuildPhase section */
+ 60796EE019190F4100A9926B /* Resources */ = {
+ isa = PBXResourcesBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ 60F0BABE191FC83F006EC268 /* app in Resources */,
+ 605737FC1AB4772A00F05C6F /* lib in Resources */,
+ 60796EF019190F4100A9926B /* InfoPlist.strings in Resources */,
+ 60796EF819190F4100A9926B /* Images.xcassets in Resources */,
+ 60F0BAC0191FC868006EC268 /* app_packages in Resources */,
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
+/* End PBXResourcesBuildPhase section */
+
+/* Begin PBXShellScriptBuildPhase section */
+ 60796F2F1919C7E700A9926B /* Refresh Python source */ = {
+ isa = PBXShellScriptBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ );
+ inputPaths = (
+ );
+ name = "Refresh Python source";
+ outputPaths = (
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ shellPath = /bin/sh;
+ shellScript = "rsync -pvtrL --exclude .hg --exclude .svn --exclude .git $PROJECT_DIR/Python.framework/Resources/lib $BUILT_PRODUCTS_DIR/$CONTENTS_FOLDER_PATH\nrsync -pvtrL --exclude .hg --exclude .svn --exclude .git $PROJECT_DIR/Python.framework/Resources/include $BUILT_PRODUCTS_DIR/$CONTENTS_FOLDER_PATH\nrsync -pvtrL --exclude .hg --exclude .svn --exclude .git $PROJECT_DIR/app $BUILT_PRODUCTS_DIR/$CONTENTS_FOLDER_PATH\nrsync -pvtrL --exclude .hg --exclude .svn --exclude .git $PROJECT_DIR/app_packages $BUILT_PRODUCTS_DIR/$CONTENTS_FOLDER_PATH\n";
+ };
+/* End PBXShellScriptBuildPhase section */
+
+/* Begin PBXSourcesBuildPhase section */
+ 60796EDE19190F4100A9926B /* Sources */ = {
+ isa = PBXSourcesBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ 60796EF219190F4100A9926B /* main.m in Sources */,
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
+/* End PBXSourcesBuildPhase section */
+
+/* Begin PBXVariantGroup section */
+ 60796EEE19190F4100A9926B /* InfoPlist.strings */ = {
+ isa = PBXVariantGroup;
+ children = (
+ 60796EEF19190F4100A9926B /* en */,
+ );
+ name = InfoPlist.strings;
+			sourceTree = "<group>";
+ };
+/* End PBXVariantGroup section */
+
+/* Begin XCBuildConfiguration section */
+ 60796F0C19190F4100A9926B /* Debug */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ ALWAYS_SEARCH_USER_PATHS = NO;
+ ARCHS = "$(ARCHS_STANDARD_32_BIT)";
+ CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
+ CLANG_CXX_LIBRARY = "libc++";
+ CLANG_ENABLE_MODULES = YES;
+ CLANG_ENABLE_OBJC_ARC = YES;
+ CLANG_WARN_BOOL_CONVERSION = YES;
+ CLANG_WARN_CONSTANT_CONVERSION = YES;
+ CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
+ CLANG_WARN_EMPTY_BODY = YES;
+ CLANG_WARN_ENUM_CONVERSION = YES;
+ CLANG_WARN_INT_CONVERSION = YES;
+ CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
+ CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
+ "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
+ COPY_PHASE_STRIP = NO;
+ GCC_C_LANGUAGE_STANDARD = gnu99;
+ GCC_DYNAMIC_NO_PIC = NO;
+ GCC_OPTIMIZATION_LEVEL = 0;
+ GCC_PREPROCESSOR_DEFINITIONS = (
+ "DEBUG=1",
+ "$(inherited)",
+ );
+ GCC_SYMBOLS_PRIVATE_EXTERN = NO;
+ GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
+ GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
+ GCC_WARN_UNDECLARED_SELECTOR = YES;
+ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
+ GCC_WARN_UNUSED_FUNCTION = YES;
+ GCC_WARN_UNUSED_VARIABLE = YES;
+ IPHONEOS_DEPLOYMENT_TARGET = 7.1;
+ ONLY_ACTIVE_ARCH = YES;
+ SDKROOT = iphoneos;
+ TARGETED_DEVICE_FAMILY = "1,2";
+ };
+ name = Debug;
+ };
+ 60796F0D19190F4100A9926B /* Release */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ ALWAYS_SEARCH_USER_PATHS = NO;
+ ARCHS = "$(ARCHS_STANDARD_32_BIT)";
+ CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
+ CLANG_CXX_LIBRARY = "libc++";
+ CLANG_ENABLE_MODULES = YES;
+ CLANG_ENABLE_OBJC_ARC = YES;
+ CLANG_WARN_BOOL_CONVERSION = YES;
+ CLANG_WARN_CONSTANT_CONVERSION = YES;
+ CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
+ CLANG_WARN_EMPTY_BODY = YES;
+ CLANG_WARN_ENUM_CONVERSION = YES;
+ CLANG_WARN_INT_CONVERSION = YES;
+ CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
+ CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
+ "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
+ COPY_PHASE_STRIP = YES;
+ ENABLE_NS_ASSERTIONS = NO;
+ GCC_C_LANGUAGE_STANDARD = gnu99;
+ GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
+ GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
+ GCC_WARN_UNDECLARED_SELECTOR = YES;
+ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
+ GCC_WARN_UNUSED_FUNCTION = YES;
+ GCC_WARN_UNUSED_VARIABLE = YES;
+ IPHONEOS_DEPLOYMENT_TARGET = 7.1;
+ SDKROOT = iphoneos;
+ TARGETED_DEVICE_FAMILY = "1,2";
+ VALIDATE_PRODUCT = YES;
+ };
+ name = Release;
+ };
+ 60796F0F19190F4100A9926B /* Debug */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ ARCHS = "$(ARCHS_STANDARD)";
+ ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
+ ASSETCATALOG_COMPILER_LAUNCHIMAGE_NAME = LaunchImage;
+ FRAMEWORK_SEARCH_PATHS = (
+ "$(inherited)",
+ "$(PROJECT_DIR)",
+ );
+ GCC_PRECOMPILE_PREFIX_HEADER = YES;
+ GCC_PREFIX_HEADER = "sample/sample-Prefix.pch";
+ HEADER_SEARCH_PATHS = (
+ "$(inherited)",
+ /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include,
+ "\"$(PROJECT_DIR)/Python.framework/Resources/include/python2.7\"",
+ );
+ INFOPLIST_FILE = "sample/sample-Info.plist";
+ PRODUCT_NAME = "$(TARGET_NAME)";
+ USER_HEADER_SEARCH_PATHS = include/python2.7;
+ WRAPPER_EXTENSION = app;
+ };
+ name = Debug;
+ };
+ 60796F1019190F4100A9926B /* Release */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ ARCHS = "$(ARCHS_STANDARD)";
+ ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
+ ASSETCATALOG_COMPILER_LAUNCHIMAGE_NAME = LaunchImage;
+ FRAMEWORK_SEARCH_PATHS = (
+ "$(inherited)",
+ "$(PROJECT_DIR)",
+ );
+ GCC_PRECOMPILE_PREFIX_HEADER = YES;
+ GCC_PREFIX_HEADER = "sample/sample-Prefix.pch";
+ HEADER_SEARCH_PATHS = (
+ "$(inherited)",
+ /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include,
+ "\"$(PROJECT_DIR)/Python.framework/Resources/include/python2.7\"",
+ );
+ INFOPLIST_FILE = "sample/sample-Info.plist";
+ PRODUCT_NAME = "$(TARGET_NAME)";
+ USER_HEADER_SEARCH_PATHS = include/python2.7;
+ WRAPPER_EXTENSION = app;
+ };
+ name = Release;
+ };
+/* End XCBuildConfiguration section */
+
+/* Begin XCConfigurationList section */
+ 60796EDD19190F4100A9926B /* Build configuration list for PBXProject "sample" */ = {
+ isa = XCConfigurationList;
+ buildConfigurations = (
+ 60796F0C19190F4100A9926B /* Debug */,
+ 60796F0D19190F4100A9926B /* Release */,
+ );
+ defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Release;
+ };
+ 60796F0E19190F4100A9926B /* Build configuration list for PBXNativeTarget "sample" */ = {
+ isa = XCConfigurationList;
+ buildConfigurations = (
+ 60796F0F19190F4100A9926B /* Debug */,
+ 60796F1019190F4100A9926B /* Release */,
+ );
+ defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Release;
+ };
+/* End XCConfigurationList section */
+ };
+ rootObject = 60796EDA19190F4100A9926B /* Project object */;
+}
diff -Nru orig/iOS/include/pyconfig.h modified/iOS/include/pyconfig.h
--- orig/iOS/include/pyconfig.h 1970-01-01 08:00:00.000000000 +0800
+++ modified/iOS/include/pyconfig.h 2015-03-14 21:42:14.000000000 +0800
@@ -0,0 +1,15 @@
+#ifdef __arm__
+#include "pyconfig-armv7.h"
+#endif
+
+#ifdef __arm64__
+#include "pyconfig-arm64.h"
+#endif
+
+#ifdef __i386__
+#include "pyconfig-i386.h"
+#endif
+
+#ifdef __x86_64__
+#include "pyconfig-x86_64.h"
+#endif
\ No newline at end of file
diff -Nru orig/setup.py modified/setup.py
--- orig/setup.py 2015-02-03 19:49:05.000000000 +0800
+++ modified/setup.py 2015-02-28 17:26:54.000000000 +0800
@@ -252,7 +252,11 @@
build_ext.build_extensions(self)
- longest = max([len(e.name) for e in self.extensions])
+ if missing:
+ longest = max([len(m) for m in missing])
+ else:
+ longest = 0
+
if self.failed:
longest = max(longest, max([len(name) for name in self.failed]))