Merge branch 'intrinsics_backport' of https://github.com/TazeTSchnitzel/php-src

* 'intrinsics_backport' of https://github.com/TazeTSchnitzel/php-src:
  Use checked add/sub intrinsics instead of asm for ++ and --
  Use checked arithmetic intrinsics instead of asm, when possible
commit afd11643c3
Author: Xinchen Hui
Date:   2016-08-11 11:34:56 +08:00

2 changed files with 88 additions and 5 deletions
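For context on the technique: Clang (and GCC 5 or later) expose __builtin_*_overflow intrinsics that perform the arithmetic and return whether it overflowed, which is what these commits use in place of the hand-written overflow-checking asm. A minimal standalone sketch of the pattern (plain long rather than zend_long; not engine code):

/* The builtin stores the wrapped sum in *result and returns true on overflow
 * (GCC >= 5 or Clang). */
#include <limits.h>
#include <stdio.h>

int main(void) {
    long result;
    if (__builtin_saddl_overflow(LONG_MAX, 1, &result)) {
        /* Overflow: the engine falls back to a double in this case. */
        printf("overflow; double fallback = %.1f\n", (double)LONG_MAX + 1.0);
    } else {
        printf("sum = %ld\n", result);
    }
    return 0;
}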

Zend/zend_multiply.h

@@ -19,10 +19,32 @@
 /* $Id$ */
+#include "zend_portability.h"
 #ifndef ZEND_MULTIPLY_H
 #define ZEND_MULTIPLY_H
-#if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
+#if __has_builtin(__builtin_smull_overflow) && SIZEOF_LONG == SIZEOF_ZEND_LONG
+#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
+    long __tmpvar; \
+    if (((usedval) = __builtin_smull_overflow((a), (b), &__tmpvar))) { \
+        (dval) = (double) (a) * (double) (b); \
+    } \
+    else (lval) = __tmpvar; \
+} while (0)
+#elif __has_builtin(__builtin_smulll_overflow) && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG
+#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
+    long long __tmpvar; \
+    if (((usedval) = __builtin_smulll_overflow((a), (b), &__tmpvar))) { \
+        (dval) = (double) (a) * (double) (b); \
+    } \
+    else (lval) = __tmpvar; \
+} while (0)
+#elif (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
 #define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
     zend_long __tmpvar; \
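The macro's contract, for readers outside the engine: on overflow it sets usedval true and the caller reads dval; otherwise lval holds the product. A standalone sketch of the same shape with plain C types (illustrative names, not engine code):

#include <stdio.h>

/* Mirrors the __builtin_smull_overflow arm of ZEND_SIGNED_MULTIPLY_LONG. */
static void multiply_demo(long a, long b) {
    long tmp;
    if (__builtin_smull_overflow(a, b, &tmp)) {
        /* usedval path: product does not fit a long, report it as a double */
        printf("%ld * %ld overflows -> %g\n", a, b, (double)a * (double)b);
    } else {
        /* lval path */
        printf("%ld * %ld = %ld\n", a, b, tmp);
    }
}

int main(void) {
    multiply_demo(3, 7);        /* fits */
    multiply_demo(1L << 62, 4); /* overflows on LP64 (64-bit long assumed) */
    return 0;
}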

Zend/zend_operators.h

@@ -35,6 +35,7 @@
 #include <ieeefp.h>
 #endif
+#include "zend_portability.h"
 #include "zend_strtod.h"
 #include "zend_multiply.h"
@@ -444,7 +445,23 @@ ZEND_API void zend_update_current_locale(void);
 static zend_always_inline void fast_long_increment_function(zval *op1)
 {
-#if defined(__GNUC__) && defined(__i386__)
+#if __has_builtin(__builtin_saddl_overflow) && SIZEOF_LONG == SIZEOF_ZEND_LONG
+    long lresult;
+    if (UNEXPECTED(__builtin_saddl_overflow(Z_LVAL_P(op1), 1, &lresult))) {
+        /* switch to double */
+        ZVAL_DOUBLE(op1, (double)ZEND_LONG_MAX + 1.0);
+    } else {
+        Z_LVAL_P(op1) = lresult;
+    }
+#elif __has_builtin(__builtin_saddll_overflow) && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG
+    long long llresult;
+    if (UNEXPECTED(__builtin_saddll_overflow(Z_LVAL_P(op1), 1, &llresult))) {
+        /* switch to double */
+        ZVAL_DOUBLE(op1, (double)ZEND_LONG_MAX + 1.0);
+    } else {
+        Z_LVAL_P(op1) = llresult;
+    }
+#elif defined(__GNUC__) && defined(__i386__)
     __asm__(
         "incl (%0)\n\t"
         "jno 0f\n\t"
@@ -482,7 +499,23 @@ static zend_always_inline void fast_long_increment_function(zval *op1)
 static zend_always_inline void fast_long_decrement_function(zval *op1)
 {
-#if defined(__GNUC__) && defined(__i386__)
+#if __has_builtin(__builtin_ssubl_overflow) && SIZEOF_LONG == SIZEOF_ZEND_LONG
+    long lresult;
+    if (UNEXPECTED(__builtin_ssubl_overflow(Z_LVAL_P(op1), 1, &lresult))) {
+        /* switch to double */
+        ZVAL_DOUBLE(op1, (double)ZEND_LONG_MIN - 1.0);
+    } else {
+        Z_LVAL_P(op1) = lresult;
+    }
+#elif __has_builtin(__builtin_ssubll_overflow) && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG
+    long long llresult;
+    if (UNEXPECTED(__builtin_ssubll_overflow(Z_LVAL_P(op1), 1, &llresult))) {
+        /* switch to double */
+        ZVAL_DOUBLE(op1, (double)ZEND_LONG_MIN - 1.0);
+    } else {
+        Z_LVAL_P(op1) = llresult;
+    }
+#elif defined(__GNUC__) && defined(__i386__)
     __asm__(
         "decl (%0)\n\t"
         "jno 0f\n\t"
@@ -520,7 +553,21 @@ static zend_always_inline void fast_long_decrement_function(zval *op1)
 static zend_always_inline void fast_long_add_function(zval *result, zval *op1, zval *op2)
 {
-#if defined(__GNUC__) && defined(__i386__) && !(4 == __GNUC__ && 8 == __GNUC_MINOR__)
+#if __has_builtin(__builtin_saddl_overflow) && SIZEOF_LONG == SIZEOF_ZEND_LONG
+    long lresult;
+    if (UNEXPECTED(__builtin_saddl_overflow(Z_LVAL_P(op1), Z_LVAL_P(op2), &lresult))) {
+        ZVAL_DOUBLE(result, (double) Z_LVAL_P(op1) + (double) Z_LVAL_P(op2));
+    } else {
+        ZVAL_LONG(result, lresult);
+    }
+#elif __has_builtin(__builtin_saddll_overflow) && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG
+    long long llresult;
+    if (UNEXPECTED(__builtin_saddll_overflow(Z_LVAL_P(op1), Z_LVAL_P(op2), &llresult))) {
+        ZVAL_DOUBLE(result, (double) Z_LVAL_P(op1) + (double) Z_LVAL_P(op2));
+    } else {
+        ZVAL_LONG(result, llresult);
+    }
+#elif defined(__GNUC__) && defined(__i386__) && !(4 == __GNUC__ && 8 == __GNUC_MINOR__)
     __asm__(
         "movl (%1), %%eax\n\t"
         "addl (%2), %%eax\n\t"
@@ -606,7 +653,21 @@ static zend_always_inline int fast_add_function(zval *result, zval *op1, zval *op2)
 static zend_always_inline void fast_long_sub_function(zval *result, zval *op1, zval *op2)
 {
-#if defined(__GNUC__) && defined(__i386__) && !(4 == __GNUC__ && 8 == __GNUC_MINOR__)
+#if __has_builtin(__builtin_ssubl_overflow) && SIZEOF_LONG == SIZEOF_ZEND_LONG
+    long lresult;
+    if (UNEXPECTED(__builtin_ssubl_overflow(Z_LVAL_P(op1), Z_LVAL_P(op2), &lresult))) {
+        ZVAL_DOUBLE(result, (double) Z_LVAL_P(op1) - (double) Z_LVAL_P(op2));
+    } else {
+        ZVAL_LONG(result, lresult);
+    }
+#elif __has_builtin(__builtin_ssubll_overflow) && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG
+    long long llresult;
+    if (UNEXPECTED(__builtin_ssubll_overflow(Z_LVAL_P(op1), Z_LVAL_P(op2), &llresult))) {
+        ZVAL_DOUBLE(result, (double) Z_LVAL_P(op1) - (double) Z_LVAL_P(op2));
+    } else {
+        ZVAL_LONG(result, llresult);
+    }
+#elif defined(__GNUC__) && defined(__i386__) && !(4 == __GNUC__ && 8 == __GNUC_MINOR__)
     __asm__(
         "movl (%1), %%eax\n\t"
         "subl (%2), %%eax\n\t"