When I run the following code in VC++ 2013 (32-bit, without optimization):

    #include <cmath>
    #include <iostream>
    #include <limits>

    double mulpow10(double const value, int const pow10)
    {
        static double const table[] = {
            1E+000, 1E+001, 1E+002, 1E+003, 1E+004,
            1E+005, 1E+006, 1E+007, 1E+008, 1E+009,
            1E+010, 1E+011, 1E+012, 1E+013, 1E+014,
            1E+015, 1E+016, 1E+017, 1E+018, 1E+019,
        };
        return pow10 < 0 ? value / table[-pow10] : value * table[+pow10];
    }

    int main(void)
    {
        double d = 9710908999.008999;
        int j_max = std::numeric_limits<double>::max_digits10;
        while (j_max > 0 &&
               (static_cast<double>(
                    static_cast<unsigned long long>(mulpow10(d, j_max)))
                != mulpow10(d, j_max)))
        {
            --j_max;
        }
        double x = std::floor(d * 1.0E9);
        unsigned long long y1 = x;
        unsigned long long y2 = std::floor(d * 1.0E9);
        std::cout << "x == " << x << std::endl
                  << "y1 == " << y1 << std::endl
                  << "y2 == " << y2 << std::endl;
    }
I get
    x == 9.7109089990089994e+018
    y1 == 9710908999008999424
    y2 == 9223372036854775808
in the debugger.
I'm baffled. Can someone explain to me how on earth y1 and y2 end up with different values?
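Stripped of the digit-counting loop, the comparison boils down to the following two conversion paths (a reduced sketch of the code above; the explicit casts and comments are mine):

    #include <cmath>
    #include <iostream>

    int main()
    {
        double d = 9710908999.008999;

        // d * 1e9 is roughly 9.71e18: larger than LLONG_MAX (~9.22e18),
        // but well within the range of unsigned long long.

        // Path 1: store the scaled value in a named double first, then convert.
        double x = std::floor(d * 1.0E9);
        unsigned long long y1 = static_cast<unsigned long long>(x);

        // Path 2: convert the result of the expression directly.
        unsigned long long y2 = static_cast<unsigned long long>(std::floor(d * 1.0E9));

        std::cout << y1 << "\n" << y2 << "\n";  // prints different values in the scenario above
    }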
Update:
This only happens with /arch:SSE2 or /arch:AVX, not with /arch:IA32 or /arch:SSE.
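For anyone trying to reproduce the difference, the build presumably comes down to command lines like the ones below (only the /arch switch is taken from the note above; the file name and the remaining options are assumptions):

    rem Assumed invocations; only the /arch switch comes from the note above.
    rem With /arch:IA32 (or /arch:SSE), y1 == y2:
    cl /EHsc /arch:IA32 test.cpp

    rem With /arch:SSE2 (or /arch:AVX), y1 != y2:
    cl /EHsc /arch:SSE2 test.cpp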
c++ double floating-point visual-c++ unsigned-long-long-int
Mehrdad