Bo Persson wrote: "Trust your compiler!"

OK — taking that advice, I have rewritten Score as this:
Code:
/// Score packs two evaluation terms into a single integer: the low
/// 16 bits carry the endgame value and the upper bits carry the
/// midgame value.
struct Score {
  int v; // packed representation: (mg << 16) + eg

  Score() {}                 // deliberately uninitialized, like a plain int
  Score(int val) : v(val) {} // wrap an already-packed integer
};
/// Extract the endgame term: keep only the low 16 bits and reinterpret
/// them as a signed 16-bit quantity, so the sign extends into the Value.
inline Value eg_value(Score s) {
  const int lo = s.v & 0xffff;
  return Value(int16_t(lo));
}
/// Extract the midgame term. Adding 32768 first compensates for the
/// borrow that a negative endgame term leaves in the upper half.
/// The shift is performed on an unsigned value because right-shifting
/// a negative signed integer is implementation-defined (before C++20);
/// the int16_t cast then sign-extends the 16-bit midgame result.
inline Value mg_value(Score s) { return Value(int16_t(uint32_t(s.v + 32768) >> 16)); }
/// Pack a midgame and an endgame value into one Score. The shift is done
/// on an unsigned operand because left-shifting a negative signed integer
/// is undefined behaviour before C++20; converting back to int yields the
/// intended two's-complement bit pattern on all mainstream targets.
inline Score make_score(int mg, int eg) { return Score(int(unsigned(mg) << 16) + eg); }
/// Two scores differ exactly when their packed representations differ.
inline bool operator!=(Score s1, Score s2) {
  return !(s1.v == s2.v);
}
/// Negating the packed integer negates both 16-bit terms at once.
inline Score operator-(Score s) {
  const int negated = -s.v;
  return Score(negated);
}
/// Add two scores: a single integer addition updates both packed terms.
inline Score operator+(Score s1, Score s2) {
  const int sum = s1.v + s2.v;
  return Score(sum);
}
/// Subtract two scores: a single integer subtraction updates both packed terms.
inline Score operator-(Score s1, Score s2) {
  const int diff = s1.v - s2.v;
  return Score(diff);
}
inline void operator+=(Score& s1, Score s2) { s1.v += s2.v; }
inline void operator-=(Score& s1, Score s2) { s1.v -= s2.v; }
/// Scale both packed terms by an integer factor with one multiplication.
inline Score operator*(int i, Score s) {
  const int scaled = s.v * i;
  return Score(scaled);
}
/// Integer division truncates toward zero, so it cannot be applied to the
/// packed integer as a whole: each 16-bit term is divided separately and
/// the results are re-packed.
inline Score operator/(Score s, int i) {
  const Value mg = mg_value(s);
  const Value eg = eg_value(s);
  return make_score(mg / i, eg / i);
}
// Multiplying two scores is almost certainly an overflow bug, so the
// operator is explicitly deleted: any attempted use fails at compile time
// with a clear diagnostic. (The old "declared but never defined" trick is
// ill-formed for an inline function that is odr-used, and only fails at
// link time at best.) Users must convert to integers explicitly instead.
Score operator*(Score s1, Score s2) = delete;
Code:
// NOTE(review): quoted counter-example — an enum whose enumerators span the
// full int range, forcing the underlying type to hold at least INT_MIN..INT_MAX.
enum Score { a = INT_MIN, b = INT_MAX };

I haven't verified that the generated code is optimized.