In today's episode of deriving point values for chess pieces from endgame tablebases, we're gonna try to see how robust our numbers from before were. So, we'll try computing a few variants.
Variant 1: not starting with obviously promoted pieces (i.e., no triple knights, bishops, or rooks, and no double queens on the same side).
| # pieces | P | N | B | R | Q |
|---|---|---|---|---|---|
| 1 | 2.29 | 0.00 | 0.00 | 5.00 | 4.83 |
| 2 | 3.02 | 1.47 | 2.41 | 5.00 | 9.39 |
| 3 | 2.22 | 1.93 | 2.48 | 5.00 | 9.20 |
| 4 | 1.96 | 2.20 | 2.67 | 5.00 | 9.22 |
| 5 | 1.76 | 2.30 | 2.79 | 5.00 | 9.26 |
Not much of interest to see here — it generally matches the previous numbers, with the only notable difference being that the queen trends up instead of down starting at 3 pieces.
Variant 2: a different loss function (sum of squares on un-sigmoided values)
| # pieces | P | N | B | R | Q |
|---|---|---|---|---|---|
| 1 | 2.29 | 0.00 | 0.00 | 5.00 | 4.83 |
| 2 | 2.95 | 1.36 | 1.74 | 5.00 | 5.54 |
| 3 | 3.30 | 2.25 | 2.52 | 5.00 | 6.85 |
| 4 | 2.62 | 2.34 | 2.36 | 5.00 | 6.92 |
| 5 | 2.26 | 2.30 | 2.47 | 5.00 | 7.18 |
Oh boy, is this interesting? Seems like with this loss function the queen is worth waaaay less. But still trending upward. Pawns are also worth way more. Not sure what to think about that.
Variant 3: another different loss function (logarithm proper scoring rule)
| # pieces | P | N | B | R | Q |
|---|---|---|---|---|---|
| 1 | 2.29 | 0.00 | 0.00 | 5.00 | 4.83 |
| 2 | 3.23 | 1.50 | 2.30 | 5.00 | 8.72 |
| 3 | 2.31 | 2.00 | 2.39 | 5.00 | 8.59 |
| 4 | 2.02 | 2.22 | 2.57 | 5.00 | 8.65 |
| 5 | 1.82 | 2.35 | 2.72 | 5.00 | 8.67 |
Very interesting — this one is much closer to the original values I got, just with the queen slightly lower in value.
Code below. Download stats.json from the lichess tablebase to run the code.
import fs from "node:fs";
// Synchronously load the per-material tablebase statistics at module load.
// Assumed shape: { [material: string]: { histogram: { white: { wdl }, black: { wdl } } } }
// — TODO confirm against the lichess stats.json format.
const stats = JSON.parse(fs.readFileSync("stats.json", "utf-8"));
/** One row per material configuration in the tablebase. */
type Data = {
    /** Material key, e.g. "KQvKR" (first side's pieces, "v", second side's). */
    material: string;
    /** Expected score in [-1, 1] for the first side (1 = win, -1 = loss). */
    gamePoints: number;
}[];
/**
 * Turn the tablebase statistics into (material, expected score) rows for
 * positions with exactly `nonKingPieceCount` non-king pieces, optionally
 * excluding material keys that contain any of `tooManyPieces` substrings.
 */
const getData = (options?: {
    /**
     * How much a cursed win counts as (cursed win = would be a win if not for the 50-move rule)
     * Any value between 0 and 1 makes sense.
     */
    curseFactor?: number;
    nonKingPieceCount?: number;
    tooManyPieces?: string[];
}): Data => {
    const {
        curseFactor = 0.25,
        nonKingPieceCount = 5,
        tooManyPieces = [],
    } = options ?? {};
    // "KvK" plus one letter per non-king piece fixes the key length.
    const wantedLength = "KvK".length + nonKingPieceCount;
    const rows: Data = [];
    for (const [material, e] of Object.entries(stats)) {
        if (material.length !== wantedLength) {
            continue;
        }
        if (tooManyPieces.some((substr) => material.includes(substr))) {
            continue;
        }
        const white = (e as any).histogram.white.wdl;
        const black = (e as any).histogram.black.wdl;
        // wdl buckets appear keyed by outcome class: 2 win, 1 cursed win,
        // 0 draw, -1 blessed loss, -2 loss (sign flipped for black) —
        // TODO confirm against the stats.json schema.
        const wins = white[2] + black[-2];
        const cursedWins = white[1] + black[-1];
        const draws = white[0] + black[0];
        const blessedLosses = white[-1] + black[1];
        const losses = white[-2] + black[2];
        const total = wins + cursedWins + draws + blessedLosses + losses;
        // Cursed wins / blessed losses count at a discounted rate because
        // the 50-move rule may void them.
        const gamePoints =
            (wins +
                curseFactor * cursedWins -
                curseFactor * blessedLosses -
                losses) /
            total;
        rows.push({ material, gamePoints });
    }
    return rows;
};
/** The five non-king piece letters used in material keys. */
type Piece = "P" | "N" | "B" | "R" | "Q";
/** A candidate assignment of point values to pieces. */
type Points = Record<Piece, number>;
/** Loss between an observed game score and a predicted one. */
type LossFunc = (goal: number, prediction: number) => number;
// tanh squashes the material-difference score into (-1, 1), matching the
// range of gamePoints.
const sigmoid = (x: number): number => Math.tanh(x);
// Left-fold addition over an array; 0 for the empty array.
const sum = (arr: number[]): number => {
    let total = 0;
    for (const value of arr) {
        total += value;
    }
    return total;
};
/**
 * Predicted game score in (-1, 1) for the first side, from the point
 * difference of the two sides' material. Letters without an assigned
 * value (i.e. kings) count as 0.
 */
const predictGame = (points: Points, material: [string, string]) => {
    const score = (mat: string) =>
        sum([...mat].map((p) => (p in points ? points[p as Piece] : 0)));
    return sigmoid(score(material[0]) - score(material[1]));
};
/**
 * Loss of the trivial model that always predicts 0 (a draw); used to
 * normalize losses across data sets of different sizes.
 */
const getBaselineLoss = (data: Data, lossFunc: LossFunc) =>
    data.reduce((acc, { gamePoints }) => acc + lossFunc(gamePoints, 0), 0);
// Cache of baseline losses: the grid search calls computeLoss hundreds of
// times with the same (data, lossFunc) pair, and the baseline depends on
// nothing else — previously it was recomputed on every call.
const baselineLossCache = new WeakMap<Data, Map<LossFunc, number>>();
/**
 * Total prediction loss over `data`, normalized by the always-predict-draw
 * baseline. Values below 1 mean the point values beat the baseline.
 */
const computeLoss = (data: Data, points: Points, lossFunc: LossFunc) => {
    let loss = 0;
    for (const { material, gamePoints } of data) {
        const prediction = predictGame(
            points,
            material.split("v") as [string, string],
        );
        loss += lossFunc(gamePoints, prediction);
    }
    let perFunc = baselineLossCache.get(data);
    if (!perFunc) {
        perFunc = new Map();
        baselineLossCache.set(data, perFunc);
    }
    let baseline = perFunc.get(lossFunc);
    if (baseline === undefined) {
        baseline = getBaselineLoss(data, lossFunc);
        perFunc.set(lossFunc, baseline);
    }
    return loss / baseline;
};
/**
 * Exhaustive grid search over piece values: every combination of the
 * per-piece candidate values in [min, max] (inclusive, stepped by `step`)
 * is scored with computeLoss, and the lowest-loss combination wins.
 *
 * Note: candidates are generated by repeated floating-point addition, so
 * with non-representable steps (e.g. 0.1) the last candidate may fall just
 * short of `max`; the search only relies on the neighborhood being covered.
 *
 * @throws if no candidate produces a loss below Infinity (e.g. all NaN) —
 *   previously this case silently returned null via `best as Points`.
 */
const optimizeInRange = (
    data: Data,
    constraints: Record<Piece, { min: number; max: number; step: number }>,
    lossFunc: LossFunc,
): Points => {
    // Expand one range into its candidate list, using the same FP
    // accumulation order as `for (v = min; v <= max; v += step)`.
    const expand = (range: { min: number; max: number; step: number }) => {
        const values: number[] = [];
        for (let v = range.min; v <= range.max; v += range.step) {
            values.push(v);
        }
        return values;
    };
    const pawns = expand(constraints.P);
    const knights = expand(constraints.N);
    const bishops = expand(constraints.B);
    const rooks = expand(constraints.R);
    const queens = expand(constraints.Q);
    let bestLoss = Infinity;
    let best: Points | null = null;
    for (const p of pawns) {
        for (const n of knights) {
            for (const b of bishops) {
                for (const r of rooks) {
                    for (const q of queens) {
                        const pointValues = { P: p, N: n, B: b, R: r, Q: q };
                        const loss = computeLoss(data, pointValues, lossFunc);
                        if (loss < bestLoss) {
                            best = pointValues;
                            bestLoss = loss;
                        }
                    }
                }
            }
        }
    }
    if (best === null) {
        throw new Error("optimizeInRange: no candidate produced a finite loss");
    }
    return best;
};
/**
 * One local-search step: grid-search the ±epsilon cube around `points`
 * (three candidates per piece: value - epsilon, value, value + epsilon).
 */
const nudge = (
    data: Data,
    points: Points,
    epsilon: number,
    lossFunc: LossFunc,
) => {
    // Build the per-piece search ranges explicitly instead of going
    // through Object.fromEntries plus an `as any` escape hatch.
    const around = (value: number) => ({
        min: value - epsilon,
        max: value + epsilon,
        step: epsilon,
    });
    return optimizeInRange(
        data,
        {
            P: around(points.P),
            N: around(points.N),
            B: around(points.B),
            R: around(points.R),
            Q: around(points.Q),
        },
        lossFunc,
    );
};
/**
 * Repeatedly nudge `points` until the loss stops changing at this epsilon
 * resolution (a fixed point of `nudge`), up to a generous iteration cap.
 */
const optimizeForEpsilon = (
    data: Data,
    initialPoints: Points,
    epsilon: number,
    lossFunc: LossFunc,
) => {
    const maxIterations = 1000;
    let points = { ...initialPoints };
    let prevLoss = Infinity; // no real loss compares equal to this sentinel
    let converged = false;
    for (let i = 0; i < maxIterations; i += 1) {
        points = nudge(data, points, epsilon, lossFunc);
        const loss = computeLoss(data, points, lossFunc);
        if (loss === prevLoss) {
            converged = true;
            break;
        }
        prevLoss = loss;
    }
    if (!converged) {
        // Previously this warning fired from inside the loop via an
        // `i > 998` check, even while the final iteration was in flight.
        console.log("TOOK TOO LONG");
    }
    return points;
};
/**
 * Full optimization: start from `initialValues` and refine with a
 * coarse-to-fine epsilon schedule, each stage run to a fixed point.
 */
const getPointValues = (
    data: Data,
    lossFunc: LossFunc,
    initialValues = {
        P: 0,
        N: 0,
        B: 0,
        R: 0,
        Q: 0,
    },
) => {
    let pointValues = initialValues;
    for (const epsilon of [1, 0.1, 0.01, 0.001]) {
        pointValues = optimizeForEpsilon(data, pointValues, epsilon, lossFunc);
    }
    return pointValues;
};
/**
 * Rescale a set of points so that the rook is worth exactly 5 (the queen
 * then usually lands around 9).
 */
const normalize = (points: Points) => {
    const factor = 5 / points.R;
    const scale = (value: number) => value * factor;
    return {
        P: scale(points.P),
        N: scale(points.N),
        B: scale(points.B),
        R: scale(points.R),
        Q: scale(points.Q),
    };
};
/**
 * Render one markdown table row of normalized point values, matching the
 * `| # pieces | P | N | B | R | Q |` header used in the write-up.
 *
 * Fix: `i` (the non-king piece count) was accepted but never used, so the
 * printed rows were missing the first column shown in the post's tables.
 *
 * @param i - number of non-king pieces for this row (first column)
 * @param points - normalized point values, printed to two decimal places
 */
const formatTable = (i: number, points: Points) => {
    return `| ${i} | ${points.P.toFixed(2)} | ${points.N.toFixed(2)} | ${points.B.toFixed(2)} | ${points.R.toFixed(2)} | ${points.Q.toFixed(2)} |`;
};
// Compute point values for pieces based on number of pieces on the board.
console.log("Variant 1: not starting with obviously promoted pieces");
for (let i = 1; i <= 5; i += 1) {
    // Squared error on the (sigmoided) game scores.
    const lossFunc: LossFunc = (a, b) => Math.pow(a - b, 2);
    // Exclude keys with a triple of N/B/R or a queen pair on one side —
    // substring match works because each side's pieces are adjacent in the key.
    const data = getData({
        nonKingPieceCount: i,
        tooManyPieces: ["BBB", "NNN", "RRR", "QQ"],
    });
    const pointValues = getPointValues(data, lossFunc);
    console.log(`${i} non-king pieces`);
    console.log(formatTable(i, normalize(pointValues)));
    console.log("loss", computeLoss(data, pointValues, lossFunc));
}
console.log("Variant 2: different loss function");
for (let i = 1; i <= 5; i += 1) {
    // Squared error in pre-sigmoid space: atanh maps scores back onto the
    // unbounded material-difference scale.
    // NOTE(review): Math.atanh(+/-1) is +/-Infinity, so a material
    // configuration whose gamePoints is exactly +/-1 would make every
    // candidate's loss infinite — presumably none exists in the filtered
    // data; verify against stats.json.
    const lossFunc: LossFunc = (a, b) =>
        Math.pow(Math.atanh(a) - Math.atanh(b), 2);
    const data = getData({
        nonKingPieceCount: i,
    });
    const pointValues = getPointValues(data, lossFunc);
    console.log(`${i} non-king pieces`);
    console.log(formatTable(i, normalize(pointValues)));
    console.log("loss", computeLoss(data, pointValues, lossFunc));
}
console.log("Variant 3: another different loss function");
for (let i = 1; i <= 5; i += 1) {
    // Negative log-likelihood (cross-entropy) of the observed score under
    // the predicted score, after mapping both from [-1, 1] to [0, 1].
    // Fix: the original omitted the leading minus sign, making this a
    // log-likelihood ("higher is better") fed to a minimizer; it only
    // worked because computeLoss divides by a baseline of the same sign,
    // so the minimized ratio is identical. The negation makes it a genuine
    // "lower is better" loss without changing any result.
    // log's argument stays in (0, 1): predictions come from tanh, which
    // never reaches +/-1, so log(0) cannot occur.
    const lossFunc: LossFunc = (a, b) =>
        -(
            ((a + 1) / 2) * Math.log((b + 1) / 2) +
            (1 - (a + 1) / 2) * Math.log(1 - (b + 1) / 2)
        );
    const data = getData({
        nonKingPieceCount: i,
    });
    const pointValues = getPointValues(data, lossFunc);
    console.log(`${i} non-king pieces`);
    console.log(formatTable(i, normalize(pointValues)));
    console.log("loss", computeLoss(data, pointValues, lossFunc));
}
Part 1: here.
In my previous iteration of this, I ended up giving values based on an average over all piece combinations in the endgame tablebase. I was curious what would happen if I subdivided it based on number of (non-king) pieces on the board. Here's the numbers I got today (rounded to 2 decimal places, after normalizing to R = 5):
| # pieces | P | N | B | R | Q |
|---|---|---|---|---|---|
| 1 | 2.29 | 0 | 0 | 5 | 4.83 |
| 2 | 3.02 | 1.47 | 2.41 | 5 | 9.39 |
| 3 | 2.22 | 1.94 | 2.44 | 5 | 9.18 |
| 4 | 1.95 | 2.21 | 2.61 | 5 | 9.10 |
| 5 | 1.76 | 2.34 | 2.76 | 5 | 9.05 |
I've learned that the row based on a single non-king piece being on the board is almost entirely useless. The queen is worse than the rook because it's more likely to stalemate. The bishop and knight are worth literally zero because they can't checkmate on their own. The rest is interesting though.
We'll notice a few trends, though: as piece count goes up, pawn and queen values go down, and knight and bishop values go up. Those trends point in the direction of traditional point values for pieces. Very interesting. I want to try fitting a logistic curve to these numbers for rows 2-5 to try guessing what row 6 values will be, and then eventually when the 8 piece tablebase comes out (since they count kings as pieces), compare with what actually comes out.
This really makes you think that as you get closer to the endgame, the bishop and knight both get weaker relative to other pieces and the pawn gets stronger.
Code below. Download stats.json from the lichess tablebase to run the code.
import fs from "node:fs";
// Synchronously load the per-material tablebase statistics at module load.
// Assumed shape: { [material: string]: { histogram: { white: { wdl }, black: { wdl } } } }
// — TODO confirm against the lichess stats.json format.
const stats = JSON.parse(fs.readFileSync("stats.json", "utf-8"));
/** One row per material configuration in the tablebase. */
type Data = {
    /** Material key, e.g. "KQvKR" (first side's pieces, "v", second side's). */
    material: string;
    /** Expected score in [-1, 1] for the first side (1 = win, -1 = loss). */
    gamePoints: number;
}[];
/**
 * Turn the tablebase statistics into (material, expected score) rows for
 * positions with exactly `nonKingPieceCount` non-king pieces.
 */
const getData = (options?: {
    /**
     * How much a cursed win counts as (cursed win = would be a win if not for the 50-move rule)
     * Any value between 0 and 1 makes sense.
     */
    curseFactor?: number;
    nonKingPieceCount?: number;
}): Data => {
    const { curseFactor = 0.25, nonKingPieceCount = 5 } = options ?? {};
    // "KvK" plus one letter per non-king piece fixes the key length.
    const wantedLength = "KvK".length + nonKingPieceCount;
    const rows: Data = [];
    for (const [material, e] of Object.entries(stats)) {
        if (material.length !== wantedLength) {
            continue;
        }
        const white = (e as any).histogram.white.wdl;
        const black = (e as any).histogram.black.wdl;
        // wdl buckets appear keyed by outcome class: 2 win, 1 cursed win,
        // 0 draw, -1 blessed loss, -2 loss (sign flipped for black) —
        // TODO confirm against the stats.json schema.
        const wins = white[2] + black[-2];
        const cursedWins = white[1] + black[-1];
        const draws = white[0] + black[0];
        const blessedLosses = white[-1] + black[1];
        const losses = white[-2] + black[2];
        const total = wins + cursedWins + draws + blessedLosses + losses;
        // Cursed wins / blessed losses count at a discounted rate because
        // the 50-move rule may void them.
        const gamePoints =
            (wins +
                curseFactor * cursedWins -
                curseFactor * blessedLosses -
                losses) /
            total;
        rows.push({ material, gamePoints });
    }
    return rows;
};
/** The five non-king piece letters used in material keys. */
type Piece = "P" | "N" | "B" | "R" | "Q";
/** A candidate assignment of point values to pieces. */
type Points = Record<Piece, number>;
// tanh squashes the material-difference score into (-1, 1), matching the
// range of gamePoints.
const sigmoid = (x: number): number => Math.tanh(x);
// Left-fold addition over an array; 0 for the empty array.
const sum = (arr: number[]): number => {
    let total = 0;
    for (const value of arr) {
        total += value;
    }
    return total;
};
/**
 * Predicted game score in (-1, 1) for the first side, from the point
 * difference of the two sides' material. Letters without an assigned
 * value (i.e. kings) count as 0.
 */
const predictGame = (points: Points, material: [string, string]) => {
    const score = (mat: string) =>
        sum([...mat].map((p) => (p in points ? points[p as Piece] : 0)));
    return sigmoid(score(material[0]) - score(material[1]));
};
/**
 * Squared-error loss of the trivial model that always predicts 0 (a draw);
 * used to normalize losses across data sets of different sizes.
 */
const getBaselineLoss = (data: Data) =>
    data.reduce((acc, { gamePoints }) => acc + Math.pow(gamePoints - 0, 2), 0);
// Cache of baseline losses: the grid search calls computeLoss hundreds of
// times with the same `data`, and the baseline depends on nothing else —
// previously it was recomputed on every call.
const baselineLossCache = new WeakMap<Data, number>();
/**
 * Total squared prediction error over `data`, normalized by the
 * always-predict-draw baseline. Values below 1 beat the baseline.
 */
const computeLoss = (data: Data, points: Points) => {
    let loss = 0;
    for (const { material, gamePoints } of data) {
        const prediction = predictGame(
            points,
            material.split("v") as [string, string],
        );
        loss += Math.pow(gamePoints - prediction, 2);
    }
    let baseline = baselineLossCache.get(data);
    if (baseline === undefined) {
        baseline = getBaselineLoss(data);
        baselineLossCache.set(data, baseline);
    }
    return loss / baseline;
};
/**
 * Exhaustive grid search over piece values: every combination of the
 * per-piece candidate values in [min, max] (inclusive, stepped by `step`)
 * is scored with computeLoss, and the lowest-loss combination wins.
 *
 * Note: candidates are generated by repeated floating-point addition, so
 * with non-representable steps (e.g. 0.1) the last candidate may fall just
 * short of `max`; the search only relies on the neighborhood being covered.
 *
 * @throws if no candidate produces a loss below Infinity (e.g. all NaN) —
 *   previously this case silently returned null via `best as Points`.
 */
const optimizeInRange = (
    data: Data,
    constraints: Record<Piece, { min: number; max: number; step: number }>,
): Points => {
    // Expand one range into its candidate list, using the same FP
    // accumulation order as `for (v = min; v <= max; v += step)`.
    const expand = (range: { min: number; max: number; step: number }) => {
        const values: number[] = [];
        for (let v = range.min; v <= range.max; v += range.step) {
            values.push(v);
        }
        return values;
    };
    const pawns = expand(constraints.P);
    const knights = expand(constraints.N);
    const bishops = expand(constraints.B);
    const rooks = expand(constraints.R);
    const queens = expand(constraints.Q);
    let bestLoss = Infinity;
    let best: Points | null = null;
    for (const p of pawns) {
        for (const n of knights) {
            for (const b of bishops) {
                for (const r of rooks) {
                    for (const q of queens) {
                        const pointValues = { P: p, N: n, B: b, R: r, Q: q };
                        const loss = computeLoss(data, pointValues);
                        if (loss < bestLoss) {
                            best = pointValues;
                            bestLoss = loss;
                        }
                    }
                }
            }
        }
    }
    if (best === null) {
        throw new Error("optimizeInRange: no candidate produced a finite loss");
    }
    return best;
};
/**
 * One local-search step: grid-search the ±epsilon cube around `points`
 * (three candidates per piece: value - epsilon, value, value + epsilon).
 */
const nudge = (data: Data, points: Points, epsilon: number) => {
    // Build the per-piece search ranges explicitly instead of going
    // through Object.fromEntries plus an `as any` escape hatch.
    const around = (value: number) => ({
        min: value - epsilon,
        max: value + epsilon,
        step: epsilon,
    });
    return optimizeInRange(data, {
        P: around(points.P),
        N: around(points.N),
        B: around(points.B),
        R: around(points.R),
        Q: around(points.Q),
    });
};
/**
 * Repeatedly nudge `points` until the loss stops changing at this epsilon
 * resolution (a fixed point of `nudge`), up to a generous iteration cap.
 */
const optimizeForEpsilon = (
    data: Data,
    initialPoints: Points,
    epsilon: number,
) => {
    const maxIterations = 1000;
    let points = { ...initialPoints };
    let prevLoss = Infinity; // no real loss compares equal to this sentinel
    let converged = false;
    for (let i = 0; i < maxIterations; i += 1) {
        points = nudge(data, points, epsilon);
        const loss = computeLoss(data, points);
        if (loss === prevLoss) {
            converged = true;
            break;
        }
        prevLoss = loss;
    }
    if (!converged) {
        // Previously this warning fired from inside the loop via an
        // `i > 998` check, even while the final iteration was in flight.
        console.log("TOOK TOO LONG");
    }
    return points;
};
/**
 * Full optimization: start from `initialValues` and refine with a
 * coarse-to-fine epsilon schedule, each stage run to a fixed point.
 */
const getPointValues = (
    data: Data,
    initialValues = {
        P: 0,
        N: 0,
        B: 0,
        R: 0,
        Q: 0,
    },
) => {
    let pointValues = initialValues;
    for (const epsilon of [1, 0.1, 0.01, 0.001]) {
        pointValues = optimizeForEpsilon(data, pointValues, epsilon);
    }
    return pointValues;
};
/**
 * Rescale a set of points so that the rook is worth exactly 5 (the queen
 * then usually lands around 9).
 */
const normalize = (points: Points) => {
    const factor = 5 / points.R;
    const scale = (value: number) => value * factor;
    return {
        P: scale(points.P),
        N: scale(points.N),
        B: scale(points.B),
        R: scale(points.R),
        Q: scale(points.Q),
    };
};
// Compute point values for pieces based on number of pieces on the board.
for (let i = 1; i <= 5; i += 1) {
    const data = getData({ nonKingPieceCount: i });
    const pointValues = getPointValues(data);
    console.log(`${i} non-king pieces`);
    // Rescaled so R = 5 for comparison with traditional point values.
    console.log(normalize(pointValues));
    console.log("loss", computeLoss(data, pointValues));
}
I was curious to see if I could determine point values for chess pieces based on endgame tablebases. I use the model that the difference of the sum of the players' pieces should determine the expected value of the game to a given player via a sigmoid function. The end result is the following:
The pawns are overrated here, because we use endgame tablebases and in endgames, pawns are much more likely to promote, and therefore more valuable. Queen to rook ratio is pretty close to 9:5, and bishops and knights are slightly weaker relatively than the normal point values associated with them. Here are the point values rescaled to make the rook be worth 5:
Code below. Download stats.json from the lichess tablebase to run the code.
import fs from "node:fs";
// Synchronously load the per-material tablebase statistics at module load.
// Assumed shape: { [material: string]: { histogram: { white: { wdl }, black: { wdl } } } }
// — TODO confirm against the lichess stats.json format.
const stats = JSON.parse(fs.readFileSync("stats.json", "utf-8"));
// How much a cursed win counts as (cursed win = would be a win if not for the 50-move rule)
const CURSE_FACTOR = 0.5;
// One row per material configuration: the key (e.g. "KQvKR") and the
// first side's expected score in [-1, 1].
const data = Object.entries(stats).map(([key, e]: [string, any]) => {
    // wdl buckets appear keyed by outcome class: 2 win, 1 cursed win,
    // 0 draw, -1 blessed loss, -2 loss (sign flipped for black) —
    // TODO confirm against the stats.json schema.
    const wins = e.histogram.white.wdl[2] + e.histogram.black.wdl[-2];
    const cursedWins = e.histogram.white.wdl[1] + e.histogram.black.wdl[-1];
    const draws = e.histogram.white.wdl[0] + e.histogram.black.wdl[0];
    const blessedLosses = e.histogram.white.wdl[-1] + e.histogram.black.wdl[1];
    const losses = e.histogram.white.wdl[-2] + e.histogram.black.wdl[2];
    const total = wins + cursedWins + draws + blessedLosses + losses;
    // Cursed wins / blessed losses count at a discounted rate because the
    // 50-move rule may void them.
    const gamePoints =
        (wins +
            CURSE_FACTOR * cursedWins -
            CURSE_FACTOR * blessedLosses -
            losses) /
        total;
    return {
        material: key,
        gamePoints,
    };
});
/** The five non-king piece letters used in material keys. */
type Piece = "P" | "N" | "B" | "R" | "Q";
/** A candidate assignment of point values to pieces. */
type Points = Record<Piece, number>;
// tanh squashes the material-difference score into (-1, 1), matching the
// range of gamePoints.
const sigmoid = (x: number): number => Math.tanh(x);
// Left-fold addition over an array; 0 for the empty array.
const sum = (arr: number[]): number => {
    let total = 0;
    for (const value of arr) {
        total += value;
    }
    return total;
};
/**
 * Predicted game score in (-1, 1) for the first side, from the point
 * difference of the two sides' material. Letters without an assigned
 * value (i.e. kings) count as 0.
 */
const predictGame = (points: Points, material: [string, string]) => {
    const score = (mat: string) =>
        sum([...mat].map((p) => (p in points ? points[p as Piece] : 0)));
    return sigmoid(score(material[0]) - score(material[1]));
};
// Squared-error loss of the trivial model that always predicts 0 (a draw);
// computed once at module load and logged for reference.
const baselineLoss = data.reduce(
    (acc, { gamePoints }) => acc + Math.pow(gamePoints - 0, 2),
    0,
);
console.log("BASELINE LOSS", baselineLoss);
/**
 * Total squared error of predicted vs observed game scores over the whole
 * data set (not normalized).
 */
const computeLoss = (points: Points) => {
    let total = 0;
    for (const row of data) {
        const prediction = predictGame(
            points,
            row.material.split("v") as [string, string],
        );
        total += Math.pow(row.gamePoints - prediction, 2);
    }
    return total;
};
/**
 * Exhaustive grid search over piece values: every combination of the
 * per-piece candidate values in [min, max] (inclusive, stepped by `step`)
 * is scored with computeLoss, and the lowest-loss combination wins.
 *
 * Note: candidates are generated by repeated floating-point addition, so
 * with non-representable steps (e.g. 0.1) the last candidate may fall just
 * short of `max`; the search only relies on the neighborhood being covered.
 *
 * @throws if no candidate produces a loss below Infinity (e.g. all NaN) —
 *   previously this case silently returned null via `best as Points`.
 */
const optimizeInRange = (
    constraints: Record<Piece, { min: number; max: number; step: number }>,
): Points => {
    // Expand one range into its candidate list, using the same FP
    // accumulation order as `for (v = min; v <= max; v += step)`.
    const expand = (range: { min: number; max: number; step: number }) => {
        const values: number[] = [];
        for (let v = range.min; v <= range.max; v += range.step) {
            values.push(v);
        }
        return values;
    };
    const pawns = expand(constraints.P);
    const knights = expand(constraints.N);
    const bishops = expand(constraints.B);
    const rooks = expand(constraints.R);
    const queens = expand(constraints.Q);
    let bestLoss = Infinity;
    let best: Points | null = null;
    for (const p of pawns) {
        for (const n of knights) {
            for (const b of bishops) {
                for (const r of rooks) {
                    for (const q of queens) {
                        const pointValues = { P: p, N: n, B: b, R: r, Q: q };
                        const loss = computeLoss(pointValues);
                        if (loss < bestLoss) {
                            best = pointValues;
                            bestLoss = loss;
                        }
                    }
                }
            }
        }
    }
    if (best === null) {
        throw new Error("optimizeInRange: no candidate produced a finite loss");
    }
    return best;
};
/**
 * One local-search step: grid-search the ±epsilon cube around `points`
 * (three candidates per piece: value - epsilon, value, value + epsilon).
 */
const nudge = (points: Points, epsilon: number) => {
    // Build the per-piece search ranges explicitly instead of going
    // through Object.fromEntries plus an `as any` escape hatch.
    const around = (value: number) => ({
        min: value - epsilon,
        max: value + epsilon,
        step: epsilon,
    });
    return optimizeInRange({
        P: around(points.P),
        N: around(points.N),
        B: around(points.B),
        R: around(points.R),
        Q: around(points.Q),
    });
};
/**
 * Repeatedly nudge `points` until the loss stops changing at this epsilon
 * resolution (a fixed point of `nudge`), up to a generous iteration cap.
 */
const optimizeForEpsilon = (initialPoints: Points, epsilon: number) => {
    const maxIterations = 1000;
    let points = { ...initialPoints };
    let prevLoss = Infinity; // no real loss compares equal to this sentinel
    let converged = false;
    for (let i = 0; i < maxIterations; i += 1) {
        points = nudge(points, epsilon);
        const loss = computeLoss(points);
        if (loss === prevLoss) {
            converged = true;
            break;
        }
        prevLoss = loss;
        console.log(i, loss); // progress logging (skipped on the converging pass)
    }
    if (!converged) {
        // Previously this warning fired from inside the loop via an
        // `i > 998` check, even while the final iteration was in flight.
        console.log("TOOK TOO LONG");
    }
    return points;
};
// Start every piece at 0 and refine with a coarse-to-fine epsilon schedule.
let pointValues = {
    P: 0,
    N: 0,
    B: 0,
    R: 0,
    Q: 0,
};
pointValues = optimizeForEpsilon(pointValues, 1);
pointValues = optimizeForEpsilon(pointValues, 0.1);
pointValues = optimizeForEpsilon(pointValues, 0.01);
pointValues = optimizeForEpsilon(pointValues, 0.001);
// Final (un-normalized) point values.
console.log(pointValues);