fix(Batching): add timestamp safety check to ensure all messages in the batch have the same timestamp (#3902)

Co-authored-by: mischa <info@noobtuts.com>
mischa 2024-09-26 11:25:46 +02:00 committed by GitHub
parent 2b0603f741
commit 38e2842abc
2 changed files with 79 additions and 1 deletion

View File

@ -49,8 +49,16 @@ public static int MaxMessageOverhead(int messageSize) =>
// => best to build batches on the fly.
readonly Queue<NetworkWriterPooled> batches = new Queue<NetworkWriterPooled>();
// current batch in progress.
// we also store the timestamp to ensure we don't add a message from another frame,
// as this would introduce subtle jitter!
//
// for example:
// - a batch is started at t=1, another message is added at t=2 and then it's flushed
// - NetworkTransform uses remoteTimestamp which is t=1
// - snapshot interpolation would be off by one (or multiple) frames!
NetworkWriterPooled batch;
double batchTimestamp;
public Batcher(int threshold)
{
@ -62,6 +70,32 @@ public Batcher(int threshold)
// caller needs to make sure they are within max packet size.
public void AddMessage(ArraySegment<byte> message, double timeStamp)
{
// safety: message timestamp is only written once.
// make sure all messages in this batch are from the same timestamp.
// otherwise it could silently introduce jitter.
//
// this happened before:
// - NetworkEarlyUpdate @ t=1 processes transport messages
// - a handler replies by sending a message
// - a new batch is started @ t=1, timestamp is encoded
// - NetworkLateUpdate @ t=2 decides it's time to broadcast
// - NetworkTransform sends @ t=2
// - we add to the above batch which already encoded t=1
// - Client receives the batch with timestamp t=1
// - NetworkTransform uses remoteTime for interpolation
// remoteTime is the batch timestamp which is t=1
// - the NetworkTransform message is actually t=2
// => smooth interpolation would be impossible!
// NT thinks the position was @ t=1 but actually it was @ t=2 !
//
// the solution: if timestamp changed, enqueue the existing batch
if (batch != null && batchTimestamp != timeStamp)
{
batches.Enqueue(batch);
batch = null;
batchTimestamp = 0;
}
// predict the needed size, which is varint(size) + content
int headerSize = Compression.VarUIntSize((ulong)message.Count);
int neededSize = headerSize + message.Count;
@ -76,6 +110,7 @@ public void AddMessage(ArraySegment<byte> message, double timeStamp)
{
batches.Enqueue(batch);
batch = null;
batchTimestamp = 0;
}
// initialize a new batch if necessary
@ -89,6 +124,9 @@ public void AddMessage(ArraySegment<byte> message, double timeStamp)
// -> batches are per-frame, it doesn't matter which message's
// timestamp we use.
batch.WriteDouble(timeStamp);
// remember the encoded timestamp, see the safety check at the top of AddMessage.
batchTimestamp = timeStamp;
}
// add serialization to current batch. even if > threshold.
@ -155,6 +193,7 @@ public void Clear()
{
NetworkWriterPool.Return(batch);
batch = null;
batchTimestamp = 0;
}
// return all queued batches

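For context, a minimal send-path sketch, not part of this commit: it shows how the timestamp check surfaces to a caller. Messages stamped with different timestamps now come out of GetBatch as separate batches, each carrying its own timestamp, instead of one batch that silently mixes frames. Only APIs visible in this diff and its tests are used (Batcher.GetBatch, NetworkWriter); the send callback is a hypothetical stand-in for the transport.

using System;
using Mirror;

public static class BatcherFlushSketch
{
    // hypothetical per-frame flush helper, for illustration only
    public static void Flush(Batcher batcher, Action<byte[]> send)
    {
        // GetBatch expects a fresh writer, so reset Position before each call
        NetworkWriter writer = new NetworkWriter();
        while (true)
        {
            writer.Position = 0;
            // false once all queued batches (and the batch in progress) are drained
            if (!batcher.GetBatch(writer))
                break;
            // each batch starts with its own 8-byte timestamp
            send(writer.ToArray());
        }
    }
}
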
View File

@ -56,6 +56,45 @@ public void AddMessage()
batcher.AddMessage(new ArraySegment<byte>(message), TimeStamp);
}
// test to prevent the following issue from ever happening again:
//
// - NetworkEarlyUpdate @ t=1 processes transport messages
// - a handler replies by sending a message
// - a new batch is started @ t=1, timestamp is encoded
// - NetworkLateUpdate @ t=2 decides it's time to broadcast
// - NetworkTransform sends @ t=2
// - we add to the above batch which already encoded t=1
// - Client receives the batch with timestamp t=1
// - NetworkTransform uses remoteTime for interpolation
// remoteTime is the batch timestamp which is t=1
// - the NetworkTransform message is actually t=2
// => smooth interpolation would be impossible!
// NT thinks the position was @ t=1 but actually it was @ t=2 !
[Test]
public void AddMessage_TimestampMismatch()
{
// add message @ t=1
byte[] message1 = {0x01, 0x01};
batcher.AddMessage(new ArraySegment<byte>(message1), 1);
// add message @ t=2
byte[] message2 = {0x02, 0x02};
batcher.AddMessage(new ArraySegment<byte>(message2), 2);
// call GetBatch: this should only contain the message @ t=1 !
// <<tickTimeStamp:8, message>>
bool result = batcher.GetBatch(writer);
Assert.That(result, Is.EqualTo(true));
Assert.That(writer.ToArray().SequenceEqual(MakeBatch(1, message1)));
// call GetBatch: this should now contain the message @ t=2 !
// <<tickTimeStamp:8, message>>
writer.Position = 0;
result = batcher.GetBatch(writer);
Assert.That(result, Is.EqualTo(true));
Assert.That(writer.ToArray().SequenceEqual(MakeBatch(2, message2)));
}
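// note: MakeBatch is an existing test helper that is not shown in this diff.
// a plausible sketch of what it builds, assuming it mirrors the Batcher's
// wire layout from above: 8-byte double timestamp, then each message
// prefixed with its varuint size (see the Compression.VarUIntSize usage).
// names below are illustrative, not the actual helper.
static byte[] MakeBatchSketch(double tickTimeStamp, byte[] message)
{
    NetworkWriter writer = new NetworkWriter();
    writer.WriteDouble(tickTimeStamp);                          // <<tickTimeStamp:8
    Compression.CompressVarUInt(writer, (ulong)message.Length); // varuint size prefix
    writer.WriteBytes(message, 0, message.Length);              // message content
    return writer.ToArray();
}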
[Test]
public void MakeNextBatch_OnlyAcceptsFreshWriter()
{