perf: NetworkReader/Writer: read/write collection size headers as VarInt for significant bandwidth reduction! (#3868)

Co-authored-by: mischa <info@noobtuts.com>
mischa 2024-08-09 09:58:22 +02:00 committed by GitHub
parent 96ba861e2c
commit 2fe1b8fd38
3 changed files with 65 additions and 30 deletions


@@ -100,7 +100,10 @@ public static byte[] ReadBytesAndSize(this NetworkReader reader)
// we offset count by '1' to easily support null without writing another byte.
// encoding null as '0' instead of '-1' also allows for better compression
// (ushort vs. short / varuint vs. varint) etc.
uint count = reader.ReadUInt();
// most sizes are small, read size as VarUInt!
uint count = (uint)Compression.DecompressVarUInt(reader);
// uint count = reader.ReadUInt();
// Use checked() to force it to throw OverflowException if data is invalid
return count == 0 ? null : reader.ReadBytes(checked((int)(count - 1u)));
}
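
For context, the offset-by-one scheme above distinguishes null from empty without spending an extra flag byte: a header of 0 means null, 1 means an empty buffer, and N+1 means N payload bytes follow. A minimal read-side sketch of that pattern, using a plain BinaryReader and a fixed-width header rather than Mirror's actual NetworkReader API:

using System;
using System.IO;

static class ReadSizeHeaderSketch
{
    // Illustrative only: 0 => null, 1 => empty, N+1 => N payload bytes.
    // Mirror now reads this header as a VarUInt instead of a fixed 4 bytes.
    public static byte[] ReadBytesAndSize(BinaryReader reader)
    {
        uint count = reader.ReadUInt32();
        if (count == 0) return null;
        // checked() turns a bogus (huge) count into an OverflowException
        // instead of silently wrapping to a negative length.
        return reader.ReadBytes(checked((int)(count - 1u)));
    }
}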
@@ -111,7 +114,10 @@ public static ArraySegment<byte> ReadArraySegmentAndSize(this NetworkReader read
// we offset count by '1' to easily support null without writing another byte.
// encoding null as '0' instead of '-1' also allows for better compression
// (ushort vs. short / varuint vs. varint) etc.
uint count = reader.ReadUInt();
// most sizes are small, read size as VarUInt!
uint count = (uint)Compression.DecompressVarUInt(reader);
// uint count = reader.ReadUInt();
// Use checked() to force it to throw OverflowException if data is invalid
return count == 0 ? default : reader.ReadBytesSegment(checked((int)(count - 1u)));
}
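
The checked() cast matters because the count comes straight off the wire: an attacker-supplied value near uint.MaxValue would otherwise wrap to a negative int. A small, self-contained demonstration of that behavior (not Mirror code):

using System;

class CheckedCastDemo
{
    static void Main()
    {
        uint count = uint.MaxValue; // e.g. a malicious size header
        try
        {
            int length = checked((int)(count - 1u));
            Console.WriteLine(length);
        }
        catch (OverflowException)
        {
            // unchecked, (int)(count - 1u) would silently become -2
            Console.WriteLine("invalid size rejected");
        }
    }
}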
@@ -269,7 +275,10 @@ public static List<T> ReadList<T>(this NetworkReader reader)
// we offset count by '1' to easily support null without writing another byte.
// encoding null as '0' instead of '-1' also allows for better compression
// (ushort vs. short / varuint vs. varint) etc.
uint length = reader.ReadUInt();
// most sizes are small, read size as VarUInt!
uint length = (uint)Compression.DecompressVarUInt(reader);
// uint length = reader.ReadUInt();
if (length == 0) return null;
length -= 1;
@@ -301,7 +310,10 @@ public static HashSet<T> ReadHashSet<T>(this NetworkReader reader)
// we offset count by '1' to easily support null without writing another byte.
// encoding null as '0' instead of '-1' also allows for better compression
// (ushort vs. short / varuint vs. varint) etc.
uint length = reader.ReadUInt();
// most sizes are small, read size as VarUInt!
uint length = (uint)Compression.DecompressVarUInt(reader);
//uint length = reader.ReadUInt();
if (length == 0) return null;
length -= 1;
@@ -319,7 +331,10 @@ public static T[] ReadArray<T>(this NetworkReader reader)
// we offset count by '1' to easily support null without writing another byte.
// encoding null as '0' instead of '-1' also allows for better compression
// (ushort vs. short / varuint vs. varint) etc.
uint length = reader.ReadUInt();
// most sizes are small, read size as VarUInt!
uint length = (uint)Compression.DecompressVarUInt(reader);
//uint length = reader.ReadUInt();
if (length == 0) return null;
length -= 1;
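
All of these readers funnel the size header through Compression.DecompressVarUInt. As an illustration of the general idea (a classic 7-bits-per-byte codec, not necessarily Mirror's exact byte layout), the payoff is that counts below 128 cost one byte instead of four:

using System;
using System.IO;

static class VarUIntSketch
{
    // Write the low 7 bits per byte; the high bit says "more bytes follow".
    public static void Write(BinaryWriter writer, uint value)
    {
        while (value >= 0x80)
        {
            writer.Write((byte)(value | 0x80));
            value >>= 7;
        }
        writer.Write((byte)value);
    }

    // Read until a byte without the continuation bit is seen.
    public static uint Read(BinaryReader reader)
    {
        uint result = 0;
        int shift = 0;
        while (true)
        {
            byte b = reader.ReadByte();
            result |= (uint)(b & 0x7F) << shift;
            if ((b & 0x80) == 0) return result;
            shift += 7;
            if (shift > 28) throw new FormatException("VarUInt too long for 32 bits");
        }
    }
}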


@@ -99,10 +99,14 @@ public static void WriteBytesAndSize(this NetworkWriter writer, byte[] buffer, i
// (ushort vs. short / varuint vs. varint) etc.
if (buffer == null)
{
writer.WriteUInt(0u);
// most sizes are small, write size as VarUInt!
Compression.CompressVarUInt(writer, 0u);
// writer.WriteUInt(0u);
return;
}
writer.WriteUInt(checked((uint)count) + 1u);
// most sizes are small, write size as VarUInt!
Compression.CompressVarUInt(writer, checked((uint)count) + 1u);
// writer.WriteUInt(checked((uint)count) + 1u);
writer.WriteBytes(buffer, offset, count);
}
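
The write side mirrors the read side: null becomes a header of 0, anything else becomes count + 1 followed by the payload. A minimal sketch under the same assumptions as above (BinaryWriter plus a classic 7-bit VarUInt, not Mirror's actual Compression class):

using System;
using System.IO;

static class WriteSizeHeaderSketch
{
    // null => 0, otherwise count + 1, then the raw bytes.
    public static void WriteBytesAndSize(BinaryWriter writer, byte[] buffer, int offset, int count)
    {
        if (buffer == null)
        {
            WriteVarUInt(writer, 0u); // a single zero byte
            return;
        }
        WriteVarUInt(writer, checked((uint)count) + 1u); // throws if count is negative
        writer.Write(buffer, offset, count);
    }

    static void WriteVarUInt(BinaryWriter writer, uint value)
    {
        while (value >= 0x80) { writer.Write((byte)(value | 0x80)); value >>= 7; }
        writer.Write((byte)value);
    }
}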
@@ -124,7 +128,9 @@ public static void WriteArraySegment<T>(this NetworkWriter writer, ArraySegment<
// - ReadArray
// in which case ReadArray needs null support. both need to be compatible.
int count = segment.Count;
writer.WriteUInt(checked((uint)count) + 1u);
// most sizes are small, write size as VarUInt!
Compression.CompressVarUInt(writer, checked((uint)count) + 1u);
// writer.WriteUInt(checked((uint)count) + 1u);
for (int i = 0; i < count; i++)
{
writer.Write(segment.Array[segment.Offset + i]);
@@ -328,7 +334,9 @@ public static void WriteList<T>(this NetworkWriter writer, List<T> list)
// (ushort vs. short / varuint vs. varint) etc.
if (list is null)
{
writer.WriteUInt(0);
// most sizes are small, write size as VarUInt!
Compression.CompressVarUInt(writer, 0u);
// writer.WriteUInt(0);
return;
}
@@ -336,7 +344,9 @@ public static void WriteList<T>(this NetworkWriter writer, List<T> list)
if (list.Count > NetworkReader.AllocationLimit)
throw new IndexOutOfRangeException($"NetworkWriter.WriteList - List<{typeof(T)}> too big: {list.Count} elements. Limit: {NetworkReader.AllocationLimit}");
writer.WriteUInt(checked((uint)list.Count) + 1u);
// most sizes are small, write size as VarUInt!
Compression.CompressVarUInt(writer, checked((uint)list.Count) + 1u);
// writer.WriteUInt(checked((uint)list.Count) + 1u);
for (int i = 0; i < list.Count; i++)
writer.Write(list[i]);
}
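
To put rough numbers on the bandwidth claim in the commit title: the old fixed header cost 4 bytes per collection regardless of size, while a VarUInt header (assuming the 7-bit layout sketched earlier) costs a single byte whenever count + 1 stays below 128. A quick comparison, illustrative only:

using System;

class HeaderSizeComparison
{
    // Bytes a classic 7-bit VarUInt needs for 'value' (assumed layout).
    static int VarUIntBytes(uint value)
    {
        int bytes = 1;
        while (value >= 0x80) { value >>= 7; bytes++; }
        return bytes;
    }

    static void Main()
    {
        foreach (uint count in new uint[] { 0, 3, 100, 1000, 100_000 })
            Console.WriteLine($"count={count}: fixed header 4 bytes, VarUInt header {VarUIntBytes(count + 1)} byte(s)");
    }
}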
@@ -346,22 +356,27 @@ public static void WriteList<T>(this NetworkWriter writer, List<T> list)
// fully serialize for NetworkMessages etc.
// note that Weaver/Writers/GenerateWriter() handles this manually.
// TODO writer not found. need to adjust weaver first. see tests.
/*
public static void WriteHashSet<T>(this NetworkWriter writer, HashSet<T> hashSet)
{
// we offset count by '1' to easily support null without writing another byte.
// encoding null as '0' instead of '-1' also allows for better compression
// (ushort vs. short / varuint vs. varint) etc.
if (hashSet is null)
{
writer.WriteUInt(0);
return;
}
writer.WriteUInt(checked((uint)hashSet.Count) + 1u);
foreach (T item in hashSet)
writer.Write(item);
}
*/
// /*
// public static void WriteHashSet<T>(this NetworkWriter writer, HashSet<T> hashSet)
// {
// // we offset count by '1' to easily support null without writing another byte.
// // encoding null as '0' instead of '-1' also allows for better compression
// // (ushort vs. short / varuint vs. varint) etc.
// if (hashSet is null)
// {
// // most sizes are small, write size as VarUInt!
// Compression.CompressVarUInt(writer, 0u);
// //writer.WriteUInt(0);
// return;
// }
//
// // most sizes are small, write size as VarUInt!
// Compression.CompressVarUInt(writer, checked((uint)hashSet.Count) + 1u);
// //writer.WriteUInt(checked((uint)hashSet.Count) + 1u);
// foreach (T item in hashSet)
// writer.Write(item);
// }
// */
public static void WriteArray<T>(this NetworkWriter writer, T[] array)
{
@@ -370,7 +385,9 @@ public static void WriteArray<T>(this NetworkWriter writer, T[] array)
// (ushort vs. short / varuint vs. varint) etc.
if (array is null)
{
writer.WriteUInt(0);
// most sizes are small, write size as VarUInt!
Compression.CompressVarUInt(writer, 0u);
// writer.WriteUInt(0);
return;
}
@@ -378,7 +395,9 @@ public static void WriteArray<T>(this NetworkWriter writer, T[] array)
if (array.Length > NetworkReader.AllocationLimit)
throw new IndexOutOfRangeException($"NetworkWriter.WriteArray - Array<{typeof(T)}> too big: {array.Length} elements. Limit: {NetworkReader.AllocationLimit}");
writer.WriteUInt(checked((uint)array.Length) + 1u);
// most sizes are small, write size as VarUInt!
Compression.CompressVarUInt(writer, checked((uint)array.Length) + 1u);
// writer.WriteUInt(checked((uint)array.Length) + 1u);
for (int i = 0; i < array.Length; i++)
writer.Write(array[i]);
}
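
The AllocationLimit check complements the size header: the writer refuses to serialize oversized collections, so a well-behaved peer never emits a header the reader would have to reject. A simplified sketch of that guard, with a hypothetical limit value chosen purely for illustration (NetworkReader.AllocationLimit's real value is not shown in this diff):

using System;

static class AllocationGuardSketch
{
    // Hypothetical cap standing in for NetworkReader.AllocationLimit.
    const int AllocationLimit = 1024 * 1024;

    public static void ValidateArrayLength<T>(T[] array)
    {
        if (array == null) return; // null is encoded as a header of 0
        if (array.Length > AllocationLimit)
            throw new IndexOutOfRangeException(
                $"Array<{typeof(T)}> too big: {array.Length} elements. Limit: {AllocationLimit}");
    }
}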


@@ -17,12 +17,13 @@ struct VariableSizedMessage : NetworkMessage
{
// weaver serializes byte[] with WriteBytesAndSize
public byte[] payload;
// so payload := size - 4
// so payload := size - header
// where header is VarUInt compression(size)
// then the message is exactly the max size.
//
// NOTE: we have a LargerMaxMessageSize test which guarantees that
// variablesized + 1 is exactly transport.max + 1
public VariableSizedMessage(int size) => payload = new byte[size - 4];
public VariableSizedMessage(int size) => payload = new byte[size - Compression.VarUIntSize((uint)size)];
}
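
Compression.VarUIntSize presumably returns how many bytes the VarUInt encoding of a value occupies, so the test sizes its payload such that payload plus header lands on the requested total. Under the 7-bit layout assumed in the earlier sketches, with a hypothetical message size:

using System;

static class VarUIntSizeSketch
{
    // Header bytes a classic 7-bit VarUInt would use for 'value' (assumed layout).
    public static int VarUIntSize(uint value)
    {
        int size = 1;
        while (value >= 0x80) { value >>= 7; size++; }
        return size;
    }

    static void Main()
    {
        int size = 16 * 1024; // hypothetical max message size for the example
        int header = VarUIntSize((uint)size);
        int payload = size - header;
        Console.WriteLine($"payload={payload} + header={header} = {payload + header} bytes total");
    }
}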
public class CommandTestNetworkBehaviour : NetworkBehaviour