Mirror of https://github.com/MirrorNetworking/Mirror.git
NetworkConnection.SendToTransport abstract; NetworkConnectionToClient/Server.Send(ArraySegment) common code moved into NetworkConnection
commit 6e9206deab (parent 970c3e8702)
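Viewed as a refactor pattern, this commit turns Send into a template method: the NetworkConnection base class now owns send stage two (packet-size validation and batching), and only stage three, the actual hand-off to the transport, remains abstract for NetworkConnectionToClient/Server to fill in. The following self-contained sketch illustrates that shape with made-up names (ConnectionSketch, LoggingConnection and Flush are illustrative only, not Mirror API):

// Hypothetical illustration of the pattern applied by this commit; none of
// these names are Mirror API. The base class owns validation + batching
// (stage two), subclasses only provide the transport hand-off (stage three).
using System;
using System.Collections.Generic;

abstract class ConnectionSketch
{
    readonly bool batching;
    readonly List<ArraySegment<byte>> batch = new List<ArraySegment<byte>>();

    protected ConnectionSketch(bool batching) => this.batching = batching;

    // send stage two: shared by every connection type
    public void Send(ArraySegment<byte> segment)
    {
        // stand-in for ValidatePacketSize
        if (segment.Count == 0) return;

        // stand-in for GetBatchForChannelId(channelId).AddMessage(segment)
        if (batching) batch.Add(segment);
        // otherwise send directly to minimize latency
        else SendToTransport(segment);
    }

    // send stage three: the only part subclasses still implement
    protected abstract void SendToTransport(ArraySegment<byte> segment);

    // stand-in for the end-of-Update flush of batched messages
    public void Flush()
    {
        foreach (ArraySegment<byte> message in batch)
            SendToTransport(message);
        batch.Clear();
    }
}

class LoggingConnection : ConnectionSketch
{
    public LoggingConnection(bool batching) : base(batching) {}

    protected override void SendToTransport(ArraySegment<byte> segment) =>
        Console.WriteLine($"handed {segment.Count} bytes to the transport");
}

static class Demo
{
    static void Main()
    {
        ConnectionSketch conn = new LoggingConnection(batching: true);
        conn.Send(new ArraySegment<byte>(new byte[] { 1, 2, 3 }));
        conn.Send(new ArraySegment<byte>(new byte[] { 4, 5 }));
        conn.Flush(); // both messages reach the transport here
    }
}

The actual diff follows.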
@@ -135,7 +135,35 @@ public void Send<T>(T message, int channelId = Channels.Reliable)
         // Send stage two: serialized NetworkMessage as ArraySegment<byte>
         // internal because no one except Mirror should send bytes directly to
         // the client. they would be detected as a message. send messages instead.
-        internal abstract void Send(ArraySegment<byte> segment, int channelId = Channels.Reliable);
+        internal virtual void Send(ArraySegment<byte> segment, int channelId = Channels.Reliable)
+        {
+            //Debug.Log("ConnectionSend " + this + " bytes:" + BitConverter.ToString(segment.Array, segment.Offset, segment.Count));
+
+            // validate packet size first.
+            if (ValidatePacketSize(segment, channelId))
+            {
+                // batching enabled?
+                if (batching)
+                {
+                    // add to batch no matter what.
+                    // batching will try to fit as many as possible into MTU.
+                    // but we still allow > MTU, e.g. kcp max packet size 144kb.
+                    // those are simply sent as single batches.
+                    //
+                    // IMPORTANT: do NOT send > batch sized messages directly:
+                    // - data race: large messages would be sent directly. small
+                    //   messages would be sent in the batch at the end of frame
+                    // - timestamps: if batching assumes a timestamp, then large
+                    //   messages need that too.
+                    GetBatchForChannelId(channelId).AddMessage(segment);
+                }
+                // otherwise send directly to minimize latency
+                else SendToTransport(segment, channelId);
+            }
+        }
+
+        // Send stage three: hand off to transport
+        protected abstract void SendToTransport(ArraySegment<byte> segment, int channelId = Channels.Reliable);
 
         /// <summary>Disconnects this connection.</summary>
         // for future reference, here is how Disconnects work in Mirror.
@@ -14,33 +14,9 @@ public class NetworkConnectionToClient : NetworkConnection
         public NetworkConnectionToClient(int networkConnectionId, bool batching)
             : base(networkConnectionId, batching) {}
 
-        // Send stage two: serialized NetworkMessage as ArraySegment<byte>
-        internal override void Send(ArraySegment<byte> segment, int channelId = Channels.Reliable)
-        {
-            //Debug.Log("ConnectionSend " + this + " bytes:" + BitConverter.ToString(segment.Array, segment.Offset, segment.Count));
-
-            // validate packet size first.
-            if (ValidatePacketSize(segment, channelId))
-            {
-                // batching enabled?
-                if (batching)
-                {
-                    // add to batch no matter what.
-                    // batching will try to fit as many as possible into MTU.
-                    // but we still allow > MTU, e.g. kcp max packet size 144kb.
-                    // those are simply sent as single batches.
-                    //
-                    // IMPORTANT: do NOT send > batch sized messages directly:
-                    // - data race: large messages would be sent directly. small
-                    //   messages would be sent in the batch at the end of frame
-                    // - timestamps: if batching assumes a timestamp, then large
-                    //   messages need that too.
-                    GetBatchForChannelId(channelId).AddMessage(segment);
-                }
-                // otherwise send directly to minimize latency
-                else Transport.activeTransport.ServerSend(connectionId, segment, channelId);
-            }
-        }
+        // Send stage three: hand off to transport
+        protected override void SendToTransport(ArraySegment<byte> segment, int channelId = Channels.Reliable) =>
+            Transport.activeTransport.ServerSend(connectionId, segment, channelId);
 
         // flush batched messages at the end of every Update.
         internal void Update()
@@ -9,33 +9,9 @@ public class NetworkConnectionToServer : NetworkConnection
 
         public NetworkConnectionToServer(bool batching) : base(batching) {}
 
-        // Send stage two: serialized NetworkMessage as ArraySegment<byte>
-        internal override void Send(ArraySegment<byte> segment, int channelId = Channels.Reliable)
-        {
-            // Debug.Log("ConnectionSend " + this + " bytes:" + BitConverter.ToString(segment.Array, segment.Offset, segment.Count));
-
-            // validate packet size first.
-            if (ValidatePacketSize(segment, channelId))
-            {
-                // batching enabled?
-                if (batching)
-                {
-                    // add to batch no matter what.
-                    // batching will try to fit as many as possible into MTU.
-                    // but we still allow > MTU, e.g. kcp max packet size 144kb.
-                    // those are simply sent as single batches.
-                    //
-                    // IMPORTANT: do NOT send > batch sized messages directly:
-                    // - data race: large messages would be sent directly. small
-                    //   messages would be sent in the batch at the end of frame
-                    // - timestamps: if batching assumes a timestamp, then large
-                    //   messages need that too.
-                    GetBatchForChannelId(channelId).AddMessage(segment);
-                }
-                // otherwise send directly to minimize latency
-                else Transport.activeTransport.ClientSend(segment, channelId);
-            }
-        }
+        // Send stage three: hand off to transport
+        protected override void SendToTransport(ArraySegment<byte> segment, int channelId = Channels.Reliable) =>
+            Transport.activeTransport.ClientSend(segment, channelId);
 
         // flush batched messages at the end of every Update.
        internal virtual void Update()
@@ -115,6 +115,7 @@ class FakeConnection : NetworkConnection
             public FakeConnection() : base(false) {}
             public override void Disconnect() => throw new NotImplementedException();
             internal override void Send(ArraySegment<byte> segment, int channelId = 0) => throw new NotImplementedException();
+            protected override void SendToTransport(ArraySegment<byte> segment, int channelId = Channels.Reliable) => throw new NotImplementedException();
         }
 
         [Test]
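Because SendToTransport is now the abstract member while Send has a shared virtual body, any class deriving from NetworkConnection, including test doubles like the FakeConnection above, has to override SendToTransport or it will not compile. A hypothetical test fake that records outgoing segments instead of throwing might look like the sketch below (RecordingConnection and its Sent list are illustrative, not part of this commit; it assumes it lives alongside Mirror's own tests so it can reach the internal constructor):

// Hypothetical test helper, not part of this commit: captures what stage
// three would have handed to the transport so a test can assert on it.
using System;
using System.Collections.Generic;
using Mirror;

class RecordingConnection : NetworkConnection
{
    public readonly List<byte[]> Sent = new List<byte[]>();

    public RecordingConnection() : base(false) {}

    public override void Disconnect() {}

    protected override void SendToTransport(ArraySegment<byte> segment, int channelId = Channels.Reliable)
    {
        // copy the segment because the caller may reuse the underlying buffer
        byte[] copy = new byte[segment.Count];
        Buffer.BlockCopy(segment.Array, segment.Offset, copy, 0, segment.Count);
        Sent.Add(copy);
    }
}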