
Commit

Merge branch 'rehlds:master' into master
cris840 authored Dec 21, 2024
2 parents 03f7e06 + 20c16bf commit da9ff74
Showing 17 changed files with 399 additions and 200 deletions.
1 change: 1 addition & 0 deletions .github/workflows/build.yml
@@ -10,6 +10,7 @@ on:
types: [opened, reopened, synchronize]
release:
types: [published]
workflow_dispatch:

jobs:
windows:
9 changes: 6 additions & 3 deletions README.md
@@ -63,9 +63,12 @@ This means that plugins that do binary code analysis (Orpheu for example) probab
<li>sv_rehlds_local_gametime &lt;1|0&gt; // A feature of local gametime which decreases "lags" if you run the same map for a long time. Default: 0
<li>sv_use_entity_file // Use custom entity file for a map. Path to an entity file will be "maps/[map name].ent". 0 - use original entities. 1 - use .ent files from maps directory. 2 - use .ent files from maps directory and create a new .ent file if it does not exist.
<li>sv_usercmd_custom_random_seed // When enabled server will populate an additional random seed independent of the client. Default: 0
<li>sv_net_incoming_decompression <1|0> // When enabled server will decompress of incoming compressed file transfer payloads. Default: 1
<li>sv_net_incoming_decompression_max_ratio <0|100> // Sets the max allowed ratio between compressed and uncompressed data for file transfer. (A ratio close to 90 indicates large uncompressed data with low entropy) Default: 80.0
<li>sv_net_incoming_decompression_max_size <16|65536> // Sets the max allowed size for decompressed file transfer data. Default: 65536 bytes
<li>sv_net_incoming_decompression &lt;1|0&gt; // When enabled, the server will decompress incoming compressed file transfer payloads. Default: 1
<li>sv_net_incoming_decompression_max_ratio &lt;0|100&gt; // Sets the max allowed ratio between compressed and uncompressed data for file transfer (a ratio close to 90 indicates large uncompressed data with low entropy; see the sketch after this list). Default: 80.0
<li>sv_net_incoming_decompression_max_size &lt;16|65536&gt; // Sets the max allowed size for decompressed file transfer data. Default: 65536 bytes
<li>sv_net_incoming_decompression_min_failures &lt;0|10&gt; // Sets the min number of decompression failures required before a player's connection is flagged for potential punishment. Default: 4
<li>sv_net_incoming_decompression_max_failures &lt;0|10&gt; // Sets the max number of decompression failures allowed within a specified time window before action is taken against the player. Default: 10
<li>sv_net_incoming_decompression_min_failuretime &lt;0.1|10.0&gt; // Sets the min time in seconds within which decompression failures are tracked to determine if the player exceeds the failure thresholds. Default: 0.1
<li>sv_net_incoming_decompression_punish // Time in minutes for which the player will be banned for malformed/abnormal bzip2 fragments (0 - Permanent, use a negative number for a kick). Default: -1
<li>sv_tags &lt;comma-delimited string list of tags&gt; // Sets a string defining the "gametags" for this server, this is optional, but if it is set it allows users/scripts to filter in the matchmaking/server-browser interfaces based on the value. Default: ""
<li>sv_filterban &lt;-1|0|1&gt;// Set packet filtering by IP mode. -1 - All players will be rejected without any exceptions. 0 - No checks will happen. 1 - All incoming players will be checked if they're IP banned (if they have an IP filter entry), if they are, they will be kicked. Default: 1
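A minimal sketch of the ratio heuristic that sv_net_incoming_decompression_max_ratio controls, mirroring the check added to Netchan_ValidateDecompress later in this commit; the standalone function name is illustrative, only the formula and cvar semantics come from the diff:

// Sketch: decide whether an incoming bzip2 payload expanded suspiciously much.
// maxRatio corresponds to sv_net_incoming_decompression_max_ratio (default 80.0).
static bool DecompressRatioLooksAbnormal(unsigned int compressedSize, unsigned int uncompressedSize, float maxRatio)
{
    if (maxRatio <= 0.0f)
        return false; // validation disabled
    if (compressedSize >= uncompressedSize)
        return false; // payload did not expand at all
    // Percentage of the uncompressed payload that compression "saved";
    // a value near 90 indicates large, low-entropy uncompressed data.
    float ratio = ((float)(uncompressedSize - compressedSize) / uncompressedSize) * 100.0f;
    return ratio >= maxRatio;
}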
15 changes: 14 additions & 1 deletion rehlds/engine/model.cpp
@@ -854,6 +854,7 @@ void CalcSurfaceExtents(msurface_t *s)
int i, j, e;
mvertex_t *v;
mtexinfo_t *tex;
vec3_t middle{};
int bmins[2], bmaxs[2];

mins[0] = mins[1] = 999999;
@@ -869,6 +870,8 @@ void CalcSurfaceExtents(msurface_t *s)
else
v = &loadmodel->vertexes[loadmodel->edges[-e].v[1]];

VectorAdd(middle, v->position, middle);

for (j = 0; j < 2; j++)
{
// FIXED: loss of floating point
@@ -884,15 +887,25 @@ void CalcSurfaceExtents(msurface_t *s)
}
}

VectorScale(middle, 1.0f / s->numedges, middle);

for (i = 0; i < 2; i++)
{
bmins[i] = (int) floor(mins[i] / 16);
bmaxs[i] = (int) ceil(maxs[i] / 16);

s->texturemins[i] = bmins[i] * 16;
s->extents[i] = (bmaxs[i] - bmins[i]) * 16;

if (!(tex->flags & TEX_SPECIAL) && s->extents[i] > MAX_SURFACE_TEXTURE_SIZE)
Sys_Error("%s: Bad surface extents", __func__);
{
int surfID = s - loadmodel->surfaces;
Sys_Error("%s: Bad #%d surface extents %d/%d on %s at position (%d,%d,%d)",
__func__, surfID, s->extents[0], s->extents[1],
tex->texture->name,
(int)middle[0], (int)middle[1], (int)middle[2]
);
}
}
}

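The improved Sys_Error message reports an approximate world position for the bad surface by averaging its edge vertices into middle. A rough illustration of that averaging, assuming the conventional Quake-style vector macros (VectorAdd(a, b, out) is out = a + b, VectorScale(v, s, out) is out = v * s, componentwise); the EdgeVertex helper below is hypothetical, the real loop walks loadmodel->surfedges as shown above:

// Illustrative only: accumulate and average the surface's vertex positions.
vec3_t middle = {0.0f, 0.0f, 0.0f};
for (int i = 0; i < s->numedges; i++)
{
    mvertex_t *v = EdgeVertex(s, i);             // hypothetical accessor for the i-th vertex
    VectorAdd(middle, v->position, middle);      // middle += v->position
}
VectorScale(middle, 1.0f / s->numedges, middle); // middle /= numedges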
1 change: 1 addition & 0 deletions rehlds/engine/net.h
@@ -422,6 +422,7 @@ typedef struct netchan_s

// Incoming and outgoing flow metrics
flow_t flow[MAX_FLOWS];

} netchan_t;

#ifdef REHLDS_FIXES
219 changes: 172 additions & 47 deletions rehlds/engine/net_chan.cpp
@@ -40,7 +40,10 @@ cvar_t net_chokeloopback = { "net_chokeloop", "0", 0, 0.0f, nullptr};
cvar_t sv_net_incoming_decompression = { "sv_net_incoming_decompression", "1", 0, 1.0f, nullptr };
cvar_t sv_net_incoming_decompression_max_ratio = { "sv_net_incoming_decompression_max_ratio", "80.0", 0, 80.0f, nullptr };
cvar_t sv_net_incoming_decompression_max_size = { "sv_net_incoming_decompression_max_size", "65536", 0, 65536.0f, nullptr };
cvar_t sv_net_incoming_decompression_punish = { "sv_net_incoming_decompression_punish", "-1", 0, -1.0f, NULL };
cvar_t sv_net_incoming_decompression_min_failures = { "sv_net_incoming_decompression_min_failures", "4", 0, 4.0f, nullptr };
cvar_t sv_net_incoming_decompression_max_failures = { "sv_net_incoming_decompression_max_failures", "10", 0, 10.0f, nullptr };
cvar_t sv_net_incoming_decompression_min_failuretime = { "sv_net_incoming_decompression_min_failuretime", "0.1", 0, 0.1f, nullptr };
cvar_t sv_net_incoming_decompression_punish = { "sv_net_incoming_decompression_punish", "-1", 0, -1.0f, nullptr };

cvar_t sv_filetransfercompression = { "sv_filetransfercompression", "1", 0, 0.0f, nullptr};
cvar_t sv_filetransfermaxsize = { "sv_filetransfermaxsize", "10485760", 0, 0.0f, nullptr};
@@ -184,6 +187,7 @@ void Netchan_Clear(netchan_t *chan)
void Netchan_Setup(netsrc_t socketnumber, netchan_t *chan, netadr_t adr, int player_slot, void *connection_status, qboolean(*pfnNetchan_Blocksize)(void *))
{
Netchan_Clear(chan);
g_GameClients[player_slot]->NetchanClear();

Q_memset(chan, 0, sizeof(netchan_t));

@@ -1423,6 +1427,93 @@ void Netchan_FlushIncoming(netchan_t *chan, int stream)
chan->incomingready[stream] = FALSE;
}

void Netchan_DecompressionCvarsBounds()
{
if (sv_net_incoming_decompression_min_failures.value < 1)
Cvar_SetValue("sv_net_incoming_decompression_min_failures", 1);

else if (sv_net_incoming_decompression_min_failures.value > NET_DECOMPRESS_MAX_TIMES)
Cvar_SetValue("sv_net_incoming_decompression_min_failures", NET_DECOMPRESS_MAX_TIMES);

if (sv_net_incoming_decompression_max_failures.value < 1)
Cvar_SetValue("sv_net_incoming_decompression_max_failures", 1);

else if (sv_net_incoming_decompression_max_failures.value > NET_DECOMPRESS_MAX_TIMES)
Cvar_SetValue("sv_net_incoming_decompression_max_failures", NET_DECOMPRESS_MAX_TIMES);

if (sv_net_incoming_decompression_max_failures.value < sv_net_incoming_decompression_min_failures.value)
{
int iTemp = sv_net_incoming_decompression_max_failures.value;
Cvar_SetValue("sv_net_incoming_decompression_max_failures", sv_net_incoming_decompression_min_failures.value);
Cvar_SetValue("sv_net_incoming_decompression_min_failures", iTemp);
}

if (sv_net_incoming_decompression_min_failuretime.value <= 0.0f)
Cvar_SetValue("sv_net_incoming_decompression_min_failuretime", 0.1f);
}

// Check for an abnormal size ratio between compressed and uncompressed data
qboolean Netchan_ValidateDecompress(netchan_t *chan, int stream, unsigned int compressedSize, unsigned int uncompressedSize)
{
#ifdef REHLDS_FIXES
int i;

if (sv_net_incoming_decompression_max_ratio.value <= 0)
return TRUE; // validation is disabled

if (compressedSize >= uncompressedSize)
return TRUE;

float ratio = ((float)(uncompressedSize - compressedSize) / uncompressedSize) * 100.0f;
if (ratio < sv_net_incoming_decompression_max_ratio.value)
return TRUE; // no low entropy for uncompressed data

if ((chan->player_slot - 1) != host_client - g_psvs.clients)
return TRUE;

Netchan_DecompressionCvarsBounds();

FragStats_t &stats = g_GameClients[chan->player_slot - 1]->GetFragStats(stream);

// check if the client should be rejected based on total failed decompress
if (stats.num_decompress_failures >= sv_net_incoming_decompression_max_failures.value)
{
for (i = 0; i < sv_net_incoming_decompression_max_failures.value - 1; i++)
stats.decompress_failure_times[i] = stats.decompress_failure_times[i + 1];

stats.num_decompress_failures = sv_net_incoming_decompression_max_failures.value - 1;
}

stats.decompress_failure_times[stats.num_decompress_failures++] = realtime;

// check if the client should be rejected based on recent failed decompress
int recent_failures = 0;
for (i = 0; i < stats.num_decompress_failures; i++)
{
if ((realtime - stats.decompress_failure_times[i]) <= sv_net_incoming_decompression_min_failuretime.value)
recent_failures++;
}

if (recent_failures >= sv_net_incoming_decompression_min_failures.value)
{
if (chan->player_slot == 0)
Con_DPrintf("Incoming abnormal uncompressed size with ratio %.2f\n", ratio);
else
Con_DPrintf("%s:Incoming abnormal uncompressed size with ratio %.2f from %s\n", NET_AdrToString(chan->remote_address), ratio, host_client->name);

if (sv_net_incoming_decompression_punish.value >= 0)
{
Con_DPrintf("%s:Banned for malformed/abnormal bzip2 fragments from %s\n", NET_AdrToString(chan->remote_address), host_client->name);
Cbuf_AddText(va("addip %.1f %s\n", sv_net_incoming_decompression_punish.value, NET_BaseAdrToString(chan->remote_address)));
}

return FALSE;
}
#endif

return TRUE;
}
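Netchan_ValidateDecompress leans on per-stream failure bookkeeping held on the game client object. Its declaration is not part of this diff; inferred from the fields used above, it is roughly the following (illustrative only, the real definition lives elsewhere in the rehlds sources and may differ):

// Inferred sketch of the per-stream stats used above; field types assumed.
typedef struct FragStats_s
{
    int    num_decompress_failures;                            // failures currently tracked
    double decompress_failure_times[NET_DECOMPRESS_MAX_TIMES]; // realtime stamps of recent failures
} FragStats_t;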

qboolean Netchan_CopyNormalFragments(netchan_t *chan)
{
fragbuf_t *p, *n;
@@ -1483,8 +1574,6 @@ qboolean Netchan_CopyNormalFragments(netchan_t *chan)
}
#endif // REHLDS_FIXES

qboolean success = TRUE;

if (*(uint32 *)net_message.data == MAKEID('B', 'Z', '2', '\0'))
{
// Determine whether decompression of compressed data is allowed
@@ -1493,13 +1582,13 @@
{
if (chan->player_slot == 0)
{
Con_DPrintf("Incoming compressed data disallowed from\n");
Con_DPrintf("Incoming compressed normal fragment disallowed from\n");
return FALSE;
}
// compressed data is expected only after requesting resource list
else if (host_client->m_sendrescount == 0)
{
Con_DPrintf("%s:Incoming compressed data disallowed from %s\n", NET_AdrToString(chan->remote_address), host_client->name);
Con_DPrintf("%s:Incoming compressed normal fragment disallowed from %s\n", NET_AdrToString(chan->remote_address), host_client->name);
return FALSE;
}
}
@@ -1510,56 +1599,34 @@
unsigned int compressedSize = net_message.cursize - 4;

// Decompress net buffer data
if (success && (BZ2_bzBuffToBuffDecompress(uncompressed, &uncompressedSize, (char *)net_message.data + 4, compressedSize, 1, 0) == BZ_OK))
{
#ifdef REHLDS_FIXES
// Check for an abnormal size ratio between compressed and uncompressed data
if (sv_net_incoming_decompression_max_ratio.value > 0 && compressedSize < uncompressedSize)
{
float ratio = ((float)(uncompressedSize - compressedSize) / uncompressedSize) * 100.0f;
if (ratio >= sv_net_incoming_decompression_max_ratio.value)
{
if (chan->player_slot == 0)
Con_DPrintf("Incoming abnormal uncompressed size with ratio %.2f\n", ratio);
else
Con_DPrintf("%s:Incoming abnormal uncompressed size with ratio %.2f from %s\n", NET_AdrToString(chan->remote_address), ratio, host_client->name);

success = FALSE;
}
}
#endif
qboolean success = TRUE;

// Copy uncompressed data back to the net buffer
Q_memcpy(net_message.data, uncompressed, uncompressedSize);
net_message.cursize = uncompressedSize;
}
else
if (BZ2_bzBuffToBuffDecompress(uncompressed, &uncompressedSize, (char *)net_message.data + 4, compressedSize, 1, 0) != BZ_OK)
{
// malformed data or compressed data exceeding sv_net_incoming_decompression_max_size
success = FALSE;
}
else if (!Netchan_ValidateDecompress(chan, FRAG_NORMAL_STREAM, compressedSize, uncompressedSize))
{
success = FALSE;
}

// Drop client if decompression was unsuccessful
#ifdef REHLDS_FIXES
if (!success)
{
if ((chan->player_slot - 1) == host_client - g_psvs.clients)
{
#ifdef REHLDS_FIXES
if (sv_net_incoming_decompression_punish.value >= 0)
{
Con_DPrintf("%s:Banned for malformed/abnormal bzip2 fragments from %s\n", NET_AdrToString(chan->remote_address), host_client->name);
Cbuf_AddText(va("addip %.1f %s\n", sv_net_incoming_decompression_punish.value, NET_BaseAdrToString(chan->remote_address)));
}
#endif

SV_DropClient(host_client, FALSE, "Malformed/abnormal compressed data");
}

// Drop client if decompression was unsuccessful
SV_DropClient(host_client, FALSE, "Malformed/abnormal compressed data");
SZ_Clear(&net_message);
return FALSE;
}
#endif

// Copy uncompressed data back to the net buffer
Q_memcpy(net_message.data, uncompressed, uncompressedSize);
net_message.cursize = uncompressedSize;
}

return success;
return TRUE;
}

qboolean Netchan_CopyFileFragments(netchan_t *chan)
@@ -1575,7 +1642,6 @@ qboolean Netchan_CopyFileFragments(netchan_t *chan)
qboolean bCompressed;
unsigned int uncompressedSize;


if (!chan->incomingready[FRAG_FILE_STREAM])
return FALSE;

@@ -1587,6 +1653,19 @@
return FALSE;
}

#ifdef REHLDS_FIXES
if (chan->player_slot > 0 && (chan->player_slot - 1) == host_client - g_psvs.clients)
{
// customization already uploaded with request by operator,
// do not accept any other customization
if (host_client->uploaddoneregistering)
{
SV_DropClient(host_client, FALSE, "Too many customization have been uploaded (unrequested customization)");
return FALSE;
}
}
#endif

bCompressed = FALSE;
SZ_Clear(&net_message);
MSG_BeginReading();
@@ -1701,10 +1780,53 @@ qboolean Netchan_CopyFileFragments(netchan_t *chan)

if (bCompressed)
{
unsigned char* uncompressedBuffer = (unsigned char*)Mem_Malloc(uncompressedSize);
Con_DPrintf("Decompressing file %s (%d -> %d)\n", filename, nsize, uncompressedSize);
BZ2_bzBuffToBuffDecompress((char*)uncompressedBuffer, &uncompressedSize, (char*)buffer, nsize, 1, 0);
// Determine whether decompression of compressed data is allowed
#ifdef REHLDS_FIXES
if (!sv_net_incoming_decompression.value)
{
if (chan->player_slot == 0)
{
Con_DPrintf("Incoming compressed file fragment disallowed from\n");
return FALSE;
}
// compressed data is expected only after requesting resource list
else if (host_client->m_sendrescount == 0)
{
Con_DPrintf("%s:Incoming compressed file fragment disallowed from %s\n", NET_AdrToString(chan->remote_address), host_client->name);
return FALSE;
}
}
#endif

uncompressedSize = clamp(uncompressedSize, 16u, (unsigned)sv_net_incoming_decompression_max_size.value); // valid range (16 - 65536) bytes

qboolean success = TRUE;
unsigned char *uncompressedBuffer = (unsigned char *)Mem_Malloc(uncompressedSize);
unsigned int compressedSize = nsize;

// Decompress net buffer data
if (BZ2_bzBuffToBuffDecompress((char *)uncompressedBuffer, &uncompressedSize, (char *)buffer, compressedSize, 1, 0) != BZ_OK)
{
// malformed data or compressed data exceeding sv_net_incoming_decompression_max_size
success = FALSE;
}
else if (!Netchan_ValidateDecompress(chan, FRAG_FILE_STREAM, compressedSize, uncompressedSize))
{
success = FALSE;
}

Mem_Free(buffer);

if (!success)
{
// Drop client if decompression was unsuccessful
SV_DropClient(host_client, FALSE, "Malformed/abnormal compressed data");
SZ_Clear(&net_message);
Mem_Free(uncompressedBuffer);
return FALSE;
}

Con_DPrintf("Decompressing file %s (%d -> %d)\n", filename, compressedSize, uncompressedSize);
pos = uncompressedSize;
buffer = uncompressedBuffer;
}
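Both fragment paths use the same bzlib call with a capped destination buffer. A self-contained sketch of that pattern, with illustrative names (only the BZ2_bzBuffToBuffDecompress call and the error-handling shape come from the diff):

#include <bzlib.h>

// Decompress src into dst without letting the output exceed dstCap bytes.
// bzlib updates *destLen to the actual output size and returns BZ_OUTBUFF_FULL
// (not BZ_OK) if the payload would overflow the capped buffer.
static bool DecompressBounded(char *dst, unsigned int dstCap, char *src, unsigned int srcLen, unsigned int *outLen)
{
    unsigned int destLen = dstCap;
    if (BZ2_bzBuffToBuffDecompress(dst, &destLen, src, srcLen, 1 /* small (low-memory) mode */, 0 /* quiet */) != BZ_OK)
        return false; // malformed data or output larger than dstCap
    *outLen = destLen;
    return true;
}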
@@ -1899,6 +2021,9 @@ void Netchan_Init(void)
Cvar_RegisterVariable(&sv_net_incoming_decompression);
Cvar_RegisterVariable(&sv_net_incoming_decompression_max_ratio);
Cvar_RegisterVariable(&sv_net_incoming_decompression_max_size);
Cvar_RegisterVariable(&sv_net_incoming_decompression_min_failures);
Cvar_RegisterVariable(&sv_net_incoming_decompression_max_failures);
Cvar_RegisterVariable(&sv_net_incoming_decompression_min_failuretime);
Cvar_RegisterVariable(&sv_net_incoming_decompression_punish);
#endif
Cvar_RegisterVariable(&sv_filetransfermaxsize);
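Taken together, sv_net_incoming_decompression_punish yields three outcomes once a client trips the failure thresholds. A compact restatement of the policy that is spread across the two fragment handlers (the helper shape is illustrative; the addip and drop calls mirror the diff):

// punish  > 0 : temporary IP ban for that many minutes (addip)
// punish == 0 : permanent IP ban (addip 0)
// punish  < 0 : no ban, the offender is only dropped
static void PunishDecompressionAbuse(netchan_t *chan, float punishMinutes)
{
    if (punishMinutes >= 0.0f)
        Cbuf_AddText(va("addip %.1f %s\n", punishMinutes, NET_BaseAdrToString(chan->remote_address)));

    SV_DropClient(host_client, FALSE, "Malformed/abnormal compressed data");
}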
