From ffee8754a0d408fe6d2e7ef043b97ab0a4299bfe Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 12 Feb 2024 18:40:19 +0000 Subject: [PATCH 01/60] Remove PAL_exit as the only usages are in places where we don't use PAL features that would need global cleanup before process exit. --- src/coreclr/pal/inc/pal.h | 1 - src/coreclr/pal/src/CMakeLists.txt | 1 - src/coreclr/pal/src/cruntime/thread.cpp | 38 ------------------- src/coreclr/pal/src/include/pal/palinternal.h | 1 + .../palsuite/c_runtime/exit/test1/test1.cpp | 36 ------------------ .../palsuite/c_runtime/exit/test2/test2.cpp | 37 ------------------ .../pal/tests/palsuite/compilableTests.txt | 2 - .../tests/palsuite/manual-unautomatable.dat | 3 -- .../pal/tests/palsuite/paltestlist.txt | 1 - .../palsuite/paltestlist_to_be_reviewed.txt | 1 - .../pal/tests/palsuite/tests-manual.dat | 1 - 11 files changed, 1 insertion(+), 121 deletions(-) delete mode 100644 src/coreclr/pal/src/cruntime/thread.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/exit/test1/test1.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/exit/test2/test2.cpp diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index 4a0a341b2272e..959ae1cebcac9 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -3980,7 +3980,6 @@ PAL_GetCurrentThreadAffinitySet(SIZE_T size, UINT_PTR* data); To avoid name collisions, those functions have been renamed using defines */ #ifndef PAL_STDCPP_COMPAT -#define exit PAL_exit #define realloc PAL_realloc #define rand PAL_rand #define time PAL_time diff --git a/src/coreclr/pal/src/CMakeLists.txt b/src/coreclr/pal/src/CMakeLists.txt index 745162987a42e..a4086fff86277 100644 --- a/src/coreclr/pal/src/CMakeLists.txt +++ b/src/coreclr/pal/src/CMakeLists.txt @@ -131,7 +131,6 @@ endif() set(SOURCES cruntime/malloc.cpp cruntime/misc.cpp - cruntime/thread.cpp cruntime/wchar.cpp debug/debug.cpp exception/seh.cpp diff --git a/src/coreclr/pal/src/cruntime/thread.cpp b/src/coreclr/pal/src/cruntime/thread.cpp deleted file mode 100644 index 883c5d1b00190..0000000000000 --- a/src/coreclr/pal/src/cruntime/thread.cpp +++ /dev/null @@ -1,38 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*++ - - - -Module Name: - - thread.c - -Abstract: - - Implementation of the threads/process functions in the C runtime library - that are Windows specific. 
- - - ---*/ - -#include "pal/palinternal.h" -#include "pal/dbgmsg.h" -#include "pal/init.h" - -SET_DEFAULT_DEBUG_CHANNEL(CRT); - -void -PAL_exit(int status) -{ - PERF_ENTRY(exit); - ENTRY ("exit(status=%d)\n", status); - - /* should also clean up any resources allocated by pal/cruntime, if any */ - ExitProcess(status); - - LOGEXIT ("exit returns void"); - PERF_EXIT(exit); -} diff --git a/src/coreclr/pal/src/include/pal/palinternal.h b/src/coreclr/pal/src/include/pal/palinternal.h index 041118d391651..90952b9abd479 100644 --- a/src/coreclr/pal/src/include/pal/palinternal.h +++ b/src/coreclr/pal/src/include/pal/palinternal.h @@ -208,6 +208,7 @@ function_name() to call the system's implementation #define size_t DUMMY_size_t #define time_t PAL_time_t #define va_list DUMMY_va_list +#define exit DUMMY_exit #define abs DUMMY_abs #define llabs DUMMY_llabs #define ceil DUMMY_ceil diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/exit/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/exit/test1/test1.cpp deleted file mode 100644 index 2bb42e3563c42..0000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/exit/test1/test1.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test1.c -** -** Purpose: Calls exit, and verifies that it actually stops program execution. -** -** -**==========================================================================*/ - -#include - -PALTEST(c_runtime_exit_test1_paltest_exit_test1, "c_runtime/exit/test1/paltest_exit_test1") -{ - /* - * Initialize the PAL and return FAIL if this fails - */ - if (0 != (PAL_Initialize(argc, argv))) - { - return FAIL; - } - - /*should return 0*/ - exit(0); - - Fail ("Exit didn't actually stop execution.\n"); - - return FAIL; -} - - - - - diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/exit/test2/test2.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/exit/test2/test2.cpp deleted file mode 100644 index 6125b3c38899d..0000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/exit/test2/test2.cpp +++ /dev/null @@ -1,37 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test2.c -** -** Purpose: Calls exit on fail, and verifies that it actually -** stops program execution and return 1. 
- -** -**==========================================================================*/ - -#include - -PALTEST(c_runtime_exit_test2_paltest_exit_test2, "c_runtime/exit/test2/paltest_exit_test2") -{ - /* - * Initialize the PAL and return FAIL if this fails - */ - if (0 != (PAL_Initialize(argc, argv))) - { - return FAIL; - } - - /*should return 1*/ - exit(1); - -} - - - - - - - - diff --git a/src/coreclr/pal/tests/palsuite/compilableTests.txt b/src/coreclr/pal/tests/palsuite/compilableTests.txt index 9a26d82106530..a6419e8fbd2a9 100644 --- a/src/coreclr/pal/tests/palsuite/compilableTests.txt +++ b/src/coreclr/pal/tests/palsuite/compilableTests.txt @@ -12,8 +12,6 @@ c_runtime/cosh/test1/paltest_cosh_test1 c_runtime/coshf/test1/paltest_coshf_test1 c_runtime/errno/test1/paltest_errno_test1 c_runtime/errno/test2/paltest_errno_test2 -c_runtime/exit/test1/paltest_exit_test1 -c_runtime/exit/test2/paltest_exit_test2 c_runtime/free/test1/paltest_free_test1 c_runtime/isalnum/test1/paltest_isalnum_test1 c_runtime/isalpha/test1/paltest_isalpha_test1 diff --git a/src/coreclr/pal/tests/palsuite/manual-unautomatable.dat b/src/coreclr/pal/tests/palsuite/manual-unautomatable.dat index c7a2a3913e7b2..78e1831fbc6a4 100644 --- a/src/coreclr/pal/tests/palsuite/manual-unautomatable.dat +++ b/src/coreclr/pal/tests/palsuite/manual-unautomatable.dat @@ -1,9 +1,6 @@ # Licensed to the .NET Foundation under one or more agreements. # The .NET Foundation licenses this file to you under the MIT license. -#This test is negative and will exit with exit(1). -#Therefore, the harness would record it as a failure -c_runtime/exit/test2,1 # A successful DebugBreak test run dumps core or throws up an ASSERT # dialog box (or...) and returns an exit code != 0 debug_api/debugbreak/test1,1 diff --git a/src/coreclr/pal/tests/palsuite/paltestlist.txt b/src/coreclr/pal/tests/palsuite/paltestlist.txt index 878f00a2adf85..444d110cdaf9e 100644 --- a/src/coreclr/pal/tests/palsuite/paltestlist.txt +++ b/src/coreclr/pal/tests/palsuite/paltestlist.txt @@ -4,7 +4,6 @@ c_runtime/bsearch/test1/paltest_bsearch_test1 c_runtime/bsearch/test2/paltest_bsearch_test2 c_runtime/errno/test1/paltest_errno_test1 c_runtime/errno/test2/paltest_errno_test2 -c_runtime/exit/test1/paltest_exit_test1 c_runtime/free/test1/paltest_free_test1 c_runtime/isalnum/test1/paltest_isalnum_test1 c_runtime/isalpha/test1/paltest_isalpha_test1 diff --git a/src/coreclr/pal/tests/palsuite/paltestlist_to_be_reviewed.txt b/src/coreclr/pal/tests/palsuite/paltestlist_to_be_reviewed.txt index 2dbbd64e9f2b1..bc5589f4323c1 100644 --- a/src/coreclr/pal/tests/palsuite/paltestlist_to_be_reviewed.txt +++ b/src/coreclr/pal/tests/palsuite/paltestlist_to_be_reviewed.txt @@ -1,7 +1,6 @@ This is a list of failing PAL tests that need to be reviewed because. They should either be fixed or deleted if they are no longer applicable. -c_runtime/exit/test2/paltest_exit_test2 c_runtime/ferror/test1/paltest_ferror_test1 c_runtime/ferror/test2/paltest_ferror_test2 c_runtime/fputs/test2/paltest_fputs_test2 diff --git a/src/coreclr/pal/tests/palsuite/tests-manual.dat b/src/coreclr/pal/tests/palsuite/tests-manual.dat index b87a39486af1b..3f32f49df043c 100644 --- a/src/coreclr/pal/tests/palsuite/tests-manual.dat +++ b/src/coreclr/pal/tests/palsuite/tests-manual.dat @@ -1,7 +1,6 @@ # Licensed to the .NET Foundation under one or more agreements. # The .NET Foundation licenses this file to you under the MIT license. 
-c_runtime/exit/test2,1 pal_specific/pal_get_stderr/test1,1 pal_specific/pal_get_stdin/test1,1 pal_specific/pal_get_stdout/test1,1 From 04ea4b48919aa8d38fbd8c7a14c70a3c85646c28 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 12 Feb 2024 18:44:08 +0000 Subject: [PATCH 02/60] Remove rand PAL API as no one uses it. --- src/coreclr/pal/inc/pal.h | 7 -- src/coreclr/pal/src/cruntime/misc.cpp | 25 ----- src/coreclr/pal/src/include/pal/palinternal.h | 1 + src/coreclr/pal/tests/palsuite/CMakeLists.txt | 1 - .../c_runtime/rand_srand/test1/test1.cpp | 99 ------------------- .../pal/tests/palsuite/compilableTests.txt | 1 - .../pal/tests/palsuite/paltestlist.txt | 1 - 7 files changed, 1 insertion(+), 134 deletions(-) delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/rand_srand/test1/test1.cpp diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index 959ae1cebcac9..5c6b3037444f9 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -3981,7 +3981,6 @@ PAL_GetCurrentThreadAffinitySet(SIZE_T size, UINT_PTR* data); defines */ #ifndef PAL_STDCPP_COMPAT #define realloc PAL_realloc -#define rand PAL_rand #define time PAL_time #define getenv PAL_getenv #define qsort PAL_qsort @@ -4367,12 +4366,6 @@ PALIMPORT time_t __cdecl time(time_t *); #endif // !PAL_STDCPP_COMPAT -/* Maximum value that can be returned by the rand function. */ - -#ifndef PAL_STDCPP_COMPAT -#define RAND_MAX 0x7fff -#endif // !PAL_STDCPP_COMPAT - PALIMPORT int __cdecl rand(void); PALIMPORT void __cdecl srand(unsigned int); diff --git a/src/coreclr/pal/src/cruntime/misc.cpp b/src/coreclr/pal/src/cruntime/misc.cpp index d079cd0abc4d6..a50479bc37258 100644 --- a/src/coreclr/pal/src/cruntime/misc.cpp +++ b/src/coreclr/pal/src/cruntime/misc.cpp @@ -133,31 +133,6 @@ extern "C" PALIMPORT DLLEXPORT FILE* __cdecl PAL_stderr() return stderr; } -/*++ -Function: - - rand - - The RAND_MAX value can vary by platform. - -See MSDN for more details. ---*/ -int -__cdecl -PAL_rand(void) -{ - int ret; - PERF_ENTRY(rand); - ENTRY("rand(void)\n"); - - ret = (rand() % (PAL_RAND_MAX + 1)); - - LOGEXIT("rand() returning %d\n", ret); - PERF_EXIT(rand); - return ret; -} - - /*++ Function: diff --git a/src/coreclr/pal/src/include/pal/palinternal.h b/src/coreclr/pal/src/include/pal/palinternal.h index 90952b9abd479..3ba5058db9447 100644 --- a/src/coreclr/pal/src/include/pal/palinternal.h +++ b/src/coreclr/pal/src/include/pal/palinternal.h @@ -275,6 +275,7 @@ function_name() to call the system's implementation #define fseek DUMMY_fseek #define fgetpos DUMMY_fgetpos #define fsetpos DUMMY_fsetpos +#define rand DUMMY_rand /* RAND_MAX needed to be renamed to avoid duplicate definition when including stdlib.h header files. 
PAL_RAND_MAX should have the same value as RAND_MAX diff --git a/src/coreclr/pal/tests/palsuite/CMakeLists.txt b/src/coreclr/pal/tests/palsuite/CMakeLists.txt index 4b9960f910c5c..cb0e6ba848cd6 100644 --- a/src/coreclr/pal/tests/palsuite/CMakeLists.txt +++ b/src/coreclr/pal/tests/palsuite/CMakeLists.txt @@ -88,7 +88,6 @@ add_executable_clr(paltests c_runtime/memset/test1/test1.cpp c_runtime/qsort/test1/test1.cpp c_runtime/qsort/test2/test2.cpp - c_runtime/rand_srand/test1/test1.cpp c_runtime/realloc/test1/test1.cpp c_runtime/sscanf_s/test1/test1.cpp c_runtime/sscanf_s/test10/test10.cpp diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/rand_srand/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/rand_srand/test1/test1.cpp deleted file mode 100644 index cd752c39f5396..0000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/rand_srand/test1/test1.cpp +++ /dev/null @@ -1,99 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================= -** -** Source: test1.c -** -** Purpose: Test to ensure that srand provide random -** number to rand. Also make sure that rand result from a -** srand with seed 1 and no call to srand are the same. -** -** Dependencies: PAL_Initialize -** PAL_Terminate -** Fail -** srand() -** - -** -**===========================================================================*/ - -#include - - -PALTEST(c_runtime_rand_srand_test1_paltest_rand_srand_test1, "c_runtime/rand_srand/test1/paltest_rand_srand_test1") -{ - int RandNumber[10]; - int TempRandNumber; - int i; - int SRAND_SEED; - int SRAND_REINIT = 1; - - /* - * Initialize the PAL and return FAILURE if this fails - */ - - if (PAL_Initialize(argc, argv)) - { - return FAIL; - } - - SRAND_SEED = time(NULL); - - /* does not initialize srand and call rand. */ - for (i=0; i<10; i++) - { - /* keep the value in an array */ - RandNumber[i]=rand(); - if (RandNumber[i] < 0 || RandNumber[i] > RAND_MAX) - { - Fail("1) ERROR: random generated an invalid value: %d", RandNumber[i]); - } - } - - - /* initialize random generator */ - srand(SRAND_SEED); - - - /* choose 10 numbers with a different seed. - the numbers should be different than - those the previously generated one */ - for(i = 0; i < 10; i++) - { - TempRandNumber=rand(); - if (TempRandNumber < 0 || TempRandNumber > RAND_MAX) - { - Fail("2) ERROR: random generated an invalid value: %d", TempRandNumber); - } - } - - - - /* renitialize the srand with 1 */ - srand(SRAND_REINIT); - - - - /* choose 10 numbers with seed 1, - the number should be the same as those we kept in the array. 
*/ - for( i = 0; i < 10;i++ ) - { - /* pick the random number*/ - TempRandNumber=rand(); - /* test if it is the same number generated in the first sequences*/ - if(RandNumber[i]!=TempRandNumber) - { - Fail ("ERROR: rand should return the same value when srand " - "is initialized with 1 or not initialized at all"); - } - if (TempRandNumber < 0 || TempRandNumber > RAND_MAX) - { - Fail("3) ERROR: random generated an invalid value: %d", TempRandNumber); - } - } - - - PAL_Terminate(); - return PASS; -} diff --git a/src/coreclr/pal/tests/palsuite/compilableTests.txt b/src/coreclr/pal/tests/palsuite/compilableTests.txt index a6419e8fbd2a9..c50f24e59b679 100644 --- a/src/coreclr/pal/tests/palsuite/compilableTests.txt +++ b/src/coreclr/pal/tests/palsuite/compilableTests.txt @@ -32,7 +32,6 @@ c_runtime/memmove/test1/paltest_memmove_test1 c_runtime/memset/test1/paltest_memset_test1 c_runtime/qsort/test1/paltest_qsort_test1 c_runtime/qsort/test2/paltest_qsort_test2 -c_runtime/rand_srand/test1/paltest_rand_srand_test1 c_runtime/realloc/test1/paltest_realloc_test1 c_runtime/sscanf_s/test1/paltest_sscanf_test1 c_runtime/sscanf_s/test10/paltest_sscanf_test10 diff --git a/src/coreclr/pal/tests/palsuite/paltestlist.txt b/src/coreclr/pal/tests/palsuite/paltestlist.txt index 444d110cdaf9e..6b7f196d06b12 100644 --- a/src/coreclr/pal/tests/palsuite/paltestlist.txt +++ b/src/coreclr/pal/tests/palsuite/paltestlist.txt @@ -24,7 +24,6 @@ c_runtime/memmove/test1/paltest_memmove_test1 c_runtime/memset/test1/paltest_memset_test1 c_runtime/qsort/test1/paltest_qsort_test1 c_runtime/qsort/test2/paltest_qsort_test2 -c_runtime/rand_srand/test1/paltest_rand_srand_test1 c_runtime/realloc/test1/paltest_realloc_test1 c_runtime/sscanf_s/test1/paltest_sscanf_test1 c_runtime/sscanf_s/test10/paltest_sscanf_test10 From 2527c50f585632652665eda3175503c689e2c93c Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 12 Feb 2024 18:49:40 +0000 Subject: [PATCH 03/60] Remove PAL_realloc as no usage of it would ever pass an alloc size of 0. 
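
For context, the only behavior this wrapper added over plain realloc (besides the ENTRY/PERF tracing) was well-defined handling of a zero-byte request — roughly the following sketch, where the helper name is purely illustrative:

    #include <stdlib.h>

    // Sketch of what PAL_realloc/InternalRealloc did beyond plain realloc:
    // a zero size is treated as an explicit free and returns NULL.
    static void* ReallocTreatingZeroAsFree(void* block, size_t size)
    {
        if (size == 0)
        {
            // realloc(p, 0) behavior is implementation-defined, so free explicitly.
            if (block != NULL)
                free(block);
            return NULL;
        }
        return realloc(block, size);
    }

No remaining caller passes a zero size, so calling realloc directly (as in the handle-manager change below) is equivalent.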
--- src/coreclr/pal/inc/pal.h | 1 - src/coreclr/pal/src/cruntime/malloc.cpp | 40 ------------ src/coreclr/pal/src/handlemgr/handlemgr.cpp | 2 +- src/coreclr/pal/src/include/pal/malloc.hpp | 7 -- src/coreclr/pal/src/include/pal/palinternal.h | 1 + .../c_runtime/realloc/test1/test1.cpp | 65 ------------------- src/coreclr/scripts/genDummyProvider.py | 1 - src/coreclr/scripts/genLttngProvider.py | 1 - 8 files changed, 2 insertions(+), 116 deletions(-) delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/realloc/test1/test1.cpp diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index 5c6b3037444f9..3115562fc1b15 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -3980,7 +3980,6 @@ PAL_GetCurrentThreadAffinitySet(SIZE_T size, UINT_PTR* data); To avoid name collisions, those functions have been renamed using defines */ #ifndef PAL_STDCPP_COMPAT -#define realloc PAL_realloc #define time PAL_time #define getenv PAL_getenv #define qsort PAL_qsort diff --git a/src/coreclr/pal/src/cruntime/malloc.cpp b/src/coreclr/pal/src/cruntime/malloc.cpp index c4b3797e0b30a..bbbfa3130edab 100644 --- a/src/coreclr/pal/src/cruntime/malloc.cpp +++ b/src/coreclr/pal/src/cruntime/malloc.cpp @@ -30,46 +30,6 @@ SET_DEFAULT_DEBUG_CHANNEL(CRT); using namespace CorUnix; -void * -__cdecl -PAL_realloc( - void* pvMemblock, - size_t szSize - ) -{ - return InternalRealloc(pvMemblock, szSize); -} - -void * -CorUnix::InternalRealloc( - void* pvMemblock, - size_t szSize - ) -{ - void *pvMem; - - PERF_ENTRY(InternalRealloc); - ENTRY("realloc (memblock:%p size=%d)\n", pvMemblock, szSize); - - if (szSize == 0) - { - // If pvMemblock is NULL, there's no reason to call free. - if (pvMemblock != NULL) - { - free(pvMemblock); - } - pvMem = NULL; - } - else - { - pvMem = realloc(pvMemblock, szSize); - } - - LOGEXIT("realloc returns void * %p\n", pvMem); - PERF_EXIT(InternalRealloc); - return pvMem; -} - void __cdecl PAL_free( diff --git a/src/coreclr/pal/src/handlemgr/handlemgr.cpp b/src/coreclr/pal/src/handlemgr/handlemgr.cpp index 5dc198c7f5a3a..df4841ad472db 100644 --- a/src/coreclr/pal/src/handlemgr/handlemgr.cpp +++ b/src/coreclr/pal/src/handlemgr/handlemgr.cpp @@ -108,7 +108,7 @@ CSimpleHandleManager::AllocateHandle( } /* grow handle table */ - rghteTempTable = reinterpret_cast(InternalRealloc( + rghteTempTable = reinterpret_cast(realloc( m_rghteHandleTable, (m_dwTableSize + m_dwTableGrowthRate) * sizeof(HANDLE_TABLE_ENTRY))); diff --git a/src/coreclr/pal/src/include/pal/malloc.hpp b/src/coreclr/pal/src/include/pal/malloc.hpp index 4e7b96da0e228..6986368cd87d8 100644 --- a/src/coreclr/pal/src/include/pal/malloc.hpp +++ b/src/coreclr/pal/src/include/pal/malloc.hpp @@ -49,13 +49,6 @@ extern "C" } namespace CorUnix{ - - void * - InternalRealloc( - void *pvMemblock, - size_t szSize - ); - void * InternalMalloc( size_t szSize diff --git a/src/coreclr/pal/src/include/pal/palinternal.h b/src/coreclr/pal/src/include/pal/palinternal.h index 3ba5058db9447..68ef0d662718a 100644 --- a/src/coreclr/pal/src/include/pal/palinternal.h +++ b/src/coreclr/pal/src/include/pal/palinternal.h @@ -276,6 +276,7 @@ function_name() to call the system's implementation #define fgetpos DUMMY_fgetpos #define fsetpos DUMMY_fsetpos #define rand DUMMY_rand +#define realloc DUMMY_realloc /* RAND_MAX needed to be renamed to avoid duplicate definition when including stdlib.h header files. 
PAL_RAND_MAX should have the same value as RAND_MAX diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/realloc/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/realloc/test1/test1.cpp deleted file mode 100644 index edd075da23f70..0000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/realloc/test1/test1.cpp +++ /dev/null @@ -1,65 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test1.c -** -** Purpose: Uses realloc to allocate and realloate memory, checking -** that memory contents are copied when the memory is reallocated. -** -** -**==========================================================================*/ - -#include - -PALTEST(c_runtime_realloc_test1_paltest_realloc_test1, "c_runtime/realloc/test1/paltest_realloc_test1") -{ - char *testA; - const int len1 = 10; - const char str1[] = "aaaaaaaaaa"; - - const int len2 = 20; - const char str2[] = "bbbbbbbbbbbbbbbbbbbb"; - - if (PAL_Initialize(argc, argv)) - { - return FAIL; - } - - /* this should work like malloc */ - testA = (char *)realloc(NULL, len1*sizeof(char)); - memcpy(testA, str1, len1); - if (testA == NULL) - { - Fail("We ran out of memory (unlikely), or realloc is broken.\n"); - } - - if (memcmp(testA, str1, len1) != 0) - { - Fail("realloc doesn't properly allocate new memory.\n"); - } - - testA = (char *)realloc(testA, len2*sizeof(char)); - if (memcmp(testA, str1, len1) != 0) - { - Fail("realloc doesn't move the contents of the original memory " - "block to the newly allocated block.\n"); - } - - memcpy(testA, str2, len2); - if (memcmp(testA, str2, len2) != 0) - { - Fail("Couldn't write to memory allocated by realloc.\n"); - } - - /* free the buffer */ - testA = (char*)realloc(testA, 0); - if (testA != NULL) - { - Fail("Realloc didn't return NULL when called with a length " - "of zero.\n"); - } - PAL_Terminate(); - return PASS; -} diff --git a/src/coreclr/scripts/genDummyProvider.py b/src/coreclr/scripts/genDummyProvider.py index ccf421421ca0b..4ccb4d5cd71f8 100644 --- a/src/coreclr/scripts/genDummyProvider.py +++ b/src/coreclr/scripts/genDummyProvider.py @@ -125,7 +125,6 @@ def generateDummyFiles(etwmanifest, out_dirname, runtimeFlavor, extern, dryRun): #include "pal_error.h" #include "pal.h" #define PAL_free free -#define PAL_realloc realloc #include "pal/stackstring.hpp" #endif diff --git a/src/coreclr/scripts/genLttngProvider.py b/src/coreclr/scripts/genLttngProvider.py index 70affdbbfe01a..2871cfe19a9b3 100644 --- a/src/coreclr/scripts/genLttngProvider.py +++ b/src/coreclr/scripts/genLttngProvider.py @@ -581,7 +581,6 @@ def generateLttngFiles(etwmanifest, eventprovider_directory, runtimeFlavor, dryR #include "pal_error.h" #include "pal.h" #define PAL_free free -#define PAL_realloc realloc #include "pal/stackstring.hpp" """) lttngimpl_file.write("#include \"" + lttngevntheadershortname + "\"\n\n") From 610d49f845528fd28375613db765d3ae9e62063d Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 12 Feb 2024 18:52:26 +0000 Subject: [PATCH 04/60] Remove PAL_free as it just forwards to free. 
--- src/coreclr/pal/inc/pal.h | 1 - src/coreclr/pal/src/cruntime/malloc.cpp | 9 ------ src/coreclr/pal/src/cruntime/wchar.cpp | 10 +++--- src/coreclr/pal/src/file/directory.cpp | 6 ++-- src/coreclr/pal/src/include/pal/malloc.hpp | 2 +- src/coreclr/pal/src/include/pal/palinternal.h | 1 + .../pal/src/include/pal/stackstring.hpp | 2 +- src/coreclr/pal/src/include/pal/utils.h | 2 +- src/coreclr/pal/src/misc/cgroup.cpp | 28 ++++++++-------- src/coreclr/pal/src/misc/environ.cpp | 12 +++---- src/coreclr/pal/src/misc/perftrace.cpp | 32 +++++++++---------- src/coreclr/pal/src/misc/utils.cpp | 4 +-- src/coreclr/pal/src/safecrt/input.inl | 2 +- src/coreclr/pal/src/thread/thread.cpp | 2 +- src/coreclr/scripts/genDummyProvider.py | 1 - src/coreclr/scripts/genLttngProvider.py | 1 - 16 files changed, 52 insertions(+), 63 deletions(-) diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index 3115562fc1b15..850b286f5c66c 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -3985,7 +3985,6 @@ PAL_GetCurrentThreadAffinitySet(SIZE_T size, UINT_PTR* data); #define qsort PAL_qsort #define bsearch PAL_bsearch #define malloc PAL_malloc -#define free PAL_free #ifdef HOST_AMD64 #define _mm_getcsr PAL__mm_getcsr diff --git a/src/coreclr/pal/src/cruntime/malloc.cpp b/src/coreclr/pal/src/cruntime/malloc.cpp index bbbfa3130edab..366ea131756c8 100644 --- a/src/coreclr/pal/src/cruntime/malloc.cpp +++ b/src/coreclr/pal/src/cruntime/malloc.cpp @@ -30,15 +30,6 @@ SET_DEFAULT_DEBUG_CHANNEL(CRT); using namespace CorUnix; -void -__cdecl -PAL_free( - void *pvMem - ) -{ - free(pvMem); -} - void * __cdecl PAL_malloc( diff --git a/src/coreclr/pal/src/cruntime/wchar.cpp b/src/coreclr/pal/src/cruntime/wchar.cpp index d5704ef0ca9d8..16845ff2fc66a 100644 --- a/src/coreclr/pal/src/cruntime/wchar.cpp +++ b/src/coreclr/pal/src/cruntime/wchar.cpp @@ -67,12 +67,12 @@ _wtoi( { ASSERT("WideCharToMultiByte failed. 
Error is %d\n", GetLastError()); - PAL_free(tempStr); + free(tempStr); return -1; } ret = atoi(tempStr); - PAL_free(tempStr); + free(tempStr); LOGEXIT("_wtoi returns int %d\n", ret); PERF_EXIT(_wtoi); return ret; @@ -310,7 +310,7 @@ PAL_wcstoul( } PAL_wcstoulExit: - PAL_free(s_nptr); + free(s_nptr); LOGEXIT("wcstoul returning unsigned long %lu\n", res); PERF_EXIT(wcstoul); @@ -381,7 +381,7 @@ PAL__wcstoui64( } PAL__wcstoui64Exit: - PAL_free(s_nptr); + free(s_nptr); LOGEXIT("_wcstoui64 returning unsigned long long %llu\n", res); PERF_EXIT(_wcstoui64); @@ -939,7 +939,7 @@ PAL_wcstod( const wchar_16 * nptr, wchar_16 **endptr ) *endptr = lpEndOfExpression; } - PAL_free( lpStringRep ); + free( lpStringRep ); LOGEXIT( "wcstod returning %f.\n", RetVal ); PERF_EXIT(wcstod); return RetVal; diff --git a/src/coreclr/pal/src/file/directory.cpp b/src/coreclr/pal/src/file/directory.cpp index 4cd0600e5cf60..ab07080637894 100644 --- a/src/coreclr/pal/src/file/directory.cpp +++ b/src/coreclr/pal/src/file/directory.cpp @@ -93,7 +93,7 @@ CreateDirectoryW( } if (mb_dir != NULL) { - PAL_free(mb_dir); + free(mb_dir); } LOGEXIT("CreateDirectoryW returns BOOL %d\n", bRet); PERF_EXIT(CreateDirectoryW); @@ -280,7 +280,7 @@ GetCurrentDirectoryA(PathCharString& lpBuffer) dwDirLen = strlen( current_dir ); lpBuffer.Set(current_dir, dwDirLen); - PAL_free(current_dir); + free(current_dir); done: if ( dwLastError ) @@ -486,7 +486,7 @@ CreateDirectoryA( { SetLastError( dwLastError ); } - PAL_free( unixPathName ); + free( unixPathName ); LOGEXIT("CreateDirectoryA returns BOOL %d\n", bRet); PERF_EXIT(CreateDirectoryA); return bRet; diff --git a/src/coreclr/pal/src/include/pal/malloc.hpp b/src/coreclr/pal/src/include/pal/malloc.hpp index 6986368cd87d8..4ede96bd387c1 100644 --- a/src/coreclr/pal/src/include/pal/malloc.hpp +++ b/src/coreclr/pal/src/include/pal/malloc.hpp @@ -43,7 +43,7 @@ extern "C" void __cdecl - PAL_free( + free( void *pvMem ); } diff --git a/src/coreclr/pal/src/include/pal/palinternal.h b/src/coreclr/pal/src/include/pal/palinternal.h index 68ef0d662718a..17b48e0f3a79a 100644 --- a/src/coreclr/pal/src/include/pal/palinternal.h +++ b/src/coreclr/pal/src/include/pal/palinternal.h @@ -277,6 +277,7 @@ function_name() to call the system's implementation #define fsetpos DUMMY_fsetpos #define rand DUMMY_rand #define realloc DUMMY_realloc +#define free DUMMY_free /* RAND_MAX needed to be renamed to avoid duplicate definition when including stdlib.h header files. 
PAL_RAND_MAX should have the same value as RAND_MAX diff --git a/src/coreclr/pal/src/include/pal/stackstring.hpp b/src/coreclr/pal/src/include/pal/stackstring.hpp index 4a27a15579c7d..0eefe9f89138e 100644 --- a/src/coreclr/pal/src/include/pal/stackstring.hpp +++ b/src/coreclr/pal/src/include/pal/stackstring.hpp @@ -21,7 +21,7 @@ class StackString void DeleteBuffer() { if (m_innerBuffer != m_buffer) - PAL_free(m_buffer); + free(m_buffer); m_buffer = NULL; return; diff --git a/src/coreclr/pal/src/include/pal/utils.h b/src/coreclr/pal/src/include/pal/utils.h index fdd5b3b965a16..980cdf56ab6c6 100644 --- a/src/coreclr/pal/src/include/pal/utils.h +++ b/src/coreclr/pal/src/include/pal/utils.h @@ -194,7 +194,7 @@ class StringHolder StringHolder() : data(NULL) { } ~StringHolder() { - PAL_free( data); + free( data); } operator LPSTR () { return data;} diff --git a/src/coreclr/pal/src/misc/cgroup.cpp b/src/coreclr/pal/src/misc/cgroup.cpp index ee3c0ae584392..f46f383c54e4a 100644 --- a/src/coreclr/pal/src/misc/cgroup.cpp +++ b/src/coreclr/pal/src/misc/cgroup.cpp @@ -55,7 +55,7 @@ class CGroup static void Cleanup() { - PAL_free(s_cpu_cgroup_path); + free(s_cpu_cgroup_path); } static bool GetCpuLimit(UINT *val) @@ -157,8 +157,8 @@ class CGroup strcat_s(cgroup_path, len+1, cgroup_path_relative_to_mount + common_path_prefix_len); done: - PAL_free(hierarchy_root); - PAL_free(cgroup_path_relative_to_mount); + free(hierarchy_root); + free(cgroup_path_relative_to_mount); *pcgroup_path = cgroup_path; if (pcgroup_hierarchy_mount != nullptr) { @@ -166,7 +166,7 @@ class CGroup } else { - PAL_free(hierarchy_mount); + free(hierarchy_mount); } } @@ -187,9 +187,9 @@ class CGroup { if (filesystemType == nullptr || lineLen > maxLineLen) { - PAL_free(filesystemType); + free(filesystemType); filesystemType = nullptr; - PAL_free(options); + free(options); options = nullptr; filesystemType = (char*)PAL_malloc(lineLen+1); if (filesystemType == nullptr) @@ -249,10 +249,10 @@ class CGroup } } done: - PAL_free(mountpath); - PAL_free(mountroot); - PAL_free(filesystemType); - PAL_free(options); + free(mountpath); + free(mountroot); + free(filesystemType); + free(options); free(line); if (mountinfofile) fclose(mountinfofile); @@ -275,9 +275,9 @@ class CGroup { if (subsystem_list == nullptr || lineLen > maxLineLen) { - PAL_free(subsystem_list); + free(subsystem_list); subsystem_list = nullptr; - PAL_free(cgroup_path); + free(cgroup_path); cgroup_path = nullptr; subsystem_list = (char*)PAL_malloc(lineLen+1); if (subsystem_list == nullptr) @@ -332,10 +332,10 @@ class CGroup } } done: - PAL_free(subsystem_list); + free(subsystem_list); if (!result) { - PAL_free(cgroup_path); + free(cgroup_path); cgroup_path = nullptr; } free(line); diff --git a/src/coreclr/pal/src/misc/environ.cpp b/src/coreclr/pal/src/misc/environ.cpp index a31d6b177760b..bed6496c8998a 100644 --- a/src/coreclr/pal/src/misc/environ.cpp +++ b/src/coreclr/pal/src/misc/environ.cpp @@ -243,8 +243,8 @@ GetEnvironmentVariableW( } done: - PAL_free(outBuff); - PAL_free(inBuff); + free(outBuff); + free(inBuff); LOGEXIT("GetEnvironmentVariableW returns DWORD 0x%x\n", size); PERF_EXIT(GetEnvironmentVariableW); @@ -356,8 +356,8 @@ SetEnvironmentVariableW( bRet = SetEnvironmentVariableA(name, value); done: - PAL_free(value); - PAL_free(name); + free(value); + free(name); LOGEXIT("SetEnvironmentVariableW returning BOOL %d\n", bRet); PERF_EXIT(SetEnvironmentVariableW); @@ -476,7 +476,7 @@ FreeEnvironmentStringsW( if (lpValue != nullptr) { - PAL_free(lpValue); + free(lpValue); } 
LOGEXIT("FreeEnvironmentStringW returning BOOL TRUE\n"); @@ -571,7 +571,7 @@ SetEnvironmentVariableA( sprintf_s(string, iLen, "%s=%s", lpName, lpValue); nResult = EnvironPutenv(string, FALSE) ? 0 : -1; - PAL_free(string); + free(string); string = nullptr; // If EnvironPutenv returns FALSE, it almost certainly failed to allocate memory. diff --git a/src/coreclr/pal/src/misc/perftrace.cpp b/src/coreclr/pal/src/misc/perftrace.cpp index a0d52a415017b..a0fb6914d3c4a 100644 --- a/src/coreclr/pal/src/misc/perftrace.cpp +++ b/src/coreclr/pal/src/misc/perftrace.cpp @@ -355,7 +355,7 @@ void PERFTerminate( ) PERFlushAllLogs(); pthread_key_delete(PERF_tlsTableKey ); - PAL_free(pal_function_map); + free(pal_function_map); } @@ -454,11 +454,11 @@ BOOL PERFAllocThreadInfo( ) { if (node != NULL) { - PAL_free(node); + free(node); } if (local_info != NULL) { - PAL_free(local_info); + free(local_info); } if (apiTable != NULL) { @@ -466,14 +466,14 @@ BOOL PERFAllocThreadInfo( ) { if (apiTable[i].histograms != NULL) { - PAL_free(apiTable[i].histograms); + free(apiTable[i].histograms); } } - PAL_free(apiTable); + free(apiTable); } if (log_buf != NULL) { - PAL_free(log_buf); + free(log_buf); } } return ret; @@ -554,26 +554,26 @@ PERFlushAllLogs( ) PERFUpdateProgramInfo(current->thread_info); if (table1->histograms != NULL) { - PAL_free(table1->histograms); + free(table1->histograms); } - PAL_free(table1); + free(table1); } PERFFlushLog(current->thread_info, FALSE); - PAL_free(current->thread_info->pal_write_buf); - PAL_free(current->thread_info); + free(current->thread_info->pal_write_buf); + free(current->thread_info); } - PAL_free(current); + free(current); } PERFWriteCounters(table0); if (table0->histograms != NULL) { - PAL_free(table0->histograms); + free(table0->histograms); } - PAL_free(table0); + free(table0); PERFFlushLog(node->thread_info, FALSE); - PAL_free(node->thread_info->pal_write_buf); - PAL_free(node->thread_info); - PAL_free(node); + free(node->thread_info->pal_write_buf); + free(node->thread_info); + free(node); } static diff --git a/src/coreclr/pal/src/misc/utils.cpp b/src/coreclr/pal/src/misc/utils.cpp index 0d96cc991305a..cd4bc5ae982a6 100644 --- a/src/coreclr/pal/src/misc/utils.cpp +++ b/src/coreclr/pal/src/misc/utils.cpp @@ -204,7 +204,7 @@ LPSTR UTIL_WCToMB_Alloc(LPCWSTR lpWideCharStr, int cchWideChar) if(0 == length) { ASSERT("WCToMB error; GetLastError returns %#x\n", GetLastError()); - PAL_free(lpMultiByteStr); + free(lpMultiByteStr); return NULL; } return lpMultiByteStr; @@ -264,7 +264,7 @@ LPWSTR UTIL_MBToWC_Alloc(LPCSTR lpMultiByteStr, int cbMultiByte) if(0 >= length) { ASSERT("MCToMB error; GetLastError returns %#x\n", GetLastError()); - PAL_free(lpWideCharStr); + free(lpWideCharStr); return NULL; } return lpWideCharStr; diff --git a/src/coreclr/pal/src/safecrt/input.inl b/src/coreclr/pal/src/safecrt/input.inl index 9934eeb33f54f..61d5cc7191834 100644 --- a/src/coreclr/pal/src/safecrt/input.inl +++ b/src/coreclr/pal/src/safecrt/input.inl @@ -48,7 +48,7 @@ #define _malloc_crt PAL_malloc #define _realloc_crt PAL_realloc -#define _free_crt PAL_free +#define _free_crt free #define _FASSIGN(flag, argument, number, dec_point, locale) _safecrt_fassign((flag), (argument), (number)) #define _WFASSIGN(flag, argument, number, dec_point, locale) _safecrt_wfassign((flag), (argument), (number)) diff --git a/src/coreclr/pal/src/thread/thread.cpp b/src/coreclr/pal/src/thread/thread.cpp index 9420a442c1f6a..779e23bc979be 100644 --- a/src/coreclr/pal/src/thread/thread.cpp +++ 
b/src/coreclr/pal/src/thread/thread.cpp @@ -1616,7 +1616,7 @@ CorUnix::InternalSetThreadDescription( } if (NULL != nameBuf) { - PAL_free(nameBuf); + free(nameBuf); } #endif //defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) diff --git a/src/coreclr/scripts/genDummyProvider.py b/src/coreclr/scripts/genDummyProvider.py index 4ccb4d5cd71f8..90ec297b0bf08 100644 --- a/src/coreclr/scripts/genDummyProvider.py +++ b/src/coreclr/scripts/genDummyProvider.py @@ -124,7 +124,6 @@ def generateDummyFiles(etwmanifest, out_dirname, runtimeFlavor, extern, dryRun): #include "pal_mstypes.h" #include "pal_error.h" #include "pal.h" -#define PAL_free free #include "pal/stackstring.hpp" #endif diff --git a/src/coreclr/scripts/genLttngProvider.py b/src/coreclr/scripts/genLttngProvider.py index 2871cfe19a9b3..74e23a494238b 100644 --- a/src/coreclr/scripts/genLttngProvider.py +++ b/src/coreclr/scripts/genLttngProvider.py @@ -580,7 +580,6 @@ def generateLttngFiles(etwmanifest, eventprovider_directory, runtimeFlavor, dryR #include "pal_mstypes.h" #include "pal_error.h" #include "pal.h" -#define PAL_free free #include "pal/stackstring.hpp" """) lttngimpl_file.write("#include \"" + lttngevntheadershortname + "\"\n\n") From 6f5d9473210c3b9ae6fd93791177ddef6f023a85 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 12 Feb 2024 19:05:33 +0000 Subject: [PATCH 05/60] Remove malloc PAL API and update the few places that could have passed in a zero size to bump it to 1 manually on all platforms. --- .../dlls/mscordac/mscordac_unixexports.src | 2 - src/coreclr/jit/alloc.cpp | 6 +- src/coreclr/pal/inc/pal.h | 1 - src/coreclr/pal/src/CMakeLists.txt | 1 - src/coreclr/pal/src/cruntime/malloc.cpp | 57 ----------------- src/coreclr/pal/src/cruntime/wchar.cpp | 14 ++--- src/coreclr/pal/src/file/directory.cpp | 4 +- src/coreclr/pal/src/include/pal/malloc.hpp | 38 +++++------- src/coreclr/pal/src/include/pal/palinternal.h | 1 + .../pal/src/include/pal/stackstring.hpp | 2 +- src/coreclr/pal/src/misc/cgroup.cpp | 14 ++--- src/coreclr/pal/src/misc/environ.cpp | 12 ++-- src/coreclr/pal/src/misc/fmtmessage.cpp | 6 +- src/coreclr/pal/src/misc/perftrace.cpp | 12 ++-- src/coreclr/pal/src/misc/utils.cpp | 4 +- src/coreclr/pal/src/safecrt/input.inl | 4 +- src/coreclr/pal/src/thread/thread.cpp | 2 +- src/coreclr/pal/tests/palsuite/CMakeLists.txt | 4 -- .../palsuite/c_runtime/free/test1/test1.cpp | 61 ------------------- .../palsuite/c_runtime/malloc/test1/test1.cpp | 51 ---------------- .../palsuite/c_runtime/malloc/test2/test2.cpp | 40 ------------ .../pal/tests/palsuite/compilableTests.txt | 4 -- .../pal/tests/palsuite/paltestlist.txt | 5 -- .../utilcode/clrhost_nodependencies.cpp | 5 ++ 24 files changed, 62 insertions(+), 288 deletions(-) delete mode 100644 src/coreclr/pal/src/cruntime/malloc.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/free/test1/test1.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/malloc/test1/test1.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/malloc/test2/test2.cpp diff --git a/src/coreclr/dlls/mscordac/mscordac_unixexports.src b/src/coreclr/dlls/mscordac/mscordac_unixexports.src index 8d94292d5c572..43853ae5cbdc0 100644 --- a/src/coreclr/dlls/mscordac/mscordac_unixexports.src +++ b/src/coreclr/dlls/mscordac/mscordac_unixexports.src @@ -47,8 +47,6 @@ nativeStringResourceTable_mscorrc #PAL_ReadProcessMemory #PAL_ProbeMemory #PAL_Random -#PAL_malloc -#PAL_realloc #PAL_qsort #PAL__wcstoui64 #PAL_wcstoul diff --git a/src/coreclr/jit/alloc.cpp 
b/src/coreclr/jit/alloc.cpp index 6300376beeb6d..d9fc96458849b 100644 --- a/src/coreclr/jit/alloc.cpp +++ b/src/coreclr/jit/alloc.cpp @@ -153,7 +153,11 @@ void* ArenaAllocator::allocateHostMemory(size_t size, size_t* pActualSize) if (bypassHostAllocator()) { *pActualSize = size; - void* p = malloc(size); + if (size == 0) + { + size = 1; + } + void* p = malloc(size); if (p == nullptr) { NOMEM(); diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index 850b286f5c66c..bfe29e91271e4 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -3984,7 +3984,6 @@ PAL_GetCurrentThreadAffinitySet(SIZE_T size, UINT_PTR* data); #define getenv PAL_getenv #define qsort PAL_qsort #define bsearch PAL_bsearch -#define malloc PAL_malloc #ifdef HOST_AMD64 #define _mm_getcsr PAL__mm_getcsr diff --git a/src/coreclr/pal/src/CMakeLists.txt b/src/coreclr/pal/src/CMakeLists.txt index a4086fff86277..6db25a1fc722e 100644 --- a/src/coreclr/pal/src/CMakeLists.txt +++ b/src/coreclr/pal/src/CMakeLists.txt @@ -129,7 +129,6 @@ if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND (CLR_CMAKE_HOST_ARCH_AMD64 OR CLR_CM endif() set(SOURCES - cruntime/malloc.cpp cruntime/misc.cpp cruntime/wchar.cpp debug/debug.cpp diff --git a/src/coreclr/pal/src/cruntime/malloc.cpp b/src/coreclr/pal/src/cruntime/malloc.cpp deleted file mode 100644 index 366ea131756c8..0000000000000 --- a/src/coreclr/pal/src/cruntime/malloc.cpp +++ /dev/null @@ -1,57 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*++ - - - -Module Name: - - malloc.cpp - -Abstract: - - Implementation of suspension safe memory allocation functions. - -Revision History: - - - ---*/ - -#include "pal/corunix.hpp" -#include "pal/thread.hpp" -#include "pal/malloc.hpp" -#include "pal/dbgmsg.h" - -#include - -SET_DEFAULT_DEBUG_CHANNEL(CRT); - -using namespace CorUnix; - -void * -__cdecl -PAL_malloc( - size_t szSize - ) -{ - return InternalMalloc(szSize); -} - -void * -CorUnix::InternalMalloc( - size_t szSize - ) -{ - void *pvMem; - - if (szSize == 0) - { - // malloc may return null for a requested size of zero bytes. Force a nonzero size to get a valid pointer. 
- szSize = 1; - } - - pvMem = (void*)malloc(szSize); - return pvMem; -} diff --git a/src/coreclr/pal/src/cruntime/wchar.cpp b/src/coreclr/pal/src/cruntime/wchar.cpp index 16845ff2fc66a..88340538ebca2 100644 --- a/src/coreclr/pal/src/cruntime/wchar.cpp +++ b/src/coreclr/pal/src/cruntime/wchar.cpp @@ -55,10 +55,10 @@ _wtoi( GetLastError()); return -1; } - tempStr = (char *) PAL_malloc(len); + tempStr = (char *) malloc(len); if (!tempStr) { - ERROR("PAL_malloc failed\n"); + ERROR("malloc failed\n"); SetLastError(ERROR_NOT_ENOUGH_MEMORY); return -1; } @@ -261,10 +261,10 @@ PAL_wcstoul( res = 0; goto PAL_wcstoulExit; } - s_nptr = (char *)PAL_malloc(size); + s_nptr = (char *)malloc(size); if (!s_nptr) { - ERROR("PAL_malloc failed\n"); + ERROR("malloc failed\n"); SetLastError(ERROR_NOT_ENOUGH_MEMORY); res = 0; goto PAL_wcstoulExit; @@ -351,10 +351,10 @@ PAL__wcstoui64( res = 0; goto PAL__wcstoui64Exit; } - s_nptr = (char *)PAL_malloc(size); + s_nptr = (char *)malloc(size); if (!s_nptr) { - ERROR("PAL_malloc failed\n"); + ERROR("malloc failed\n"); SetLastError(ERROR_NOT_ENOUGH_MEMORY); res = 0; goto PAL__wcstoui64Exit; @@ -896,7 +896,7 @@ PAL_wcstod( const wchar_16 * nptr, wchar_16 **endptr ) if ( lpEndOfExpression != lpStartOfExpression ) { Length = lpEndOfExpression - lpStartOfExpression; - lpStringRep = (LPSTR)PAL_malloc( Length + 1); + lpStringRep = (LPSTR)malloc( Length + 1); if ( lpStringRep ) { diff --git a/src/coreclr/pal/src/file/directory.cpp b/src/coreclr/pal/src/file/directory.cpp index ab07080637894..e06afd0b19e1c 100644 --- a/src/coreclr/pal/src/file/directory.cpp +++ b/src/coreclr/pal/src/file/directory.cpp @@ -75,11 +75,11 @@ CreateDirectoryW( goto done; } - if (((mb_dir = (char *)PAL_malloc(mb_size)) == NULL) || + if (((mb_dir = (char *)malloc(mb_size)) == NULL) || (WideCharToMultiByte( CP_ACP, 0, lpPathName, -1, mb_dir, mb_size, NULL, NULL) != mb_size)) { - ASSERT("WideCharToMultiByte or PAL_malloc failure! LastError:%d errno:%d\n", + ASSERT("WideCharToMultiByte or malloc failure! LastError:%d errno:%d\n", GetLastError(), errno); dwLastError = ERROR_INTERNAL_ERROR; goto done; diff --git a/src/coreclr/pal/src/include/pal/malloc.hpp b/src/coreclr/pal/src/include/pal/malloc.hpp index 4ede96bd387c1..b62e8fbbc260f 100644 --- a/src/coreclr/pal/src/include/pal/malloc.hpp +++ b/src/coreclr/pal/src/include/pal/malloc.hpp @@ -26,33 +26,23 @@ Module Name: #include #include -extern "C" -{ - void * - __cdecl - PAL_realloc( - void* pvMemblock, - size_t szSize - ); - - void * - __cdecl - PAL_malloc( - size_t szSize - ); - - void - __cdecl - free( - void *pvMem - ); -} - namespace CorUnix{ - void * + inline void * InternalMalloc( size_t szSize - ); + ) + { + void *pvMem; + + if (szSize == 0) + { + // malloc may return null for a requested size of zero bytes. Force a nonzero size to get a valid pointer. + szSize = 1; + } + + pvMem = (void*)malloc(szSize); + return pvMem; + } // Define common code for "new" style allocators below. #define INTERNAL_NEW_COMMON() \ diff --git a/src/coreclr/pal/src/include/pal/palinternal.h b/src/coreclr/pal/src/include/pal/palinternal.h index 17b48e0f3a79a..0a45b745681c3 100644 --- a/src/coreclr/pal/src/include/pal/palinternal.h +++ b/src/coreclr/pal/src/include/pal/palinternal.h @@ -278,6 +278,7 @@ function_name() to call the system's implementation #define rand DUMMY_rand #define realloc DUMMY_realloc #define free DUMMY_free +#define malloc DUMMY_malloc /* RAND_MAX needed to be renamed to avoid duplicate definition when including stdlib.h header files. 
PAL_RAND_MAX should have the same value as RAND_MAX diff --git a/src/coreclr/pal/src/include/pal/stackstring.hpp b/src/coreclr/pal/src/include/pal/stackstring.hpp index 0eefe9f89138e..22e79a571502a 100644 --- a/src/coreclr/pal/src/include/pal/stackstring.hpp +++ b/src/coreclr/pal/src/include/pal/stackstring.hpp @@ -44,7 +44,7 @@ class StackString m_buffer = NULL; } - T * newBuffer = (T *)PAL_realloc(m_buffer, (count_allocated + 1) * sizeof(T)); + T * newBuffer = (T *)realloc(m_buffer, (count_allocated + 1) * sizeof(T)); if (NULL == newBuffer) { SetLastError(ERROR_NOT_ENOUGH_MEMORY); diff --git a/src/coreclr/pal/src/misc/cgroup.cpp b/src/coreclr/pal/src/misc/cgroup.cpp index f46f383c54e4a..3ad761a2ee003 100644 --- a/src/coreclr/pal/src/misc/cgroup.cpp +++ b/src/coreclr/pal/src/misc/cgroup.cpp @@ -126,7 +126,7 @@ class CGroup len = strlen(hierarchy_mount); len += strlen(cgroup_path_relative_to_mount); - cgroup_path = (char*)PAL_malloc(len+1); + cgroup_path = (char*)malloc(len+1); if (cgroup_path == nullptr) goto done; @@ -191,10 +191,10 @@ class CGroup filesystemType = nullptr; free(options); options = nullptr; - filesystemType = (char*)PAL_malloc(lineLen+1); + filesystemType = (char*)malloc(lineLen+1); if (filesystemType == nullptr) goto done; - options = (char*)PAL_malloc(lineLen+1); + options = (char*)malloc(lineLen+1); if (options == nullptr) goto done; maxLineLen = lineLen; @@ -227,10 +227,10 @@ class CGroup } if (isSubsystemMatch) { - mountpath = (char*)PAL_malloc(lineLen+1); + mountpath = (char*)malloc(lineLen+1); if (mountpath == nullptr) goto done; - mountroot = (char*)PAL_malloc(lineLen+1); + mountroot = (char*)malloc(lineLen+1); if (mountroot == nullptr) goto done; @@ -279,10 +279,10 @@ class CGroup subsystem_list = nullptr; free(cgroup_path); cgroup_path = nullptr; - subsystem_list = (char*)PAL_malloc(lineLen+1); + subsystem_list = (char*)malloc(lineLen+1); if (subsystem_list == nullptr) goto done; - cgroup_path = (char*)PAL_malloc(lineLen+1); + cgroup_path = (char*)malloc(lineLen+1); if (cgroup_path == nullptr) goto done; maxLineLen = lineLen; diff --git a/src/coreclr/pal/src/misc/environ.cpp b/src/coreclr/pal/src/misc/environ.cpp index bed6496c8998a..4980d213fa3bc 100644 --- a/src/coreclr/pal/src/misc/environ.cpp +++ b/src/coreclr/pal/src/misc/environ.cpp @@ -183,7 +183,7 @@ GetEnvironmentVariableW( goto done; } - inBuff = (CHAR *)PAL_malloc(inBuffSize); + inBuff = (CHAR *)malloc(inBuffSize); if (inBuff == nullptr) { ERROR("malloc failed\n"); @@ -193,7 +193,7 @@ GetEnvironmentVariableW( if (nSize) { - outBuff = (CHAR *)PAL_malloc(nSize*2); + outBuff = (CHAR *)malloc(nSize*2); if (outBuff == nullptr) { ERROR("malloc failed\n"); @@ -310,7 +310,7 @@ SetEnvironmentVariableW( goto done; } - name = (PCHAR)PAL_malloc(sizeof(CHAR)* nameSize); + name = (PCHAR)malloc(sizeof(CHAR)* nameSize); if (name == nullptr) { ERROR("malloc failed\n"); @@ -336,7 +336,7 @@ SetEnvironmentVariableW( goto done; } - value = (PCHAR)PAL_malloc(sizeof(CHAR)*valueSize); + value = (PCHAR)malloc(sizeof(CHAR)*valueSize); if (value == nullptr) { @@ -414,7 +414,7 @@ GetEnvironmentStringsW( envNum += len; } - wenviron = (WCHAR *)PAL_malloc(sizeof(WCHAR)* (envNum + 1)); + wenviron = (WCHAR *)malloc(sizeof(WCHAR)* (envNum + 1)); if (wenviron == nullptr) { ERROR("malloc failed\n"); @@ -559,7 +559,7 @@ SetEnvironmentVariableA( { // All the conditions are met. Set the variable. 
int iLen = strlen(lpName) + strlen(lpValue) + 2; - LPSTR string = (LPSTR) PAL_malloc(iLen); + LPSTR string = (LPSTR) malloc(iLen); if (string == nullptr) { bRet = FALSE; diff --git a/src/coreclr/pal/src/misc/fmtmessage.cpp b/src/coreclr/pal/src/misc/fmtmessage.cpp index c7de98718c1d3..0598914b06cb5 100644 --- a/src/coreclr/pal/src/misc/fmtmessage.cpp +++ b/src/coreclr/pal/src/misc/fmtmessage.cpp @@ -61,7 +61,7 @@ static LPWSTR FMTMSG_GetMessageString( DWORD dwErrCode ) allocChars = MAX_ERROR_STRING_LENGTH + 1; } - LPWSTR lpRetVal = (LPWSTR)PAL_malloc(allocChars * sizeof(WCHAR)); + LPWSTR lpRetVal = (LPWSTR)malloc(allocChars * sizeof(WCHAR)); if (lpRetVal) { @@ -140,7 +140,7 @@ static INT FMTMSG__watoi( LPWSTR str ) UINT NumOfBytes = 0; \ nSize *= 2; \ NumOfBytes = nSize * sizeof( WCHAR ); \ - lpTemp = static_cast( PAL_malloc( NumOfBytes ) ); \ + lpTemp = static_cast( malloc( NumOfBytes ) ); \ TRACE( "Growing the buffer.\n" );\ \ if ( !lpTemp ) \ @@ -327,7 +327,7 @@ FormatMessageW( } lpWorkingString = static_cast( - PAL_malloc( nSize * sizeof( WCHAR ) ) ); + malloc( nSize * sizeof( WCHAR ) ) ); if ( !lpWorkingString ) { ERROR( "Unable to allocate memory for the working string.\n" ); diff --git a/src/coreclr/pal/src/misc/perftrace.cpp b/src/coreclr/pal/src/misc/perftrace.cpp index a0fb6914d3c4a..9419005099f41 100644 --- a/src/coreclr/pal/src/misc/perftrace.cpp +++ b/src/coreclr/pal/src/misc/perftrace.cpp @@ -321,7 +321,7 @@ PERFInitialize(LPWSTR command_line, LPWSTR exe_path) if( ret == TRUE ) { - pal_function_map = (char*)PAL_malloc(PAL_API_NUMBER); + pal_function_map = (char*)malloc(PAL_API_NUMBER); if(pal_function_map != NULL) { bRead = PERFReadSetting( ); // we don't quit even we failed to read the file. @@ -376,21 +376,21 @@ BOOL PERFAllocThreadInfo( ) memory resources could be exhausted. If this ever becomes a problem, the memory allocated per thread should be freed when a thread exits. 
*/ - node = ( pal_thread_list_node * )PAL_malloc(sizeof(pal_thread_list_node)); + node = ( pal_thread_list_node * )malloc(sizeof(pal_thread_list_node)); if(node == NULL) { ret = FALSE; goto PERFAllocThreadInfoExit; } - local_info = (pal_perf_thread_info *)PAL_malloc(sizeof(pal_perf_thread_info)); + local_info = (pal_perf_thread_info *)malloc(sizeof(pal_perf_thread_info)); if (local_info == NULL) { ret = FALSE; goto PERFAllocThreadInfoExit; } - apiTable = (pal_perf_api_info *)PAL_malloc( PAL_API_NUMBER * sizeof(pal_perf_api_info)); + apiTable = (pal_perf_api_info *)malloc( PAL_API_NUMBER * sizeof(pal_perf_api_info)); if (apiTable == NULL) { ret = FALSE; @@ -411,7 +411,7 @@ BOOL PERFAllocThreadInfo( ) apiTable[i].sum_of_square_duration = 0.0; if (pal_perf_histogram_size > 0) { - apiTable[i].histograms = (DWORD *)PAL_malloc(pal_perf_histogram_size*sizeof(DWORD)); + apiTable[i].histograms = (DWORD *)malloc(pal_perf_histogram_size*sizeof(DWORD)); if (apiTable[i].histograms == NULL) { ret = FALSE; @@ -425,7 +425,7 @@ BOOL PERFAllocThreadInfo( ) } } - log_buf = (char * )PAL_malloc( PAL_PERF_PROFILE_BUFFER_SIZE ); + log_buf = (char * )malloc( PAL_PERF_PROFILE_BUFFER_SIZE ); if(log_buf == NULL) { diff --git a/src/coreclr/pal/src/misc/utils.cpp b/src/coreclr/pal/src/misc/utils.cpp index cd4bc5ae982a6..261be25bcabaa 100644 --- a/src/coreclr/pal/src/misc/utils.cpp +++ b/src/coreclr/pal/src/misc/utils.cpp @@ -190,7 +190,7 @@ LPSTR UTIL_WCToMB_Alloc(LPCWSTR lpWideCharStr, int cchWideChar) } /* allocate required buffer */ - lpMultiByteStr = (LPSTR)PAL_malloc(length); + lpMultiByteStr = (LPSTR)malloc(length); if(NULL == lpMultiByteStr) { ERROR("malloc() failed! errno is %d (%s)\n", errno,strerror(errno)); @@ -250,7 +250,7 @@ LPWSTR UTIL_MBToWC_Alloc(LPCSTR lpMultiByteStr, int cbMultiByte) return NULL; } - lpWideCharStr = (LPWSTR)PAL_malloc(fullsize); + lpWideCharStr = (LPWSTR)malloc(fullsize); if(NULL == lpWideCharStr) { ERROR("malloc() failed! 
errno is %d (%s)\n", errno,strerror(errno)); diff --git a/src/coreclr/pal/src/safecrt/input.inl b/src/coreclr/pal/src/safecrt/input.inl index 61d5cc7191834..556fafa6f6b4f 100644 --- a/src/coreclr/pal/src/safecrt/input.inl +++ b/src/coreclr/pal/src/safecrt/input.inl @@ -46,8 +46,8 @@ #define _istspace(x) isspace((unsigned char)x) -#define _malloc_crt PAL_malloc -#define _realloc_crt PAL_realloc +#define _malloc_crt malloc +#define _realloc_crt realloc #define _free_crt free #define _FASSIGN(flag, argument, number, dec_point, locale) _safecrt_fassign((flag), (argument), (number)) diff --git a/src/coreclr/pal/src/thread/thread.cpp b/src/coreclr/pal/src/thread/thread.cpp index 779e23bc979be..d388521da1605 100644 --- a/src/coreclr/pal/src/thread/thread.cpp +++ b/src/coreclr/pal/src/thread/thread.cpp @@ -1564,7 +1564,7 @@ CorUnix::InternalSetThreadDescription( goto InternalSetThreadDescriptionExit; } - nameBuf = (char *)PAL_malloc(nameSize); + nameBuf = (char *)malloc(nameSize); if (nameBuf == NULL) { palError = ERROR_OUTOFMEMORY; diff --git a/src/coreclr/pal/tests/palsuite/CMakeLists.txt b/src/coreclr/pal/tests/palsuite/CMakeLists.txt index cb0e6ba848cd6..4930fdca816c4 100644 --- a/src/coreclr/pal/tests/palsuite/CMakeLists.txt +++ b/src/coreclr/pal/tests/palsuite/CMakeLists.txt @@ -67,7 +67,6 @@ add_executable_clr(paltests c_runtime/atoi/test1/test1.cpp c_runtime/bsearch/test1/test1.cpp c_runtime/bsearch/test2/test2.cpp - c_runtime/free/test1/test1.cpp c_runtime/isalnum/test1/test1.cpp c_runtime/isalpha/test1/test1.cpp c_runtime/isdigit/test1/test1.cpp @@ -80,15 +79,12 @@ add_executable_clr(paltests c_runtime/iswupper/test1/test1.cpp c_runtime/isxdigit/test1/test1.cpp c_runtime/llabs/test1/test1.cpp - c_runtime/malloc/test1/test1.cpp - c_runtime/malloc/test2/test2.cpp c_runtime/memchr/test1/test1.cpp c_runtime/memcmp/test1/test1.cpp c_runtime/memmove/test1/test1.cpp c_runtime/memset/test1/test1.cpp c_runtime/qsort/test1/test1.cpp c_runtime/qsort/test2/test2.cpp - c_runtime/realloc/test1/test1.cpp c_runtime/sscanf_s/test1/test1.cpp c_runtime/sscanf_s/test10/test10.cpp c_runtime/sscanf_s/test11/test11.cpp diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/free/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/free/test1/test1.cpp deleted file mode 100644 index dc8d13158862e..0000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/free/test1/test1.cpp +++ /dev/null @@ -1,61 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test1.c -** -** Purpose: Repeatedly allocates and frees a chunk of memory, to verify -** that free is really returning memory to the heap -** -** -**==========================================================================*/ - -#include - -PALTEST(c_runtime_free_test1_paltest_free_test1, "c_runtime/free/test1/paltest_free_test1") -{ - - char *testA; - - long i; - if (PAL_Initialize(argc, argv)) - { - return FAIL; - } - - - /* check that free really returns memory to the heap. 
*/ - for(i=1; i<1000000; i++) - { - testA = (char *)malloc(1000*sizeof(char)); - if (testA==NULL) - { - Fail("Either free is failing to return memory to the heap, or" - " the system is running out of memory for some other " - "reason.\n"); - } - free(testA); - } - - free(NULL); /*should do nothing*/ - PAL_Terminate(); - return PASS; -} - - - - - - - - - - - - - - - - - diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/malloc/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/malloc/test1/test1.cpp deleted file mode 100644 index 067791fe866df..0000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/malloc/test1/test1.cpp +++ /dev/null @@ -1,51 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test1.c -** -** Purpose: Test that malloc returns usable memory -** -** -**==========================================================================*/ - -#include - - -PALTEST(c_runtime_malloc_test1_paltest_malloc_test1, "c_runtime/malloc/test1/paltest_malloc_test1") -{ - - char *testA; - int i; - if (PAL_Initialize(argc, argv)) - { - return FAIL; - } - - /* check that malloc really gives us addressable memory */ - testA = (char *)malloc(20 * sizeof(char)); - if (testA == NULL) - { - Fail("Call to malloc failed.\n"); - } - for (i = 0; i < 20; i++) - { - testA[i] = 'a'; - } - for (i = 0; i < 20; i++) - { - if (testA[i] != 'a') - { - Fail("The memory doesn't seem to be properly allocated.\n"); - } - } - free(testA); - - PAL_Terminate(); - - return PASS; -} - - - diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/malloc/test2/test2.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/malloc/test2/test2.cpp deleted file mode 100644 index 9f94f1050d6ac..0000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/malloc/test2/test2.cpp +++ /dev/null @@ -1,40 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
- -/*============================================================================ -** -** Source: test2.c -** -** Purpose: Test that malloc(0) returns non-zero value -** -**==========================================================================*/ - -#include - - -PALTEST(c_runtime_malloc_test2_paltest_malloc_test2, "c_runtime/malloc/test2/paltest_malloc_test2") -{ - - char *testA; - - if (PAL_Initialize(argc, argv)) - { - return FAIL; - } - - /* check that malloc(0) returns non-zero value */ - testA = (char *)malloc(0); - if (testA == NULL) - { - Fail("Call to malloc(0) failed.\n"); - } - - free(testA); - - PAL_Terminate(); - - return PASS; -} - - - diff --git a/src/coreclr/pal/tests/palsuite/compilableTests.txt b/src/coreclr/pal/tests/palsuite/compilableTests.txt index c50f24e59b679..a55aa31c51dcf 100644 --- a/src/coreclr/pal/tests/palsuite/compilableTests.txt +++ b/src/coreclr/pal/tests/palsuite/compilableTests.txt @@ -12,7 +12,6 @@ c_runtime/cosh/test1/paltest_cosh_test1 c_runtime/coshf/test1/paltest_coshf_test1 c_runtime/errno/test1/paltest_errno_test1 c_runtime/errno/test2/paltest_errno_test2 -c_runtime/free/test1/paltest_free_test1 c_runtime/isalnum/test1/paltest_isalnum_test1 c_runtime/isalpha/test1/paltest_isalpha_test1 c_runtime/isdigit/test1/paltest_isdigit_test1 @@ -24,15 +23,12 @@ c_runtime/iswspace/test1/paltest_iswspace_test1 c_runtime/iswupper/test1/paltest_iswupper_test1 c_runtime/isxdigit/test1/paltest_isxdigit_test1 c_runtime/llabs/test1/paltest_llabs_test1 -c_runtime/malloc/test1/paltest_malloc_test1 -c_runtime/malloc/test2/paltest_malloc_test2 c_runtime/memchr/test1/paltest_memchr_test1 c_runtime/memcmp/test1/paltest_memcmp_test1 c_runtime/memmove/test1/paltest_memmove_test1 c_runtime/memset/test1/paltest_memset_test1 c_runtime/qsort/test1/paltest_qsort_test1 c_runtime/qsort/test2/paltest_qsort_test2 -c_runtime/realloc/test1/paltest_realloc_test1 c_runtime/sscanf_s/test1/paltest_sscanf_test1 c_runtime/sscanf_s/test10/paltest_sscanf_test10 c_runtime/sscanf_s/test11/paltest_sscanf_test11 diff --git a/src/coreclr/pal/tests/palsuite/paltestlist.txt b/src/coreclr/pal/tests/palsuite/paltestlist.txt index 6b7f196d06b12..2594eb18d5ef4 100644 --- a/src/coreclr/pal/tests/palsuite/paltestlist.txt +++ b/src/coreclr/pal/tests/palsuite/paltestlist.txt @@ -4,7 +4,6 @@ c_runtime/bsearch/test1/paltest_bsearch_test1 c_runtime/bsearch/test2/paltest_bsearch_test2 c_runtime/errno/test1/paltest_errno_test1 c_runtime/errno/test2/paltest_errno_test2 -c_runtime/free/test1/paltest_free_test1 c_runtime/isalnum/test1/paltest_isalnum_test1 c_runtime/isalpha/test1/paltest_isalpha_test1 c_runtime/isdigit/test1/paltest_isdigit_test1 @@ -15,16 +14,12 @@ c_runtime/iswdigit/test1/paltest_iswdigit_test1 c_runtime/iswspace/test1/paltest_iswspace_test1 c_runtime/iswupper/test1/paltest_iswupper_test1 c_runtime/isxdigit/test1/paltest_isxdigit_test1 -c_runtime/llabs/test1/paltest_llabs_test1 -c_runtime/malloc/test1/paltest_malloc_test1 -c_runtime/malloc/test2/paltest_malloc_test2 c_runtime/memchr/test1/paltest_memchr_test1 c_runtime/memcmp/test1/paltest_memcmp_test1 c_runtime/memmove/test1/paltest_memmove_test1 c_runtime/memset/test1/paltest_memset_test1 c_runtime/qsort/test1/paltest_qsort_test1 c_runtime/qsort/test2/paltest_qsort_test2 -c_runtime/realloc/test1/paltest_realloc_test1 c_runtime/sscanf_s/test1/paltest_sscanf_test1 c_runtime/sscanf_s/test10/paltest_sscanf_test10 c_runtime/sscanf_s/test11/paltest_sscanf_test11 diff --git a/src/coreclr/utilcode/clrhost_nodependencies.cpp 
b/src/coreclr/utilcode/clrhost_nodependencies.cpp index b385474b6dc0c..7aceae763c43b 100644 --- a/src/coreclr/utilcode/clrhost_nodependencies.cpp +++ b/src/coreclr/utilcode/clrhost_nodependencies.cpp @@ -246,6 +246,11 @@ FORCEINLINE void* ClrMalloc(size_t size) p = HeapAlloc(hHeap, 0, size); #else + if (size == 0) + { + // Allocate at least one byte. + size = 1; + } p = malloc(size); #endif From 33f3224e9b6487e385ff2e52fcc722532a8e5269 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 12 Feb 2024 19:06:57 +0000 Subject: [PATCH 06/60] Remove AMD64 processor PAL functions --- src/coreclr/pal/inc/pal.h | 15 +-------------- src/coreclr/pal/src/cruntime/misc.cpp | 16 ---------------- src/coreclr/pal/src/include/pal/palinternal.h | 5 ----- src/coreclr/vm/cgensys.h | 5 ++++- 4 files changed, 5 insertions(+), 36 deletions(-) diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index bfe29e91271e4..29c33b6551292 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -3985,11 +3985,6 @@ PAL_GetCurrentThreadAffinitySet(SIZE_T size, UINT_PTR* data); #define qsort PAL_qsort #define bsearch PAL_bsearch -#ifdef HOST_AMD64 -#define _mm_getcsr PAL__mm_getcsr -#define _mm_setcsr PAL__mm_setcsr -#endif // HOST_AMD64 - // Forward declare functions that are in header files we can't include yet int printf(const char *, ...); int vprintf(const char *, va_list); @@ -4397,15 +4392,7 @@ PALAPI PAL_GetCpuTickCount(); #endif // PAL_PERF -/******************* PAL functions for SIMD extensions *****************/ - -PALIMPORT -unsigned int _mm_getcsr(void); - -PALIMPORT -void _mm_setcsr(unsigned int i); - -/******************* PAL functions for CPU capability detection *******/ +/******************* PAL functions for exceptions *******/ #ifdef __cplusplus diff --git a/src/coreclr/pal/src/cruntime/misc.cpp b/src/coreclr/pal/src/cruntime/misc.cpp index a50479bc37258..5c1d7b63b3425 100644 --- a/src/coreclr/pal/src/cruntime/misc.cpp +++ b/src/coreclr/pal/src/cruntime/misc.cpp @@ -221,19 +221,3 @@ PAL_bsearch(const void *key, const void *base, size_t nmemb, size_t size, PERF_EXIT(bsearch); return retval; } - -#ifdef HOST_AMD64 - -PALIMPORT -unsigned int PAL__mm_getcsr(void) -{ - return _mm_getcsr(); -} - -PALIMPORT -void PAL__mm_setcsr(unsigned int i) -{ - _mm_setcsr(i); -} - -#endif // HOST_AMD64 diff --git a/src/coreclr/pal/src/include/pal/palinternal.h b/src/coreclr/pal/src/include/pal/palinternal.h index 0a45b745681c3..8c37394efba83 100644 --- a/src/coreclr/pal/src/include/pal/palinternal.h +++ b/src/coreclr/pal/src/include/pal/palinternal.h @@ -580,11 +580,6 @@ function_name() to call the system's implementation #undef towlower #undef towupper -#ifdef HOST_AMD64 -#undef _mm_getcsr -#undef _mm_setcsr -#endif // HOST_AMD64 - #undef min #undef max diff --git a/src/coreclr/vm/cgensys.h b/src/coreclr/vm/cgensys.h index 1396d7558c29f..98b1cbc94781d 100644 --- a/src/coreclr/vm/cgensys.h +++ b/src/coreclr/vm/cgensys.h @@ -84,7 +84,10 @@ BOOL GetAnyThunkTarget (T_CONTEXT *pctx, TADDR *pTarget, TADDR *pTargetMethodDes #endif // DACCESS_COMPILE - +#if defined(TARGET_AMD64) +extern "C" DWORD _mm_getcsr(); +extern "C" void _mm_setcsr(DWORD); +#endif // // ResetProcessorStateHolder saves/restores processor state around calls to From 86f4ea78dc2dd381d6ac0c57a38584ca33e1de80 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 12 Feb 2024 19:11:16 +0000 Subject: [PATCH 07/60] Remove time PAL as it's unused --- src/coreclr/pal/inc/pal.h | 1 - src/coreclr/pal/src/cruntime/misc.cpp | 28 
----------- src/coreclr/pal/src/include/pal/misc.h | 17 ------- src/coreclr/pal/src/include/pal/palinternal.h | 1 + src/coreclr/pal/tests/palsuite/CMakeLists.txt | 1 - .../palsuite/c_runtime/time/test1/test1.cpp | 50 ------------------- .../pal/tests/palsuite/compilableTests.txt | 1 - .../pal/tests/palsuite/paltestlist.txt | 1 - 8 files changed, 1 insertion(+), 99 deletions(-) delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/time/test1/test1.cpp diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index 29c33b6551292..70c36d8305541 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -3980,7 +3980,6 @@ PAL_GetCurrentThreadAffinitySet(SIZE_T size, UINT_PTR* data); To avoid name collisions, those functions have been renamed using defines */ #ifndef PAL_STDCPP_COMPAT -#define time PAL_time #define getenv PAL_getenv #define qsort PAL_qsort #define bsearch PAL_bsearch diff --git a/src/coreclr/pal/src/cruntime/misc.cpp b/src/coreclr/pal/src/cruntime/misc.cpp index 5c1d7b63b3425..0f5dd74c61844 100644 --- a/src/coreclr/pal/src/cruntime/misc.cpp +++ b/src/coreclr/pal/src/cruntime/misc.cpp @@ -133,34 +133,6 @@ extern "C" PALIMPORT DLLEXPORT FILE* __cdecl PAL_stderr() return stderr; } -/*++ -Function: - - time - -See MSDN for more details. ---*/ -PAL_time_t -__cdecl -PAL_time(PAL_time_t *tloc) -{ - time_t result; - - PERF_ENTRY(time); - ENTRY( "time( tloc=%p )\n",tloc ); - - time_t t; - result = time(&t); - if (tloc != NULL) - { - *tloc = t; - } - - LOGEXIT( "time returning %#lx\n",result ); - PERF_EXIT(time); - return result; -} - PALIMPORT void __cdecl PAL_qsort(void *base, size_t nmemb, size_t size, diff --git a/src/coreclr/pal/src/include/pal/misc.h b/src/coreclr/pal/src/include/pal/misc.h index aa5b2b4852b6e..ffa6448ed7d30 100644 --- a/src/coreclr/pal/src/include/pal/misc.h +++ b/src/coreclr/pal/src/include/pal/misc.h @@ -25,23 +25,6 @@ extern "C" { #endif // __cplusplus -/*++ -Function : - - PAL_rand - - Calls rand and mitigates the difference between RAND_MAX - on Windows and FreeBSD. ---*/ -int __cdecl PAL_rand(void); - -/*++ -Function : - - PAL_time ---*/ -PAL_time_t __cdecl PAL_time(PAL_time_t*); - /*++ Function : MsgBoxInitialize diff --git a/src/coreclr/pal/src/include/pal/palinternal.h b/src/coreclr/pal/src/include/pal/palinternal.h index 8c37394efba83..7f8b880e6785b 100644 --- a/src/coreclr/pal/src/include/pal/palinternal.h +++ b/src/coreclr/pal/src/include/pal/palinternal.h @@ -279,6 +279,7 @@ function_name() to call the system's implementation #define realloc DUMMY_realloc #define free DUMMY_free #define malloc DUMMY_malloc +#define time DUMMY_time /* RAND_MAX needed to be renamed to avoid duplicate definition when including stdlib.h header files. 
PAL_RAND_MAX should have the same value as RAND_MAX diff --git a/src/coreclr/pal/tests/palsuite/CMakeLists.txt b/src/coreclr/pal/tests/palsuite/CMakeLists.txt index 4930fdca816c4..5b5a2025bb146 100644 --- a/src/coreclr/pal/tests/palsuite/CMakeLists.txt +++ b/src/coreclr/pal/tests/palsuite/CMakeLists.txt @@ -113,7 +113,6 @@ add_executable_clr(paltests c_runtime/strpbrk/test1/test1.cpp c_runtime/strrchr/test1/test1.cpp c_runtime/strstr/test1/test1.cpp - c_runtime/time/test1/test1.cpp c_runtime/tolower/test1/test1.cpp c_runtime/toupper/test1/test1.cpp c_runtime/towlower/test1/test1.cpp diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/time/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/time/test1/test1.cpp deleted file mode 100644 index 72d905be0a51e..0000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/time/test1/test1.cpp +++ /dev/null @@ -1,50 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test1.c -** -** Purpose: Calls the time function and verifies that the time returned -** is at least a positive value. -** -** -**==========================================================================*/ - -#include - -PALTEST(c_runtime_time_test1_paltest_time_test1, "c_runtime/time/test1/paltest_time_test1") -{ - time_t t = 0; - - if (PAL_Initialize(argc, argv)) - { - return FAIL; - } - - - time(&t); - /*I was going to test that the time returned didn't exceed some - reasonable value, but decided not to, for fear of creating my own - little Y2K-style disaster.*/ - - if (t <= 0) - { - Fail("time() function doesn't return a time.\n"); - } - t = 0; - t = time(NULL); - if (t <= 0) - { - Fail("time() function doesn't return a time.\n"); - } - PAL_Terminate(); - return PASS; -} - - - - - - - diff --git a/src/coreclr/pal/tests/palsuite/compilableTests.txt b/src/coreclr/pal/tests/palsuite/compilableTests.txt index a55aa31c51dcf..e1b8ecdeb2685 100644 --- a/src/coreclr/pal/tests/palsuite/compilableTests.txt +++ b/src/coreclr/pal/tests/palsuite/compilableTests.txt @@ -57,7 +57,6 @@ c_runtime/strncpy/test1/paltest_strncpy_test1 c_runtime/strpbrk/test1/paltest_strpbrk_test1 c_runtime/strrchr/test1/paltest_strrchr_test1 c_runtime/strstr/test1/paltest_strstr_test1 -c_runtime/time/test1/paltest_time_test1 c_runtime/tolower/test1/paltest_tolower_test1 c_runtime/toupper/test1/paltest_toupper_test1 c_runtime/towlower/test1/paltest_towlower_test1 diff --git a/src/coreclr/pal/tests/palsuite/paltestlist.txt b/src/coreclr/pal/tests/palsuite/paltestlist.txt index 2594eb18d5ef4..f47e840dc93da 100644 --- a/src/coreclr/pal/tests/palsuite/paltestlist.txt +++ b/src/coreclr/pal/tests/palsuite/paltestlist.txt @@ -48,7 +48,6 @@ c_runtime/strncpy/test1/paltest_strncpy_test1 c_runtime/strpbrk/test1/paltest_strpbrk_test1 c_runtime/strrchr/test1/paltest_strrchr_test1 c_runtime/strstr/test1/paltest_strstr_test1 -c_runtime/time/test1/paltest_time_test1 c_runtime/tolower/test1/paltest_tolower_test1 c_runtime/toupper/test1/paltest_toupper_test1 c_runtime/towlower/test1/paltest_towlower_test1 From 865eefbbc71c97518fc487d52ddce13bf4033787 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 12 Feb 2024 19:53:02 +0000 Subject: [PATCH 08/60] Explicitly use the PAL's getenv implementation in the few places we were using it through the define. 
--- src/coreclr/pal/inc/pal.h | 4 ++-- src/coreclr/pal/src/include/pal/palinternal.h | 1 + .../pal/tests/palsuite/c_runtime/_putenv/test1/test1.cpp | 4 ++-- .../pal/tests/palsuite/c_runtime/_putenv/test2/test2.cpp | 4 ++-- .../pal/tests/palsuite/c_runtime/_putenv/test3/test3.cpp | 4 ++-- src/coreclr/utilcode/clrconfig.cpp | 6 +++++- src/coreclr/vm/eventing/eventpipe/ds-rt-coreclr.h | 6 +++++- src/coreclr/vm/perfmap.cpp | 4 ++++ 8 files changed, 23 insertions(+), 10 deletions(-) diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index 70c36d8305541..7b0e92c66455a 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -3980,7 +3980,6 @@ PAL_GetCurrentThreadAffinitySet(SIZE_T size, UINT_PTR* data); To avoid name collisions, those functions have been renamed using defines */ #ifndef PAL_STDCPP_COMPAT -#define getenv PAL_getenv #define qsort PAL_qsort #define bsearch PAL_bsearch @@ -4354,13 +4353,14 @@ PALIMPORT DLLEXPORT void * __cdecl bsearch(const void *, const void *, size_t, s int(__cdecl *)(const void *, const void *)); PALIMPORT time_t __cdecl time(time_t *); +PALIMPORT DLLEXPORT char * __cdecl getenv(const char *); #endif // !PAL_STDCPP_COMPAT PALIMPORT int __cdecl rand(void); PALIMPORT void __cdecl srand(unsigned int); -PALIMPORT DLLEXPORT char * __cdecl getenv(const char *); +PALIMPORT DLLEXPORT char * __cdecl PAL_getenv(const char *); PALIMPORT DLLEXPORT int __cdecl _putenv(const char *); #define ERANGE 34 diff --git a/src/coreclr/pal/src/include/pal/palinternal.h b/src/coreclr/pal/src/include/pal/palinternal.h index 7f8b880e6785b..ee758fad3da99 100644 --- a/src/coreclr/pal/src/include/pal/palinternal.h +++ b/src/coreclr/pal/src/include/pal/palinternal.h @@ -280,6 +280,7 @@ function_name() to call the system's implementation #define free DUMMY_free #define malloc DUMMY_malloc #define time DUMMY_time +#define getenv DUMMY_getenv /* RAND_MAX needed to be renamed to avoid duplicate definition when including stdlib.h header files. 
PAL_RAND_MAX should have the same value as RAND_MAX diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test1/test1.cpp index a7ebbe4fa6189..6b9b6d94ee530 100644 --- a/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test1/test1.cpp +++ b/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test1/test1.cpp @@ -56,7 +56,7 @@ PALTEST(c_runtime__putenv_test1_paltest_putenv_test1, "c_runtime/_putenv/test1/p */ if (TestCases[i].bValidString == TRUE) { - variableValue = getenv(TestCases[i].varName); + variableValue = PAL_getenv(TestCases[i].varName); if (variableValue == NULL) { @@ -81,7 +81,7 @@ PALTEST(c_runtime__putenv_test1_paltest_putenv_test1, "c_runtime/_putenv/test1/p * Check to see that putenv fails for malformed _putenvString values */ { - variableValue = getenv(TestCases[i].varName); + variableValue = PAL_getenv(TestCases[i].varName); if (variableValue != NULL) { diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test2/test2.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test2/test2.cpp index ee84e375c2e2e..ef118e513260b 100644 --- a/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test2/test2.cpp +++ b/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test2/test2.cpp @@ -35,7 +35,7 @@ PALTEST(c_runtime__putenv_test2_paltest_putenv_test2, "c_runtime/_putenv/test2/p "_putenv(%s)\n", _putenvString0); } - variableValue = getenv(variable); + variableValue = PAL_getenv(variable); if (variableValue == NULL) { @@ -60,7 +60,7 @@ PALTEST(c_runtime__putenv_test2_paltest_putenv_test2, "c_runtime/_putenv/test2/p "_putenv(%s)\n", _putenvString1); } - variableValue = getenv(variable); + variableValue = PAL_getenv(variable); if (variableValue != NULL) { diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test3/test3.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test3/test3.cpp index ab1397193ce3e..07380e1a51452 100644 --- a/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test3/test3.cpp +++ b/src/coreclr/pal/tests/palsuite/c_runtime/_putenv/test3/test3.cpp @@ -50,7 +50,7 @@ PALTEST(c_runtime__putenv_test3_paltest_putenv_test3, "c_runtime/_putenv/test3/p differing only by case, returns it's own value. 
*/ - result = getenv(FirstVarName); + result = PAL_getenv(FirstVarName); if(result == NULL) { Fail("ERROR: The result of getenv on a valid Environment Variable " @@ -77,7 +77,7 @@ PALTEST(c_runtime__putenv_test3_paltest_putenv_test3, "c_runtime/_putenv/test3/p /* Verify that the environment variables */ - result = getenv(FirstVarName); + result = PAL_getenv(FirstVarName); if(result == NULL) { Fail("ERROR: The result of getenv on a valid Environment Variable " diff --git a/src/coreclr/utilcode/clrconfig.cpp b/src/coreclr/utilcode/clrconfig.cpp index 8ea705a917e83..dff8a9b704ed1 100644 --- a/src/coreclr/utilcode/clrconfig.cpp +++ b/src/coreclr/utilcode/clrconfig.cpp @@ -201,7 +201,11 @@ namespace // Validate the cache and no-cache logic result in the same answer SString nameToConvert(name); - CLRConfigNoCache nonCache = CLRConfigNoCache::Get(nameToConvert.GetUTF8(), noPrefix); +#ifdef HOST_WINDOWS + CLRConfigNoCache nonCache = CLRConfigNoCache::Get(nameToConvert.GetUTF8(), noPrefix, &getenv); +#else + CLRConfigNoCache nonCache = CLRConfigNoCache::Get(nameToConvert.GetUTF8(), noPrefix, &PAL_getenv); +#endif LPCSTR valueNoCache = nonCache.AsString(); _ASSERTE(SString::_stricmp(valueNoCache, temp.GetUTF8()) == 0); diff --git a/src/coreclr/vm/eventing/eventpipe/ds-rt-coreclr.h b/src/coreclr/vm/eventing/eventpipe/ds-rt-coreclr.h index e6036353d8b9f..fb6c0c3feeda0 100644 --- a/src/coreclr/vm/eventing/eventpipe/ds-rt-coreclr.h +++ b/src/coreclr/vm/eventing/eventpipe/ds-rt-coreclr.h @@ -391,7 +391,11 @@ ds_rt_server_log_pause_message (void) STATIC_CONTRACT_NOTHROW; const char diagPortsName[] = "DiagnosticPorts"; - CLRConfigNoCache diagPorts = CLRConfigNoCache::Get(diagPortsName); +#ifdef HOST_WINDOWS + CLRConfigNoCache diagPorts = CLRConfigNoCache::Get(diagPortsName); +#else + CLRConfigNoCache diagPorts = CLRConfigNoCache::Get(diagPortsName, /* noPrefix */ false, &PAL_getenv); +#endif LPCSTR ports = nullptr; if (diagPorts.IsSet()) { diff --git a/src/coreclr/vm/perfmap.cpp b/src/coreclr/vm/perfmap.cpp index 4ede16efca076..d032dc6031dce 100644 --- a/src/coreclr/vm/perfmap.cpp +++ b/src/coreclr/vm/perfmap.cpp @@ -46,7 +46,11 @@ void PerfMap::Initialize() const char * PerfMap::InternalConstructPath() { +#ifdef HOST_WINDOWS CLRConfigNoCache value = CLRConfigNoCache::Get("PerfMapJitDumpPath"); +#else + CLRConfigNoCache value = CLRConfigNoCache::Get("PerfMapJitDumpPath", /* noPrefix */ false, &PAL_getenv); +#endif if (value.IsSet()) { return value.AsString(); From 188b02dacc7dfaa3462ed0951aabe3e1a1e0b2aa Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 12 Feb 2024 20:03:27 +0000 Subject: [PATCH 09/60] Suppress bad static-analysis warning --- src/coreclr/vm/codeman.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/coreclr/vm/codeman.h b/src/coreclr/vm/codeman.h index 6fe87885da111..608e0013515ea 100644 --- a/src/coreclr/vm/codeman.h +++ b/src/coreclr/vm/codeman.h @@ -1459,7 +1459,12 @@ class RangeSectionMap // This level is completely empty. Free it, and then null out the pointer to it. pointerToLevelData->Uninstall(); +#if defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wfree-nonheap-object" // The compiler can't tell that this pointer always comes from a malloc call. 
free((void*)rawData); +#pragma GCC diagnostic pop +#endif } } From 57e6a101cdc9df3e6c3e7f5d61e9b50876ef1c0b Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 12 Feb 2024 20:07:30 +0000 Subject: [PATCH 10/60] Remove bsearch and qsort PAL APIs as they only forward to the underlying impl and maintain PAL logging --- src/coreclr/pal/inc/pal.h | 3 - src/coreclr/pal/src/cruntime/misc.cpp | 61 ------------------- src/coreclr/pal/src/include/pal/palinternal.h | 2 + src/coreclr/pal/tests/palsuite/CMakeLists.txt | 4 -- .../c_runtime/bsearch/test1/test1.cpp | 47 -------------- .../c_runtime/bsearch/test2/test2.cpp | 56 ----------------- .../palsuite/c_runtime/qsort/test1/test1.cpp | 47 -------------- .../palsuite/c_runtime/qsort/test2/test2.cpp | 48 --------------- .../pal/tests/palsuite/compilableTests.txt | 4 -- .../pal/tests/palsuite/paltestlist.txt | 6 -- 10 files changed, 2 insertions(+), 276 deletions(-) delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/bsearch/test1/test1.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/bsearch/test2/test2.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/qsort/test1/test1.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/qsort/test2/test2.cpp diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index 7b0e92c66455a..16fdd6cf02617 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -3980,9 +3980,6 @@ PAL_GetCurrentThreadAffinitySet(SIZE_T size, UINT_PTR* data); To avoid name collisions, those functions have been renamed using defines */ #ifndef PAL_STDCPP_COMPAT -#define qsort PAL_qsort -#define bsearch PAL_bsearch - // Forward declare functions that are in header files we can't include yet int printf(const char *, ...); int vprintf(const char *, va_list); diff --git a/src/coreclr/pal/src/cruntime/misc.cpp b/src/coreclr/pal/src/cruntime/misc.cpp index 0f5dd74c61844..2240b0b48c678 100644 --- a/src/coreclr/pal/src/cruntime/misc.cpp +++ b/src/coreclr/pal/src/cruntime/misc.cpp @@ -132,64 +132,3 @@ extern "C" PALIMPORT DLLEXPORT FILE* __cdecl PAL_stderr() { return stderr; } - -PALIMPORT -void __cdecl -PAL_qsort(void *base, size_t nmemb, size_t size, - int (__cdecl *compar )(const void *, const void *)) -{ - PERF_ENTRY(qsort); - ENTRY("qsort(base=%p, nmemb=%lu, size=%lu, compar=%p\n", - base,(unsigned long) nmemb,(unsigned long) size, compar); - -/* reset ENTRY nesting level back to zero, qsort will invoke app-defined - callbacks and we want their entry traces... */ -#if _ENABLE_DEBUG_MESSAGES_ -{ - int old_level; - old_level = DBG_change_entrylevel(0); -#endif /* _ENABLE_DEBUG_MESSAGES_ */ - - qsort(base,nmemb,size,compar); - -/* ...and set nesting level back to what it was */ -#if _ENABLE_DEBUG_MESSAGES_ - DBG_change_entrylevel(old_level); -} -#endif /* _ENABLE_DEBUG_MESSAGES_ */ - - LOGEXIT("qsort returns\n"); - PERF_EXIT(qsort); -} - -PALIMPORT -void * __cdecl -PAL_bsearch(const void *key, const void *base, size_t nmemb, size_t size, - int (__cdecl *compar)(const void *, const void *)) -{ - void *retval; - - PERF_ENTRY(bsearch); - ENTRY("bsearch(key=%p, base=%p, nmemb=%lu, size=%lu, compar=%p\n", - key, base, (unsigned long) nmemb, (unsigned long) size, compar); - -/* reset ENTRY nesting level back to zero, bsearch will invoke app-defined - callbacks and we want their entry traces... 
*/ -#if _ENABLE_DEBUG_MESSAGES_ -{ - int old_level; - old_level = DBG_change_entrylevel(0); -#endif /* _ENABLE_DEBUG_MESSAGES_ */ - - retval = bsearch(key,base,nmemb,size,compar); - -/* ...and set nesting level back to what it was */ -#if _ENABLE_DEBUG_MESSAGES_ - DBG_change_entrylevel(old_level); -} -#endif /* _ENABLE_DEBUG_MESSAGES_ */ - - LOGEXIT("bsearch returns %p\n",retval); - PERF_EXIT(bsearch); - return retval; -} diff --git a/src/coreclr/pal/src/include/pal/palinternal.h b/src/coreclr/pal/src/include/pal/palinternal.h index ee758fad3da99..b5a543bb77419 100644 --- a/src/coreclr/pal/src/include/pal/palinternal.h +++ b/src/coreclr/pal/src/include/pal/palinternal.h @@ -281,6 +281,8 @@ function_name() to call the system's implementation #define malloc DUMMY_malloc #define time DUMMY_time #define getenv DUMMY_getenv +#define qsort DUMMY_qsort +#define bsearch DUMMY_bsearch /* RAND_MAX needed to be renamed to avoid duplicate definition when including stdlib.h header files. PAL_RAND_MAX should have the same value as RAND_MAX diff --git a/src/coreclr/pal/tests/palsuite/CMakeLists.txt b/src/coreclr/pal/tests/palsuite/CMakeLists.txt index 5b5a2025bb146..0b8d0993b0c06 100644 --- a/src/coreclr/pal/tests/palsuite/CMakeLists.txt +++ b/src/coreclr/pal/tests/palsuite/CMakeLists.txt @@ -65,8 +65,6 @@ add_executable_clr(paltests #composite/wfmo/mutex.cpp c_runtime/atof/test1/test1.cpp c_runtime/atoi/test1/test1.cpp - c_runtime/bsearch/test1/test1.cpp - c_runtime/bsearch/test2/test2.cpp c_runtime/isalnum/test1/test1.cpp c_runtime/isalpha/test1/test1.cpp c_runtime/isdigit/test1/test1.cpp @@ -83,8 +81,6 @@ add_executable_clr(paltests c_runtime/memcmp/test1/test1.cpp c_runtime/memmove/test1/test1.cpp c_runtime/memset/test1/test1.cpp - c_runtime/qsort/test1/test1.cpp - c_runtime/qsort/test2/test2.cpp c_runtime/sscanf_s/test1/test1.cpp c_runtime/sscanf_s/test10/test10.cpp c_runtime/sscanf_s/test11/test11.cpp diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/bsearch/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/bsearch/test1/test1.cpp deleted file mode 100644 index eacb660dee096..0000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/bsearch/test1/test1.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test1.c -** -** Purpose: Calls bsearch to find a character in a sorted buffer, and -** verifies that the correct position is returned. 
-** -** -**==========================================================================*/ - -#include - -int __cdecl charcmp_bsearch_test1(const void *pa, const void *pb) -{ - return memcmp(pa, pb, 1); -} - -PALTEST(c_runtime_bsearch_test1_paltest_bsearch_test1, "c_runtime/bsearch/test1/paltest_bsearch_test1") -{ - - const char array[] = "abcdefghij"; - char * found=NULL; - - /* - * Initialize the PAL and return FAIL if this fails - */ - if (0 != (PAL_Initialize(argc, argv))) - { - return FAIL; - } - - found = (char *)bsearch(&"d", array, sizeof(array) - 1, (sizeof(char)) - , charcmp_bsearch_test1); - if (found != array + 3) - { - Fail ("bsearch was unable to find a specified character in a " - "sorted list.\n"); - } - PAL_Terminate(); - return PASS; -} - - - diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/bsearch/test2/test2.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/bsearch/test2/test2.cpp deleted file mode 100644 index a916e61362ee4..0000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/bsearch/test2/test2.cpp +++ /dev/null @@ -1,56 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test1.c -** -** Purpose: Calls bsearch to find a character in a sorted buffer, -** that does not exist. -** -** -**==========================================================================*/ - -#include - -int __cdecl charcmp_bsearch_test2(const void *pa, const void *pb) -{ - return *(const char *)pa - *(const char *)pb; -} - -PALTEST(c_runtime_bsearch_test2_paltest_bsearch_test2, "c_runtime/bsearch/test2/paltest_bsearch_test2") -{ - - const char array[] = "abcefghij"; - const char missing[] = "0dz"; - char * found=NULL; - const char * candidate = missing; - - /* - * Initialize the PAL and return FAIL if this fails - */ - if (0 != (PAL_Initialize(argc, argv))) - { - return FAIL; - } - - while (*candidate) { - found = (char *)bsearch(candidate, array, sizeof(array) - 1, - (sizeof(char)), charcmp_bsearch_test2); - if (found != NULL) - { - Fail ("ERROR: bsearch was able to find a specified character '%c' " - "in a sorted list '%s' as '%c' " - "even though the character is not in the list.\n", - *candidate, array, *found); - } - - candidate++; - } - - PAL_Terminate(); - return PASS; -} - - - diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/qsort/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/qsort/test1/test1.cpp deleted file mode 100644 index 57b288a809ea5..0000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/qsort/test1/test1.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test1.c -** -** Purpose: Calls qsort to sort a buffer, and verifies that it has done -** the job correctly. 
-** -** -**==========================================================================*/ - -#include - -int __cdecl charcmp_qsort_test1(const void *pa, const void *pb) -{ - return memcmp(pa, pb, 1); -} - -PALTEST(c_runtime_qsort_test1_paltest_qsort_test1, "c_runtime/qsort/test1/paltest_qsort_test1") -{ - char before[] = "cgaiehdbjf"; - const char after[] = "abcdefghij"; - - if (PAL_Initialize(argc, argv)) - { - return FAIL; - } - - - qsort(before, sizeof(before) - 1, sizeof(char), charcmp_qsort_test1); - - if (memcmp(before, after, sizeof(before)) != 0) - { - Fail("qsort did not correctly sort an array of characters.\n"); - } - - PAL_Terminate(); - return PASS; - -} - - - - - diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/qsort/test2/test2.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/qsort/test2/test2.cpp deleted file mode 100644 index 20d76c5677e3d..0000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/qsort/test2/test2.cpp +++ /dev/null @@ -1,48 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================================ -** -** Source: test2.c -** -** Purpose: Calls qsort to sort a buffer, and verifies that it has done -** the job correctly. -** -** -**==========================================================================*/ - -#include - -int __cdecl twocharcmp_qsort_test2(const void *pa, const void *pb) -{ - return memcmp(pa, pb, 2); -} - -PALTEST(c_runtime_qsort_test2_paltest_qsort_test2, "c_runtime/qsort/test2/paltest_qsort_test2") -{ - char before[] = "ccggaaiieehhddbbjjff"; - const char after[] = "aabbccddeeffgghhiijj"; - - if (PAL_Initialize(argc, argv)) - { - return FAIL; - } - - - qsort(before, (sizeof(before) - 1) / 2, 2 * sizeof(char), twocharcmp_qsort_test2); - - if (memcmp(before, after, sizeof(before)) != 0) - { - Fail("qsort did not correctly sort an array of 2-character " - "buffers.\n"); - } - - PAL_Terminate(); - return PASS; - -} - - - - - diff --git a/src/coreclr/pal/tests/palsuite/compilableTests.txt b/src/coreclr/pal/tests/palsuite/compilableTests.txt index e1b8ecdeb2685..987fe6fcd9e65 100644 --- a/src/coreclr/pal/tests/palsuite/compilableTests.txt +++ b/src/coreclr/pal/tests/palsuite/compilableTests.txt @@ -1,7 +1,5 @@ c_runtime/atof/test1/paltest_atof_test1 c_runtime/atoi/test1/paltest_atoi_test1 -c_runtime/bsearch/test1/paltest_bsearch_test1 -c_runtime/bsearch/test2/paltest_bsearch_test2 c_runtime/cbrt/test1/paltest_cbrt_test1 c_runtime/cbrtf/test1/paltest_cbrtf_test1 c_runtime/ceil/test1/paltest_ceil_test1 @@ -27,8 +25,6 @@ c_runtime/memchr/test1/paltest_memchr_test1 c_runtime/memcmp/test1/paltest_memcmp_test1 c_runtime/memmove/test1/paltest_memmove_test1 c_runtime/memset/test1/paltest_memset_test1 -c_runtime/qsort/test1/paltest_qsort_test1 -c_runtime/qsort/test2/paltest_qsort_test2 c_runtime/sscanf_s/test1/paltest_sscanf_test1 c_runtime/sscanf_s/test10/paltest_sscanf_test10 c_runtime/sscanf_s/test11/paltest_sscanf_test11 diff --git a/src/coreclr/pal/tests/palsuite/paltestlist.txt b/src/coreclr/pal/tests/palsuite/paltestlist.txt index f47e840dc93da..8274b4a4e873b 100644 --- a/src/coreclr/pal/tests/palsuite/paltestlist.txt +++ b/src/coreclr/pal/tests/palsuite/paltestlist.txt @@ -1,9 +1,5 @@ c_runtime/atof/test1/paltest_atof_test1 c_runtime/atoi/test1/paltest_atoi_test1 -c_runtime/bsearch/test1/paltest_bsearch_test1 -c_runtime/bsearch/test2/paltest_bsearch_test2 
-c_runtime/errno/test1/paltest_errno_test1 -c_runtime/errno/test2/paltest_errno_test2 c_runtime/isalnum/test1/paltest_isalnum_test1 c_runtime/isalpha/test1/paltest_isalpha_test1 c_runtime/isdigit/test1/paltest_isdigit_test1 @@ -18,8 +14,6 @@ c_runtime/memchr/test1/paltest_memchr_test1 c_runtime/memcmp/test1/paltest_memcmp_test1 c_runtime/memmove/test1/paltest_memmove_test1 c_runtime/memset/test1/paltest_memset_test1 -c_runtime/qsort/test1/paltest_qsort_test1 -c_runtime/qsort/test2/paltest_qsort_test2 c_runtime/sscanf_s/test1/paltest_sscanf_test1 c_runtime/sscanf_s/test10/paltest_sscanf_test10 c_runtime/sscanf_s/test11/paltest_sscanf_test11 From b528590297782219c9ed9d51a2f2bb12dd8011fa Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 13 Feb 2024 00:47:31 +0000 Subject: [PATCH 11/60] Remove CRT PAL, CLR STL PAL, and enable including CRT and STL headers. --- eng/native/configurecompiler.cmake | 1 + src/coreclr/CMakeLists.txt | 6 - src/coreclr/binder/assemblyname.cpp | 2 +- src/coreclr/debug/di/rspriv.h | 4 +- src/coreclr/debug/di/shimcallback.cpp | 2 +- src/coreclr/debug/di/stdafx.h | 3 + src/coreclr/debug/ee/debugger.cpp | 2 +- src/coreclr/debug/ee/stdafx.h | 1 + .../debug/shared/dbgtransportsession.cpp | 2 +- .../dlls/mscordac/mscordac_unixexports.src | 6 - src/coreclr/dlls/mscorpe/stdafx.h | 4 + src/coreclr/gc/env/common.h | 1 + src/coreclr/gc/gc.cpp | 10 + src/coreclr/ildasm/ildasmpch.h | 1 + src/coreclr/inc/clr_std/algorithm | 118 ---- src/coreclr/inc/clr_std/string | 425 ------------ src/coreclr/inc/clr_std/type_traits | 627 ------------------ src/coreclr/inc/clr_std/utility | 253 ------- src/coreclr/inc/clr_std/vector | 462 ------------- src/coreclr/inc/crtwrap.h | 1 + src/coreclr/inc/daccess.h | 4 +- src/coreclr/inc/gcinfotypes.h | 12 +- src/coreclr/inc/holder.h | 5 - src/coreclr/inc/safemath.h | 4 - src/coreclr/inc/utilcode.h | 57 -- src/coreclr/jit/inline.h | 2 +- src/coreclr/jit/jitstd/list.h | 2 +- src/coreclr/jit/jitstd/utility.h | 2 +- src/coreclr/jit/targetamd64.h | 2 +- src/coreclr/jit/targetarm.h | 2 +- src/coreclr/jit/targetarm64.h | 2 +- src/coreclr/jit/targetloongarch64.h | 2 +- src/coreclr/jit/targetriscv64.h | 2 +- src/coreclr/jit/targetx86.h | 2 +- src/coreclr/jit/utils.h | 14 +- src/coreclr/md/ceefilegen/blobfetcher.cpp | 2 +- src/coreclr/md/ceefilegen/stdafx.h | 4 + src/coreclr/md/compiler/stdafx.h | 4 + src/coreclr/md/enc/stdafx.h | 4 + src/coreclr/md/runtime/stdafx.h | 1 + src/coreclr/pal/inc/pal.h | 367 +--------- src/coreclr/pal/inc/pal_mstypes.h | 16 +- src/coreclr/pal/inc/rt/cpp/assert.h | 12 - src/coreclr/pal/inc/rt/cpp/cstdlib | 13 - src/coreclr/pal/inc/rt/cpp/ctype.h | 12 - src/coreclr/pal/inc/rt/cpp/emmintrin.h | 128 ---- src/coreclr/pal/inc/rt/cpp/fcntl.h | 12 - src/coreclr/pal/inc/rt/cpp/float.h | 12 - src/coreclr/pal/inc/rt/cpp/limits.h | 12 - src/coreclr/pal/inc/rt/cpp/malloc.h | 12 - src/coreclr/pal/inc/rt/cpp/math.h | 12 - src/coreclr/pal/inc/rt/cpp/memory.h | 12 - src/coreclr/pal/inc/rt/cpp/stdarg.h | 12 - src/coreclr/pal/inc/rt/cpp/stdbool.h | 4 - src/coreclr/pal/inc/rt/cpp/stddef.h | 12 - src/coreclr/pal/inc/rt/cpp/stdint.h | 4 - src/coreclr/pal/inc/rt/cpp/stdio.h | 12 - src/coreclr/pal/inc/rt/cpp/stdlib.h | 12 - src/coreclr/pal/inc/rt/cpp/string.h | 12 - src/coreclr/pal/inc/rt/cpp/time.h | 12 - src/coreclr/pal/inc/rt/cpp/wchar.h | 12 - src/coreclr/pal/inc/rt/cpp/xmmintrin.h | 117 ---- src/coreclr/pal/inc/rt/palrt.h | 14 - src/coreclr/pal/inc/rt/safecrt.h | 14 +- src/coreclr/pal/inc/rt/sal.h | 6 +- 
src/coreclr/pal/inc/rt/specstrings_strict.h | 1 - src/coreclr/pal/inc/rt/specstrings_undef.h | 1 - src/coreclr/pal/src/cruntime/misc.cpp | 31 - src/coreclr/pal/src/debug/debug.cpp | 1 + src/coreclr/pal/src/include/pal/file.h | 1 + src/coreclr/pal/src/include/pal/palinternal.h | 434 ------------ src/coreclr/pal/src/map/map.cpp | 1 + src/coreclr/pal/src/misc/miscpalapi.cpp | 1 + src/coreclr/pal/src/synchmgr/synchmanager.cpp | 1 + src/coreclr/pal/src/thread/process.cpp | 1 + src/coreclr/pal/src/thread/threadsusp.cpp | 1 + src/coreclr/palrt/memorystream.cpp | 6 +- .../StressLogAnalyzer/StressLogPlugin.cpp | 10 +- src/coreclr/tools/metainfo/mdinfo.cpp | 3 +- .../superpmi/superpmi-shared/standardpch.h | 23 +- src/coreclr/utilcode/loaderheap.cpp | 2 +- src/coreclr/utilcode/stdafx.h | 3 + src/coreclr/utilcode/stgpool.cpp | 4 +- src/coreclr/utilcode/stresslog.cpp | 4 +- src/coreclr/utilcode/utsem.cpp | 2 +- src/coreclr/vm/appdomain.cpp | 4 +- src/coreclr/vm/callcounting.cpp | 2 +- src/coreclr/vm/castcache.cpp | 1 + src/coreclr/vm/ceeload.cpp | 12 +- src/coreclr/vm/ceemain.cpp | 2 +- src/coreclr/vm/classhash.cpp | 2 +- src/coreclr/vm/classlayoutinfo.cpp | 2 +- src/coreclr/vm/codeman.cpp | 6 +- src/coreclr/vm/common.h | 5 +- src/coreclr/vm/dacenumerablehash.inl | 2 +- src/coreclr/vm/dynamicmethod.cpp | 2 +- src/coreclr/vm/interpreter.h | 2 +- src/coreclr/vm/jithelpers.cpp | 30 +- src/coreclr/vm/jitinterface.cpp | 4 +- src/coreclr/vm/methodtablebuilder.cpp | 4 +- src/coreclr/vm/object.inl | 2 +- src/coreclr/vm/profdetach.cpp | 4 +- src/coreclr/vm/qcall.h | 2 +- src/coreclr/vm/stackingallocator.cpp | 2 +- src/coreclr/vm/stringliteralmap.cpp | 2 +- src/coreclr/vm/syncblk.cpp | 2 +- src/coreclr/vm/threadstatics.cpp | 4 +- src/coreclr/vm/util.hpp | 2 +- src/coreclr/vm/virtualcallstub.cpp | 2 + src/mono/dlls/mscordbi/CMakeLists.txt | 1 - 110 files changed, 166 insertions(+), 3391 deletions(-) delete mode 100644 src/coreclr/inc/clr_std/algorithm delete mode 100644 src/coreclr/inc/clr_std/string delete mode 100644 src/coreclr/inc/clr_std/type_traits delete mode 100644 src/coreclr/inc/clr_std/utility delete mode 100644 src/coreclr/inc/clr_std/vector delete mode 100644 src/coreclr/pal/inc/rt/cpp/assert.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/cstdlib delete mode 100644 src/coreclr/pal/inc/rt/cpp/ctype.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/emmintrin.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/fcntl.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/float.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/limits.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/malloc.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/math.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/memory.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/stdarg.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/stdbool.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/stddef.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/stdint.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/stdio.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/stdlib.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/string.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/time.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/wchar.h delete mode 100644 src/coreclr/pal/inc/rt/cpp/xmmintrin.h diff --git a/eng/native/configurecompiler.cmake b/eng/native/configurecompiler.cmake index 166fd52ab9656..3779adfc0b258 100644 --- a/eng/native/configurecompiler.cmake +++ b/eng/native/configurecompiler.cmake @@ -26,6 +26,7 @@ if (CLR_CMAKE_HOST_UNIX) add_compile_options(-Wall) if 
(CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Wno-null-conversion) + add_compile_options(-Wno-null-arithmetic) add_compile_options(-glldb) else() add_compile_options($<$:-Werror=conversion-null>) diff --git a/src/coreclr/CMakeLists.txt b/src/coreclr/CMakeLists.txt index 1c314d9bf624e..79de28794d355 100644 --- a/src/coreclr/CMakeLists.txt +++ b/src/coreclr/CMakeLists.txt @@ -202,12 +202,6 @@ if(CLR_CMAKE_HOST_UNIX) add_subdirectory(debug/createdump) endif(CLR_CMAKE_HOST_OSX OR (CLR_CMAKE_HOST_LINUX AND NOT CLR_CMAKE_HOST_UNIX_X86 AND NOT CLR_CMAKE_HOST_ANDROID)) - # Include the dummy c++ include files - include_directories("pal/inc/rt/cpp") - - # This prevents inclusion of standard C compiler headers - add_compile_options(-nostdinc) - set (NATIVE_RESOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/nativeresources) include_directories(${NATIVE_RESOURCE_DIR}) set (PROCESS_RC_SCRIPT ${NATIVE_RESOURCE_DIR}/processrc.sh) diff --git a/src/coreclr/binder/assemblyname.cpp b/src/coreclr/binder/assemblyname.cpp index 9eea2ee8ba736..0c96f6be47ec2 100644 --- a/src/coreclr/binder/assemblyname.cpp +++ b/src/coreclr/binder/assemblyname.cpp @@ -11,10 +11,10 @@ // // ============================================================ +#include "common.h" #include "assemblyname.hpp" #include "assemblybindercommon.hpp" -#include "common.h" #include "utils.hpp" #include "textualidentityparser.hpp" diff --git a/src/coreclr/debug/di/rspriv.h b/src/coreclr/debug/di/rspriv.h index ceadc7eedafe7..3a12af67e42ab 100644 --- a/src/coreclr/debug/di/rspriv.h +++ b/src/coreclr/debug/di/rspriv.h @@ -3975,9 +3975,9 @@ class CordbProcess : // CORDB_ADDRESS's are UINT_PTR's (64 bit under HOST_64BIT, 32 bit otherwise) #if defined(TARGET_64BIT) -#define MAX_ADDRESS (_UI64_MAX) +#define MAX_ADDRESS (UINT64_MAX) #else -#define MAX_ADDRESS (_UI32_MAX) +#define MAX_ADDRESS (UINT32_MAX) #endif #define MIN_ADDRESS (0x0) CORDB_ADDRESS m_minPatchAddr; //smallest patch in table diff --git a/src/coreclr/debug/di/shimcallback.cpp b/src/coreclr/debug/di/shimcallback.cpp index 4e8f029209def..bf6c817fc880d 100644 --- a/src/coreclr/debug/di/shimcallback.cpp +++ b/src/coreclr/debug/di/shimcallback.cpp @@ -1408,7 +1408,7 @@ HRESULT ShimProxyCallback::DataBreakpoint(ICorDebugProcess* pProcess, ICorDebugT this->m_pThread.Assign(pThread); _ASSERTE(contextSize == sizeof(CONTEXT)); - this->m_contextSize = min(contextSize, sizeof(CONTEXT)); + this->m_contextSize = min(contextSize, (ULONG32)sizeof(CONTEXT)); memcpy(&(this->m_context), pContext, this->m_contextSize); } diff --git a/src/coreclr/debug/di/stdafx.h b/src/coreclr/debug/di/stdafx.h index 061c576c4725b..8ee806f88f271 100644 --- a/src/coreclr/debug/di/stdafx.h +++ b/src/coreclr/debug/di/stdafx.h @@ -10,6 +10,9 @@ #include #include #include +#include +using std::min; +using std::max; #include diff --git a/src/coreclr/debug/ee/debugger.cpp b/src/coreclr/debug/ee/debugger.cpp index b97f76c4a03c6..0b69cbe8d137d 100644 --- a/src/coreclr/debug/ee/debugger.cpp +++ b/src/coreclr/debug/ee/debugger.cpp @@ -3029,7 +3029,7 @@ HRESULT Debugger::GetILToNativeMappingIntoArrays( if (pDJI == NULL) return E_FAIL; - ULONG32 cMap = min(cMapMax, pDJI->GetSequenceMapCount()); + ULONG32 cMap = min((ULONG32)cMapMax, pDJI->GetSequenceMapCount()); DebuggerILToNativeMap * rgMapInt = pDJI->GetSequenceMap(); NewArrayHolder rguiILOffsetTemp = new (nothrow) UINT[cMap]; diff --git a/src/coreclr/debug/ee/stdafx.h b/src/coreclr/debug/ee/stdafx.h index f21a670e210bf..f81beacb25871 100644 --- a/src/coreclr/debug/ee/stdafx.h +++ 
b/src/coreclr/debug/ee/stdafx.h @@ -12,6 +12,7 @@ #include #include #include +#include #include diff --git a/src/coreclr/debug/shared/dbgtransportsession.cpp b/src/coreclr/debug/shared/dbgtransportsession.cpp index 8b8ca6203c957..3bebb8282aed7 100644 --- a/src/coreclr/debug/shared/dbgtransportsession.cpp +++ b/src/coreclr/debug/shared/dbgtransportsession.cpp @@ -1949,7 +1949,7 @@ void DbgTransportSession::TransportWorker() DWORD cbBytesToRead = sReceiveHeader.TypeSpecificData.MemoryAccess.m_cbLeftSideBuffer; while (cbBytesToRead) { - DWORD cbTransfer = min(cbBytesToRead, sizeof(rgDummy)); + DWORD cbTransfer = min(cbBytesToRead, (DWORD)sizeof(rgDummy)); if (!ReceiveBlock(rgDummy, cbTransfer)) HANDLE_TRANSIENT_ERROR(); cbBytesToRead -= cbTransfer; diff --git a/src/coreclr/dlls/mscordac/mscordac_unixexports.src b/src/coreclr/dlls/mscordac/mscordac_unixexports.src index 43853ae5cbdc0..ad056eb1104e3 100644 --- a/src/coreclr/dlls/mscordac/mscordac_unixexports.src +++ b/src/coreclr/dlls/mscordac/mscordac_unixexports.src @@ -22,15 +22,10 @@ nativeStringResourceTable_mscorrc ; All the # exports are prefixed with DAC_ #PAL_CatchHardwareExceptionHolderEnter #PAL_CatchHardwareExceptionHolderExit -#PAL_bsearch #PAL_CopyModuleData -#PAL_errno -#PAL_free #PAL_GetLogicalCpuCountFromOS #PAL_GetTotalCpuCount #PAL_GetUnwindInfoSize -#PAL_stdout -#PAL_stderr #PAL_GetApplicationGroupId #PAL_GetTransportName #PAL_GetCurrentThread @@ -47,7 +42,6 @@ nativeStringResourceTable_mscorrc #PAL_ReadProcessMemory #PAL_ProbeMemory #PAL_Random -#PAL_qsort #PAL__wcstoui64 #PAL_wcstoul #PAL_wcstod diff --git a/src/coreclr/dlls/mscorpe/stdafx.h b/src/coreclr/dlls/mscorpe/stdafx.h index 996113b500154..bd78a49013c94 100644 --- a/src/coreclr/dlls/mscorpe/stdafx.h +++ b/src/coreclr/dlls/mscorpe/stdafx.h @@ -11,6 +11,7 @@ #include #include #include +#include #define FEATURE_NO_HOST // Do not use host interface #include @@ -21,3 +22,6 @@ #include "ceegen.h" #include "ceefilegenwriter.h" #include "ceesectionstring.h" + +using std::min; +using std::max; diff --git a/src/coreclr/gc/env/common.h b/src/coreclr/gc/env/common.h index 78562ef0438b4..6c612f672eab0 100644 --- a/src/coreclr/gc/env/common.h +++ b/src/coreclr/gc/env/common.h @@ -24,6 +24,7 @@ #include #include +#include #ifdef TARGET_UNIX #include diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index 0471326c0af5f..279b5c57efbc0 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -51,6 +51,16 @@ class gc_rand uint64_t gc_rand::x = 0; +// NativeAOT defines max/min as macros. +// CoreCLR does not. +// Define them if they aren't already available. +#ifndef min +#define min(_a, _b) ((_a) < (_b) ? (_a) : (_b)) +#endif +#ifndef max +#define max(_a, _b) ((_a) < (_b) ? (_b) : (_a)) +#endif + #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) BOOL bgc_heap_walk_for_etw_p = FALSE; #endif //BACKGROUND_GC && FEATURE_EVENT_TRACE diff --git a/src/coreclr/ildasm/ildasmpch.h b/src/coreclr/ildasm/ildasmpch.h index 9d89ba46db52c..0717099eb3cc5 100644 --- a/src/coreclr/ildasm/ildasmpch.h +++ b/src/coreclr/ildasm/ildasmpch.h @@ -12,6 +12,7 @@ #include #include #include +#include #ifndef Debug_ReportError #define Debug_ReportError(strMessage) diff --git a/src/coreclr/inc/clr_std/algorithm b/src/coreclr/inc/clr_std/algorithm deleted file mode 100644 index ebd21b09c5e58..0000000000000 --- a/src/coreclr/inc/clr_std/algorithm +++ /dev/null @@ -1,118 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. 
-// The .NET Foundation licenses this file to you under the MIT license. - -// -// clr_std/algorithm -// -// Copy of some key Standard Template Library functionality - -#ifdef _MSC_VER -#pragma once -#endif - -#ifdef USE_STL -#include -#else -#ifndef __clr_std_algorithm_h__ -#define __clr_std_algorithm_h__ - -namespace std -{ - template - iter find_if ( iter first, iter last, CompareFunc comp ) - { - for ( ; first!=last ; first++ ) - if ( comp(*first) ) - break; - return first; - } - - template - iter find(iter first, iter last, const T& val) - { - for (;first != last; first++) - { - if (*first == val) - break; - } - return first; - } - - template - iter qsort_partition( iter first, iter last, iter pivot, comp compare ) - { - iter lastMinusOne = last - 1; - swap(pivot, lastMinusOne); - - // Pivot is at end - pivot = last - 1; - - iter partitionLoc = first; - - for (iter partitionWalk = first; partitionWalk != pivot; ++partitionWalk) - { - if (compare(*partitionWalk, *pivot)) - { - swap(*partitionWalk, *partitionLoc); - partitionLoc++; - } - } - swap(*pivot, *partitionLoc); - - return partitionLoc; - } - - template - void sort_worker ( iter first, iter last, comp compare ) - { - typename iter::difference_type RangeSize = last - first; - - // When down to a list of size 1, be done - if (RangeSize < 2) - return; - - // Pick pivot - - // Use simple pick middle algorithm - iter pivotLoc = first + (RangeSize / 2); - - // Partition - pivotLoc = qsort_partition(first, last, pivotLoc, compare); - - // Sort first array - sort_worker(first, pivotLoc, compare); - - // Sort second array - sort_worker(pivotLoc + 1, last, compare); - } - - template - void sort ( iter first, iter last, comp compare ) - { - sort_worker(first, last, compare); - if (first != last) - { - for (iter i = first; i < (last - 1); i++) - { - // Assert that the sort function works. - assert(!compare(*(i+1), *i)); - } - } - } - - template - OutIter transform( InIter first, InIter last, OutIter dest, Fn1 func ) - { - for ( ; first!=last ; ++first, ++dest ) - *dest = func(*first); - return dest; - } - -} // namespace std - -#endif /* __clr_std_algorithm_h__ */ - -#endif // !USE_STL - -// Help the VIM editor figure out what kind of file this no-extension file is. -// vim: filetype=cpp diff --git a/src/coreclr/inc/clr_std/string b/src/coreclr/inc/clr_std/string deleted file mode 100644 index 59ac67b98653c..0000000000000 --- a/src/coreclr/inc/clr_std/string +++ /dev/null @@ -1,425 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -// -// clr_std/string -// -// Copy of some key Standard Template Library functionality -// -// This was created for use with SuperPMI. It has the minimal functionality needed by SuperPMI. It hasn't -// been tested elsewhere. - -#ifdef _MSC_VER -#pragma once -#endif - -#ifdef USE_STL -#include -#else -#ifndef __clr_std_string_h__ -#define __clr_std_string_h__ - -#include "clr_std/vector" - -namespace std -{ - -template -class basic_string -{ -public: - typedef T value_type; - typedef size_t size_type; - typedef typename vector::iterator iterator; - typedef typename vector::const_iterator const_iterator; - - basic_string() - : m_string(1) // start with a string of length 1 for null terminator - { - m_string[0] = T(); - } - - basic_string(const basic_string& _Right) - { - assign(_Right); - } - - // Initialize a string with _Count characters from the string pointed at by _Ptr. 
- // If you want to include the trailing null character, _Count needs to include that. - basic_string(const value_type* _Ptr, size_type _Count) - : m_string(_Count + 1) // add 1 for a null terminator - { - copy(_Ptr, _Count); - } - - basic_string(const value_type* _Ptr) : basic_string(_Ptr, c_len(_Ptr)) - { - } - - void reserve(size_t newcapacity) - { - m_string.reserve(newcapacity + 1); // add 1 for the null terminator - } - - // - // Assignment - // - - basic_string& operator=(const basic_string& _Right) - { - if (this != &_Right) - { - assign(_Right); - } - return (*this); - } - - basic_string& assign(const basic_string& _Right) - { - m_string.resize(_Right.size() + 1); // +1 for null terminator - copy(_Right); - return (*this); - } - - // - // Basic data copying - // - - void copy(const basic_string& _Right) - { - assert(size() >= _Right.size()); - size_type i; - for (i = 0; i < _Right.size(); i++) - { - m_string[i] = _Right.m_string[i]; - } - m_string[i] = T(); - } - - void copy(const value_type* _Ptr, size_type _Count) - { - assert(size() >= _Count); - size_type i; - for (i = 0; i < _Count; i++) - { - m_string[i] = _Ptr[i]; - } - m_string[i] = T(); - } - - // - // Appending - // - - // Append a C-style string to the string. - basic_string& operator+=(const value_type* _Ptr) - { - size_type oldsize = size(); // doesn't include null terminator - size_type addsize = c_len(_Ptr); // doesn't include null terminator - size_type newsize = oldsize + addsize + 1; - m_string.resize(newsize); - size_type i; - for (i = oldsize; i < newsize - 1; i++) - { - m_string[i] = *_Ptr++; - } - m_string[i] = T(); - return (*this); - } - - basic_string& operator+=(const basic_string& _Right) - { - size_type oldsize = size(); // doesn't include null terminator - size_type addsize = _Right.size(); // doesn't include null terminator - size_type newsize = oldsize + addsize + 1; - m_string.resize(newsize); - size_type new_index = oldsize, right_index = 0; - while (right_index < addsize) - { - m_string[new_index] = _Right.m_string[right_index]; - ++new_index; - ++right_index; - } - m_string[new_index] = T(); - return (*this); - } - - basic_string& operator+=(value_type _Ch) - { - size_type oldsize = size(); // doesn't include null terminator - m_string[oldsize] = _Ch; // Replace the null terminator with the new symbol. - m_string.push_back(T()); // Return the replaced terminator again. - return (*this); - } - - ~basic_string() - { - // vector destructor does all the work - } - - size_t size() const - { - assert(m_string.size() > 0); - return m_string.size() - 1; // Don't report the null terminator. 
- } - - size_t length() const - { - return size(); - } - - T& operator[](size_t iIndex) - { - assert(iIndex < size() + 1); // allow looking at the null terminator - return m_string[iIndex]; - } - - const T* c_str() const - { - return m_string.data(); - } - - iterator begin() - { - return m_string.begin(); - } - - iterator end() - { - return m_string.end(); - } - - const_iterator cbegin() const - { - return m_string.cbegin(); - } - - const_iterator cend() const - { - return m_string.cend(); - } - - basic_string substr(size_type _Off = 0, size_type _Count = npos) const - { - size_type cursize = size(); - if (_Off >= cursize) - { - // result will be empty - return basic_string(); - } - else - { - if ((_Count == npos) || // No count specified; take the whole string suffix - (_Off + _Count > cursize)) // Count specified is too many characters; just take the whole suffix - { - _Count = cursize - _Off; - } - return basic_string(m_string.data() + _Off, _Count); - } - } - - size_type find_last_of(value_type _Ch) const - { - for (size_type _Off = size(); _Off != 0; _Off--) - { - if (m_string[_Off - 1] == _Ch) - { - return _Off - 1; - } - } - return npos; - } - - bool empty() const - { - return size() == 0; - } - - int compare(const basic_string& _Str) const - { - size_type i; - size_type compareSize = size(); - if (_Str.size() < compareSize) - { - // This string is longer; compare character-by-character only as many characters as we have. - compareSize = _Str.size(); - } - for (i = 0; i < compareSize; i++) - { - if (m_string[i] != _Str.m_string[i]) - { - if (m_string[i] < _Str.m_string[i]) - { - return -1; - } - else - { - return 1; - } - } - } - - // All the characters we compared were identical, but one string might be longer than the other. - if (size() == _Str.size()) - { - // We compared everything. - return 0; - } - else if (size() < _Str.size()) - { - // _Str has more characters than this. - return -1; - } - else - { - // this has more characters than _Str - return 1; - } - } - - static const size_type npos = size_type(-1); - -private: - - // Compute the length in characters of a null-terminated C-style string, not including the trailing null character. - // _Ptr must not be nullptr. 
- size_type c_len(const value_type* _Ptr) - { - size_type count; - for (count = 0; *_Ptr != T(); _Ptr++) - { - count++; - } - return count; - } - - vector m_string; // use a vector<> to represent the string, to avoid reimplementing similar functionality - -}; // class basic_string - -// -// String class instantiations -// - -typedef basic_string string; - -// -// Numeric conversions -// - -// convert integer T to string -template inline -string _IntToString(const char *_Fmt, T _Val) -{ - const size_t MaxIntBufSize = 21; /* can hold -2^63 and 2^64 - 1, plus NUL */ - char buf[MaxIntBufSize]; - int len = sprintf_s(buf, MaxIntBufSize, _Fmt, _Val); - return (string(buf, len)); -} - -inline string to_string(int _Val) -{ - return (_IntToString("%d", _Val)); -} - -inline string to_string(unsigned int _Val) -{ - return (_IntToString("%u", _Val)); -} - -inline string to_string(long _Val) -{ - return (_IntToString("%ld", _Val)); -} - -inline string to_string(unsigned long _Val) -{ - return (_IntToString("%lu", _Val)); -} - -inline string to_string(long long _Val) -{ - return (_IntToString("%lld", _Val)); -} - -inline string to_string(unsigned long long _Val) -{ - return (_IntToString("%llu", _Val)); -} - -// -// Comparisons -// - -template inline -bool operator==( - const basic_string& _Left, - const basic_string& _Right) -{ - return (_Left.compare(_Right) == 0); -} - -template inline -bool operator!=( - const basic_string& _Left, - const basic_string& _Right) -{ - return (!(_Left == _Right)); -} - -template inline -bool operator<( - const basic_string& _Left, - const basic_string& _Right) -{ - return (_Left.compare(_Right) < 0); -} - -template inline -bool operator>( - const basic_string& _Left, - const basic_string& _Right) -{ - return (_Right < _Left); -} - -template inline -bool operator<=( - const basic_string& _Left, - const basic_string& _Right) -{ - return (!(_Right < _Left)); -} - -template inline -bool operator>=( - const basic_string& _Left, - const basic_string& _Right) -{ - return (!(_Left < _Right)); -} - -// -// String concatenation and other string operations -// - -template inline -basic_string operator+( - const basic_string& _Left, - const basic_string& _Right) -{ - basic_string ret; - ret.reserve(_Left.size() + _Right.size()); - ret += _Left; - ret += _Right; - return ret; -} - -}; // namespace std - -#endif /* __clr_std_string_h__ */ - -#endif // !USE_STL - -// Help the VIM editor figure out what kind of file this no-extension file is. -// vim: filetype=cpp diff --git a/src/coreclr/inc/clr_std/type_traits b/src/coreclr/inc/clr_std/type_traits deleted file mode 100644 index 12af99d5c4fee..0000000000000 --- a/src/coreclr/inc/clr_std/type_traits +++ /dev/null @@ -1,627 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -// -// clr_std/utility -// -// Copy of some key Standard Template Library functionality. -// See http://msdn.microsoft.com/en-us/library/bb982077.aspx for documentation. 
-// - -#ifdef _MSC_VER -#pragma once -#endif - -#ifndef __clr_std_type_traits_h__ -#define __clr_std_type_traits_h__ - -#ifdef USE_STL - -#include - -#else - -namespace std -{ - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS remove_const - template - struct remove_const - { // remove top level const qualifier - typedef _Ty type; - }; - - template - struct remove_const - { // remove top level const qualifier - typedef _Ty type; - }; - - template - struct remove_const - { // remove top level const qualifier - typedef _Ty type[]; - }; - - template - struct remove_const - { // remove top level const qualifier - typedef _Ty type[_Nx]; - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS remove_volatile - template - struct remove_volatile - { // remove top level volatile qualifier - typedef _Ty type; - }; - - template - struct remove_volatile - { // remove top level volatile qualifier - typedef _Ty type; - }; - - template - struct remove_volatile - { // remove top level volatile qualifier - typedef _Ty type[]; - }; - - template - struct remove_volatile - { // remove top level volatile qualifier - typedef _Ty type[_Nx]; - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS remove_cv - template - struct remove_cv - { // remove top level const and volatile qualifiers - typedef typename remove_const::type>::type type; - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE remove_reference - template - struct remove_reference - { // remove reference - typedef T type; - }; - - template - struct remove_reference - { // remove reference - typedef T type; - }; - - template - struct remove_reference - { // remove rvalue reference - typedef T type; - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE remove_pointer - template - struct remove_pointer - { // remove pointer - typedef T type; - }; - - template - struct remove_pointer - { // remove pointer - typedef T type; - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE FUNCTION identity - template - struct identity - { // map T to type unchanged - typedef T type; - - inline - const T& operator()(const T& left) const - { // apply identity operator to operand - return (left); - } - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS integral_constant - template - struct integral_constant - { // convenient template for integral constant types - static const _Ty value = _Val; - - typedef _Ty value_type; - typedef integral_constant<_Ty, _Val> type; - }; - - typedef integral_constant true_type; - typedef integral_constant false_type; - - // TEMPLATE CLASS _Cat_base - template - struct _Cat_base - : false_type - { // base class for type predicates - }; - - template<> - struct _Cat_base - : true_type - { // base class for type predicates - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS enable_if - template - struct enable_if - { // type is undefined for assumed !_Test - }; - - template - struct enable_if - { // type is _Type for _Test - typedef _Type type; - }; - - 
//----------------------------------------------------------------------------------------- - // TEMPLATE CLASS conditional - template - struct conditional - { // type is _Ty2 for assumed !_Test - typedef _Ty2 type; - }; - - template - struct conditional - { // type is _Ty1 for _Test - typedef _Ty1 type; - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS make_unsigned - template - struct make_unsigned - { - }; - - template<> - struct make_unsigned - { - typedef unsigned int type; - }; - -#ifndef HOST_UNIX - - template<> - struct make_unsigned - { - typedef unsigned long type; - }; - -#endif // !HOST_UNIX - - template<> - struct make_unsigned<__int64> - { - typedef unsigned __int64 type; - }; - - template<> - struct make_unsigned - { - typedef size_t type; - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS make_signed - template - struct make_signed - { - }; - - template<> - struct make_signed - { - typedef signed int type; - }; - -#ifndef HOST_UNIX - - template<> - struct make_signed - { - typedef signed long type; - }; - -#endif // !HOST_UNIX - - template<> - struct make_signed - { - typedef signed __int64 type; - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS is_lvalue_reference - template - struct is_lvalue_reference - : false_type - { // determine whether _Ty is an lvalue reference - }; - - template - struct is_lvalue_reference<_Ty&> - : true_type - { // determine whether _Ty is an lvalue reference - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS is_rvalue_reference - template - struct is_rvalue_reference - : false_type - { // determine whether _Ty is an rvalue reference - }; - - template - struct is_rvalue_reference<_Ty&&> - : true_type - { // determine whether _Ty is an rvalue reference - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS is_reference - template - struct is_reference - : conditional< - is_lvalue_reference<_Ty>::value || is_rvalue_reference<_Ty>::value, - true_type, - false_type>::type - { // determine whether _Ty is a reference - }; - - // TEMPLATE CLASS is_pointer - template - struct is_pointer - : false_type - { // determine whether _Ty is a pointer - }; - - template - struct is_pointer<_Ty *> - : true_type - { // determine whether _Ty is a pointer - }; - - // TEMPLATE CLASS _Is_integral - template - struct _Is_integral - : false_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - -// On Unix 'long' is a 64-bit type (same as __int64) and the following two 
definitions -// conflict with _Is_integral and _Is_integral. -#ifndef HOST_UNIX - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; -#endif /* HOST_UNIX */ - - #if _HAS_CHAR16_T_LANGUAGE_SUPPORT - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - #endif /* _HAS_CHAR16_T_LANGUAGE_SUPPORT */ - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - template<> - struct _Is_integral - : true_type - { // determine whether _Ty is integral - }; - - // TEMPLATE CLASS is_integral - template - struct is_integral - : _Is_integral::type> - { // determine whether _Ty is integral - }; - - // TEMPLATE CLASS _Is_floating_point - template - struct _Is_floating_point - : false_type - { // determine whether _Ty is floating point - }; - - template<> - struct _Is_floating_point - : true_type - { // determine whether _Ty is floating point - }; - - template<> - struct _Is_floating_point - : true_type - { // determine whether _Ty is floating point - }; - -// In PAL, we define long as int and so this becomes int double, -// which is a nonsense -#ifndef HOST_UNIX - template<> - struct _Is_floating_point - : true_type - { // determine whether _Ty is floating point - }; -#endif - - // TEMPLATE CLASS is_floating_point - template - struct is_floating_point - : _Is_floating_point::type> - { // determine whether _Ty is floating point - }; - - // TEMPLATE CLASS is_arithmetic - template - struct is_arithmetic - : _Cat_base::value - || is_floating_point<_Ty>::value> - { // determine whether _Ty is an arithmetic type - }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS is_signed - template - struct is_signed : conditional< - static_cast::type>(-1) < 0, true_type, false_type>::type {}; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS is_same - template - struct is_same : false_type { }; - - //----------------------------------------------------------------------------------------- - template - struct is_same : true_type { }; - - //----------------------------------------------------------------------------------------- - // TEMPLATE CLASS is_base_of -#ifdef _MSC_VER - - template - struct is_base_of : - conditional<__is_base_of( TBase, TDerived), true_type, false_type>::type {}; - -#else - namespace detail - { - //------------------------------------------------------------------------------------- - // Helper types Small and Big - guarantee that sizeof(Small) < sizeof(Big) - // - - template - struct conversion_helper - { - typedef char Small; - struct Big { char dummy[2]; }; - static Big Test(...); - static Small Test(U); - static T MakeT(); - }; - - //------------------------------------------------------------------------------------- - // class template conversion - // Figures out the conversion relationships between two types - // Invocations (T and U are types): - // a) conversion::exists - // returns (at compile time) true if there is an implicit conversion from T - // to U (example: Derived to Base) - // b) conversion::exists2Way - // returns (at compile time) true if there are both conversions from T - // to U and from U to T (example: int to char and back) - // c) 
conversion::sameType - // returns (at compile time) true if T and U represent the same type - // - // NOTE: might not work if T and U are in a private inheritance hierarchy. - // - - template - struct conversion - { - typedef detail::conversion_helper H; - static const bool exists = sizeof(typename H::Small) == sizeof((H::Test(H::MakeT()))); - static const bool exists2Way = exists && conversion::exists; - static const bool sameType = false; - }; - - template - struct conversion - { - static const bool exists = true; - static const bool exists2Way = true; - static const bool sameType = true; - }; - - template - struct conversion - { - static const bool exists = false; - static const bool exists2Way = false; - static const bool sameType = false; - }; - - template - struct conversion - { - static const bool exists = false; - static const bool exists2Way = false; - static const bool sameType = false; - }; - - template <> - struct conversion - { - static const bool exists = true; - static const bool exists2Way = true; - static const bool sameType = true; - }; - } // detail - - // Note that we need to compare pointer types here, since conversion of types by-value - // just tells us whether or not an implicit conversion constructor exists. We handle - // type parameters that are already pointers specially; see below. - template - struct is_base_of : - conditional::exists, true_type, false_type>::type {}; - - // Specialization to handle type parameters that are already pointers. - template - struct is_base_of : - conditional::exists, true_type, false_type>::type {}; - - // Specialization to handle invalid mixing of pointer types. - template - struct is_base_of : - false_type {}; - - // Specialization to handle invalid mixing of pointer types. - template - struct is_base_of : - false_type {}; - -#endif - - namespace detail - { - template - using void_t = void; - } - // Always false dependent-value for static_asserts. - template - struct _Always_false - { - const bool value = false; - }; - - template - struct _Add_reference { // add reference (non-referenceable type) - using _Lvalue = _Ty; - using _Rvalue = _Ty; - }; - - template - struct _Add_reference<_Ty, detail::void_t<_Ty&>> { // (referenceable type) - using _Lvalue = _Ty&; - using _Rvalue = _Ty&&; - }; - - template - struct add_lvalue_reference { - using type = typename _Add_reference<_Ty>::_Lvalue; - }; - - template - struct add_rvalue_reference { - using type = typename _Add_reference<_Ty>::_Rvalue; - }; - - template - typename add_rvalue_reference<_Ty>::type declval() noexcept - { - static_assert(_Always_false<_Ty>::value, "Calling declval is ill-formed, see N4892 [declval]/2."); - } -} // namespace std - -#endif // !USE_STL - -#define REM_CONST(T) typename std::remove_const< T >::type -#define REM_CV(T) typename std::remove_cv< T >::type -#define REM_REF(T) typename std::remove_reference< T >::type - -#define REF_T(T) REM_REF(T) & -#define REF_CT(T) REM_REF(REM_CONST(T)) const & - -#endif // __clr_std_type_traits_h__ - -// Help the VIM editor figure out what kind of file this no-extension file is. -// vim: filetype=cpp diff --git a/src/coreclr/inc/clr_std/utility b/src/coreclr/inc/clr_std/utility deleted file mode 100644 index 1b6b5a7b72c1e..0000000000000 --- a/src/coreclr/inc/clr_std/utility +++ /dev/null @@ -1,253 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
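// Illustrative aside, not part of the patch: the non-MSVC is_base_of machinery above rests
// on a sizeof-based overload-resolution probe. The same trick, reduced to a standalone
// example (all names here are hypothetical):
#include <cstdio>

struct Base {};
struct Derived : Base {};
struct Unrelated {};

template <typename From, typename To>
struct is_convertible_sketch
{
    typedef char Small;                // sizeof == 1
    struct Big { char dummy[2]; };     // sizeof > 1

    static Small test(To*);            // chosen when From* converts to To*
    static Big   test(...);            // fallback otherwise
    static From* make();               // never defined; only used in an unevaluated context

    static const bool value = sizeof(test(make())) == sizeof(Small);
};

int main()
{
    std::printf("%d\n", (int)is_convertible_sketch<Derived, Base>::value);   // 1
    std::printf("%d\n", (int)is_convertible_sketch<Unrelated, Base>::value); // 0
    return 0;
}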
- -// -// clr_std/utility -// -// Copy of some key Standard Template Library functionality -// See http://msdn.microsoft.com/en-us/library/bb982077.aspx for documentation. -// - -#ifdef _MSC_VER -#pragma once -#endif - -#ifdef USE_STL -#include -#else -#ifndef __clr_std_utility_h__ -#define __clr_std_utility_h__ - -#include "clr_std/type_traits" - -namespace std -{ - //----------------------------------------------------------------------------------------- - // TEMPLATE FUNCTION move - template inline - typename remove_reference::type&& - move(T&& arg) - { // forward _Arg as movable - return ((typename remove_reference::type&&)arg); - } - - //----------------------------------------------------------------------------------------- - // TEMPLATE FUNCTION swap (from ) - template inline - void swap(T& left, T& right) - { // exchange values stored at left and right - T tmp = std::move(left); - left = std::move(right); - right = std::move(tmp); - } - - //----------------------------------------------------------------------------------------- - // TEMPLATE FUNCTION forward - template inline - T&& - forward(typename identity::type& _Arg) - { // forward _Arg, given explicitly specified type parameter - return ((T&&)_Arg); - } -} - -namespace std -{ - //----------------------------------------------------------------------------------------- - // TEMPLATE STRUCT pair - template - struct pair - { // store a pair of values - typedef pair<_Ty1, _Ty2> _Myt; - typedef _Ty1 first_type; - typedef _Ty2 second_type; - - pair() - : first(_Ty1()), second(_Ty2()) - { // construct from defaults - } - - pair(const _Ty1& _Val1, const _Ty2& _Val2) - : first(_Val1.first), second(_Val2.second) - { // construct from specified values - } - - template - pair(pair<_Other1, _Other2>& _Right) - : first(_Right.first), second(_Right.second) - { // construct from compatible pair - } - - template - pair(const pair<_Other1, _Other2>& _Right) - : first(_Right.first), second(_Right.second) - { // construct from compatible pair - } - - void swap(_Myt& _Right) - { // exchange contents with _Right - if (this != &_Right) - { // different, worth swapping - swap(this->first, _Right.first); - swap(this->second, _Right.second); - } - } - - _Myt& operator=(const _Myt& _Right) - { // assign from copied pair - this->first = _Right.first; - this->second = _Right.second; - return (*this); - } - - typedef typename remove_reference<_Ty1>::type _Ty1x; - typedef typename remove_reference<_Ty2>::type _Ty2x; - - pair(_Ty1x&& _Val1, _Ty2x&& _Val2) - : first(std::move(_Val1)), - second(std::move(_Val2)) - { // construct from specified values - } - - pair(const _Ty1x& _Val1, _Ty2x&& _Val2) - : first(_Val1), - second(std::move(_Val2)) - { // construct from specified values - } - - pair(_Ty1x&& _Val1, const _Ty2x& _Val2) - : first(std::move(_Val1)), - second(_Val2) - { // construct from specified values - } - - template - pair(_Other1&& _Val1, _Other2&& _Val2) - : first(std::move(_Val1)), - second(std::move(_Val2)) - { // construct from moved values - } - - template - pair(pair<_Other1, _Other2>&& _Right) - : first(std::move(_Right.first)), - second(std::move(_Right.second)) - { // construct from moved compatible pair - } - - pair& operator=(pair<_Ty1, _Ty2>&& _Right) - { // assign from moved pair - this->first = std::move(_Right.first); - this->second = std::move(_Right.second); - return (*this); - } - - void swap(_Myt&& _Right) - { // exchange contents with _Right - if (this != &_Right) - { // different, worth swapping - this->first = 
std::move(_Right.first); - this->second = std::move(_Right.second); - } - } - - _Ty1 first; // the first stored value - _Ty2 second; // the second stored value - }; // struct pair - - //----------------------------------------------------------------------------------------- - // pair TEMPLATE FUNCTIONS - - template inline - void swap(pair<_Ty1, _Ty2>& _Left, pair<_Ty1, _Ty2>& _Right) - { // swap _Left and _Right pairs - _Left.swap(_Right); - } - - template inline - void swap(pair<_Ty1, _Ty2>& _Left, pair<_Ty1, _Ty2>&& _Right) - { // swap _Left and _Right pairs - typedef pair<_Ty1, _Ty2> _Myt; - _Left.swap(std::forward<_Myt>(_Right)); - } - - template inline - void swap( - pair<_Ty1, _Ty2>&& _Left, - pair<_Ty1, _Ty2>& _Right) - { // swap _Left and _Right pairs - typedef pair<_Ty1, _Ty2> _Myt; - _Right.swap(std::forward<_Myt>(_Left)); - } - - template inline - bool operator==( - const pair<_Ty1, _Ty2>& _Left, - const pair<_Ty1, _Ty2>& _Right) - { // test for pair equality - return (_Left.first == _Right.first && _Left.second == _Right.second); - } - - template inline - bool operator!=( - const pair<_Ty1, _Ty2>& _Left, - const pair<_Ty1, _Ty2>& _Right) - { // test for pair inequality - return (!(_Left == _Right)); - } - - template inline - bool operator<( - const pair<_Ty1, _Ty2>& _Left, - const pair<_Ty1, _Ty2>& _Right) - { // test if _Left < _Right for pairs - return (_Left.first < _Right.first || - (!(_Right.first < _Left.first) && _Left.second < _Right.second)); - } - - template inline - bool operator>( - const pair<_Ty1, _Ty2>& _Left, - const pair<_Ty1, _Ty2>& _Right) - { // test if _Left > _Right for pairs - return (_Right < _Left); - } - - template inline - bool operator<=( - const pair<_Ty1, _Ty2>& _Left, - const pair<_Ty1, _Ty2>& _Right) - { // test if _Left <= _Right for pairs - return (!(_Right < _Left)); - } - - template inline - bool operator>=( - const pair<_Ty1, _Ty2>& _Left, - const pair<_Ty1, _Ty2>& _Right) - { // test if _Left >= _Right for pairs - return (!(_Left < _Right)); - } - - template inline - _InIt begin( - const pair<_InIt, _InIt>& _Pair) - { // return first element of pair - return (_Pair.first); - } - - template inline - _InIt end( - const pair<_InIt, _InIt>& _Pair) - { // return second element of pair - return (_Pair.second); - } - -} // namespace std - -#endif /* __clr_std_utility_h__ */ - -#endif // !USE_STL - -// Help the VIM editor figure out what kind of file this no-extension file is. -// vim: filetype=cpp diff --git a/src/coreclr/inc/clr_std/vector b/src/coreclr/inc/clr_std/vector deleted file mode 100644 index c2d1caba890aa..0000000000000 --- a/src/coreclr/inc/clr_std/vector +++ /dev/null @@ -1,462 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -// -// clr_std/vector -// -// Copy of some key Standard Template Library functionality -// - -#ifdef _MSC_VER -#pragma once -#endif - -#ifdef USE_STL -#include -#else -#ifndef __clr_std_vector_h__ -#define __clr_std_vector_h__ - -// This is defined in the debugmacrosext.h header, but don't take a dependency on that. 
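// Illustrative aside, not part of the patch: the clr_std move/forward/pair helpers deleted
// above behave like their std counterparts, which now come from <utility>. A tiny
// standalone demo of the ownership transfer that callers rely on:
#include <cstdio>
#include <utility>

struct Buffer
{
    int* data;
    Buffer() : data(new int[4]) {}
    Buffer(Buffer&& other) noexcept : data(other.data) { other.data = nullptr; } // steal the allocation
    ~Buffer() { delete[] data; }
    Buffer(const Buffer&) = delete;
    Buffer& operator=(const Buffer&) = delete;
};

int main()
{
    Buffer a;
    Buffer b(std::move(a));                  // move construction; a no longer owns the array
    std::printf("%d\n", a.data == nullptr);  // prints 1
    return 0;
}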
-#ifndef INDEBUG -#ifdef _DEBUG -#define INDEBUG(x) x -#else -#define INDEBUG(x) -#endif -#endif // !def INDEBUG - -namespace std -{ - template - class vector - { - public: - class const_iterator; - - class iterator - { - friend class std::vector::const_iterator; - public: - typedef T value_type; - typedef ptrdiff_t difference_type; - typedef T* pointer; - typedef T& reference; - - typedef class vector::iterator _MyIter; - - _MyIter &operator++() - { - m_ptr++; - return *this; - } - - _MyIter operator++(int) - { - // post-increment ++ - _MyIter myiter(m_ptr); - m_ptr++; - return myiter; - } - - _MyIter &operator--() - { - m_ptr--; - return *this; - } - - _MyIter operator--(int) - { - // post-decrement -- - _MyIter myiter(m_ptr); - m_ptr--; - return myiter; - } - - _MyIter operator- (ptrdiff_t n) - { - _MyIter myiter(m_ptr); - myiter.m_ptr -= n; - return myiter; - } - - ptrdiff_t operator- (_MyIter right) - { - _MyIter myiter(m_ptr); - return myiter.m_ptr - right.m_ptr; - } - - _MyIter operator+ (ptrdiff_t n) - { - _MyIter myiter(m_ptr); - myiter.m_ptr += n; - return myiter; - } - - T* operator->() const - { - return m_ptr; - } - - T & operator*() const - { - return *m_ptr; - } - - bool operator==(const _MyIter& _Right) const - { - bool equals = this->m_ptr == _Right.m_ptr; - return equals; - } - - bool operator!=(const _MyIter& _Right) const - { - bool equals = this->m_ptr == _Right.m_ptr; - return !equals; - } - - bool operator<(const _MyIter& _Right) const - { - return this->m_ptr < _Right.m_ptr; - } - - bool operator>(const _MyIter& _Right) const - { - return this->m_ptr > _Right.m_ptr; - } - public: - explicit iterator(T* ptr) - { - m_ptr = ptr; - } - - private: - T* m_ptr; - }; // class iterator - - class const_iterator - { - public: - typedef class vector::const_iterator _MyIter; - typedef class vector::iterator _MyNonConstIter; - - _MyIter &operator++() - { - m_ptr++; - return *this; - } - - _MyIter operator++(int) - { - // post-increment ++ - _MyIter myiter(m_ptr); - m_ptr++; - return myiter; - } - - const T* operator->() const - { - return m_ptr; - } - - const T & operator*() const - { - return *m_ptr; - } - - bool operator==(const _MyIter& _Right) const - { - bool equals = this->m_ptr == _Right.m_ptr; - return equals; - } - - bool operator!=(const _MyIter& _Right) const - { - bool equals = this->m_ptr == _Right.m_ptr; - return !equals; - } - - public: - explicit const_iterator(T* ptr) - { - m_ptr = ptr; - } - const_iterator(const _MyNonConstIter &nonConstIterator) - { - m_ptr = nonConstIterator.m_ptr; - } - - private: - T* m_ptr; - }; // class const iterator - - - public: - explicit vector(size_t n = 0) - { - m_size = 0; - m_capacity = 0; - m_pelements = NULL; - m_isBufferOwner = true; - resize(n); - } - - ~vector() - { - if (m_isBufferOwner) - { - erase(m_pelements, 0, m_size); - delete [] (BYTE*)m_pelements; // cast to BYTE* as we don't want this delete to invoke T's dtor - } - else - { - m_size = 0; - m_capacity = 0; - } - } - - vector(const vector&) = delete; - vector& operator=(const vector&) = delete; - - vector(vector&& v) noexcept - : m_size(v.m_size) - , m_capacity(v.m_capacity) - , m_pelements(v.m_pelements) - , m_isBufferOwner(v.m_isBufferOwner) - { - v.m_isBufferOwner = false; - } - - vector& operator=(vector&& v) noexcept - { - if (m_isBufferOwner) - { - erase(m_pelements, 0, m_size); - delete [] (BYTE*)m_pelements; - } - - m_size = v.m_size; - m_capacity = v.m_capacity; - m_pelements = v.m_pelements; - m_isBufferOwner = v.m_isBufferOwner; - v.m_isBufferOwner = 
false; - return *this; - } - - size_t size() const - { - return m_size; - } - - T & operator[](size_t iIndex) - { - assert(iIndex < m_size); - return m_pelements[iIndex]; - } - - T & operator[](size_t iIndex) const - { - assert(iIndex < m_size); - return m_pelements[iIndex]; - } - - void resize(size_t newsize) - { - assert(m_isBufferOwner); - size_t oldsize = this->size(); - resize_noinit(newsize); - if (newsize > oldsize) - { - fill_uninitialized_with_default_value(m_pelements, oldsize, newsize); - } - } - - void clear() - { - assert(m_isBufferOwner); - resize(0); - } - - void resize(size_t newsize, T c) - { - assert(m_isBufferOwner); - size_t oldsize = this->size(); - resize_noinit(newsize); - if (newsize > oldsize) - { - for (size_t i = oldsize; i < newsize; i++) - { - m_pelements[i] = c; - } - } - } - - void wrap(size_t numElements, T* pElements) - { - m_size = numElements; - m_pelements = pElements; - m_isBufferOwner = false; - } - - void resize_noinit(size_t newsize) - { - assert(m_isBufferOwner); - size_t oldsize = this->size(); - if (newsize < oldsize) - { - // Shrink - erase(m_pelements, newsize, oldsize); - } - else if (newsize > oldsize) - { - // Grow - reserve(newsize); - } - m_size = newsize; - } - - void push_back(const T & val) - { - assert(m_isBufferOwner); - if (m_size + 1 < m_size) - { - assert("push_back: overflow"); - // @todo: how to throw. - } - resize(m_size + 1, val); - } - - void reserve(size_t newcapacity) - { - assert(m_isBufferOwner); - if (newcapacity > m_capacity) - { - // To avoid resizing for every element that gets added to a vector, we - // allocate at least twice the old capacity, or 16 elements, whichever is greater. - newcapacity = max(newcapacity, max(m_capacity * 2, 16)); - - size_t bytesNeeded = newcapacity * sizeof(T); - if (bytesNeeded / sizeof(T) != newcapacity) - { - assert("resize: overflow"); - // @todo: how to throw something here? 
- } - - - T *pelements = (T*)(new BYTE[bytesNeeded]); // Allocate as BYTE array to avoid automatic construction - INDEBUG(memset(pelements, 0xcc, bytesNeeded)); - for (size_t i = 0; i < m_size; i++) - { - pelements[i] = m_pelements[i]; - } - - erase(m_pelements, 0, m_size); - delete [] (BYTE*)m_pelements; // cast to BYTE* as we don't want this delete to invoke T's dtor - - m_pelements = pelements; - m_capacity = newcapacity; - } - } - - iterator begin() - { - return iterator(m_pelements); - } - - iterator end() - { - return iterator(m_pelements + m_size); - } - - const_iterator cbegin() const - { - return const_iterator(m_pelements); - } - - const_iterator cend() const - { - return const_iterator(m_pelements + m_size); - } - - iterator erase(iterator position) - { - assert(m_isBufferOwner); - assert((position > begin() || position == begin()) && position < end()); - ptrdiff_t index = position - begin(); - erase(m_pelements, index, index + 1); - memcpy(&m_pelements[index], &m_pelements[index + 1], sizeof(T) * (m_size - index - 1)); - --m_size; - return iterator(m_pelements + (position - begin())); - } - - iterator erase(iterator position, iterator positionEnd) - { - assert(m_isBufferOwner); - assert((position > begin() || position == begin()) && position < end()); - ptrdiff_t index = position - begin(); - ptrdiff_t elements = positionEnd - position; - erase(m_pelements, index, index + elements); - memcpy(&m_pelements[index], &m_pelements[index + elements], sizeof(T) * (m_size - index - elements)); - m_size -= elements; - return iterator(m_pelements + (position - begin())); - } - - T* data() - { - return m_pelements; - } - - const T* data() const - { - return m_pelements; - } - - private: - // Transition a subset of the array from uninitialized to initialized with default value for T. - static void fill_uninitialized_with_default_value(T* pelements, size_t startIdx, size_t endIdx) - { - assert(startIdx <= endIdx); - assert(pelements != NULL || startIdx == endIdx); - for (size_t i = startIdx; i < endIdx; i++) - { - INDEBUG(assert(0xcc == *((BYTE*)&pelements[i]))); - pelements[i] = T(); - } - } - - // Transition a subset of the array from a valid value of T to uninitialized. - static void erase(T* pelements, size_t startIdx, size_t endIdx) - { - assert(startIdx <= endIdx); - assert(pelements != NULL || startIdx == endIdx); - for (size_t i = startIdx; i < endIdx; i++) - { - pelements[i].~T(); - } - - INDEBUG(memset(&pelements[startIdx], 0xcc, (endIdx - startIdx) * sizeof(T))); - } - - private: - size_t m_size; //# of elements - size_t m_capacity; //# of elements allocated - T *m_pelements; //actual array - // invariants: - // dimensions == m_capacity - // elements 0 thru m_size-1 always contain constructed T values. - // elements from m_size thru m_capacity - 1 contain memory garbage (0xcc in DEBUG). - bool m_isBufferOwner; // indicate if this vector creates its own buffer, or wraps an existing buffer. - - - - - }; // class vector - -}; // namespace std - -#endif /* __clr_std_vector_h__ */ - -#endif // !USE_STL - -// Help the VIM editor figure out what kind of file this no-extension file is. 
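// Illustrative aside, not part of the patch: the reserve() path of the vector listed above
// grows by "at least twice the old capacity, or 16 elements". A standalone sketch of that
// growth rule (the helper name is hypothetical):
#include <cstddef>
#include <cstdio>

static size_t grow_capacity(size_t requested, size_t current)
{
    size_t doubled = current * 2;
    size_t floor16 = doubled > 16 ? doubled : 16;          // never grow to fewer than 16 slots
    return requested > floor16 ? requested : floor16;      // but always satisfy the request
}

int main()
{
    size_t cap = 0;
    for (size_t size = 1; size <= 100; ++size)
    {
        if (size > cap)
        {
            cap = grow_capacity(size, cap);
            std::printf("size=%zu -> capacity=%zu\n", size, cap); // 16, 32, 64, 128
        }
    }
    return 0;
}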
-// vim: filetype=cpp diff --git a/src/coreclr/inc/crtwrap.h b/src/coreclr/inc/crtwrap.h index d3ab3a28be7c6..5c8b2e6e78bf7 100644 --- a/src/coreclr/inc/crtwrap.h +++ b/src/coreclr/inc/crtwrap.h @@ -11,6 +11,7 @@ #define __CrtWrap_h__ #include +#include #include #include #include "debugmacros.h" diff --git a/src/coreclr/inc/daccess.h b/src/coreclr/inc/daccess.h index 699947a02cdd4..d449901cd36fc 100644 --- a/src/coreclr/inc/daccess.h +++ b/src/coreclr/inc/daccess.h @@ -568,10 +568,8 @@ // Keep in sync with the definitions in dbgutil.cpp and createdump.h #define DACCESS_TABLE_SYMBOL "g_dacTable" -#ifdef PAL_STDCPP_COMPAT #include -#else -#include "clr_std/type_traits" +#ifndef PAL_STDCPP_COMPAT #include "crosscomp.h" #endif diff --git a/src/coreclr/inc/gcinfotypes.h b/src/coreclr/inc/gcinfotypes.h index 7457063d47eb3..9270d4441d7e7 100644 --- a/src/coreclr/inc/gcinfotypes.h +++ b/src/coreclr/inc/gcinfotypes.h @@ -608,7 +608,7 @@ void FASTCALL decodeCallPattern(int pattern, #if defined(TARGET_AMD64) #ifndef TARGET_POINTER_SIZE -#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target +#define TARGET_POINTER_SIZE 8u // equal to sizeof(void*) and the managed pointer size in bytes for this target #endif #define NUM_NORM_CODE_OFFSETS_PER_CHUNK (64) #define NUM_NORM_CODE_OFFSETS_PER_CHUNK_LOG2 (6) @@ -664,7 +664,7 @@ void FASTCALL decodeCallPattern(int pattern, #elif defined(TARGET_ARM) #ifndef TARGET_POINTER_SIZE -#define TARGET_POINTER_SIZE 4 // equal to sizeof(void*) and the managed pointer size in bytes for this target +#define TARGET_POINTER_SIZE 4u // equal to sizeof(void*) and the managed pointer size in bytes for this target #endif #define NUM_NORM_CODE_OFFSETS_PER_CHUNK (64) #define NUM_NORM_CODE_OFFSETS_PER_CHUNK_LOG2 (6) @@ -722,7 +722,7 @@ void FASTCALL decodeCallPattern(int pattern, #elif defined(TARGET_ARM64) #ifndef TARGET_POINTER_SIZE -#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target +#define TARGET_POINTER_SIZE 8u // equal to sizeof(void*) and the managed pointer size in bytes for this target #endif #define NUM_NORM_CODE_OFFSETS_PER_CHUNK (64) #define NUM_NORM_CODE_OFFSETS_PER_CHUNK_LOG2 (6) @@ -777,7 +777,7 @@ void FASTCALL decodeCallPattern(int pattern, #elif defined(TARGET_LOONGARCH64) #ifndef TARGET_POINTER_SIZE -#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target +#define TARGET_POINTER_SIZE 8u // equal to sizeof(void*) and the managed pointer size in bytes for this target #endif #define NUM_NORM_CODE_OFFSETS_PER_CHUNK (64) #define NUM_NORM_CODE_OFFSETS_PER_CHUNK_LOG2 (6) @@ -832,7 +832,7 @@ void FASTCALL decodeCallPattern(int pattern, #elif defined(TARGET_RISCV64) #ifndef TARGET_POINTER_SIZE -#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target +#define TARGET_POINTER_SIZE 8u // equal to sizeof(void*) and the managed pointer size in bytes for this target #endif #define NUM_NORM_CODE_OFFSETS_PER_CHUNK (64) #define NUM_NORM_CODE_OFFSETS_PER_CHUNK_LOG2 (6) @@ -896,7 +896,7 @@ PORTABILITY_WARNING("Please specialize these definitions for your platform!") #endif #ifndef TARGET_POINTER_SIZE -#define TARGET_POINTER_SIZE 4 // equal to sizeof(void*) and the managed pointer size in bytes for this target +#define TARGET_POINTER_SIZE 4u // equal to sizeof(void*) and the managed pointer size in bytes for this target #endif #define NUM_NORM_CODE_OFFSETS_PER_CHUNK 
(64) #define NUM_NORM_CODE_OFFSETS_PER_CHUNK_LOG2 (6) diff --git a/src/coreclr/inc/holder.h b/src/coreclr/inc/holder.h index 16551b141ca1a..984260308d04a 100644 --- a/src/coreclr/inc/holder.h +++ b/src/coreclr/inc/holder.h @@ -11,13 +11,8 @@ #include "volatile.h" #include "palclr.h" -#ifdef PAL_STDCPP_COMPAT #include #include -#else -#include "clr_std/utility" -#include "clr_std/type_traits" -#endif #if defined(FEATURE_COMINTEROP) && !defined(STRIKE) #include diff --git a/src/coreclr/inc/safemath.h b/src/coreclr/inc/safemath.h index fcd51af3de8cb..336d4b0b464e3 100644 --- a/src/coreclr/inc/safemath.h +++ b/src/coreclr/inc/safemath.h @@ -31,11 +31,7 @@ #include "static_assert.h" -#ifdef PAL_STDCPP_COMPAT #include -#else -#include "clr_std/type_traits" -#endif //================================================================== // Semantics: if val can be represented as the exact same value diff --git a/src/coreclr/inc/utilcode.h b/src/coreclr/inc/utilcode.h index fe5db13f6b971..2e6fd534cf951 100644 --- a/src/coreclr/inc/utilcode.h +++ b/src/coreclr/inc/utilcode.h @@ -29,11 +29,7 @@ #include "safemath.h" #include "new.hpp" -#ifdef PAL_STDCPP_COMPAT #include -#else -#include "clr_std/type_traits" -#endif #include "contract.h" @@ -302,28 +298,6 @@ inline WCHAR* FormatInteger(WCHAR* str, size_t strCount, const char* fmt, I v) return str; } -//***************************************************************************** -// Placement new is used to new and object at an exact location. The pointer -// is simply returned to the caller without actually using the heap. The -// advantage here is that you cause the ctor() code for the object to be run. -// This is ideal for heaps of C++ objects that need to get init'd multiple times. -// Example: -// void *pMem = GetMemFromSomePlace(); -// Foo *p = new (pMem) Foo; -// DoSomething(p); -// p->~Foo(); -//***************************************************************************** -#ifndef __PLACEMENT_NEW_INLINE -#define __PLACEMENT_NEW_INLINE -inline void *__cdecl operator new(size_t, void *_P) -{ - LIMITED_METHOD_DAC_CONTRACT; - - return (_P); -} -#endif // __PLACEMENT_NEW_INLINE - - /********************************************************************************/ /* portability helpers */ @@ -3934,37 +3908,6 @@ inline T* InterlockedCompareExchangeT( return InterlockedCompareExchangeT(destination, exchange, static_cast(comparand)); } -// NULL pointer variants of the above to avoid having to cast NULL -// to the appropriate pointer type. -template -inline T* InterlockedExchangeT( - T* volatile * target, - int value) // When NULL is provided as argument. -{ - //STATIC_ASSERT(value == 0); - return InterlockedExchangeT(target, nullptr); -} - -template -inline T* InterlockedCompareExchangeT( - T* volatile * destination, - int exchange, // When NULL is provided as argument. - T* comparand) -{ - //STATIC_ASSERT(exchange == 0); - return InterlockedCompareExchangeT(destination, nullptr, comparand); -} - -template -inline T* InterlockedCompareExchangeT( - T* volatile * destination, - T* exchange, - int comparand) // When NULL is provided as argument. 
-{ - //STATIC_ASSERT(comparand == 0); - return InterlockedCompareExchangeT(destination, exchange, nullptr); -} - #undef InterlockedExchangePointer #define InterlockedExchangePointer Use_InterlockedExchangeT #undef InterlockedCompareExchangePointer diff --git a/src/coreclr/jit/inline.h b/src/coreclr/jit/inline.h index dca92e39241e4..8dff40c173211 100644 --- a/src/coreclr/jit/inline.h +++ b/src/coreclr/jit/inline.h @@ -1048,7 +1048,7 @@ class InlineStrategy enum { ALWAYS_INLINE_SIZE = 16, - IMPLEMENTATION_MAX_INLINE_SIZE = _UI16_MAX, + IMPLEMENTATION_MAX_INLINE_SIZE = UINT16_MAX, IMPLEMENTATION_MAX_INLINE_DEPTH = 1000 }; diff --git a/src/coreclr/jit/jitstd/list.h b/src/coreclr/jit/jitstd/list.h index f00c159645255..77b5f893bea10 100644 --- a/src/coreclr/jit/jitstd/list.h +++ b/src/coreclr/jit/jitstd/list.h @@ -14,7 +14,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #include "iterator.h" #include "functional.h" -#include "clr_std/utility" +#include namespace jitstd { diff --git a/src/coreclr/jit/jitstd/utility.h b/src/coreclr/jit/jitstd/utility.h index 624bb7bc7c39a..0df302a9352a5 100644 --- a/src/coreclr/jit/jitstd/utility.h +++ b/src/coreclr/jit/jitstd/utility.h @@ -5,7 +5,7 @@ #pragma once -#include "clr_std/type_traits" +#include namespace jitstd { diff --git a/src/coreclr/jit/targetamd64.h b/src/coreclr/jit/targetamd64.h index 4abe71984b57c..a6e8606b9500d 100644 --- a/src/coreclr/jit/targetamd64.h +++ b/src/coreclr/jit/targetamd64.h @@ -66,7 +66,7 @@ #define NOGC_WRITE_BARRIERS 0 // We DO-NOT have specialized WriteBarrier JIT Helpers that DO-NOT trash the RBM_CALLEE_TRASH registers #define USER_ARGS_COME_LAST 1 #define EMIT_TRACK_STACK_DEPTH 1 - #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target + #define TARGET_POINTER_SIZE 8u // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. #ifdef UNIX_AMD64_ABI diff --git a/src/coreclr/jit/targetarm.h b/src/coreclr/jit/targetarm.h index ac9d72cab31f6..b07523db62d36 100644 --- a/src/coreclr/jit/targetarm.h +++ b/src/coreclr/jit/targetarm.h @@ -38,7 +38,7 @@ #define USER_ARGS_COME_LAST 1 #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really // need to track stack depth, but this is currently necessary to get GC information reported at call sites. - #define TARGET_POINTER_SIZE 4 // equal to sizeof(void*) and the managed pointer size in bytes for this target + #define TARGET_POINTER_SIZE 4u // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. 
#define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods diff --git a/src/coreclr/jit/targetarm64.h b/src/coreclr/jit/targetarm64.h index 3646ecb4407bf..7ed7e66421be7 100644 --- a/src/coreclr/jit/targetarm64.h +++ b/src/coreclr/jit/targetarm64.h @@ -40,7 +40,7 @@ #define USER_ARGS_COME_LAST 1 #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really // need to track stack depth, but this is currently necessary to get GC information reported at call sites. - #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target + #define TARGET_POINTER_SIZE 8u // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. #define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods diff --git a/src/coreclr/jit/targetloongarch64.h b/src/coreclr/jit/targetloongarch64.h index 736fd1406c304..fa2a7fc93dae5 100644 --- a/src/coreclr/jit/targetloongarch64.h +++ b/src/coreclr/jit/targetloongarch64.h @@ -45,7 +45,7 @@ #define USER_ARGS_COME_LAST 1 #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really // need to track stack depth, but this is currently necessary to get GC information reported at call sites. - #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target + #define TARGET_POINTER_SIZE 8u // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. #define FEATURE_EH_FUNCLETS 1 #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. diff --git a/src/coreclr/jit/targetriscv64.h b/src/coreclr/jit/targetriscv64.h index 9cf0185a56935..f3d4af7b13bb6 100644 --- a/src/coreclr/jit/targetriscv64.h +++ b/src/coreclr/jit/targetriscv64.h @@ -42,7 +42,7 @@ #define USER_ARGS_COME_LAST 1 #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really // need to track stack depth, but this is currently necessary to get GC information reported at call sites. - #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target + #define TARGET_POINTER_SIZE 8u // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. 
#define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods diff --git a/src/coreclr/jit/targetx86.h b/src/coreclr/jit/targetx86.h index 60b2f7793f435..b7add06df05bc 100644 --- a/src/coreclr/jit/targetx86.h +++ b/src/coreclr/jit/targetx86.h @@ -49,7 +49,7 @@ #endif #define USER_ARGS_COME_LAST 0 #define EMIT_TRACK_STACK_DEPTH 1 - #define TARGET_POINTER_SIZE 4 // equal to sizeof(void*) and the managed pointer size in bytes for this + #define TARGET_POINTER_SIZE 4u // equal to sizeof(void*) and the managed pointer size in bytes for this // target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, // filter-handler, fault) and directly execute 'finally' clauses. diff --git a/src/coreclr/jit/utils.h b/src/coreclr/jit/utils.h index 5378a08422365..ffbbae09bece5 100644 --- a/src/coreclr/jit/utils.h +++ b/src/coreclr/jit/utils.h @@ -16,7 +16,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #define _UTILS_H_ #include "safemath.h" -#include "clr_std/type_traits" +#include #include "iallocator.h" #include "hostallocator.h" #include "cycletimer.h" @@ -160,6 +160,18 @@ int signum(T val) } } +template +constexpr auto max(T&& t, U&& u) -> decltype(t > u ? t : u) +{ + return t > u ? t : u; +} + +template +constexpr auto min(T&& t, U&& u) -> decltype(t < u ? t : u) +{ + return t < u ? t : u; +} + #if defined(DEBUG) // ConfigMethodRange describes a set of methods, specified via their diff --git a/src/coreclr/md/ceefilegen/blobfetcher.cpp b/src/coreclr/md/ceefilegen/blobfetcher.cpp index 7a110eeeeaf59..f08908147de75 100644 --- a/src/coreclr/md/ceefilegen/blobfetcher.cpp +++ b/src/coreclr/md/ceefilegen/blobfetcher.cpp @@ -211,7 +211,7 @@ char* CBlobFetcher::MakeNewBlock(unsigned len, unsigned align) { pChRet = m_pIndex[m_nIndexUsed].MakeNewBlock(len + pad, 0); // Did we run out of memory? 
- if (pChRet == NULL && m_pIndex[m_nIndexUsed].GetDataLen() == NULL) + if (pChRet == NULL && m_pIndex[m_nIndexUsed].GetDataLen() == 0) return NULL; if (pChRet == NULL) { diff --git a/src/coreclr/md/ceefilegen/stdafx.h b/src/coreclr/md/ceefilegen/stdafx.h index 36f42f95aa529..4026a47f14107 100644 --- a/src/coreclr/md/ceefilegen/stdafx.h +++ b/src/coreclr/md/ceefilegen/stdafx.h @@ -17,6 +17,7 @@ #include // for qsort #include #include +#include #include #include @@ -27,3 +28,6 @@ #include "ceegen.h" #include "ceesectionstring.h" + +using std::min; +using std::max; diff --git a/src/coreclr/md/compiler/stdafx.h b/src/coreclr/md/compiler/stdafx.h index 56e29559cafe0..b8ae250e008cd 100644 --- a/src/coreclr/md/compiler/stdafx.h +++ b/src/coreclr/md/compiler/stdafx.h @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -25,4 +26,7 @@ #include "utsem.h" +using std::min; +using std::max; + #endif // __STDAFX_H_ diff --git a/src/coreclr/md/enc/stdafx.h b/src/coreclr/md/enc/stdafx.h index e1b3962a14e60..10d1cf0f32d69 100644 --- a/src/coreclr/md/enc/stdafx.h +++ b/src/coreclr/md/enc/stdafx.h @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -26,4 +27,7 @@ #include "utsem.h" +using std::min; +using std::max; + #endif // __STDAFX_H__ diff --git a/src/coreclr/md/runtime/stdafx.h b/src/coreclr/md/runtime/stdafx.h index aca84b431773e..957cbd7e006d0 100644 --- a/src/coreclr/md/runtime/stdafx.h +++ b/src/coreclr/md/runtime/stdafx.h @@ -13,6 +13,7 @@ #include #include +#include #include #include diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index 16fdd6cf02617..bd83ffc0eea3d 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -33,7 +33,6 @@ Module Name: #ifndef __PAL_H__ #define __PAL_H__ -#ifdef PAL_STDCPP_COMPAT #include #include #include @@ -42,12 +41,17 @@ Module Name: #include #include #include +#include #include #include #include #include #include #include +#include + +#ifdef __cplusplus +#include #endif #ifdef __cplusplus @@ -183,78 +187,6 @@ extern bool g_arm64_atomics_present; #endif // __has_cpp_attribute(fallthrough) #endif // FALLTHROUGH -#ifndef PAL_STDCPP_COMPAT - -#if __GNUC__ - -typedef __builtin_va_list va_list; - -/* We should consider if the va_arg definition here is actually necessary. - Could we use the standard va_arg definition? 
*/ - -#define va_start __builtin_va_start -#define va_arg __builtin_va_arg - -#define va_copy __builtin_va_copy -#define va_end __builtin_va_end - -#define VOID void - -#else // __GNUC__ - -typedef char * va_list; - -#define _INTSIZEOF(n) ( (sizeof(n) + sizeof(int) - 1) & ~(sizeof(int) - 1) ) - -#if _MSC_VER >= 1400 - -#ifdef __cplusplus -#define _ADDRESSOF(v) ( &reinterpret_cast(v) ) -#else -#define _ADDRESSOF(v) ( &(v) ) -#endif - -#define _crt_va_start(ap,v) ( ap = (va_list)_ADDRESSOF(v) + _INTSIZEOF(v) ) -#define _crt_va_arg(ap,t) ( *(t *)((ap += _INTSIZEOF(t)) - _INTSIZEOF(t)) ) -#define _crt_va_end(ap) ( ap = (va_list)0 ) - -#define va_start _crt_va_start -#define va_arg _crt_va_arg -#define va_end _crt_va_end - -#else // _MSC_VER - -#define va_start(ap,v) (ap = (va_list) (&(v)) + _INTSIZEOF(v)) -#define va_arg(ap,t) ( *(t *)((ap += _INTSIZEOF(t)) - _INTSIZEOF(t)) ) -#define va_end(ap) - -#endif // _MSC_VER - -#define va_copy(dest,src) (dest = src) - -#endif // __GNUC__ - -#define CHAR_BIT 8 - -#define SCHAR_MIN (-128) -#define SCHAR_MAX 127 -#define UCHAR_MAX 0xff - -#define SHRT_MIN (-32768) -#define SHRT_MAX 32767 -#define USHRT_MAX 0xffff - -#define INT_MIN (-2147483647 - 1) -#define INT_MAX 2147483647 -#define UINT_MAX 0xffffffff - -// LONG_MIN, LONG_MAX, ULONG_MAX -- use INT32_MIN etc. instead. - -#define FLT_MAX 3.402823466e+38F -#define DBL_MAX 1.7976931348623157e+308 - -#endif // !PAL_STDCPP_COMPAT - /******************* PAL-Specific Entrypoints *****************************/ #define IsDebuggerPresent PAL_IsDebuggerPresent @@ -264,44 +196,6 @@ BOOL PALAPI PAL_IsDebuggerPresent(); -/* minimum signed 64 bit value */ -#define _I64_MIN (I64(-9223372036854775807) - 1) -/* maximum signed 64 bit value */ -#define _I64_MAX I64(9223372036854775807) -/* maximum unsigned 64 bit value */ -#define _UI64_MAX UI64(0xffffffffffffffff) - -#define _I8_MAX SCHAR_MAX -#define _I8_MIN SCHAR_MIN -#define _I16_MAX SHRT_MAX -#define _I16_MIN SHRT_MIN -#define _I32_MAX INT_MAX -#define _I32_MIN INT_MIN -#define _UI8_MAX UCHAR_MAX -#define _UI8_MIN UCHAR_MIN -#define _UI16_MAX USHRT_MAX -#define _UI16_MIN USHRT_MIN -#define _UI32_MAX UINT_MAX -#define _UI32_MIN UINT_MIN - -#undef NULL - -#if defined(__cplusplus) -#define NULL 0 -#else -#define NULL ((PVOID)0) -#endif - -#if defined(PAL_STDCPP_COMPAT) && !defined(__cplusplus) -#define nullptr NULL -#endif // defined(PAL_STDCPP_COMPAT) && !defined(__cplusplus) - -#ifndef PAL_STDCPP_COMPAT - -typedef __int64 time_t; -#define _TIME_T_DEFINED -#endif // !PAL_STDCPP_COMPAT - #define DLL_PROCESS_ATTACH 1 #define DLL_THREAD_ATTACH 2 #define DLL_THREAD_DETACH 3 @@ -3976,16 +3870,6 @@ PAL_GetCurrentThreadAffinitySet(SIZE_T size, UINT_PTR* data); #endif //FEATURE_PAL_ANSI /******************* C Runtime Entrypoints *******************************/ -/* Some C runtime functions needs to be reimplemented by the PAL. 
- To avoid name collisions, those functions have been renamed using - defines */ -#ifndef PAL_STDCPP_COMPAT -// Forward declare functions that are in header files we can't include yet -int printf(const char *, ...); -int vprintf(const char *, va_list); - -#endif // !PAL_STDCPP_COMPAT - #ifndef _CONST_RETURN #ifdef __cplusplus #define _CONST_RETURN const @@ -3998,65 +3882,6 @@ int vprintf(const char *, va_list); /* For backwards compatibility */ #define _WConst_return _CONST_RETURN -#define EOF (-1) - -typedef int errno_t; - -#if defined(__WINT_TYPE__) -typedef __WINT_TYPE__ wint_t; -#else -typedef unsigned int wint_t; -#endif - -#ifndef PAL_STDCPP_COMPAT -PALIMPORT void * __cdecl memcpy(void *, const void *, size_t); -PALIMPORT int __cdecl memcmp(const void *, const void *, size_t); -PALIMPORT void * __cdecl memset(void *, int, size_t); -PALIMPORT void * __cdecl memmove(void *, const void *, size_t); -PALIMPORT void * __cdecl memchr(const void *, int, size_t); -PALIMPORT long long int __cdecl atoll(const char *) MATH_THROW_DECL; -PALIMPORT size_t __cdecl strlen(const char *); -PALIMPORT int __cdecl strcmp(const char*, const char *); -PALIMPORT int __cdecl strncmp(const char*, const char *, size_t); -PALIMPORT int __cdecl strncasecmp(const char *, const char *, size_t); -PALIMPORT char * __cdecl strcat(char *, const char *); -PALIMPORT char * __cdecl strncat(char *, const char *, size_t); -PALIMPORT char * __cdecl strcpy(char *, const char *); -PALIMPORT char * __cdecl strncpy(char *, const char *, size_t); -PALIMPORT char * __cdecl strchr(const char *, int); -PALIMPORT char * __cdecl strrchr(const char *, int); -PALIMPORT char * __cdecl strpbrk(const char *, const char *); -PALIMPORT char * __cdecl strstr(const char *, const char *); -PALIMPORT char * __cdecl strtok_r(char *, const char *, char **); -PALIMPORT char * __cdecl strdup(const char*); -PALIMPORT int __cdecl atoi(const char *); -PALIMPORT unsigned long __cdecl strtoul(const char *, char **, int); -PALIMPORT ULONGLONG __cdecl strtoull(const char *, char **, int); -PALIMPORT double __cdecl atof(const char *); -PALIMPORT double __cdecl strtod(const char *, char **); -PALIMPORT size_t strnlen(const char *, size_t); -PALIMPORT int __cdecl isprint(int); -PALIMPORT int __cdecl isspace(int); -PALIMPORT int __cdecl isalpha(int); -PALIMPORT int __cdecl isalnum(int); -PALIMPORT int __cdecl isdigit(int); -PALIMPORT int __cdecl isxdigit(int); -PALIMPORT int __cdecl tolower(int); -PALIMPORT int __cdecl toupper(int); -PALIMPORT int __cdecl iswalpha(wint_t); -PALIMPORT int __cdecl iswdigit(wint_t); -PALIMPORT int __cdecl iswupper(wint_t); -PALIMPORT int __cdecl iswprint(wint_t); -PALIMPORT int __cdecl iswspace(wint_t); -PALIMPORT int __cdecl iswxdigit(wint_t); -PALIMPORT wint_t __cdecl towupper(wint_t); -PALIMPORT wint_t __cdecl towlower(wint_t); -PALIMPORT int remove(const char*); - -#define SEEK_SET 0 -#define SEEK_CUR 1 -#define SEEK_END 2 - /* Locale categories */ #define LC_ALL 0 #define LC_COLLATE 1 @@ -4065,62 +3890,14 @@ PALIMPORT int remove(const char*); #define LC_NUMERIC 4 #define LC_TIME 5 -#define _IOFBF 0 /* setvbuf should set fully buffered */ -#define _IOLBF 1 /* setvbuf should set line buffered */ -#define _IONBF 2 /* setvbuf should set unbuffered */ - -struct _FILE; - -#ifdef DEFINE_DUMMY_FILE_TYPE -#define FILE _PAL_FILE -struct _PAL_FILE; -#else -typedef _FILE FILE; -#endif // DEFINE_DUMMY_FILE_TYPE - -PALIMPORT int __cdecl fclose(FILE *); -PALIMPORT int __cdecl fflush(FILE *); -PALIMPORT size_t __cdecl fwrite(const void *, 
size_t, size_t, FILE *); -PALIMPORT size_t __cdecl fread(void *, size_t, size_t, FILE *); -PALIMPORT char * __cdecl fgets(char *, int, FILE *); -PALIMPORT int __cdecl fputs(const char *, FILE *); -PALIMPORT int __cdecl fprintf(FILE *, const char *, ...); -PALIMPORT int __cdecl vfprintf(FILE *, const char *, va_list); -PALIMPORT int __cdecl fseek(FILE *, LONG, int); -PALIMPORT LONG __cdecl ftell(FILE *); -PALIMPORT int __cdecl ferror(FILE *); -PALIMPORT FILE * __cdecl fopen(const char *, const char *); -PALIMPORT int __cdecl setvbuf(FILE *stream, char *, int, size_t); - -// We need a PAL shim for errno and the standard streams as it's not possible to replicate these definition from the standard library -// in all cases. Instead, we shim it and implement the PAL function where we can include the standard headers. -// When we allow people to include the standard headers, then we can remove this. - -PALIMPORT DLLEXPORT int * __cdecl PAL_errno(); -#define errno (*PAL_errno()) - -// Only provide a prototype for the PAL forwarders for the standard streams if we are not including the standard headers. -#ifndef DEFINE_DUMMY_FILE_TYPE - -extern "C" PALIMPORT DLLEXPORT FILE* __cdecl PAL_stdout(); -extern "C" PALIMPORT DLLEXPORT FILE* __cdecl PAL_stdin(); -extern "C" PALIMPORT DLLEXPORT FILE* __cdecl PAL_stderr(); -#define stdout PAL_stdout() -#define stdin PAL_stdin() -#define stderr PAL_stderr() - -#endif - -#ifdef DEFINE_DUMMY_FILE_TYPE -#undef FILE -#endif -#endif // PAL_STDCPP_COMPAT - /* _TRUNCATE */ #if !defined(_TRUNCATE) #define _TRUNCATE ((size_t)-1) #endif +// errno_t is only defined when the Secure CRT Extensions library is available (which no standard library that we build with implements anyway) +typedef error_t errno_t; + PALIMPORT DLLEXPORT errno_t __cdecl memcpy_s(void *, size_t, const void *, size_t) THROW_DECL; PALIMPORT errno_t __cdecl memmove_s(void *, size_t, const void *, size_t); PALIMPORT DLLEXPORT int __cdecl strcasecmp(const char *, const char *); @@ -4152,10 +3929,7 @@ PALIMPORT DLLEXPORT double __cdecl PAL_wcstod(const WCHAR *, WCHAR **); PALIMPORT errno_t __cdecl _wcslwr_s(WCHAR *, size_t sz); PALIMPORT DLLEXPORT errno_t __cdecl _i64tow_s(long long, WCHAR *, size_t, int); PALIMPORT int __cdecl _wtoi(const WCHAR *); - -#ifndef DEFINE_DUMMY_FILE_TYPE PALIMPORT FILE * __cdecl _wfopen(const WCHAR *, const WCHAR *); -#endif inline int _stricmp(const char* a, const char* b) { @@ -4232,131 +4006,6 @@ unsigned int __cdecl _rotr(unsigned int value, int shift) #endif // !__has_builtin(_rotr) -PALIMPORT int __cdecl abs(int); -// clang complains if this is declared with __int64 -PALIMPORT long long __cdecl llabs(long long); -#ifndef PAL_STDCPP_COMPAT - -PALIMPORT double __cdecl copysign(double, double); -PALIMPORT double __cdecl acos(double); -PALIMPORT double __cdecl acosh(double) MATH_THROW_DECL; -PALIMPORT double __cdecl asin(double); -PALIMPORT double __cdecl asinh(double) MATH_THROW_DECL; -PALIMPORT double __cdecl atan(double) MATH_THROW_DECL; -PALIMPORT double __cdecl atanh(double) MATH_THROW_DECL; -PALIMPORT double __cdecl atan2(double, double); -PALIMPORT double __cdecl cbrt(double) MATH_THROW_DECL; -PALIMPORT double __cdecl ceil(double); -PALIMPORT double __cdecl cos(double); -PALIMPORT double __cdecl cosh(double); -PALIMPORT double __cdecl exp(double); -PALIMPORT double __cdecl fabs(double); -PALIMPORT double __cdecl floor(double); -PALIMPORT double __cdecl fmod(double, double); -PALIMPORT double __cdecl fma(double, double, double) MATH_THROW_DECL; -PALIMPORT int __cdecl 
ilogb(double); -PALIMPORT double __cdecl log(double); -PALIMPORT double __cdecl log2(double) MATH_THROW_DECL; -PALIMPORT double __cdecl log10(double); -PALIMPORT double __cdecl modf(double, double*); -PALIMPORT double __cdecl pow(double, double); -PALIMPORT double __cdecl sin(double); -PALIMPORT void __cdecl sincos(double, double*, double*); -#ifdef __APPLE__ -PALIMPORT void __cdecl __sincos(double, double*, double*); -#endif -PALIMPORT double __cdecl sinh(double); -PALIMPORT double __cdecl sqrt(double); -PALIMPORT double __cdecl tan(double); -PALIMPORT double __cdecl tanh(double); -PALIMPORT double __cdecl trunc(double); - -PALIMPORT float __cdecl copysignf(float, float); -PALIMPORT float __cdecl acosf(float); -PALIMPORT float __cdecl acoshf(float) MATH_THROW_DECL; -PALIMPORT float __cdecl asinf(float); -PALIMPORT float __cdecl asinhf(float) MATH_THROW_DECL; -PALIMPORT float __cdecl atanf(float) MATH_THROW_DECL; -PALIMPORT float __cdecl atanhf(float) MATH_THROW_DECL; -PALIMPORT float __cdecl atan2f(float, float); -PALIMPORT float __cdecl cbrtf(float) MATH_THROW_DECL; -PALIMPORT float __cdecl ceilf(float); -PALIMPORT float __cdecl cosf(float); -PALIMPORT float __cdecl coshf(float); -PALIMPORT float __cdecl expf(float); -PALIMPORT float __cdecl fabsf(float); -PALIMPORT float __cdecl floorf(float); -PALIMPORT float __cdecl fmodf(float, float); -PALIMPORT float __cdecl fmaf(float, float, float) MATH_THROW_DECL; -PALIMPORT int __cdecl ilogbf(float); -PALIMPORT float __cdecl logf(float); -PALIMPORT float __cdecl log2f(float) MATH_THROW_DECL; -PALIMPORT float __cdecl log10f(float); -PALIMPORT float __cdecl modff(float, float*); -PALIMPORT float __cdecl powf(float, float); -PALIMPORT float __cdecl sinf(float); -PALIMPORT void __cdecl sincosf(float, float*, float*); -#ifdef __APPLE__ -PALIMPORT void __cdecl __sincosf(float, float*, float*); -#endif -PALIMPORT float __cdecl sinhf(float); -PALIMPORT float __cdecl sqrtf(float); -PALIMPORT float __cdecl tanf(float); -PALIMPORT float __cdecl tanhf(float); -PALIMPORT float __cdecl truncf(float); -#endif // !PAL_STDCPP_COMPAT - -#ifndef PAL_STDCPP_COMPAT - -#ifdef __cplusplus -extern "C++" { - -inline __int64 abs(__int64 _X) { - return llabs(_X); -} - -#ifdef __APPLE__ -inline __int64 abs(SSIZE_T _X) { - return llabs((__int64)_X); -} -#endif - -} -#endif - -PALIMPORT DLLEXPORT void * __cdecl malloc(size_t); -PALIMPORT DLLEXPORT void __cdecl free(void *); -PALIMPORT DLLEXPORT void * __cdecl realloc(void *, size_t); - -#if defined(_MSC_VER) -#define alloca _alloca -#else -#define _alloca alloca -#endif //_MSC_VER - -#define alloca __builtin_alloca - -#define max(a, b) (((a) > (b)) ? (a) : (b)) -#define min(a, b) (((a) < (b)) ? 
(a) : (b)) - -#endif // !PAL_STDCPP_COMPAT - -PALIMPORT PAL_NORETURN void __cdecl exit(int); - -#ifndef PAL_STDCPP_COMPAT - -PALIMPORT DLLEXPORT void __cdecl qsort(void *, size_t, size_t, int(__cdecl *)(const void *, const void *)); -PALIMPORT DLLEXPORT void * __cdecl bsearch(const void *, const void *, size_t, size_t, - int(__cdecl *)(const void *, const void *)); - -PALIMPORT time_t __cdecl time(time_t *); -PALIMPORT DLLEXPORT char * __cdecl getenv(const char *); - -#endif // !PAL_STDCPP_COMPAT - -PALIMPORT int __cdecl rand(void); -PALIMPORT void __cdecl srand(unsigned int); - PALIMPORT DLLEXPORT char * __cdecl PAL_getenv(const char *); PALIMPORT DLLEXPORT int __cdecl _putenv(const char *); diff --git a/src/coreclr/pal/inc/pal_mstypes.h b/src/coreclr/pal/inc/pal_mstypes.h index 1eee6b2bbbd24..457d6e2f59468 100644 --- a/src/coreclr/pal/inc/pal_mstypes.h +++ b/src/coreclr/pal/inc/pal_mstypes.h @@ -101,7 +101,9 @@ extern "C" { #else #define PALIMPORT +#ifndef DLLEXPORT #define DLLEXPORT __attribute__((visibility("default"))) +#endif #define PAL_NORETURN __attribute__((noreturn)) #endif @@ -207,20 +209,6 @@ extern "C" { #endif // _MSC_VER #ifndef PAL_STDCPP_COMPAT -// Defined in gnu's types.h. For non PAL_IMPLEMENTATION system -// includes are not included, so we need to define them. -#ifndef PAL_IMPLEMENTATION - -typedef __int64 int64_t; -typedef unsigned __int64 uint64_t; -typedef __int32 int32_t; -typedef unsigned __int32 uint32_t; -typedef __int16 int16_t; -typedef unsigned __int16 uint16_t; -typedef __int8 int8_t; -typedef unsigned __int8 uint8_t; - -#endif // PAL_IMPLEMENTATION #ifndef _MSC_VER diff --git a/src/coreclr/pal/inc/rt/cpp/assert.h b/src/coreclr/pal/inc/rt/cpp/assert.h deleted file mode 100644 index 7493b151d6a0f..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/assert.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: assert.h -// -// =========================================================================== -// dummy assert.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/cstdlib b/src/coreclr/pal/inc/rt/cpp/cstdlib deleted file mode 100644 index 1cfd40828a47c..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/cstdlib +++ /dev/null @@ -1,13 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -// -// clrosdev -// -// =========================================================================== -// File: cstdlib -// -// =========================================================================== -// dummy cstdlib for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/ctype.h b/src/coreclr/pal/inc/rt/cpp/ctype.h deleted file mode 100644 index cb41fcd88e6e0..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/ctype.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
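// Illustrative aside, not part of the patch: the pal.h hunk above removes the macro min/max,
// while earlier hunks add function templates in jit/utils.h and `using std::min/std::max`
// in the md stdafx.h files. A standalone sketch of why the function form is safer: macro
// arguments are evaluated twice.
#include <algorithm>
#include <cstdio>

#define MACRO_MAX(a, b) (((a) > (b)) ? (a) : (b))

int main()
{
    int calls = 0;
    auto next = [&calls]() { return ++calls; };

    calls = 0;
    int m1 = MACRO_MAX(next(), 0); // expands next() twice, so it runs twice
    std::printf("macro:    result=%d, calls=%d\n", m1, calls); // result=2, calls=2

    calls = 0;
    int m2 = std::max(next(), 0);  // argument evaluated exactly once
    std::printf("std::max: result=%d, calls=%d\n", m2, calls); // result=1, calls=1
    return 0;
}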
-// - -// -// =========================================================================== -// File: ctype.h -// -// =========================================================================== -// dummy ctype.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/emmintrin.h b/src/coreclr/pal/inc/rt/cpp/emmintrin.h deleted file mode 100644 index f2e8e0c1fd662..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/emmintrin.h +++ /dev/null @@ -1,128 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -// From llvm-3.9/clang-3.9.1 emmintrin.h: - -/*===---- emmintrin.h - SSE2 intrinsics ------------------------------------=== - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - * - *===-----------------------------------------------------------------------=== - */ - -#include "palrt.h" -#ifdef __GNUC__ -#ifndef __EMMINTRIN_H -#define __IMMINTRIN_H - -typedef long long __m128i __attribute__((__vector_size__(16))); - -typedef unsigned long long __v2du __attribute__ ((__vector_size__ (16))); -typedef short __v8hi __attribute__((__vector_size__(16))); -typedef char __v16qi __attribute__((__vector_size__(16))); - - -/* Define the default attribute for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, NODEBUG_ATTRIBUTE)) - -/// \brief Performs a bitwise OR of two 128-bit integer vectors. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the \c VPOR / POR instruction. -/// -/// \param __a -/// A 128-bit integer vector containing one of the source operands. -/// \param __b -/// A 128-bit integer vector containing one of the source operands. -/// \returns A 128-bit integer vector containing the bitwise OR of the values -/// in both operands. -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_or_si128(__m128i __a, __m128i __b) -{ - return (__m128i)((__v2du)__a | (__v2du)__b); -} - -/// \brief Compares each of the corresponding 16-bit values of the 128-bit -/// integer vectors for equality. Each comparison yields 0h for false, FFFFh -/// for true. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the \c VPCMPEQW / PCMPEQW instruction. -/// -/// \param __a -/// A 128-bit integer vector. -/// \param __b -/// A 128-bit integer vector. -/// \returns A 128-bit integer vector containing the comparison results. 
-static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_cmpeq_epi16(__m128i __a, __m128i __b) -{ - return (__m128i)((__v8hi)__a == (__v8hi)__b); -} - -/// \brief Moves packed integer values from an unaligned 128-bit memory location -/// to elements in a 128-bit integer vector. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the \c VMOVDQU / MOVDQU instruction. -/// -/// \param __p -/// A pointer to a memory location containing integer values. -/// \returns A 128-bit integer vector containing the moved values. -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_loadu_si128(__m128i const *__p) -{ - struct __loadu_si128 { - __m128i __v; - } __attribute__((__packed__, __may_alias__)); - return ((struct __loadu_si128*)__p)->__v; -} - -/// \brief Initializes all values in a 128-bit vector of [8 x i16] with the -/// specified 16-bit value. -/// -/// \headerfile -/// -/// This intrinsic is a utility function and does not correspond to a specific -/// instruction. -/// -/// \param __w -/// A 16-bit value used to initialize the elements of the destination integer -/// vector. -/// \returns An initialized 128-bit vector of [8 x i16] with all elements -/// containing the value provided in the operand. -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_set1_epi16(short __w) -{ - return (__m128i)(__v8hi){ __w, __w, __w, __w, __w, __w, __w, __w }; -} - -static __inline__ int __DEFAULT_FN_ATTRS -_mm_movemask_epi8(__m128i __a) -{ - return __builtin_ia32_pmovmskb128((__v16qi)__a); -} - -#undef __DEFAULT_FN_ATTRS - -#endif /* __EMMINTRIN_H */ -#endif // __GNUC__ diff --git a/src/coreclr/pal/inc/rt/cpp/fcntl.h b/src/coreclr/pal/inc/rt/cpp/fcntl.h deleted file mode 100644 index 556145a9f0847..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/fcntl.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: fcntl.h -// -// =========================================================================== -// dummy fcntl.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/float.h b/src/coreclr/pal/inc/rt/cpp/float.h deleted file mode 100644 index a1dc803380e44..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/float.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: float.h -// -// =========================================================================== -// dummy float.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/limits.h b/src/coreclr/pal/inc/rt/cpp/limits.h deleted file mode 100644 index bd667f14eaf99..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/limits.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
-// - -// -// =========================================================================== -// File: limits.h -// -// =========================================================================== -// dummy limits.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/malloc.h b/src/coreclr/pal/inc/rt/cpp/malloc.h deleted file mode 100644 index 255a2c7f2fa22..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/malloc.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: malloc.h -// -// =========================================================================== -// dummy malloc.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/math.h b/src/coreclr/pal/inc/rt/cpp/math.h deleted file mode 100644 index e42c1852c1399..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/math.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: math.h -// -// =========================================================================== -// dummy math.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/memory.h b/src/coreclr/pal/inc/rt/cpp/memory.h deleted file mode 100644 index bcc0d7d9c5d5b..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/memory.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: memory.h -// -// =========================================================================== -// dummy memory.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/stdarg.h b/src/coreclr/pal/inc/rt/cpp/stdarg.h deleted file mode 100644 index 59d0d046d5f91..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/stdarg.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: stdarg.h -// -// =========================================================================== -// dummy stdarg.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/stdbool.h b/src/coreclr/pal/inc/rt/cpp/stdbool.h deleted file mode 100644 index b23533a2940dd..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/stdbool.h +++ /dev/null @@ -1,4 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/stddef.h b/src/coreclr/pal/inc/rt/cpp/stddef.h deleted file mode 100644 index b347dbf414970..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/stddef.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
-// - -// -// =========================================================================== -// File: stddef.h -// -// =========================================================================== -// dummy stddef.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/stdint.h b/src/coreclr/pal/inc/rt/cpp/stdint.h deleted file mode 100644 index b23533a2940dd..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/stdint.h +++ /dev/null @@ -1,4 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/stdio.h b/src/coreclr/pal/inc/rt/cpp/stdio.h deleted file mode 100644 index 33c1912bb2b72..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/stdio.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: stdio.h -// -// =========================================================================== -// dummy stdio.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/stdlib.h b/src/coreclr/pal/inc/rt/cpp/stdlib.h deleted file mode 100644 index d2d49357b88e0..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/stdlib.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: stdlib.h -// -// =========================================================================== -// dummy stdlib.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/string.h b/src/coreclr/pal/inc/rt/cpp/string.h deleted file mode 100644 index b66d883338e10..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/string.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: string.h -// -// =========================================================================== -// dummy string.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/time.h b/src/coreclr/pal/inc/rt/cpp/time.h deleted file mode 100644 index 00c83f99d3438..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/time.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// - -// -// =========================================================================== -// File: time.h -// -// =========================================================================== -// dummy time.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/wchar.h b/src/coreclr/pal/inc/rt/cpp/wchar.h deleted file mode 100644 index 5497d729e43b8..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/wchar.h +++ /dev/null @@ -1,12 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
-// - -// -// =========================================================================== -// File: wchar.h -// -// =========================================================================== -// dummy wchar.h for PAL - -#include "palrt.h" diff --git a/src/coreclr/pal/inc/rt/cpp/xmmintrin.h b/src/coreclr/pal/inc/rt/cpp/xmmintrin.h deleted file mode 100644 index 826d2d788676f..0000000000000 --- a/src/coreclr/pal/inc/rt/cpp/xmmintrin.h +++ /dev/null @@ -1,117 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -// From llvm-3.9/clang-3.9.1 xmmintrin.h: - -/*===---- xmmintrin.h - SSE intrinsics -------------------------------------=== -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in -* all copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -* THE SOFTWARE. -* -*===-----------------------------------------------------------------------=== -*/ - -#ifdef __GNUC__ - -typedef float __m128 __attribute__((__vector_size__(16))); - -/* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, NODEBUG_ATTRIBUTE)) - -/// \brief Loads a 128-bit floating-point vector of [4 x float] from an aligned -/// memory location. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the \c VMOVAPS / MOVAPS instruction. -/// -/// \param __p -/// A pointer to a 128-bit memory location. The address of the memory -/// location has to be 128-bit aligned. -/// \returns A 128-bit vector of [4 x float] containing the loaded valus. -static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_load_ps(const float *__p) -{ - return *(__m128*)__p; -} - -/// \brief Loads a 128-bit floating-point vector of [4 x float] from an -/// unaligned memory location. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the \c VMOVUPS / MOVUPS instruction. -/// -/// \param __p -/// A pointer to a 128-bit memory location. The address of the memory -/// location does not have to be aligned. -/// \returns A 128-bit vector of [4 x float] containing the loaded values. -static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_loadu_ps(const float *__p) -{ - struct __loadu_ps - { - __m128 __v; - } __attribute__((__packed__, __may_alias__)); - return ((struct __loadu_ps*)__p)->__v; -} - -/// \brief Stores float values from a 128-bit vector of [4 x float] to an -/// unaligned memory location. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the \c VMOVUPS / MOVUPS instruction. -/// -/// \param __p -/// A pointer to a 128-bit memory location. 
The address of the memory -/// location does not have to be aligned. -/// \param __a -/// A 128-bit vector of [4 x float] containing the values to be stored. -static __inline__ void __DEFAULT_FN_ATTRS -_mm_storeu_ps(float *__p, __m128 __a) -{ - struct __storeu_ps - { - __m128 __v; - } __attribute__((__packed__, __may_alias__)); - ((struct __storeu_ps*)__p)->__v = __a; -} - -/// \brief Stores the lower 32 bits of a 128-bit vector of [4 x float] into -/// four contiguous elements in an aligned memory location. -/// -/// \headerfile -/// -/// This intrinsic corresponds to \c VMOVAPS / MOVAPS + \c shuffling -/// instruction. -/// -/// \param __p -/// A pointer to a 128-bit memory location. -/// \param __a -/// A 128-bit vector of [4 x float] whose lower 32 bits are stored to each -/// of the four contiguous elements pointed by __p. -static __inline__ void __DEFAULT_FN_ATTRS -_mm_store_ps(float *__p, __m128 __a) -{ - *(__m128*)__p = __a; -} - -#undef __DEFAULT_FN_ATTRS - -#endif // __GNUC__ diff --git a/src/coreclr/pal/inc/rt/palrt.h b/src/coreclr/pal/inc/rt/palrt.h index 1f7f413456965..18e25222c5db8 100644 --- a/src/coreclr/pal/inc/rt/palrt.h +++ b/src/coreclr/pal/inc/rt/palrt.h @@ -135,18 +135,6 @@ typedef enum tagEFaultRepRetVal #include "pal.h" -#ifndef PAL_STDCPP_COMPAT -#ifdef __cplusplus -#ifndef __PLACEMENT_NEW_INLINE -#define __PLACEMENT_NEW_INLINE -inline void *__cdecl operator new(size_t, void *_P) -{ - return (_P); -} -#endif // __PLACEMENT_NEW_INLINE -#endif // __cplusplus -#endif // !PAL_STDCPP_COMPAT - #include #define NTAPI __cdecl @@ -280,9 +268,7 @@ typedef union _ULARGE_INTEGER { DWORD HighPart; #endif } -#ifndef PAL_STDCPP_COMPAT u -#endif // PAL_STDCPP_COMPAT ; ULONGLONG QuadPart; } ULARGE_INTEGER, *PULARGE_INTEGER; diff --git a/src/coreclr/pal/inc/rt/safecrt.h b/src/coreclr/pal/inc/rt/safecrt.h index 12b5eceaad589..7abc7271b0709 100644 --- a/src/coreclr/pal/inc/rt/safecrt.h +++ b/src/coreclr/pal/inc/rt/safecrt.h @@ -86,15 +86,6 @@ #endif #endif -/* NULL */ -#if !defined(NULL) -#if !defined(__cplusplus) -#define NULL 0 -#else -#define NULL ((void *)0) -#endif -#endif - /* _W64 */ #if !defined(_W64) #if !defined(__midl) && (defined(HOST_X86) || defined(_M_IX86)) && _MSC_VER >= 1300 @@ -1116,10 +1107,8 @@ errno_t __cdecl _wcsnset_s(WCHAR *_Dst, size_t _SizeInWords, WCHAR _Value, size_ #endif -#ifndef PAL_STDCPP_COMPAT - /* wcsnlen */ -_SAFECRT__EXTERN_C +extern size_t __cdecl wcsnlen(const WCHAR *inString, size_t inMaxSize); #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL @@ -1140,7 +1129,6 @@ size_t __cdecl wcsnlen(const WCHAR *inString, size_t inMaxSize) } #endif -#endif // PAL_STDCPP_COMPAT /* _wmakepath_s */ _SAFECRT__EXTERN_C diff --git a/src/coreclr/pal/inc/rt/sal.h b/src/coreclr/pal/inc/rt/sal.h index bec3352aa3f16..b28a1ef8d1a8c 100644 --- a/src/coreclr/pal/inc/rt/sal.h +++ b/src/coreclr/pal/inc/rt/sal.h @@ -2399,16 +2399,13 @@ extern "C" { #define _SA_SPECSTRIZE( x ) #x /* - __null p __notnull p __maybenull p - Annotates a pointer p. States that pointer p is null. Commonly used - in the negated form __notnull or the possibly null form __maybenull. + Annotates a pointer p. States that pointer p is never null or maybe null. 
*/ #ifndef PAL_STDCPP_COMPAT - #define __null _Null_impl_ #define __notnull _Notnull_impl_ #define __maybenull _Maybenull_impl_ #endif // !PAL_STDCPP_COMPAT @@ -2599,7 +2596,6 @@ extern "C" { #else // ][ #ifndef PAL_STDCPP_COMPAT - #define __null #define __notnull #define __deref #endif // !PAL_STDCPP_COMPAT diff --git a/src/coreclr/pal/inc/rt/specstrings_strict.h b/src/coreclr/pal/inc/rt/specstrings_strict.h index dadb49930ceb8..293df55cffac4 100644 --- a/src/coreclr/pal/inc/rt/specstrings_strict.h +++ b/src/coreclr/pal/inc/rt/specstrings_strict.h @@ -656,7 +656,6 @@ /*************************************************************************** * Expert Macros ***************************************************************************/ -#define __null __allowed(on_typedecl) #define __notnull __allowed(on_typedecl) #define __maybenull __allowed(on_typedecl) #define __exceptthat __allowed(on_typedecl) diff --git a/src/coreclr/pal/inc/rt/specstrings_undef.h b/src/coreclr/pal/inc/rt/specstrings_undef.h index b0e1848c5eb86..b6c5e28072ab1 100644 --- a/src/coreclr/pal/inc/rt/specstrings_undef.h +++ b/src/coreclr/pal/inc/rt/specstrings_undef.h @@ -388,7 +388,6 @@ #undef __notnull #undef __notreadonly #undef __notvalid -#undef __null #undef __nullnullterminated #undef __nullterminated #undef __out_awcount diff --git a/src/coreclr/pal/src/cruntime/misc.cpp b/src/coreclr/pal/src/cruntime/misc.cpp index 2240b0b48c678..5b66dfec17d48 100644 --- a/src/coreclr/pal/src/cruntime/misc.cpp +++ b/src/coreclr/pal/src/cruntime/misc.cpp @@ -24,23 +24,16 @@ Module Name: #include "pal/misc.h" #include -/* needs to be included after "palinternal.h" to avoid name - collision for va_start and va_end */ #include #include #include -#if defined(HOST_AMD64) || defined(_x86_) -#include -#endif // defined(HOST_AMD64) || defined(_x86_) #if defined(_DEBUG) #include #endif //defined(_DEBUG) SET_DEFAULT_DEBUG_CHANNEL(CRT); -using namespace CorUnix; - /*++ Function: _gcvt_s @@ -108,27 +101,3 @@ __iscsym( int c ) PERF_EXIT(__iscsym); return 0; } -/*++ - -PAL forwarders for standard macro headers. 
- ---*/ -PALIMPORT DLLEXPORT int * __cdecl PAL_errno() -{ - return &errno; -} - -extern "C" PALIMPORT DLLEXPORT FILE* __cdecl PAL_stdout() -{ - return stdout; -} - -extern "C" PALIMPORT DLLEXPORT FILE* __cdecl PAL_stdin() -{ - return stdin; -} - -extern "C" PALIMPORT DLLEXPORT FILE* __cdecl PAL_stderr() -{ - return stderr; -} diff --git a/src/coreclr/pal/src/debug/debug.cpp b/src/coreclr/pal/src/debug/debug.cpp index f0a504452c59b..0e56ccceec7a1 100644 --- a/src/coreclr/pal/src/debug/debug.cpp +++ b/src/coreclr/pal/src/debug/debug.cpp @@ -40,6 +40,7 @@ SET_DEFAULT_DEBUG_CHANNEL(DEBUG); // some headers have code with asserts, so do #include #include +#include #if HAVE_PROCFS_CTL #include #elif defined(HAVE_TTRACE) // HAVE_PROCFS_CTL diff --git a/src/coreclr/pal/src/include/pal/file.h b/src/coreclr/pal/src/include/pal/file.h index 9b6e319536404..0ec765317d48f 100644 --- a/src/coreclr/pal/src/include/pal/file.h +++ b/src/coreclr/pal/src/include/pal/file.h @@ -25,6 +25,7 @@ Revision History: #include "pal/stackstring.hpp" #include #include +#include #ifdef __cplusplus extern "C" diff --git a/src/coreclr/pal/src/include/pal/palinternal.h b/src/coreclr/pal/src/include/pal/palinternal.h index b5a543bb77419..ae53848b9d260 100644 --- a/src/coreclr/pal/src/include/pal/palinternal.h +++ b/src/coreclr/pal/src/include/pal/palinternal.h @@ -161,199 +161,6 @@ function_name() to call the system's implementation #include "pal_perf.h" #endif -/* C runtime functions needed to be renamed to avoid duplicate definition - of those functions when including standard C header files */ -#define memcpy DUMMY_memcpy -#define memcmp DUMMY_memcmp -#define memset DUMMY_memset -#define memmove DUMMY_memmove -#define memchr DUMMY_memchr -#define atoll DUMMY_atoll -#define strlen DUMMY_strlen -#define stricmp DUMMY_stricmp -#define strstr DUMMY_strstr -#define strcmp DUMMY_strcmp -#define strcat DUMMY_strcat -#define strncat DUMMY_strncat -#define strcpy DUMMY_strcpy -#define strncmp DUMMY_strncmp -#define strncpy DUMMY_strncpy -#define strchr DUMMY_strchr -#define strrchr DUMMY_strrchr -#define strpbrk DUMMY_strpbrk -#define strtod DUMMY_strtod -#define strtoul DUMMY_strtoul -#define strtoull DUMMY_strtoull -#define strnlen DUMMY_strnlen -#define strcasecmp DUMMY_strcasecmp -#define strncasecmp DUMMY_strncasecmp -#define strdup DUMMY_strdup -#define strtok_r DUMMY_strtok_r -#define tolower DUMMY_tolower -#define toupper DUMMY_toupper -#define isprint DUMMY_isprint -#define isdigit DUMMY_isdigit -#define iswalpha DUMMY_iswalpha -#define iswdigit DUMMY_iswdigit -#define iswupper DUMMY_iswupper -#define towupper DUMMY_towupper -#define towlower DUMMY_towlower -#define iswprint DUMMY_iswprint -#define iswspace DUMMY_iswspace -#define iswxdigit DUMMY_iswxdigit -#define wint_t DUMMY_wint_t -#define srand DUMMY_srand -#define atoi DUMMY_atoi -#define atof DUMMY_atof -#define size_t DUMMY_size_t -#define time_t PAL_time_t -#define va_list DUMMY_va_list -#define exit DUMMY_exit -#define abs DUMMY_abs -#define llabs DUMMY_llabs -#define ceil DUMMY_ceil -#define cos DUMMY_cos -#define cosh DUMMY_cosh -#define fabs DUMMY_fabs -#define floor DUMMY_floor -#define fmod DUMMY_fmod -#define modf DUMMY_modf -#define sin DUMMY_sin -#define sinh DUMMY_sinh -#define sqrt DUMMY_sqrt -#define tan DUMMY_tan -#define tanh DUMMY_tanh -#define trunc DUMMY_trunc -#define ceilf DUMMY_ceilf -#define cosf DUMMY_cosf -#define coshf DUMMY_coshf -#define fabsf DUMMY_fabsf -#define floorf DUMMY_floorf -#define fmodf DUMMY_fmodf -#define modff DUMMY_modff 
-#define sinf DUMMY_sinf -#define sinhf DUMMY_sinhf -#define sqrtf DUMMY_sqrtf -#define tanf DUMMY_tanf -#define tanhf DUMMY_tanhf -#define truncf DUMMY_truncf -#define acos DUMMMY_acos -#define asin DUMMMY_asin -#define atan2 DUMMMY_atan2 -#define exp DUMMMY_exp -#define ilogb DUMMMY_ilogb -#define log DUMMMY_log -#define log10 DUMMMY_log10 -#define pow DUMMMY_pow -#define sincos DUMMMY_sincos -#define acosf DUMMMY_acosf -#define asinf DUMMMY_asinf -#define atan2f DUMMMY_atan2f -#define expf DUMMMY_expf -#define ilogbf DUMMMY_ilogbf -#define logf DUMMMY_logf -#define log10f DUMMMY_log10f -#define powf DUMMMY_powf -#define sincosf DUMMMY_sincosf -#define copysign DUMMY_copysign -#define copysignf DUMMY_copysignf -#define remove DUMMY_remove -#define printf DUMMY_printf -#define vprintf DUMMY_vprintf -#define fopen DUMMY_fopen -#define setvbuf DUMMY_setvbuf -#define fprintf DUMMY_fprintf -#define vfprintf DUMMY_vfprintf -#define fgets DUMMY_fgets -#define ferror DUMMY_ferror -#define fread DUMMY_fread -#define fwrite DUMMY_fwrite -#define ftell DUMMY_ftell -#define fclose DUMMY_fclose -#define fflush DUMMY_fflush -#define fputs DUMMY_fputs -#define fseek DUMMY_fseek -#define fgetpos DUMMY_fgetpos -#define fsetpos DUMMY_fsetpos -#define rand DUMMY_rand -#define realloc DUMMY_realloc -#define free DUMMY_free -#define malloc DUMMY_malloc -#define time DUMMY_time -#define getenv DUMMY_getenv -#define qsort DUMMY_qsort -#define bsearch DUMMY_bsearch - -/* RAND_MAX needed to be renamed to avoid duplicate definition when including - stdlib.h header files. PAL_RAND_MAX should have the same value as RAND_MAX - defined in pal.h */ -#define PAL_RAND_MAX 0x7fff - -/* The standard headers define isspace and isxdigit as macros and functions, - To avoid redefinition problems, undefine those macros. */ -#ifdef isspace -#undef isspace -#endif -#ifdef isxdigit -#undef isxdigit -#endif -#ifdef isalpha -#undef isalpha -#endif -#ifdef isalnum -#undef isalnum -#endif -#define isspace DUMMY_isspace -#define isxdigit DUMMY_isxdigit -#define isalpha DUMMY_isalpha -#define isalnum DUMMY_isalnum - -#ifdef stdin -#undef stdin -#endif -#ifdef stdout -#undef stdout -#endif -#ifdef stderr -#undef stderr -#endif - -#ifdef SCHAR_MIN -#undef SCHAR_MIN -#endif -#ifdef SCHAR_MAX -#undef SCHAR_MAX -#endif -#ifdef SHRT_MIN -#undef SHRT_MIN -#endif -#ifdef SHRT_MAX -#undef SHRT_MAX -#endif -#ifdef UCHAR_MAX -#undef UCHAR_MAX -#endif -#ifdef USHRT_MAX -#undef USHRT_MAX -#endif -#ifdef ULONG_MAX -#undef ULONG_MAX -#endif -#ifdef LONG_MIN -#undef LONG_MIN -#endif -#ifdef LONG_MAX -#undef LONG_MAX -#endif -#ifdef RAND_MAX -#undef RAND_MAX -#endif -#ifdef DBL_MAX -#undef DBL_MAX -#endif -#ifdef FLT_MAX -#undef FLT_MAX -#endif #ifdef __record_type_class #undef __record_type_class #endif @@ -361,24 +168,6 @@ function_name() to call the system's implementation #undef __real_type_class #endif -// The standard headers define va_start and va_end as macros, -// To avoid redefinition problems, undefine those macros. -#ifdef va_start -#undef va_start -#endif -#ifdef va_end -#undef va_end -#endif -#ifdef va_copy -#undef va_copy -#endif - -#define ptrdiff_t PAL_ptrdiff_t -#define intptr_t PAL_intptr_t -#define uintptr_t PAL_uintptr_t -#define timeval PAL_timeval - -#define DEFINE_DUMMY_FILE_TYPE #include "pal.h" #include "palprivate.h" @@ -395,217 +184,6 @@ function_name() to call the system's implementation #undef _BitScanReverse64 #endif -/* pal.h defines alloca(3) as a compiler builtin. 
- Redefining it to native libc will result in undefined breakage because - a compiler is allowed to make assumptions about the stack and frame - pointers. */ - -/* Undef all functions and types previously defined so those functions and - types could be mapped to the C runtime and socket implementation of the - native OS */ -#undef exit -#undef memcpy -#undef memcmp -#undef memset -#undef memmove -#undef memchr -#undef atoll -#undef strlen -#undef strnlen -#undef wcsnlen -#undef stricmp -#undef strstr -#undef strcmp -#undef strcat -#undef strncat -#undef strcpy -#undef strncmp -#undef strncpy -#undef strchr -#undef strrchr -#undef strpbrk -#undef strtoul -#undef strtoull -#undef strcasecmp -#undef strncasecmp -#undef strdup -#undef strtod -#undef strtok_r -#undef strdup -#undef tolower -#undef toupper -#undef isprint -#undef isdigit -#undef isspace -#undef iswdigit -#undef iswxdigit -#undef iswalpha -#undef iswprint -#undef isxdigit -#undef isalpha -#undef isalnum -#undef iswalpha -#undef iswdigit -#undef iswupper -#undef towupper -#undef towlower -#undef wint_t -#undef atoi -#undef atof -#undef malloc -#undef realloc -#undef free -#undef qsort -#undef bsearch -#undef time -#undef fclose -#undef fopen -#undef fread -#undef ferror -#undef ftell -#undef fflush -#undef fwrite -#undef fgets -#undef fputs -#undef fseek -#undef fgetpos -#undef fsetpos -#undef getcwd -#undef setvbuf -#undef unlink -#undef size_t -#undef time_t -#undef va_list -#undef va_start -#undef va_end -#undef va_copy -#undef va_arg -#undef stdin -#undef stdout -#undef stderr -#undef abs -#undef llabs -#undef acos -#undef acosh -#undef asin -#undef asinh -#undef atan -#undef atanh -#undef atan2 -#undef cbrt -#undef ceil -#undef cos -#undef cosh -#undef exp -#undef fabs -#undef floor -#undef fmod -#undef fma -#undef ilogb -#undef log -#undef log2 -#undef log10 -#undef modf -#undef pow -#undef sin -#undef sincos -#undef copysign -#undef sinh -#undef sqrt -#undef tan -#undef tanh -#undef trunc -#undef acosf -#undef acoshf -#undef asinf -#undef asinhf -#undef atanf -#undef atanhf -#undef atan2f -#undef cbrtf -#undef ceilf -#undef cosf -#undef coshf -#undef expf -#undef fabsf -#undef floorf -#undef fmodf -#undef fmaf -#undef ilogbf -#undef logf -#undef log2f -#undef log10f -#undef modff -#undef powf -#undef sinf -#undef sincosf -#undef copysignf -#undef sinhf -#undef sqrtf -#undef tanf -#undef tanhf -#undef truncf -#undef acos -#undef asin -#undef atan2 -#undef exp -#undef ilogb -#undef log -#undef log10 -#undef pow -#undef sincos -#undef acosf -#undef asinf -#undef atan2f -#undef expf -#undef ilogbf -#undef logf -#undef log10f -#undef powf -#undef sincosf -#undef rand -#undef srand -#undef errno -#undef getenv -#undef open -#undef glob -#undef remove -#undef printf -#undef vprintf -#undef ptrdiff_t -#undef intptr_t -#undef uintptr_t -#undef timeval - -#undef fprintf -#undef vfprintf -#undef iswupper -#undef iswspace -#undef towlower -#undef towupper - -#undef min -#undef max - -#undef SCHAR_MIN -#undef SCHAR_MAX -#undef UCHAR_MAX -#undef SHRT_MIN -#undef SHRT_MAX -#undef USHRT_MAX -#undef LONG_MIN -#undef LONG_MAX -#undef ULONG_MAX -#undef RAND_MAX -#undef DBL_MAX -#undef FLT_MAX -#undef __record_type_class -#undef __real_type_class - -#if HAVE_CHAR_BIT -#undef CHAR_BIT -#endif - // We need a sigsetjmp prototype in pal.h for the SEH macros, but we // can't use the "real" prototype (because we don't want to define sigjmp_buf). // So we must rename the "real" sigsetjmp to avoid redefinition errors. 
@@ -627,18 +205,6 @@ function_name() to call the system's implementation // https://gcc.gnu.org/ml/libstdc++/2016-01/msg00025.html #define _GLIBCXX_INCLUDE_NEXT_C_HEADERS 1 -#define _WITH_GETLINE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - #ifdef __APPLE__ #undef GetCurrentThread diff --git a/src/coreclr/pal/src/map/map.cpp b/src/coreclr/pal/src/map/map.cpp index 707284b58fad9..e5f1203c53641 100644 --- a/src/coreclr/pal/src/map/map.cpp +++ b/src/coreclr/pal/src/map/map.cpp @@ -35,6 +35,7 @@ Module Name: #include #include #include +#include #include #include "rt/ntimage.h" diff --git a/src/coreclr/pal/src/misc/miscpalapi.cpp b/src/coreclr/pal/src/misc/miscpalapi.cpp index 06129210ef9c9..f0d32f0388e86 100644 --- a/src/coreclr/pal/src/misc/miscpalapi.cpp +++ b/src/coreclr/pal/src/misc/miscpalapi.cpp @@ -29,6 +29,7 @@ Revision History: #include #include +#include #include #include #include diff --git a/src/coreclr/pal/src/synchmgr/synchmanager.cpp b/src/coreclr/pal/src/synchmgr/synchmanager.cpp index c34aa4e27fc8c..c6b0b3db1cfd6 100644 --- a/src/coreclr/pal/src/synchmgr/synchmanager.cpp +++ b/src/coreclr/pal/src/synchmgr/synchmanager.cpp @@ -28,6 +28,7 @@ SET_DEFAULT_DEBUG_CHANNEL(SYNC); // some headers have code with asserts, so do t #include #include #include +#include #include #include #include diff --git a/src/coreclr/pal/src/thread/process.cpp b/src/coreclr/pal/src/thread/process.cpp index b23eab001cca4..901a91961d486 100644 --- a/src/coreclr/pal/src/thread/process.cpp +++ b/src/coreclr/pal/src/thread/process.cpp @@ -44,6 +44,7 @@ SET_DEFAULT_DEBUG_CHANNEL(PROCESS); // some headers have code with asserts, so d #endif // HAVE_POLL #include +#include #include #include #include diff --git a/src/coreclr/pal/src/thread/threadsusp.cpp b/src/coreclr/pal/src/thread/threadsusp.cpp index d2fae05c4256f..867f46b3fa38f 100644 --- a/src/coreclr/pal/src/thread/threadsusp.cpp +++ b/src/coreclr/pal/src/thread/threadsusp.cpp @@ -29,6 +29,7 @@ Revision History: #include #include +#include #include #include #include diff --git a/src/coreclr/palrt/memorystream.cpp b/src/coreclr/palrt/memorystream.cpp index 0ed06547f3bfa..91a5ca8b2d318 100644 --- a/src/coreclr/palrt/memorystream.cpp +++ b/src/coreclr/palrt/memorystream.cpp @@ -23,6 +23,10 @@ Revision History: #include "common.h" #include "objidl.h" +#include + +using std::min; +using std::max; class MemoryStream : public IStream { @@ -44,7 +48,7 @@ class MemoryStream : public IStream n = min(2 * n, n + n / 4 + 0x100000); // don't allocate tiny chunks - n = max(n, 0x100); + n = max(n, (ULONG)0x100); // compare with the hard limit nNewData = max(n, nNewData); diff --git a/src/coreclr/tools/StressLogAnalyzer/StressLogPlugin.cpp b/src/coreclr/tools/StressLogAnalyzer/StressLogPlugin.cpp index dc4a63c0e8db1..20d0f1b6b2291 100644 --- a/src/coreclr/tools/StressLogAnalyzer/StressLogPlugin.cpp +++ b/src/coreclr/tools/StressLogAnalyzer/StressLogPlugin.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #ifndef INFINITY #define INFINITY 1e300 // Practically good enough - not sure why we miss this in our Linux build. 
@@ -42,6 +43,9 @@ bool IsInCantAllocStressLogRegion() #include "../../../inc/stresslog.h" #include "StressMsgReader.h" +using std::min; +using std::max; + size_t StressLog::writing_base_address; size_t StressLog::reading_base_address; @@ -1323,7 +1327,7 @@ int ProcessStressLog(void* baseAddress, int argc, char* argv[]) double latestTime = FindLatestTime(hdr); if (s_timeFilterStart < 0) { - s_timeFilterStart = max(latestTime + s_timeFilterStart, 0); + s_timeFilterStart = max(latestTime + s_timeFilterStart, 0.0); s_timeFilterEnd = latestTime; } for (ThreadStressLog* tsl = StressLog::TranslateMemoryMappedPointer(hdr->logs.t); tsl != nullptr; tsl = StressLog::TranslateMemoryMappedPointer(tsl->next)) @@ -1346,7 +1350,7 @@ int ProcessStressLog(void* baseAddress, int argc, char* argv[]) SYSTEM_INFO systemInfo; GetSystemInfo(&systemInfo); - DWORD threadCount = min(systemInfo.dwNumberOfProcessors, MAXIMUM_WAIT_OBJECTS); + DWORD threadCount = min(systemInfo.dwNumberOfProcessors, (DWORD)MAXIMUM_WAIT_OBJECTS); HANDLE threadHandle[64]; for (DWORD i = 0; i < threadCount; i++) { @@ -1361,7 +1365,7 @@ int ProcessStressLog(void* baseAddress, int argc, char* argv[]) // the interlocked increment may have increased s_msgCount beyond MAX_MESSAGE_COUNT - // make sure we don't go beyond the end of the buffer - s_msgCount = min(s_msgCount, MAX_MESSAGE_COUNT); + s_msgCount = min((LONG64)s_msgCount, MAX_MESSAGE_COUNT); if (s_gcFilterStart != 0) { diff --git a/src/coreclr/tools/metainfo/mdinfo.cpp b/src/coreclr/tools/metainfo/mdinfo.cpp index 579a5362f96d5..84d7f8d50f265 100644 --- a/src/coreclr/tools/metainfo/mdinfo.cpp +++ b/src/coreclr/tools/metainfo/mdinfo.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -3772,7 +3773,7 @@ int MDInfo::DumpHex( ++nLines; // Calculate spacing. - nPrint = min(cbData, nLine); + nPrint = std::min(cbData, nLine); nSpace = nLine - nPrint; // dump in hex. diff --git a/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h b/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h index 8c511b45e91b9..f9e4bb561f0a8 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h +++ b/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h @@ -59,21 +59,22 @@ #include #include -// Getting STL to work with PAL is difficult, so reimplement STL functionality to not require it. -#ifdef TARGET_UNIX -#include "clr_std/utility" -#include "clr_std/string" -#include "clr_std/algorithm" -#include "clr_std/vector" -#else // !TARGET_UNIX -#ifndef USE_STL -#define USE_STL -#endif // USE_STL #include #include #include #include -#endif // !TARGET_UNIX + +template +constexpr auto max(T&& t, U&& u) -> decltype(t > u ? t : u) +{ + return t > u ? t : u; +} + +template +constexpr auto min(T&& t, U&& u) -> decltype(t < u ? t : u) +{ + return t < u ? 
t : u; +} #ifdef USE_MSVCDIS #define DISLIB diff --git a/src/coreclr/utilcode/loaderheap.cpp b/src/coreclr/utilcode/loaderheap.cpp index 72d0d1a6f6f7f..985df665be6f4 100644 --- a/src/coreclr/utilcode/loaderheap.cpp +++ b/src/coreclr/utilcode/loaderheap.cpp @@ -1154,7 +1154,7 @@ BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit) } // Figure out how much to reserve - dwSizeToReserve = max(dwSizeToCommit, m_dwReserveBlockSize); + dwSizeToReserve = max(dwSizeToCommit, m_dwReserveBlockSize); // Round to VIRTUAL_ALLOC_RESERVE_GRANULARITY dwSizeToReserve = ALIGN_UP(dwSizeToReserve, VIRTUAL_ALLOC_RESERVE_GRANULARITY); diff --git a/src/coreclr/utilcode/stdafx.h b/src/coreclr/utilcode/stdafx.h index 18b820306f775..78e9840575819 100644 --- a/src/coreclr/utilcode/stdafx.h +++ b/src/coreclr/utilcode/stdafx.h @@ -12,6 +12,9 @@ #include #include #include +#include +using std::min; +using std::max; #define IN_WINFIX_CPP diff --git a/src/coreclr/utilcode/stgpool.cpp b/src/coreclr/utilcode/stgpool.cpp index f04f6e9e7b3bf..dd1858be84d70 100644 --- a/src/coreclr/utilcode/stgpool.cpp +++ b/src/coreclr/utilcode/stgpool.cpp @@ -1938,7 +1938,7 @@ CInMemoryStream::CopyTo( _ASSERTE(cb.QuadPart <= UINT32_MAX); ULONG cbTotal = min(static_cast(cb.QuadPart), m_cbSize - m_cbCurrent); - ULONG cbRead=min(1024, cbTotal); + ULONG cbRead=min(1024u, cbTotal); CQuickBytes rBuf; void *pBuf = rBuf.AllocNoThrow(cbRead); if (pBuf == 0) @@ -2061,7 +2061,7 @@ CGrowableStream::CGrowableStream(float multiplicativeGrowthRate, DWORD additiveG m_multiplicativeGrowthRate = min(max(1.0F, multiplicativeGrowthRate), 2.0F); _ASSERTE(additiveGrowthRate >= 1); - m_additiveGrowthRate = max(1, additiveGrowthRate); + m_additiveGrowthRate = max(1u, additiveGrowthRate); } // CGrowableStream::CGrowableStream #ifndef DACCESS_COMPILE diff --git a/src/coreclr/utilcode/stresslog.cpp b/src/coreclr/utilcode/stresslog.cpp index 90ad5900473ed..37abeb2cb92f4 100644 --- a/src/coreclr/utilcode/stresslog.cpp +++ b/src/coreclr/utilcode/stresslog.cpp @@ -227,7 +227,7 @@ void StressLog::Initialize(unsigned facilities, unsigned level, unsigned maxByte // in this case, interpret the number as GB maxBytesPerThread *= (1024 * 1024 * 1024); } - theLog.MaxSizePerThread = (unsigned)min(maxBytesPerThread,0xffffffff); + theLog.MaxSizePerThread = (unsigned)min(maxBytesPerThread,(size_t)0xffffffff); size_t maxBytesTotal = maxBytesTotalArg; if (maxBytesTotal < STRESSLOG_CHUNK_SIZE * 256) @@ -235,7 +235,7 @@ void StressLog::Initialize(unsigned facilities, unsigned level, unsigned maxByte // in this case, interpret the number as GB maxBytesTotal *= (1024 * 1024 * 1024); } - theLog.MaxSizeTotal = (unsigned)min(maxBytesTotal, 0xffffffff); + theLog.MaxSizeTotal = (unsigned)min(maxBytesTotal, (size_t)0xffffffff); theLog.totalChunk = 0; theLog.facilitiesToLog = facilities | LF_ALWAYS; theLog.levelToLog = level; diff --git a/src/coreclr/utilcode/utsem.cpp b/src/coreclr/utilcode/utsem.cpp index 94c1636dbe6ea..d7f1bc04326e9 100644 --- a/src/coreclr/utilcode/utsem.cpp +++ b/src/coreclr/utilcode/utsem.cpp @@ -84,7 +84,7 @@ SpinConstants g_SpinConstants = { inline void InitializeSpinConstants_NoHost() { - g_SpinConstants.dwMaximumDuration = max(2, g_SystemInfo.dwNumberOfProcessors) * 20000; + g_SpinConstants.dwMaximumDuration = max(2u, g_SystemInfo.dwNumberOfProcessors) * 20000; } #else //!SELF_NO_HOST diff --git a/src/coreclr/vm/appdomain.cpp b/src/coreclr/vm/appdomain.cpp index feafd1f8abad6..2e6b63650e958 100644 --- a/src/coreclr/vm/appdomain.cpp +++ 
b/src/coreclr/vm/appdomain.cpp @@ -313,7 +313,7 @@ OBJECTREF* PinnedHeapHandleTable::AllocateHandles(DWORD nRequested) // Retrieve the remaining number of handles in the bucket. DWORD numRemainingHandlesInBucket = (m_pHead != NULL) ? m_pHead->GetNumRemainingHandles() : 0; PTRARRAYREF pinnedHandleArrayObj = NULL; - DWORD nextBucketSize = min(m_NextBucketSize * 2, MAX_BUCKETSIZE); + DWORD nextBucketSize = min(m_NextBucketSize * 2, MAX_BUCKETSIZE); // create a new block if this request doesn't fit in the current block if (nRequested > numRemainingHandlesInBucket) @@ -4176,7 +4176,7 @@ void DomainLocalModule::EnsureDynamicClassIndex(DWORD dwID) return; } - SIZE_T aDynamicEntries = max(16, oldDynamicEntries); + SIZE_T aDynamicEntries = max(16, oldDynamicEntries); while (aDynamicEntries <= dwID) { aDynamicEntries *= 2; diff --git a/src/coreclr/vm/callcounting.cpp b/src/coreclr/vm/callcounting.cpp index c464949f7aeee..a6577fd42de34 100644 --- a/src/coreclr/vm/callcounting.cpp +++ b/src/coreclr/vm/callcounting.cpp @@ -664,7 +664,7 @@ bool CallCountingManager::SetCodeEntryPoint( // direct calls in codegen and they need to be promoted earlier than their callers. if (methodDesc->GetMethodTable() == g_pCastHelpers) { - callCountThreshold = max(1, (CallCount)(callCountThreshold / 2)); + callCountThreshold = max(1, (CallCount)(callCountThreshold / 2)); } NewHolder callCountingInfoHolder = new CallCountingInfo(activeCodeVersion, callCountThreshold); diff --git a/src/coreclr/vm/castcache.cpp b/src/coreclr/vm/castcache.cpp index 1e59f7862d72f..27105f3d25efa 100644 --- a/src/coreclr/vm/castcache.cpp +++ b/src/coreclr/vm/castcache.cpp @@ -12,6 +12,7 @@ BASEARRAYREF* CastCache::s_pTableRef = NULL; OBJECTHANDLE CastCache::s_sentinelTable = NULL; DWORD CastCache::s_lastFlushSize = INITIAL_CACHE_SIZE; +const DWORD CastCache::INITIAL_CACHE_SIZE; BASEARRAYREF CastCache::CreateCastCache(DWORD size) { diff --git a/src/coreclr/vm/ceeload.cpp b/src/coreclr/vm/ceeload.cpp index 0796e59a15c29..23304c29f38bf 100644 --- a/src/coreclr/vm/ceeload.cpp +++ b/src/coreclr/vm/ceeload.cpp @@ -951,26 +951,26 @@ void Module::BuildStaticsOffsets(AllocMemTracker *pamTracker) case ELEMENT_TYPE_I2: case ELEMENT_TYPE_U2: case ELEMENT_TYPE_CHAR: - dwAlignment[kk] = max(2, dwAlignment[kk]); + dwAlignment[kk] = max(2, dwAlignment[kk]); dwClassNonGCBytes[kk] += 2; break; case ELEMENT_TYPE_I4: case ELEMENT_TYPE_U4: case ELEMENT_TYPE_R4: - dwAlignment[kk] = max(4, dwAlignment[kk]); + dwAlignment[kk] = max(4, dwAlignment[kk]); dwClassNonGCBytes[kk] += 4; break; case ELEMENT_TYPE_FNPTR: case ELEMENT_TYPE_PTR: case ELEMENT_TYPE_I: case ELEMENT_TYPE_U: - dwAlignment[kk] = max((1 << LOG2_PTRSIZE), dwAlignment[kk]); + dwAlignment[kk] = max((1 << LOG2_PTRSIZE), dwAlignment[kk]); dwClassNonGCBytes[kk] += (1 << LOG2_PTRSIZE); break; case ELEMENT_TYPE_I8: case ELEMENT_TYPE_U8: case ELEMENT_TYPE_R8: - dwAlignment[kk] = max(8, dwAlignment[kk]); + dwAlignment[kk] = max(8, dwAlignment[kk]); dwClassNonGCBytes[kk] += 8; break; case ELEMENT_TYPE_VAR: @@ -994,7 +994,7 @@ void Module::BuildStaticsOffsets(AllocMemTracker *pamTracker) { // We'll have to be pessimistic here dwClassNonGCBytes[kk] += MAX_PRIMITIVE_FIELD_SIZE; - dwAlignment[kk] = max(MAX_PRIMITIVE_FIELD_SIZE, dwAlignment[kk]); + dwAlignment[kk] = max(MAX_PRIMITIVE_FIELD_SIZE, dwAlignment[kk]); dwClassGCHandles[kk] += 1; break; @@ -1555,7 +1555,7 @@ DWORD Module::AllocateDynamicEntry(MethodTable *pMT) if (newId >= m_maxDynamicEntries) { - SIZE_T maxDynamicEntries = max(16, m_maxDynamicEntries); + 
SIZE_T maxDynamicEntries = max(16, m_maxDynamicEntries); while (maxDynamicEntries <= newId) { maxDynamicEntries *= 2; diff --git a/src/coreclr/vm/ceemain.cpp b/src/coreclr/vm/ceemain.cpp index 813616e529c80..6fde41e6add35 100644 --- a/src/coreclr/vm/ceemain.cpp +++ b/src/coreclr/vm/ceemain.cpp @@ -935,7 +935,7 @@ void EEStartupHelper() // retrieve configured max size for the mini-metadata buffer (defaults to 64KB) g_MiniMetaDataBuffMaxSize = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MiniMdBufferCapacity); // align up to GetOsPageSize(), with a maximum of 1 MB - g_MiniMetaDataBuffMaxSize = (DWORD) min(ALIGN_UP(g_MiniMetaDataBuffMaxSize, GetOsPageSize()), 1024 * 1024); + g_MiniMetaDataBuffMaxSize = (DWORD) min(ALIGN_UP(g_MiniMetaDataBuffMaxSize, GetOsPageSize()), 1024u * 1024u); // allocate the buffer. this is never touched while the process is running, so it doesn't // contribute to the process' working set. it is needed only as a "shadow" for a mini-metadata // buffer that will be set up and reported / updated in the Watson process (the diff --git a/src/coreclr/vm/classhash.cpp b/src/coreclr/vm/classhash.cpp index 5d2be11c9b328..a0eb8eb89c313 100644 --- a/src/coreclr/vm/classhash.cpp +++ b/src/coreclr/vm/classhash.cpp @@ -452,7 +452,7 @@ EEClassHashTable *EEClassHashTable::MakeCaseInsensitiveTable(Module *pModule, Al // Allocate the table and verify that we actually got one. EEClassHashTable * pCaseInsTable = EEClassHashTable::Create(pModule, - max(BaseGetElementCount() / 2, 11), + max(BaseGetElementCount() / 2, 11u), this, pamTracker); diff --git a/src/coreclr/vm/classlayoutinfo.cpp b/src/coreclr/vm/classlayoutinfo.cpp index 9dd6fb4881b23..7f78e4265bc83 100644 --- a/src/coreclr/vm/classlayoutinfo.cpp +++ b/src/coreclr/vm/classlayoutinfo.cpp @@ -136,7 +136,7 @@ namespace ) { UINT32 cbCurOffset = parentSize; - BYTE LargestAlignmentRequirement = max(1, min(packingSize, parentAlignmentRequirement)); + BYTE LargestAlignmentRequirement = max(1, min(packingSize, parentAlignmentRequirement)); // Start with the size inherited from the parent (if any). 
uint32_t calcTotalSize = parentSize; diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index 52917161fb002..99a52bd6bd112 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -2296,7 +2296,7 @@ VOID EEJitManager::EnsureJumpStubReserve(BYTE * pImageBase, SIZE_T imageSize, SI int allocMode = 0; // Try to reserve at least 16MB at a time - SIZE_T allocChunk = max(ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY), 16*1024*1024); + SIZE_T allocChunk = max(ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY), 16*1024*1024); while (reserveSize > 0) { @@ -2816,11 +2816,11 @@ void EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, size_t reserveFo if ((flag & CORJIT_ALLOCMEM_FLG_32BYTE_ALIGN) != 0) { - alignment = max(alignment, 32); + alignment = max(alignment, 32u); } else if ((flag & CORJIT_ALLOCMEM_FLG_16BYTE_ALIGN) != 0) { - alignment = max(alignment, 16); + alignment = max(alignment, 16u); } #if defined(TARGET_X86) diff --git a/src/coreclr/vm/common.h b/src/coreclr/vm/common.h index f0edc0f15cd00..a51626dfaf320 100644 --- a/src/coreclr/vm/common.h +++ b/src/coreclr/vm/common.h @@ -58,7 +58,7 @@ #include #include #include - +#include #include #include @@ -73,6 +73,9 @@ #include +using std::max; +using std::min; + #ifdef _MSC_VER //non inline intrinsics are faster #pragma function(memcpy,memcmp,strcmp,strcpy,strlen,strcat) diff --git a/src/coreclr/vm/dacenumerablehash.inl b/src/coreclr/vm/dacenumerablehash.inl index a6083e26fda4e..93d63116e0f42 100644 --- a/src/coreclr/vm/dacenumerablehash.inl +++ b/src/coreclr/vm/dacenumerablehash.inl @@ -7,7 +7,7 @@ // See DacEnumerableHash.h for a more detailed description. // -#include "clr_std/type_traits" +#include // Our implementation embeds entry data supplied by the hash sub-class into a larger entry structure // containing DacEnumerableHash metadata. 
We often end up returning pointers to the inner entry to sub-class code and diff --git a/src/coreclr/vm/dynamicmethod.cpp b/src/coreclr/vm/dynamicmethod.cpp index bd5bebcce50f2..44b69193d306a 100644 --- a/src/coreclr/vm/dynamicmethod.cpp +++ b/src/coreclr/vm/dynamicmethod.cpp @@ -515,7 +515,7 @@ HostCodeHeap::TrackAllocation* HostCodeHeap::AllocFromFreeList(size_t header, si // The space left is not big enough for a new block, let's just // update the TrackAllocation record for the current block - if (pCurrent->size - realSize < max(HOST_CODEHEAP_SIZE_ALIGN, sizeof(TrackAllocation))) + if (pCurrent->size - realSize < max(HOST_CODEHEAP_SIZE_ALIGN, sizeof(TrackAllocation))) { LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Item removed %p, size 0x%X\n", this, pCurrent, pCurrent->size)); // remove current diff --git a/src/coreclr/vm/interpreter.h b/src/coreclr/vm/interpreter.h index 86a0a36efe452..7f7eed175dea0 100644 --- a/src/coreclr/vm/interpreter.h +++ b/src/coreclr/vm/interpreter.h @@ -13,7 +13,7 @@ #include "crst.h" #include "callhelpers.h" #include "codeversion.h" -#include "clr_std/type_traits" +#include typedef SSIZE_T NativeInt; typedef SIZE_T NativeUInt; diff --git a/src/coreclr/vm/jithelpers.cpp b/src/coreclr/vm/jithelpers.cpp index 450752ae36778..1cbe520a48de9 100644 --- a/src/coreclr/vm/jithelpers.cpp +++ b/src/coreclr/vm/jithelpers.cpp @@ -264,7 +264,7 @@ HCIMPL2(INT32, JIT_Div, INT32 dividend, INT32 divisor) } else if (divisor == -1) { - if (dividend == _I32_MIN) + if (dividend == INT32_MIN) { ehKind = kOverflowException; goto ThrowExcep; @@ -296,7 +296,7 @@ HCIMPL2(INT32, JIT_Mod, INT32 dividend, INT32 divisor) } else if (divisor == -1) { - if (dividend == _I32_MIN) + if (dividend == INT32_MIN) { ehKind = kOverflowException; goto ThrowExcep; @@ -674,32 +674,6 @@ HCIMPL1_V(INT64, JIT_Dbl2LngOvf, double val) } HCIMPLEND -#ifndef TARGET_WINDOWS -namespace -{ - bool isnan(float val) - { - UINT32 bits = *reinterpret_cast(&val); - return (bits & 0x7FFFFFFFU) > 0x7F800000U; - } - bool isnan(double val) - { - UINT64 bits = *reinterpret_cast(&val); - return (bits & 0x7FFFFFFFFFFFFFFFULL) > 0x7FF0000000000000ULL; - } - bool isfinite(float val) - { - UINT32 bits = *reinterpret_cast(&val); - return (~bits & 0x7F800000U) != 0; - } - bool isfinite(double val) - { - UINT64 bits = *reinterpret_cast(&val); - return (~bits & 0x7FF0000000000000ULL) != 0; - } -} -#endif - HCIMPL2_VV(float, JIT_FltRem, float dividend, float divisor) { FCALL_CONTRACT; diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp index 20c8321a7c496..e0f718f057300 100644 --- a/src/coreclr/vm/jitinterface.cpp +++ b/src/coreclr/vm/jitinterface.cpp @@ -702,7 +702,7 @@ size_t CEEInfo::printObjectDescription ( const UTF8* utf8data = stackStr.GetUTF8(); if (bufferSize > 0) { - bytesWritten = min(bufferSize - 1, stackStr.GetCount()); + bytesWritten = min(bufferSize - 1, stackStr.GetCount()); memcpy((BYTE*)buffer, (BYTE*)utf8data, bytesWritten); // Always null-terminate @@ -11407,7 +11407,7 @@ void CEEJitInfo::recordRelocation(void * location, // Keep track of conservative estimate of how much memory may be needed by jump stubs. We will use it to reserve extra memory // on retry to increase chances that the retry succeeds. 
- m_reserveForJumpStubs = max(0x400, m_reserveForJumpStubs + 0x10); + m_reserveForJumpStubs = max((size_t)0x400, m_reserveForJumpStubs + 0x10); } } diff --git a/src/coreclr/vm/methodtablebuilder.cpp b/src/coreclr/vm/methodtablebuilder.cpp index b6edb7fee7e54..02c61ca66800f 100644 --- a/src/coreclr/vm/methodtablebuilder.cpp +++ b/src/coreclr/vm/methodtablebuilder.cpp @@ -8329,7 +8329,7 @@ VOID MethodTableBuilder::PlaceInstanceFields(MethodTable ** pByValueClassCach // Place by value class fields last // Update the number of GC pointer series // Calculate largest alignment requirement - int largestAlignmentRequirement = 1; + unsigned int largestAlignmentRequirement = 1; for (i = 0; i < bmtEnumFields->dwNumInstanceFields; i++) { if (pFieldDescList[i].IsByValue()) @@ -8362,7 +8362,7 @@ VOID MethodTableBuilder::PlaceInstanceFields(MethodTable ** pByValueClassCach else { int fieldAlignmentRequirement = pByValueMT->GetFieldAlignmentRequirement(); - largestAlignmentRequirement = max(largestAlignmentRequirement, fieldAlignmentRequirement); + largestAlignmentRequirement = max(largestAlignmentRequirement, (unsigned int)fieldAlignmentRequirement); dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, fieldAlignmentRequirement); } diff --git a/src/coreclr/vm/object.inl b/src/coreclr/vm/object.inl index 7f58c122097be..491aab1d4c873 100644 --- a/src/coreclr/vm/object.inl +++ b/src/coreclr/vm/object.inl @@ -91,7 +91,7 @@ inline void Object::EnumMemoryRegions(void) // Unfortunately, DacEnumMemoryRegion takes only ULONG32 as size argument while (size > 0) { // Use 0x10000000 instead of MAX_ULONG32 so that the chunks stays aligned - SIZE_T chunk = min(size, 0x10000000); + SIZE_T chunk = min(size, (SIZE_T)0x10000000); // If for any reason we can't enumerate the memory, stop. This would generally mean // that we have target corruption, or that the target is executing, etc. if (!DacEnumMemoryRegion(ptr, chunk)) diff --git a/src/coreclr/vm/profdetach.cpp b/src/coreclr/vm/profdetach.cpp index bf138209ce6ad..8a0a8b8e42675 100644 --- a/src/coreclr/vm/profdetach.cpp +++ b/src/coreclr/vm/profdetach.cpp @@ -446,8 +446,8 @@ void ProfilingAPIDetach::SleepWhileProfilerEvacuates(ProfilerDetachInfo *pDetach } // ...but keep it in bounds! - ui64SleepMilliseconds = min( - max(ui64SleepMilliseconds, s_dwMinSleepMs), + ui64SleepMilliseconds = min( + max(ui64SleepMilliseconds, s_dwMinSleepMs), s_dwMaxSleepMs); // At this point it's safe to cast ui64SleepMilliseconds down to a DWORD since we diff --git a/src/coreclr/vm/qcall.h b/src/coreclr/vm/qcall.h index d5f355ad9662b..e3154c7b1334c 100644 --- a/src/coreclr/vm/qcall.h +++ b/src/coreclr/vm/qcall.h @@ -7,7 +7,7 @@ #ifndef __QCall_h__ #define __QCall_h__ -#include "clr_std/type_traits" +#include // // QCALLS diff --git a/src/coreclr/vm/stackingallocator.cpp b/src/coreclr/vm/stackingallocator.cpp index 286c4d09e5fd2..913940db68ed8 100644 --- a/src/coreclr/vm/stackingallocator.cpp +++ b/src/coreclr/vm/stackingallocator.cpp @@ -188,7 +188,7 @@ bool StackingAllocator::AllocNewBlockForBytes(unsigned n) // request is larger than MaxBlockSize then allocate exactly that // amount. unsigned lower = MinBlockSize; - size_t allocSize = sizeof(StackBlock) + max(n, min(max(n * 4, lower), MaxBlockSize)); + size_t allocSize = sizeof(StackBlock) + max(n, min(max(n * 4, lower), (unsigned)MaxBlockSize)); // Allocate the block. 
// @todo: Is it worth implementing a non-thread safe standard heap for diff --git a/src/coreclr/vm/stringliteralmap.cpp b/src/coreclr/vm/stringliteralmap.cpp index 29d457207bf8d..ac37fd48acfca 100644 --- a/src/coreclr/vm/stringliteralmap.cpp +++ b/src/coreclr/vm/stringliteralmap.cpp @@ -442,7 +442,7 @@ static void LogStringLiteral(_In_z_ const char* action, EEStringData *pStringDat STATIC_CONTRACT_FORBID_FAULT; ULONG length = pStringData->GetCharCount(); - length = min(length, 128); + length = min(length, 128u); WCHAR *szString = (WCHAR *)_alloca((length + 1) * sizeof(WCHAR)); memcpyNoGCRefs((void*)szString, (void*)pStringData->GetStringBuffer(), length * sizeof(WCHAR)); szString[length] = '\0'; diff --git a/src/coreclr/vm/syncblk.cpp b/src/coreclr/vm/syncblk.cpp index 2cc7de6bd2b09..eb0b29fafdd02 100644 --- a/src/coreclr/vm/syncblk.cpp +++ b/src/coreclr/vm/syncblk.cpp @@ -2681,7 +2681,7 @@ BOOL AwareLock::EnterEpilogHelper(Thread* pCurThread, INT32 timeOut) { duration = end - start; } - duration = min(duration, (DWORD)timeOut); + duration = min(duration, (ULONGLONG)timeOut); timeOut -= (INT32)duration; } } diff --git a/src/coreclr/vm/threadstatics.cpp b/src/coreclr/vm/threadstatics.cpp index 94088ba399947..6a8a43a0821ef 100644 --- a/src/coreclr/vm/threadstatics.cpp +++ b/src/coreclr/vm/threadstatics.cpp @@ -125,7 +125,7 @@ void ThreadLocalBlock::EnsureModuleIndex(ModuleIndex index) return; } - SIZE_T aModuleIndices = max(16, m_TLMTableSize); + SIZE_T aModuleIndices = max((SIZE_T)16, m_TLMTableSize); while (aModuleIndices <= index.m_dwIndex) { aModuleIndices *= 2; @@ -411,7 +411,7 @@ void ThreadLocalModule::EnsureDynamicClassIndex(DWORD dwID) return; } - SIZE_T aDynamicEntries = max(16, m_aDynamicEntries); + SIZE_T aDynamicEntries = max((SIZE_T)16, m_aDynamicEntries); while (aDynamicEntries <= dwID) { aDynamicEntries *= 2; diff --git a/src/coreclr/vm/util.hpp b/src/coreclr/vm/util.hpp index e7b311d8724d4..ef05074b186a4 100644 --- a/src/coreclr/vm/util.hpp +++ b/src/coreclr/vm/util.hpp @@ -16,7 +16,7 @@ #include "clrdata.h" #include "xclrdata.h" #include "posterror.h" -#include "clr_std/type_traits" +#include // Hot cache lines need to be aligned to cache line size to improve performance #if defined(TARGET_ARM64) diff --git a/src/coreclr/vm/virtualcallstub.cpp b/src/coreclr/vm/virtualcallstub.cpp index e82f8b84a580b..a87eac54f607c 100644 --- a/src/coreclr/vm/virtualcallstub.cpp +++ b/src/coreclr/vm/virtualcallstub.cpp @@ -823,6 +823,8 @@ void VirtualCallStubManager::ReclaimAll() g_reclaim_counter++; } +const UINT32 VirtualCallStubManager::counter_block::MAX_COUNTER_ENTRIES; + /* reclaim/rearrange any structures that can only be done during a gc sync point i.e. need to be serialized and non-concurrant. */ void VirtualCallStubManager::Reclaim() diff --git a/src/mono/dlls/mscordbi/CMakeLists.txt b/src/mono/dlls/mscordbi/CMakeLists.txt index 52b4e62ad3412..e39aeac5ac6d2 100644 --- a/src/mono/dlls/mscordbi/CMakeLists.txt +++ b/src/mono/dlls/mscordbi/CMakeLists.txt @@ -110,7 +110,6 @@ if (CLR_CMAKE_HOST_UNIX) add_subdirectory(${CLR_DIR}/pal pal) include_directories(${CLR_DIR}/pal/inc/rt/cpp) - add_compile_options(-nostdinc) endif (CLR_CMAKE_HOST_UNIX) if (CLR_CMAKE_HOST_UNIX) From 309e3307fdfd066759271b8285e5014e2f975fd1 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 12 Feb 2024 17:17:52 -0800 Subject: [PATCH 12/60] Various changes to fix the Windows build. 
--- src/coreclr/gc/env/common.h | 2 + src/coreclr/gc/vxsort/defs.h | 45 ++----------------- src/coreclr/gc/vxsort/machine_traits.avx2.h | 2 +- src/coreclr/gc/vxsort/machine_traits.avx512.h | 2 +- src/coreclr/gc/vxsort/packer.h | 4 +- .../bitonic_sort.AVX2.int32_t.generated.h | 2 +- .../bitonic_sort.AVX2.int64_t.generated.h | 2 +- .../bitonic_sort.AVX512.int32_t.generated.h | 2 +- .../bitonic_sort.AVX512.int64_t.generated.h | 2 +- .../gc/vxsort/smallsort/codegen/avx2.py | 2 +- .../gc/vxsort/smallsort/codegen/avx512.py | 2 +- src/coreclr/gc/vxsort/vxsort.h | 6 +-- src/coreclr/ildasm/ildasmpch.h | 4 ++ src/coreclr/inc/contract.inl | 6 +-- src/coreclr/inc/gcinfotypes.h | 12 ++--- src/coreclr/jit/assertionprop.cpp | 2 +- src/coreclr/jit/compiler.cpp | 6 +-- src/coreclr/jit/compiler.h | 2 +- src/coreclr/jit/compiler.hpp | 2 +- src/coreclr/jit/emit.cpp | 4 +- src/coreclr/jit/fgdiagnostic.cpp | 2 +- src/coreclr/jit/gentree.cpp | 2 +- src/coreclr/jit/hashbv.cpp | 2 +- src/coreclr/jit/jiteh.cpp | 2 +- src/coreclr/jit/jitpch.h | 1 + src/coreclr/jit/lclvars.cpp | 2 +- src/coreclr/jit/morph.cpp | 6 +-- src/coreclr/jit/targetamd64.h | 2 +- src/coreclr/jit/targetarm.h | 2 +- src/coreclr/jit/targetarm64.h | 2 +- src/coreclr/jit/targetloongarch64.h | 2 +- src/coreclr/jit/targetriscv64.h | 2 +- src/coreclr/jit/targetx86.h | 2 +- .../superpmi/superpmi-shared/standardpch.h | 1 + src/coreclr/vm/cgensys.h | 5 --- 35 files changed, 56 insertions(+), 90 deletions(-) diff --git a/src/coreclr/gc/env/common.h b/src/coreclr/gc/env/common.h index 6c612f672eab0..29216345db188 100644 --- a/src/coreclr/gc/env/common.h +++ b/src/coreclr/gc/env/common.h @@ -25,6 +25,8 @@ #include #include +#include +#include #ifdef TARGET_UNIX #include diff --git a/src/coreclr/gc/vxsort/defs.h b/src/coreclr/gc/vxsort/defs.h index 0cc72b23fa24e..13c02d97d4b15 100644 --- a/src/coreclr/gc/vxsort/defs.h +++ b/src/coreclr/gc/vxsort/defs.h @@ -45,45 +45,8 @@ #define NOINLINE __attribute__((noinline)) #endif -namespace std { -template -class numeric_limits { - public: - static constexpr _Ty Max() { static_assert(sizeof(_Ty) != sizeof(_Ty), "func must be specialized!"); return _Ty(); } - static constexpr _Ty Min() { static_assert(sizeof(_Ty) != sizeof(_Ty), "func must be specialized!"); return _Ty(); } -}; - -template <> -class numeric_limits { -public: - static constexpr int32_t Max() { return 0x7fffffff; } - static constexpr int32_t Min() { return -0x7fffffff - 1; } -}; - -template <> -class numeric_limits { -public: - static constexpr uint32_t Max() { return 0xffffffff; } - static constexpr uint32_t Min() { return 0; } -}; - -template <> -class numeric_limits { - public: - static constexpr int64_t Max() { return 0x7fffffffffffffffi64; } - - static constexpr int64_t Min() { return -0x7fffffffffffffffi64 - 1; } -}; -} // namespace std - -#ifndef max -template -T max(T a, T b) { - if (a > b) - return a; - else - return b; -} -#endif - +#undef max +#undef min +using std::max; +using std::min; #endif // VXSORT_DEFS_H diff --git a/src/coreclr/gc/vxsort/machine_traits.avx2.h b/src/coreclr/gc/vxsort/machine_traits.avx2.h index ccadc2a9a27a5..00830b0df192b 100644 --- a/src/coreclr/gc/vxsort/machine_traits.avx2.h +++ b/src/coreclr/gc/vxsort/machine_traits.avx2.h @@ -123,7 +123,7 @@ class vxsort_machine_traits { template static constexpr bool can_pack(T span) { - const auto PACK_LIMIT = (((TU) std::numeric_limits::Max() + 1)) << Shift; + const auto PACK_LIMIT = (((TU) std::numeric_limits::max() + 1)) << Shift; return ((TU) span) < PACK_LIMIT; } diff --git 
a/src/coreclr/gc/vxsort/machine_traits.avx512.h b/src/coreclr/gc/vxsort/machine_traits.avx512.h index 8df8660aa13a7..0fc0e8a4bc7d0 100644 --- a/src/coreclr/gc/vxsort/machine_traits.avx512.h +++ b/src/coreclr/gc/vxsort/machine_traits.avx512.h @@ -92,7 +92,7 @@ class vxsort_machine_traits { template static constexpr bool can_pack(T span) { - const auto PACK_LIMIT = (((TU) std::numeric_limits::Max() + 1)) << Shift; + const auto PACK_LIMIT = (((TU) std::numeric_limits::max() + 1)) << Shift; return ((TU) span) < PACK_LIMIT; } diff --git a/src/coreclr/gc/vxsort/packer.h b/src/coreclr/gc/vxsort/packer.h index be50b7d5fb41b..1707258f7ecab 100644 --- a/src/coreclr/gc/vxsort/packer.h +++ b/src/coreclr/gc/vxsort/packer.h @@ -56,7 +56,7 @@ class packer { public: static void pack(TFrom *mem, size_t len, TFrom base) { - TFrom offset = MT::template shift_n_sub(base, (TFrom) std::numeric_limits::Min()); + TFrom offset = MT::template shift_n_sub(base, (TFrom) std::numeric_limits::min()); auto baseVec = MT::broadcast(offset); auto pre_aligned_mem = reinterpret_cast(reinterpret_cast(mem) & ~ALIGN_MASK); @@ -156,7 +156,7 @@ class packer { static void unpack(TTo *mem, size_t len, TFrom base) { - TFrom offset = MT::template shift_n_sub(base, (TFrom) std::numeric_limits::Min()); + TFrom offset = MT::template shift_n_sub(base, (TFrom) std::numeric_limits::min()); auto baseVec = MT::broadcast(offset); auto mem_read = mem + len; diff --git a/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h b/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h index c3f141c1046bb..c805a425fbeae 100644 --- a/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h +++ b/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h @@ -39,7 +39,7 @@ extern "C" const uint8_t mask_table_8[M8_SIZE]; template<> struct bitonic { static const int N = 8; - static constexpr int32_t MAX = std::numeric_limits::Max(); + static constexpr int32_t MAX = std::numeric_limits::max(); public: static INLINE void sort_01v_ascending(__m256i& d01) { diff --git a/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h b/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h index a012161c99dd9..c3403bbe31aaa 100644 --- a/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h +++ b/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h @@ -39,7 +39,7 @@ extern "C" const uint8_t mask_table_8[M8_SIZE]; template<> struct bitonic { static const int N = 4; - static constexpr int64_t MAX = std::numeric_limits::Max(); + static constexpr int64_t MAX = std::numeric_limits::max(); public: static INLINE void sort_01v_ascending(__m256i& d01) { diff --git a/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h b/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h index 1326c8fee5e5c..eb9ee4d275926 100644 --- a/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h +++ b/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h @@ -36,7 +36,7 @@ namespace vxsort { namespace smallsort { template<> struct bitonic { static const int N = 16; - static constexpr int32_t MAX = std::numeric_limits::Max(); + static constexpr int32_t MAX = std::numeric_limits::max(); public: static INLINE void sort_01v_ascending(__m512i& d01) { diff --git a/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h b/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h index 
ac44992fe2392..98fe507b73430 100644 --- a/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h +++ b/src/coreclr/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h @@ -36,7 +36,7 @@ namespace vxsort { namespace smallsort { template<> struct bitonic { static const int N = 8; - static constexpr int64_t MAX = std::numeric_limits::Max(); + static constexpr int64_t MAX = std::numeric_limits::max(); public: static INLINE void sort_01v_ascending(__m512i& d01) { diff --git a/src/coreclr/gc/vxsort/smallsort/codegen/avx2.py b/src/coreclr/gc/vxsort/smallsort/codegen/avx2.py index 9944cbbc8968e..b9c39770d549c 100644 --- a/src/coreclr/gc/vxsort/smallsort/codegen/avx2.py +++ b/src/coreclr/gc/vxsort/smallsort/codegen/avx2.py @@ -303,7 +303,7 @@ def generate_prologue(self, f): template<> struct bitonic<{t}, AVX2> {{ static const int N = {self.vector_size()}; - static constexpr {t} MAX = std::numeric_limits<{t}>::Max(); + static constexpr {t} MAX = std::numeric_limits<{t}>::max(); public: """ print(s, file=f) diff --git a/src/coreclr/gc/vxsort/smallsort/codegen/avx512.py b/src/coreclr/gc/vxsort/smallsort/codegen/avx512.py index e259027c5636b..9b417723c6e3b 100644 --- a/src/coreclr/gc/vxsort/smallsort/codegen/avx512.py +++ b/src/coreclr/gc/vxsort/smallsort/codegen/avx512.py @@ -299,7 +299,7 @@ def generate_prologue(self, f): namespace smallsort {{ template<> struct bitonic<{t}, AVX512> {{ static const int N = {self.vector_size()}; - static constexpr {t} MAX = std::numeric_limits<{t}>::Max(); + static constexpr {t} MAX = std::numeric_limits<{t}>::max(); public: """ print(s, file=f) diff --git a/src/coreclr/gc/vxsort/vxsort.h b/src/coreclr/gc/vxsort/vxsort.h index b8eaac51f4213..79839fb8b2c06 100644 --- a/src/coreclr/gc/vxsort/vxsort.h +++ b/src/coreclr/gc/vxsort/vxsort.h @@ -374,7 +374,7 @@ class vxsort { auto pivot = *right; // We do this here just in case we need to pre-align to the right // We end up - *right = std::numeric_limits::Max(); + *right = std::numeric_limits::max(); // Broadcast the selected pivot const TV P = MT::broadcast(pivot); @@ -617,8 +617,8 @@ class vxsort { * larger-than than all values contained within the provided array. 
*/ NOINLINE void sort(T* left, T* right, - T left_hint = std::numeric_limits::Min(), - T right_hint = std::numeric_limits::Max()) + T left_hint = std::numeric_limits::min(), + T right_hint = std::numeric_limits::max()) { // init_isa_detection(); diff --git a/src/coreclr/ildasm/ildasmpch.h b/src/coreclr/ildasm/ildasmpch.h index 0717099eb3cc5..219e27f0ba182 100644 --- a/src/coreclr/ildasm/ildasmpch.h +++ b/src/coreclr/ildasm/ildasmpch.h @@ -6,6 +6,7 @@ #define OEMRESOURCE #define INITGUID +#define NOMINMAX #include #include @@ -14,6 +15,9 @@ #include #include +using std::min; +using std::max; + #ifndef Debug_ReportError #define Debug_ReportError(strMessage) #endif diff --git a/src/coreclr/inc/contract.inl b/src/coreclr/inc/contract.inl index d614f84e74f2a..1578e87161df4 100644 --- a/src/coreclr/inc/contract.inl +++ b/src/coreclr/inc/contract.inl @@ -352,7 +352,7 @@ inline void DbgStateLockData::LockTaken(DbgStateLockType dbgStateLockType, // Remember as many of these new entrances in m_rgTakenLockInfos as we can for (UINT i = cCombinedLocks; - i < min (ARRAY_SIZE(m_rgTakenLockInfos), cCombinedLocks + cTakes); + i < min (ARRAY_SIZE(m_rgTakenLockInfos), (size_t)(cCombinedLocks + cTakes)); i++) { m_rgTakenLockInfos[i].m_pvLock = pvLock; @@ -377,7 +377,7 @@ inline void DbgStateLockData::LockReleased(DbgStateLockType dbgStateLockType, UI // If lock count is within range of our m_rgTakenLockInfos buffer size, then // make sure we're releasing locks in reverse order of how we took them for (UINT i = cCombinedLocks - cReleases; - i < min (ARRAY_SIZE(m_rgTakenLockInfos), cCombinedLocks); + i < min (ARRAY_SIZE(m_rgTakenLockInfos), (size_t)cCombinedLocks); i++) { if (m_rgTakenLockInfos[i].m_pvLock != pvLock) @@ -443,7 +443,7 @@ inline BOOL DbgStateLockState::IsLockRetaken(void * pvLock) // m_cLocksEnteringCannotRetakeLock records the number of locks that were taken // when CANNOT_RETAKE_LOCK contract was constructed. 
for (UINT i = 0; - i < min(ARRAY_SIZE(m_pLockData->m_rgTakenLockInfos), m_cLocksEnteringCannotRetakeLock); + i < min(ARRAY_SIZE(m_pLockData->m_rgTakenLockInfos), (size_t)m_cLocksEnteringCannotRetakeLock); ++i) { if (m_pLockData->m_rgTakenLockInfos[i].m_pvLock == pvLock) diff --git a/src/coreclr/inc/gcinfotypes.h b/src/coreclr/inc/gcinfotypes.h index 9270d4441d7e7..7457063d47eb3 100644 --- a/src/coreclr/inc/gcinfotypes.h +++ b/src/coreclr/inc/gcinfotypes.h @@ -608,7 +608,7 @@ void FASTCALL decodeCallPattern(int pattern, #if defined(TARGET_AMD64) #ifndef TARGET_POINTER_SIZE -#define TARGET_POINTER_SIZE 8u // equal to sizeof(void*) and the managed pointer size in bytes for this target +#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target #endif #define NUM_NORM_CODE_OFFSETS_PER_CHUNK (64) #define NUM_NORM_CODE_OFFSETS_PER_CHUNK_LOG2 (6) @@ -664,7 +664,7 @@ void FASTCALL decodeCallPattern(int pattern, #elif defined(TARGET_ARM) #ifndef TARGET_POINTER_SIZE -#define TARGET_POINTER_SIZE 4u // equal to sizeof(void*) and the managed pointer size in bytes for this target +#define TARGET_POINTER_SIZE 4 // equal to sizeof(void*) and the managed pointer size in bytes for this target #endif #define NUM_NORM_CODE_OFFSETS_PER_CHUNK (64) #define NUM_NORM_CODE_OFFSETS_PER_CHUNK_LOG2 (6) @@ -722,7 +722,7 @@ void FASTCALL decodeCallPattern(int pattern, #elif defined(TARGET_ARM64) #ifndef TARGET_POINTER_SIZE -#define TARGET_POINTER_SIZE 8u // equal to sizeof(void*) and the managed pointer size in bytes for this target +#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target #endif #define NUM_NORM_CODE_OFFSETS_PER_CHUNK (64) #define NUM_NORM_CODE_OFFSETS_PER_CHUNK_LOG2 (6) @@ -777,7 +777,7 @@ void FASTCALL decodeCallPattern(int pattern, #elif defined(TARGET_LOONGARCH64) #ifndef TARGET_POINTER_SIZE -#define TARGET_POINTER_SIZE 8u // equal to sizeof(void*) and the managed pointer size in bytes for this target +#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target #endif #define NUM_NORM_CODE_OFFSETS_PER_CHUNK (64) #define NUM_NORM_CODE_OFFSETS_PER_CHUNK_LOG2 (6) @@ -832,7 +832,7 @@ void FASTCALL decodeCallPattern(int pattern, #elif defined(TARGET_RISCV64) #ifndef TARGET_POINTER_SIZE -#define TARGET_POINTER_SIZE 8u // equal to sizeof(void*) and the managed pointer size in bytes for this target +#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target #endif #define NUM_NORM_CODE_OFFSETS_PER_CHUNK (64) #define NUM_NORM_CODE_OFFSETS_PER_CHUNK_LOG2 (6) @@ -896,7 +896,7 @@ PORTABILITY_WARNING("Please specialize these definitions for your platform!") #endif #ifndef TARGET_POINTER_SIZE -#define TARGET_POINTER_SIZE 4u // equal to sizeof(void*) and the managed pointer size in bytes for this target +#define TARGET_POINTER_SIZE 4 // equal to sizeof(void*) and the managed pointer size in bytes for this target #endif #define NUM_NORM_CODE_OFFSETS_PER_CHUNK (64) #define NUM_NORM_CODE_OFFSETS_PER_CHUNK_LOG2 (6) diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp index fb317faea42c1..103ec639205e7 100644 --- a/src/coreclr/jit/assertionprop.cpp +++ b/src/coreclr/jit/assertionprop.cpp @@ -673,7 +673,7 @@ void Compiler::optAssertionInit(bool isLocalProp) // Local assertion prop keeps mappings from each local var to the assertions about that var. 
// optAssertionDep = - new (this, CMK_AssertionProp) JitExpandArray(getAllocator(CMK_AssertionProp), max(1, lvaCount)); + new (this, CMK_AssertionProp) JitExpandArray(getAllocator(CMK_AssertionProp), max(1u, lvaCount)); if (optCrossBlockLocalAssertionProp) { diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index c351419687fbf..42a5324aba3d0 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -5508,7 +5508,7 @@ void Compiler::SplitTreesRandomly() rng.Init(info.compMethodHash() ^ 0x077cc4d4); // Splitting creates a lot of new locals. Set a limit on how many we end up creating here. - unsigned maxLvaCount = max(lvaCount * 2, 50000); + unsigned maxLvaCount = max(lvaCount * 2, 50000u); for (BasicBlock* block : Blocks()) { @@ -5570,7 +5570,7 @@ void Compiler::SplitTreesRandomly() void Compiler::SplitTreesRemoveCommas() { // Splitting creates a lot of new locals. Set a limit on how many we end up creating here. - unsigned maxLvaCount = max(lvaCount * 2, 50000); + unsigned maxLvaCount = max(lvaCount * 2, 50000u); for (BasicBlock* block : Blocks()) { @@ -7366,7 +7366,7 @@ void Compiler::compInitVarScopeMap() compVarScopeMap = new (getAllocator()) VarNumToScopeDscMap(getAllocator()); // 599 prime to limit huge allocations; for ex: duplicated scopes on single var. - compVarScopeMap->Reallocate(min(info.compVarScopesCount, 599)); + compVarScopeMap->Reallocate(min(info.compVarScopesCount, 599u)); for (unsigned i = 0; i < info.compVarScopesCount; ++i) { diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index ab93a8044bb54..a328702e2469f 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -345,7 +345,7 @@ class SsaDefArray void GrowArray(CompAllocator alloc) { unsigned oldSize = m_arraySize; - unsigned newSize = max(2, oldSize * 2); + unsigned newSize = max(2u, oldSize * 2); T* newArray = alloc.allocate(newSize); diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 59e53baeb19f8..6482ba2d729ff 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -2618,7 +2618,7 @@ inline #else int outGoingArgSpaceSize = 0; #endif - varOffset = outGoingArgSpaceSize + max(-varNum * TARGET_POINTER_SIZE, (int)lvaGetMaxSpillTempSize()); + varOffset = outGoingArgSpaceSize + max(-varNum * (int)TARGET_POINTER_SIZE, (int)lvaGetMaxSpillTempSize()); } else { diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp index d3ac84e7919a1..3f578e4466bd4 100644 --- a/src/coreclr/jit/emit.cpp +++ b/src/coreclr/jit/emit.cpp @@ -6243,10 +6243,10 @@ void emitter::emitLoopAlignAdjustments() { #if defined(TARGET_XARCH) - unsigned newPadding = min(paddingToAdj, MAX_ENCODED_SIZE); + unsigned newPadding = min(paddingToAdj, (unsigned)MAX_ENCODED_SIZE); alignInstrToAdj->idCodeSize(newPadding); #elif defined(TARGET_ARM64) - unsigned newPadding = min(paddingToAdj, INSTR_ENCODED_SIZE); + unsigned newPadding = min(paddingToAdj, (unsigned)INSTR_ENCODED_SIZE); if (newPadding == 0) { alignInstrToAdj->idInsOpt(INS_OPTS_NONE); diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index 65248487fb118..be92b4c26bbb0 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -1951,7 +1951,7 @@ void Compiler::fgTableDispBasicBlock(const BasicBlock* block, else { _snprintf_s(buffer, ArrLen(buffer), ArrLen(buffer), FMT_BB, b->bbNum); - printedBlockWidth += 2 /* BB */ + max(CountDigits(b->bbNum), 2); + printedBlockWidth += 2 /* BB */ + 
max(CountDigits(b->bbNum), 2u); } return buffer; diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index ad68efef307b7..bd07f23a3f260 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -26953,7 +26953,7 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, assert(varTypeIsValidHfaType(hfaType)); // Note that the retail build issues a warning about a potential divsion by zero without this "max", - unsigned elemSize = max(1, genTypeSize(hfaType)); + unsigned elemSize = max(1u, genTypeSize(hfaType)); // The size of this struct should be evenly divisible by elemSize assert((structSize % elemSize) == 0); diff --git a/src/coreclr/jit/hashbv.cpp b/src/coreclr/jit/hashbv.cpp index 854215235261d..6a8667af1d754 100644 --- a/src/coreclr/jit/hashbv.cpp +++ b/src/coreclr/jit/hashbv.cpp @@ -824,7 +824,7 @@ void hashBv::setAll(indexType numToSet) for (unsigned int i = 0; i < numToSet; i += BITS_PER_NODE) { hashBvNode* node = getOrAddNodeForIndex(i); - indexType bits_to_set = min(BITS_PER_NODE, numToSet - i); + indexType bits_to_set = min((unsigned int)BITS_PER_NODE, numToSet - i); node->setLowest(bits_to_set); } } diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index a4eacd9069db4..cbeaddec38714 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -1554,7 +1554,7 @@ EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum) // Double the table size. For stress, we could use +1. Note that if the table isn't allocated // yet, such as when we add an EH region for synchronized methods that don't already have one, // we start at zero, so we need to make sure the new table has at least one entry. - unsigned newHndBBtabAllocCount = max(1, compHndBBtabAllocCount * 2); + unsigned newHndBBtabAllocCount = max(1u, compHndBBtabAllocCount * 2); noway_assert(compHndBBtabAllocCount < newHndBBtabAllocCount); // check for overflow if (newHndBBtabAllocCount > MAX_XCPTN_INDEX) diff --git a/src/coreclr/jit/jitpch.h b/src/coreclr/jit/jitpch.h index 63f12133f61bf..0717dba8a7260 100644 --- a/src/coreclr/jit/jitpch.h +++ b/src/coreclr/jit/jitpch.h @@ -1,6 +1,7 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. +#define NOMINMAX #include #include #include diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 55787580cc1df..0d7bf563291e6 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -1747,7 +1747,7 @@ bool Compiler::StructPromotionHelper::CanPromoteStructType(CORINFO_CLASS_HANDLE #if defined(FEATURE_SIMD) // getMaxVectorByteLength() represents the size of the largest primitive type that we can struct promote. const unsigned maxSize = - MAX_NumOfFieldsInPromotableStruct * max(compiler->getMaxVectorByteLength(), sizeof(double)); + MAX_NumOfFieldsInPromotableStruct * max(compiler->getMaxVectorByteLength(), (uint32_t)sizeof(double)); #else // !FEATURE_SIMD // sizeof(double) represents the size of the largest primitive type that we can struct promote. 
const unsigned maxSize = MAX_NumOfFieldsInPromotableStruct * sizeof(double); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 0c44bbad973ea..a58858f5dba63 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -2956,7 +2956,7 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call #ifdef WINDOWS_AMD64_ABI // Whenever we pass an integer register argument // we skip the corresponding floating point register argument - intArgRegNum = min(intArgRegNum + size, MAX_REG_ARG); + intArgRegNum = min(intArgRegNum + size, (unsigned)MAX_REG_ARG); #endif // WINDOWS_AMD64_ABI // No supported architecture supports partial structs using float registers. assert(fltArgRegNum <= MAX_FLOAT_REG_ARG); @@ -2967,7 +2967,7 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call intArgRegNum += size; #ifdef WINDOWS_AMD64_ABI - fltArgRegNum = min(fltArgRegNum + size, MAX_FLOAT_REG_ARG); + fltArgRegNum = min(fltArgRegNum + size, (unsigned)MAX_FLOAT_REG_ARG); #endif // WINDOWS_AMD64_ABI } } @@ -3041,7 +3041,7 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call unsigned CallArgs::OutgoingArgsStackSize() const { unsigned aligned = Compiler::GetOutgoingArgByteSize(m_nextStackByteOffset); - return max(aligned, MIN_ARG_AREA_FOR_CALL); + return max(aligned, (unsigned)MIN_ARG_AREA_FOR_CALL); } //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/targetamd64.h b/src/coreclr/jit/targetamd64.h index a6e8606b9500d..4abe71984b57c 100644 --- a/src/coreclr/jit/targetamd64.h +++ b/src/coreclr/jit/targetamd64.h @@ -66,7 +66,7 @@ #define NOGC_WRITE_BARRIERS 0 // We DO-NOT have specialized WriteBarrier JIT Helpers that DO-NOT trash the RBM_CALLEE_TRASH registers #define USER_ARGS_COME_LAST 1 #define EMIT_TRACK_STACK_DEPTH 1 - #define TARGET_POINTER_SIZE 8u // equal to sizeof(void*) and the managed pointer size in bytes for this target + #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. #ifdef UNIX_AMD64_ABI diff --git a/src/coreclr/jit/targetarm.h b/src/coreclr/jit/targetarm.h index b07523db62d36..ac9d72cab31f6 100644 --- a/src/coreclr/jit/targetarm.h +++ b/src/coreclr/jit/targetarm.h @@ -38,7 +38,7 @@ #define USER_ARGS_COME_LAST 1 #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really // need to track stack depth, but this is currently necessary to get GC information reported at call sites. - #define TARGET_POINTER_SIZE 4u // equal to sizeof(void*) and the managed pointer size in bytes for this target + #define TARGET_POINTER_SIZE 4 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. 
#define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods diff --git a/src/coreclr/jit/targetarm64.h b/src/coreclr/jit/targetarm64.h index 7ed7e66421be7..3646ecb4407bf 100644 --- a/src/coreclr/jit/targetarm64.h +++ b/src/coreclr/jit/targetarm64.h @@ -40,7 +40,7 @@ #define USER_ARGS_COME_LAST 1 #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really // need to track stack depth, but this is currently necessary to get GC information reported at call sites. - #define TARGET_POINTER_SIZE 8u // equal to sizeof(void*) and the managed pointer size in bytes for this target + #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. #define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods diff --git a/src/coreclr/jit/targetloongarch64.h b/src/coreclr/jit/targetloongarch64.h index fa2a7fc93dae5..736fd1406c304 100644 --- a/src/coreclr/jit/targetloongarch64.h +++ b/src/coreclr/jit/targetloongarch64.h @@ -45,7 +45,7 @@ #define USER_ARGS_COME_LAST 1 #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really // need to track stack depth, but this is currently necessary to get GC information reported at call sites. - #define TARGET_POINTER_SIZE 8u // equal to sizeof(void*) and the managed pointer size in bytes for this target + #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. #define FEATURE_EH_FUNCLETS 1 #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. diff --git a/src/coreclr/jit/targetriscv64.h b/src/coreclr/jit/targetriscv64.h index f3d4af7b13bb6..9cf0185a56935 100644 --- a/src/coreclr/jit/targetriscv64.h +++ b/src/coreclr/jit/targetriscv64.h @@ -42,7 +42,7 @@ #define USER_ARGS_COME_LAST 1 #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really // need to track stack depth, but this is currently necessary to get GC information reported at call sites. - #define TARGET_POINTER_SIZE 8u // equal to sizeof(void*) and the managed pointer size in bytes for this target + #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses. 
#define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods diff --git a/src/coreclr/jit/targetx86.h b/src/coreclr/jit/targetx86.h index b7add06df05bc..60b2f7793f435 100644 --- a/src/coreclr/jit/targetx86.h +++ b/src/coreclr/jit/targetx86.h @@ -49,7 +49,7 @@ #endif #define USER_ARGS_COME_LAST 0 #define EMIT_TRACK_STACK_DEPTH 1 - #define TARGET_POINTER_SIZE 4u // equal to sizeof(void*) and the managed pointer size in bytes for this + #define TARGET_POINTER_SIZE 4 // equal to sizeof(void*) and the managed pointer size in bytes for this // target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, // filter-handler, fault) and directly execute 'finally' clauses. diff --git a/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h b/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h index f9e4bb561f0a8..4b23750b99431 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h +++ b/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h @@ -12,6 +12,7 @@ #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif // WIN32_LEAN_AND_MEAN +#define NOMINMAX #include #ifdef INTERNAL_BUILD diff --git a/src/coreclr/vm/cgensys.h b/src/coreclr/vm/cgensys.h index 98b1cbc94781d..a3accc91a9973 100644 --- a/src/coreclr/vm/cgensys.h +++ b/src/coreclr/vm/cgensys.h @@ -84,11 +84,6 @@ BOOL GetAnyThunkTarget (T_CONTEXT *pctx, TADDR *pTarget, TADDR *pTargetMethodDes #endif // DACCESS_COMPILE -#if defined(TARGET_AMD64) -extern "C" DWORD _mm_getcsr(); -extern "C" void _mm_setcsr(DWORD); -#endif - // // ResetProcessorStateHolder saves/restores processor state around calls to // CoreLib during exception handling. From 108f83d0530149dcb03622239634a680bc7e6a7b Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 13 Feb 2024 01:24:00 +0000 Subject: [PATCH 13/60] Remove min/max macros from NativeAOT and fix other build failures on non-Windows. --- src/coreclr/gc/gc.cpp | 4 +--- src/coreclr/nativeaot/Runtime/CommonMacros.h | 10 ---------- src/coreclr/vm/methodtable.cpp | 2 +- src/coreclr/vm/methodtablebuilder.cpp | 6 +++--- 4 files changed, 5 insertions(+), 17 deletions(-) diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index 279b5c57efbc0..9931d537cf1bb 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -51,9 +51,7 @@ class gc_rand uint64_t gc_rand::x = 0; -// NativeAOT defines max/min as macros. -// CoreCLR does not. -// Define them if they aren't already available. +// Define min/max as macros #ifndef min #define min(_a, _b) ((_a) < (_b) ? (_a) : (_b)) #endif diff --git a/src/coreclr/nativeaot/Runtime/CommonMacros.h b/src/coreclr/nativeaot/Runtime/CommonMacros.h index 9c762216dd7f1..e698b81389e96 100644 --- a/src/coreclr/nativeaot/Runtime/CommonMacros.h +++ b/src/coreclr/nativeaot/Runtime/CommonMacros.h @@ -89,16 +89,6 @@ inline bool IS_ALIGNED(T* val, uintptr_t alignment); #define ZeroMemory(_dst, _size) memset((_dst), 0, (_size)) #endif -//------------------------------------------------------------------------------------------------- -// min/max - -#ifndef min -#define min(_a, _b) ((_a) < (_b) ? (_a) : (_b)) -#endif -#ifndef max -#define max(_a, _b) ((_a) < (_b) ? 
(_b) : (_a)) -#endif - #endif // !DACCESS_COMPILE //------------------------------------------------------------------------------------------------- diff --git a/src/coreclr/vm/methodtable.cpp b/src/coreclr/vm/methodtable.cpp index 41307b3d1a8f2..315f0e2b9f51b 100644 --- a/src/coreclr/vm/methodtable.cpp +++ b/src/coreclr/vm/methodtable.cpp @@ -9229,7 +9229,7 @@ int MethodTable::GetFieldAlignmentRequirement() { return GetClass()->GetOverriddenFieldAlignmentRequirement(); } - return min(GetNumInstanceFieldBytes(), TARGET_POINTER_SIZE); + return min((int)GetNumInstanceFieldBytes(), TARGET_POINTER_SIZE); } UINT32 MethodTable::GetNativeSize() diff --git a/src/coreclr/vm/methodtablebuilder.cpp b/src/coreclr/vm/methodtablebuilder.cpp index 02c61ca66800f..56d2c2811a85d 100644 --- a/src/coreclr/vm/methodtablebuilder.cpp +++ b/src/coreclr/vm/methodtablebuilder.cpp @@ -8356,7 +8356,7 @@ VOID MethodTableBuilder::PlaceInstanceFields(MethodTable ** pByValueClassCach // this field type has GC pointers in it, which need to be pointer-size aligned // so do this if it has not been done already dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, TARGET_POINTER_SIZE); - largestAlignmentRequirement = max(largestAlignmentRequirement, TARGET_POINTER_SIZE); + largestAlignmentRequirement = max(largestAlignmentRequirement, (unsigned int)TARGET_POINTER_SIZE); containsGCPointers = true; } else @@ -8385,7 +8385,7 @@ VOID MethodTableBuilder::PlaceInstanceFields(MethodTable ** pByValueClassCach // non-value-type fields always require pointer alignment // This does not account for types that are marked IsAlign8Candidate due to 8-byte fields // but that is explicitly handled when we calculate the final alignment for the type. - largestAlignmentRequirement = max(largestAlignmentRequirement, TARGET_POINTER_SIZE); + largestAlignmentRequirement = max(largestAlignmentRequirement, (unsigned int)TARGET_POINTER_SIZE); if (!pFieldDescList[i].IsObjRef()) { @@ -8427,7 +8427,7 @@ VOID MethodTableBuilder::PlaceInstanceFields(MethodTable ** pByValueClassCach minAlign *= 2; } - if (minAlign != min(dwNumInstanceFieldBytes, TARGET_POINTER_SIZE)) + if (minAlign != min(dwNumInstanceFieldBytes, (unsigned int)TARGET_POINTER_SIZE)) { EnsureOptionalFieldsAreAllocated(GetHalfBakedClass(), m_pAllocMemTracker, GetLoaderAllocator()->GetLowFrequencyHeap()); GetHalfBakedClass()->GetOptionalFields()->m_requiredFieldAlignment = (BYTE)minAlign; From 16a04e086bf080f1c533dbf5cb187e9efef17abd Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 13 Feb 2024 13:44:57 -0800 Subject: [PATCH 14/60] Remove duplicate definitions, fix callconv build failure. 
Disable warning on GCC that catches more cases than Clang, at least for now --- eng/native/configurecompiler.cmake | 1 - src/coreclr/pal/inc/pal.h | 10 +--------- src/coreclr/utilcode/clrconfig.cpp | 2 +- 3 files changed, 2 insertions(+), 11 deletions(-) diff --git a/eng/native/configurecompiler.cmake b/eng/native/configurecompiler.cmake index 3779adfc0b258..4d23294367b7d 100644 --- a/eng/native/configurecompiler.cmake +++ b/eng/native/configurecompiler.cmake @@ -29,7 +29,6 @@ if (CLR_CMAKE_HOST_UNIX) add_compile_options(-Wno-null-arithmetic) add_compile_options(-glldb) else() - add_compile_options($<$:-Werror=conversion-null>) add_compile_options(-g) endif() endif() diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index bd83ffc0eea3d..4fdb01e03b10d 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -3882,21 +3882,13 @@ PAL_GetCurrentThreadAffinitySet(SIZE_T size, UINT_PTR* data); /* For backwards compatibility */ #define _WConst_return _CONST_RETURN -/* Locale categories */ -#define LC_ALL 0 -#define LC_COLLATE 1 -#define LC_CTYPE 2 -#define LC_MONETARY 3 -#define LC_NUMERIC 4 -#define LC_TIME 5 - /* _TRUNCATE */ #if !defined(_TRUNCATE) #define _TRUNCATE ((size_t)-1) #endif // errno_t is only defined when the Secure CRT Extensions library is available (which no standard library that we build with implements anyway) -typedef error_t errno_t; +typedef int errno_t; PALIMPORT DLLEXPORT errno_t __cdecl memcpy_s(void *, size_t, const void *, size_t) THROW_DECL; PALIMPORT errno_t __cdecl memmove_s(void *, size_t, const void *, size_t); diff --git a/src/coreclr/utilcode/clrconfig.cpp b/src/coreclr/utilcode/clrconfig.cpp index dff8a9b704ed1..b531018eb08ae 100644 --- a/src/coreclr/utilcode/clrconfig.cpp +++ b/src/coreclr/utilcode/clrconfig.cpp @@ -202,7 +202,7 @@ namespace SString nameToConvert(name); #ifdef HOST_WINDOWS - CLRConfigNoCache nonCache = CLRConfigNoCache::Get(nameToConvert.GetUTF8(), noPrefix, &getenv); + CLRConfigNoCache nonCache = CLRConfigNoCache::Get(nameToConvert.GetUTF8(), noPrefix); #else CLRConfigNoCache nonCache = CLRConfigNoCache::Get(nameToConvert.GetUTF8(), noPrefix, &PAL_getenv); #endif From d8afe1d4ffc434b856647075c59bc9f0c6adf024 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Fri, 16 Feb 2024 23:57:03 +0000 Subject: [PATCH 15/60] Force C++ linkage when including the C++ standard headers (some files in the build include the PAL headers in an extern C block) --- src/coreclr/pal/inc/pal.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index 4fdb01e03b10d..6dbcbe04cb7b8 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -51,7 +51,12 @@ Module Name: #include #ifdef __cplusplus +extern "C++" +{ + #include + +} #endif #ifdef __cplusplus From e8c8846d3a85caca8138f37b4bc716a380272277 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Fri, 16 Feb 2024 23:57:09 +0000 Subject: [PATCH 16/60] Add missing includes/usings. 
--- src/coreclr/pal/src/thread/process.cpp | 1 + src/coreclr/vm/jithelpers.cpp | 3 +++ 2 files changed, 4 insertions(+) diff --git a/src/coreclr/pal/src/thread/process.cpp b/src/coreclr/pal/src/thread/process.cpp index 901a91961d486..69121faa2b75b 100644 --- a/src/coreclr/pal/src/thread/process.cpp +++ b/src/coreclr/pal/src/thread/process.cpp @@ -85,6 +85,7 @@ SET_DEFAULT_DEBUG_CHANNEL(PROCESS); // some headers have code with asserts, so d #ifdef __APPLE__ #include +#include #include #include #include diff --git a/src/coreclr/vm/jithelpers.cpp b/src/coreclr/vm/jithelpers.cpp index 1cbe520a48de9..23930301cb7ef 100644 --- a/src/coreclr/vm/jithelpers.cpp +++ b/src/coreclr/vm/jithelpers.cpp @@ -58,6 +58,9 @@ #include "exinfo.h" +using std::isfinite; +using std::isnan; + //======================================================================== // // This file contains implementation of all JIT helpers. The helpers are From ce1f3e37b947da9437b535d30eddbe44455fa691 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 20 Feb 2024 18:56:16 +0000 Subject: [PATCH 17/60] Fix some build errors --- src/coreclr/inc/check.h | 2 +- src/coreclr/jit/jitpch.h | 1 + src/coreclr/jit/lower.cpp | 4 ++-- src/coreclr/vm/methodtablebuilder.cpp | 4 ++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/coreclr/inc/check.h b/src/coreclr/inc/check.h index c1ac08016d836..8903d92fce55c 100644 --- a/src/coreclr/inc/check.h +++ b/src/coreclr/inc/check.h @@ -111,7 +111,7 @@ class CHECK #ifdef _DEBUG , m_condition (NULL) , m_file(NULL) - , m_line(NULL) + , m_line(0) , m_pCount(NULL) #endif {} diff --git a/src/coreclr/jit/jitpch.h b/src/coreclr/jit/jitpch.h index 0717dba8a7260..af86bdd6d8c9c 100644 --- a/src/coreclr/jit/jitpch.h +++ b/src/coreclr/jit/jitpch.h @@ -12,6 +12,7 @@ #include #include #include +#include #include // Don't allow using the windows.h #defines for the BitScan* APIs. Using the #defines means our diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 591db3a78a22c..0f120ada4a847 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -6795,7 +6795,7 @@ bool Lowering::TryLowerConstIntDivOrMod(GenTree* node, GenTree** nextNode) } size_t absDivisorValue = - (divisorValue == SSIZE_T_MIN) ? static_cast(divisorValue) : static_cast(abs(divisorValue)); + (divisorValue == SSIZE_T_MIN) ? 
static_cast(divisorValue) : static_cast(std::abs(divisorValue)); if (!isPow2(absDivisorValue)) { @@ -8457,7 +8457,7 @@ bool Lowering::OptimizeForLdp(GenTreeIndir* ind) JITDUMP("[%06u] and [%06u] are indirs off the same base with offsets +%03u and +%03u\n", Compiler::dspTreeID(ind), Compiler::dspTreeID(prevIndir), (unsigned)offs, (unsigned)prev.Offset); - if (abs(offs - prev.Offset) == genTypeSize(ind)) + if (std::abs(offs - prev.Offset) == genTypeSize(ind)) { JITDUMP(" ..and they are amenable to ldp optimization\n"); if (TryMakeIndirsAdjacent(prevIndir, ind)) diff --git a/src/coreclr/vm/methodtablebuilder.cpp b/src/coreclr/vm/methodtablebuilder.cpp index 56d2c2811a85d..eb2cf5b5187ca 100644 --- a/src/coreclr/vm/methodtablebuilder.cpp +++ b/src/coreclr/vm/methodtablebuilder.cpp @@ -8340,14 +8340,14 @@ VOID MethodTableBuilder::PlaceInstanceFields(MethodTable ** pByValueClassCach if (pByValueMT->GetNumInstanceFieldBytes() >= DATA_ALIGNMENT) { dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, DATA_ALIGNMENT); - largestAlignmentRequirement = max(largestAlignmentRequirement, DATA_ALIGNMENT); + largestAlignmentRequirement = max(largestAlignmentRequirement, (unsigned int)DATA_ALIGNMENT); } else #elif defined(FEATURE_64BIT_ALIGNMENT) if (pByValueMT->RequiresAlign8()) { dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, 8); - largestAlignmentRequirement = max(largestAlignmentRequirement, 8); + largestAlignmentRequirement = max(largestAlignmentRequirement, 8u); } else #endif // FEATURE_64BIT_ALIGNMENT From c7e6dc376468ef6aedcbcd78c566bf496acaff61 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 20 Feb 2024 19:19:26 +0000 Subject: [PATCH 18/60] Remove malloc wrapper and simplify InternalNew --- src/coreclr/pal/src/debug/debug.cpp | 2 +- src/coreclr/pal/src/file/file.cpp | 2 +- src/coreclr/pal/src/file/find.cpp | 2 +- src/coreclr/pal/src/handlemgr/handlemgr.cpp | 2 +- src/coreclr/pal/src/include/pal/malloc.hpp | 76 ++----------------- src/coreclr/pal/src/init/pal.cpp | 6 +- src/coreclr/pal/src/loader/module.cpp | 6 +- src/coreclr/pal/src/map/map.cpp | 8 +- src/coreclr/pal/src/map/virtual.cpp | 2 +- src/coreclr/pal/src/misc/strutil.cpp | 2 +- src/coreclr/pal/src/objmgr/palobjbase.cpp | 4 +- src/coreclr/pal/src/objmgr/shmobject.cpp | 2 +- .../pal/src/sharedmemory/sharedmemory.cpp | 2 +- src/coreclr/pal/src/synchmgr/synchmanager.hpp | 2 +- src/coreclr/pal/src/thread/process.cpp | 14 ++-- 15 files changed, 35 insertions(+), 97 deletions(-) diff --git a/src/coreclr/pal/src/debug/debug.cpp b/src/coreclr/pal/src/debug/debug.cpp index 0e56ccceec7a1..b38810864a587 100644 --- a/src/coreclr/pal/src/debug/debug.cpp +++ b/src/coreclr/pal/src/debug/debug.cpp @@ -204,7 +204,7 @@ OutputDebugStringW( } /* strLen includes the null terminator */ - if ((lpOutputStringA = (LPSTR) InternalMalloc((strLen * sizeof(CHAR)))) == NULL) + if ((lpOutputStringA = (LPSTR) malloc((strLen * sizeof(CHAR)))) == NULL) { ERROR("Insufficient memory available !\n"); SetLastError(ERROR_NOT_ENOUGH_MEMORY); diff --git a/src/coreclr/pal/src/file/file.cpp b/src/coreclr/pal/src/file/file.cpp index 8139f87c5d861..1712be93f698f 100644 --- a/src/coreclr/pal/src/file/file.cpp +++ b/src/coreclr/pal/src/file/file.cpp @@ -2881,7 +2881,7 @@ GetTempFileNameW( prefix_stringPS.CloseBuffer(prefix_size - 1); } - tempfile_name = (char*)InternalMalloc(MAX_LONGPATH); + tempfile_name = (char*)malloc(MAX_LONGPATH); if (tempfile_name == NULL) { pThread->SetLastError(ERROR_NOT_ENOUGH_MEMORY); diff --git 
a/src/coreclr/pal/src/file/find.cpp b/src/coreclr/pal/src/file/find.cpp index b874885992f8c..ead5c4335e76e 100644 --- a/src/coreclr/pal/src/file/find.cpp +++ b/src/coreclr/pal/src/file/find.cpp @@ -138,7 +138,7 @@ FindFirstFileA( goto done; } - find_data = (find_obj *)InternalMalloc(sizeof(find_obj)); + find_data = (find_obj *)malloc(sizeof(find_obj)); if ( find_data == NULL ) { ERROR("Unable to allocate memory for find_data\n"); diff --git a/src/coreclr/pal/src/handlemgr/handlemgr.cpp b/src/coreclr/pal/src/handlemgr/handlemgr.cpp index df4841ad472db..09405f1ec514b 100644 --- a/src/coreclr/pal/src/handlemgr/handlemgr.cpp +++ b/src/coreclr/pal/src/handlemgr/handlemgr.cpp @@ -51,7 +51,7 @@ CSimpleHandleManager::Initialize( field, with the head in the global 'm_hiFreeListStart'. */ m_dwTableSize = m_dwTableGrowthRate; - m_rghteHandleTable = reinterpret_cast(InternalMalloc((m_dwTableSize * sizeof(HANDLE_TABLE_ENTRY)))); + m_rghteHandleTable = reinterpret_cast(malloc((m_dwTableSize * sizeof(HANDLE_TABLE_ENTRY)))); if(NULL == m_rghteHandleTable) { ERROR("Unable to create initial handle table array"); diff --git a/src/coreclr/pal/src/include/pal/malloc.hpp b/src/coreclr/pal/src/include/pal/malloc.hpp index b62e8fbbc260f..65715fa9387ae 100644 --- a/src/coreclr/pal/src/include/pal/malloc.hpp +++ b/src/coreclr/pal/src/include/pal/malloc.hpp @@ -27,78 +27,16 @@ Module Name: #include namespace CorUnix{ - inline void * - InternalMalloc( - size_t szSize - ) + // Define "new" style allocators (which allocate then call a constructor). + template + T* InternalNew(Ts... args) { - void *pvMem; + T* pMem = (T*)malloc(sizeof(T)); - if (szSize == 0) - { - // malloc may return null for a requested size of zero bytes. Force a nonzero size to get a valid pointer. - szSize = 1; - } - - pvMem = (void*)malloc(szSize); - return pvMem; - } - - // Define common code for "new" style allocators below. -#define INTERNAL_NEW_COMMON() \ - T *pMem = (T*)InternalMalloc(sizeof(T)); \ - if (pMem == NULL) \ + if (pMem == NULL) return NULL; - // Define "new" style allocators (which allocate then call a constructor) for different numbers of - // constructor arguments. Added based on usage. - - // Default constructor (0 args) case. - template - T* InternalNew() - { - INTERNAL_NEW_COMMON(); - return new (pMem) T(); - } - - // 1 arg case. - template - T* InternalNew(A1 arg1) - { - INTERNAL_NEW_COMMON(); - return new (pMem) T(arg1); - } - - // 2 args case. - template - T* InternalNew(A1 arg1, A2 arg2) - { - INTERNAL_NEW_COMMON(); - return new (pMem) T(arg1, arg2); - } - - // 3 args case. - template - T* InternalNew(A1 arg1, A2 arg2, A3 arg3) - { - INTERNAL_NEW_COMMON(); - return new (pMem) T(arg1, arg2, arg3); - } - - // 4 args case. - template - T* InternalNew(A1 arg1, A2 arg2, A3 arg3, A4 arg4) - { - INTERNAL_NEW_COMMON(); - return new (pMem) T(arg1, arg2, arg3, arg4); - } - - // 5 args case. 
- template - T* InternalNew(A1 arg1, A2 arg2, A3 arg3, A4 arg4, A5 arg5) - { - INTERNAL_NEW_COMMON(); - return new (pMem) T(arg1, arg2, arg3, arg4, arg5); + return new (pMem) T(args...); } template T* InternalNewArray(size_t cElements) @@ -106,7 +44,7 @@ namespace CorUnix{ size_t cbSize = (cElements * sizeof(T)) + sizeof(size_t); T *pMem; - pMem = (T*)InternalMalloc(cbSize); + pMem = (T*)malloc(cbSize); if (pMem == NULL) return NULL; diff --git a/src/coreclr/pal/src/init/pal.cpp b/src/coreclr/pal/src/init/pal.cpp index 9d0c82ac4ae53..67fcbb92bd251 100644 --- a/src/coreclr/pal/src/init/pal.cpp +++ b/src/coreclr/pal/src/init/pal.cpp @@ -1170,7 +1170,7 @@ static LPWSTR INIT_FormatCommandLine (int argc, const char * const *argv) length+=3; length+=strlen(argv[i])*2; } - command_line = reinterpret_cast(InternalMalloc(length)); + command_line = reinterpret_cast(malloc(length != 0 ? length : 1)); if(!command_line) { @@ -1222,7 +1222,7 @@ static LPWSTR INIT_FormatCommandLine (int argc, const char * const *argv) return nullptr; } - retval = reinterpret_cast(InternalMalloc((sizeof(WCHAR)*i))); + retval = reinterpret_cast(malloc((sizeof(WCHAR)*i))); if(retval == nullptr) { ERROR("can't allocate memory for Unicode command line!\n"); @@ -1278,7 +1278,7 @@ static LPWSTR INIT_GetCurrentEXEPath() return nullptr; } - return_value = reinterpret_cast(InternalMalloc((return_size*sizeof(WCHAR)))); + return_value = reinterpret_cast(malloc((return_size*sizeof(WCHAR)))); if (nullptr == return_value) { ERROR("Not enough memory to create full path\n"); diff --git a/src/coreclr/pal/src/loader/module.cpp b/src/coreclr/pal/src/loader/module.cpp index f0651d3bad586..0cda5045e01ef 100644 --- a/src/coreclr/pal/src/loader/module.cpp +++ b/src/coreclr/pal/src/loader/module.cpp @@ -1548,7 +1548,7 @@ static MODSTRUCT *LOADAllocModule(NATIVE_LIBRARY_HANDLE dl_handle, LPCSTR name) LPWSTR wide_name; /* no match found : try to create a new module structure */ - module = (MODSTRUCT *)InternalMalloc(sizeof(MODSTRUCT)); + module = (MODSTRUCT *)malloc(sizeof(MODSTRUCT)); if (nullptr == module) { ERROR("malloc() failed! errno is %d (%s)\n", errno, strerror(errno)); @@ -1805,11 +1805,11 @@ MODSTRUCT *LOADGetPalLibrary() if (g_szCoreCLRPath == nullptr) { size_t cbszCoreCLRPath = strlen(info.dli_fname) + 1; - g_szCoreCLRPath = (char*) InternalMalloc(cbszCoreCLRPath); + g_szCoreCLRPath = (char*) malloc(cbszCoreCLRPath); if (g_szCoreCLRPath == nullptr) { - ERROR("LOADGetPalLibrary: InternalMalloc failed!"); + ERROR("LOADGetPalLibrary: malloc failed!"); goto exit; } diff --git a/src/coreclr/pal/src/map/map.cpp b/src/coreclr/pal/src/map/map.cpp index e5f1203c53641..4f8cb6190c6d9 100644 --- a/src/coreclr/pal/src/map/map.cpp +++ b/src/coreclr/pal/src/map/map.cpp @@ -1129,7 +1129,7 @@ CorUnix::InternalMapViewOfFile( // the global list. 
// - PMAPPED_VIEW_LIST pNewView = (PMAPPED_VIEW_LIST)InternalMalloc(sizeof(*pNewView)); + PMAPPED_VIEW_LIST pNewView = (PMAPPED_VIEW_LIST)malloc(sizeof(*pNewView)); if (NULL != pNewView) { pNewView->lpAddress = pvBaseAddress; @@ -1833,7 +1833,7 @@ static PMAPPED_VIEW_LIST FindSharedMappingReplacement( /* The new desired mapping is fully contained in the one just found: we can reuse this one */ - pNewView = (PMAPPED_VIEW_LIST)InternalMalloc(sizeof(MAPPED_VIEW_LIST)); + pNewView = (PMAPPED_VIEW_LIST)malloc(sizeof(MAPPED_VIEW_LIST)); if (pNewView) { memcpy(pNewView, pView, sizeof(*pNewView)); @@ -1868,7 +1868,7 @@ static NativeMapHolder * NewNativeMapHolder(CPalThread *pThread, LPVOID address, } pThisMapHolder = - (NativeMapHolder *)InternalMalloc(sizeof(NativeMapHolder)); + (NativeMapHolder *)malloc(sizeof(NativeMapHolder)); if (pThisMapHolder) { @@ -1934,7 +1934,7 @@ MAPRecordMapping( PAL_ERROR palError = NO_ERROR; PMAPPED_VIEW_LIST pNewView; - pNewView = (PMAPPED_VIEW_LIST)InternalMalloc(sizeof(*pNewView)); + pNewView = (PMAPPED_VIEW_LIST)malloc(sizeof(*pNewView)); if (NULL != pNewView) { pNewView->lpAddress = addr; diff --git a/src/coreclr/pal/src/map/virtual.cpp b/src/coreclr/pal/src/map/virtual.cpp index 364f3bba1f025..3145faac5f505 100644 --- a/src/coreclr/pal/src/map/virtual.cpp +++ b/src/coreclr/pal/src/map/virtual.cpp @@ -401,7 +401,7 @@ static BOOL VIRTUALStoreAllocationInfo( return FALSE; } - if (!(pNewEntry = (PCMI)InternalMalloc(sizeof(*pNewEntry)))) + if (!(pNewEntry = (PCMI)malloc(sizeof(*pNewEntry)))) { ERROR( "Unable to allocate memory for the structure.\n"); return FALSE; diff --git a/src/coreclr/pal/src/misc/strutil.cpp b/src/coreclr/pal/src/misc/strutil.cpp index ed29831232cab..e665e22b65291 100644 --- a/src/coreclr/pal/src/misc/strutil.cpp +++ b/src/coreclr/pal/src/misc/strutil.cpp @@ -53,7 +53,7 @@ CPalString::CopyString( _ASSERTE(psSource->GetMaxLength() > psSource->GetStringLength()); WCHAR *pwsz = reinterpret_cast( - InternalMalloc(psSource->GetMaxLength() * sizeof(WCHAR)) + malloc(psSource->GetMaxLength() * sizeof(WCHAR)) ); if (NULL != pwsz) diff --git a/src/coreclr/pal/src/objmgr/palobjbase.cpp b/src/coreclr/pal/src/objmgr/palobjbase.cpp index dbfdf3b0c7156..c39b5df7e268f 100644 --- a/src/coreclr/pal/src/objmgr/palobjbase.cpp +++ b/src/coreclr/pal/src/objmgr/palobjbase.cpp @@ -58,7 +58,7 @@ CPalObjectBase::Initialize( if (0 != m_pot->GetImmutableDataSize()) { - m_pvImmutableData = InternalMalloc(m_pot->GetImmutableDataSize()); + m_pvImmutableData = malloc(m_pot->GetImmutableDataSize()); if (NULL != m_pvImmutableData) { ZeroMemory(m_pvImmutableData, m_pot->GetImmutableDataSize()); @@ -80,7 +80,7 @@ CPalObjectBase::Initialize( goto InitializeExit; } - m_pvLocalData = InternalMalloc(m_pot->GetProcessLocalDataSize()); + m_pvLocalData = malloc(m_pot->GetProcessLocalDataSize()); if (NULL != m_pvLocalData) { ZeroMemory(m_pvLocalData, m_pot->GetProcessLocalDataSize()); diff --git a/src/coreclr/pal/src/objmgr/shmobject.cpp b/src/coreclr/pal/src/objmgr/shmobject.cpp index 55b0e87c088a1..282dd113da2e2 100644 --- a/src/coreclr/pal/src/objmgr/shmobject.cpp +++ b/src/coreclr/pal/src/objmgr/shmobject.cpp @@ -119,7 +119,7 @@ CSharedMemoryObject::Initialize( // Allocate local memory to hold the shared data // - m_pvSharedData = InternalMalloc(m_pot->GetSharedDataSize()); + m_pvSharedData = malloc(m_pot->GetSharedDataSize()); if (NULL == m_pvSharedData) { ERROR("Failure allocating m_pvSharedData (local copy)\n"); diff --git a/src/coreclr/pal/src/sharedmemory/sharedmemory.cpp 
b/src/coreclr/pal/src/sharedmemory/sharedmemory.cpp index ea5aae444dad0..ba9447b889c39 100644 --- a/src/coreclr/pal/src/sharedmemory/sharedmemory.cpp +++ b/src/coreclr/pal/src/sharedmemory/sharedmemory.cpp @@ -139,7 +139,7 @@ const UINT64 SharedMemoryHelpers::InvalidSharedThreadId = static_cast(-1 void *SharedMemoryHelpers::Alloc(SIZE_T byteCount) { - void *buffer = InternalMalloc(byteCount); + void *buffer = malloc(byteCount != 0 ? byteCount : 1); if (buffer == nullptr) { throw SharedMemoryException(static_cast(SharedMemoryError::OutOfMemory)); diff --git a/src/coreclr/pal/src/synchmgr/synchmanager.hpp b/src/coreclr/pal/src/synchmgr/synchmanager.hpp index 925b896e7e572..ce325f75ecc1e 100644 --- a/src/coreclr/pal/src/synchmgr/synchmanager.hpp +++ b/src/coreclr/pal/src/synchmgr/synchmanager.hpp @@ -496,7 +496,7 @@ namespace CorUnix class CPalSynchronizationManager : public IPalSynchronizationManager { friend class CPalSynchMgrController; - template friend T *CorUnix::InternalNew(); + template friend T *CorUnix::InternalNew(Ts... args); public: // types diff --git a/src/coreclr/pal/src/thread/process.cpp b/src/coreclr/pal/src/thread/process.cpp index 69121faa2b75b..559748903eb1d 100644 --- a/src/coreclr/pal/src/thread/process.cpp +++ b/src/coreclr/pal/src/thread/process.cpp @@ -734,7 +734,7 @@ CorUnix::InternalCreateProcess( } } EnvironmentEntries++; - EnvironmentArray = (char **)InternalMalloc(EnvironmentEntries * sizeof(char *)); + EnvironmentArray = (char **)malloc(EnvironmentEntries * sizeof(char *)); EnvironmentEntries = 0; // Convert the environment block to array of strings @@ -2028,7 +2028,7 @@ PROCNotifyProcessShutdownDestructor() char* PROCFormatInt(ULONG32 value) { - char* buffer = (char*)InternalMalloc(128); + char* buffer = (char*)malloc(128); if (buffer != nullptr) { if (sprintf_s(buffer, 128, "%d", value) == -1) @@ -2050,7 +2050,7 @@ PROCFormatInt(ULONG32 value) char* PROCFormatInt64(ULONG64 value) { - char* buffer = (char*)InternalMalloc(128); + char* buffer = (char*)malloc(128); if (buffer != nullptr) { if (sprintf_s(buffer, 128, "%lld", value) == -1) @@ -2089,7 +2089,7 @@ PROCBuildCreateDumpCommandLine( } const char* DumpGeneratorName = "createdump"; int programLen = strlen(g_szCoreCLRPath) + strlen(DumpGeneratorName) + 1; - char* program = *pprogram = (char*)InternalMalloc(programLen); + char* program = *pprogram = (char*)malloc(programLen); if (program == nullptr) { return FALSE; @@ -2833,7 +2833,7 @@ CorUnix::InitializeProcessCommandLine( size_t n = PAL_wcslen(lpwstrFullPath) + 1; size_t iLen = n; - initial_dir = reinterpret_cast(InternalMalloc(iLen*sizeof(WCHAR))); + initial_dir = reinterpret_cast(malloc(iLen*sizeof(WCHAR))); if (NULL == initial_dir) { ERROR("malloc() failed! 
(initial_dir) \n"); @@ -3760,7 +3760,7 @@ buildArgv( pThread = InternalGetCurrentThread(); /* make sure to allocate enough space, up for the worst case scenario */ int iLength = (iWlen + lpAppPath.GetCount() + 2); - lpAsciiCmdLine = (char *) InternalMalloc(iLength); + lpAsciiCmdLine = (char *) malloc(iLength); if (lpAsciiCmdLine == NULL) { @@ -3940,7 +3940,7 @@ buildArgv( /* allocate lppargv according to the number of arguments in the command line */ - lppArgv = (char **) InternalMalloc((((*pnArg)+1) * sizeof(char *))); + lppArgv = (char **) malloc((((*pnArg)+1) * sizeof(char *))); if (lppArgv == NULL) { From 03a06fbb9cec58093d5cc0044906ae6b32421df8 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 20 Feb 2024 20:13:16 +0000 Subject: [PATCH 19/60] Remove cruntime/misc.cpp PAL APIs and inline usages of _gcvt_s --- src/coreclr/ildasm/dasm.cpp | 8 +- src/coreclr/ildasm/dis.cpp | 4 +- src/coreclr/pal/inc/pal.h | 3 - src/coreclr/pal/src/CMakeLists.txt | 1 - src/coreclr/pal/src/cruntime/misc.cpp | 103 ------------------ src/coreclr/pal/tests/palsuite/CMakeLists.txt | 1 - .../c_runtime/__iscsym/test1/__iscsym.cpp | 92 ---------------- .../pal/tests/palsuite/compilableTests.txt | 1 - .../pal/tests/palsuite/paltestlist.txt | 1 - 9 files changed, 6 insertions(+), 208 deletions(-) delete mode 100644 src/coreclr/pal/src/cruntime/misc.cpp delete mode 100644 src/coreclr/pal/tests/palsuite/c_runtime/__iscsym/test1/__iscsym.cpp diff --git a/src/coreclr/ildasm/dasm.cpp b/src/coreclr/ildasm/dasm.cpp index be95e36fa5d53..fa187d8dce7ca 100644 --- a/src/coreclr/ildasm/dasm.cpp +++ b/src/coreclr/ildasm/dasm.cpp @@ -1914,7 +1914,7 @@ BYTE* PrettyPrintCABlobValue(PCCOR_SIGNATURE &typePtr, for(n=0; n < numElements; n++) { if(n) appendStr(out," "); - _gcvt_s(str,64,*((float*)dataPtr), 8); + sprintf_s(str, 64, "%.*g", 8, (double)(*((float*)dataPtr))); float df = (float)atof(str); // Must compare as underlying bytes, not floating point otherwise optimizer will // try to enregister and compare 80-bit precision number with 32-bit precision number!!!! @@ -1933,7 +1933,7 @@ BYTE* PrettyPrintCABlobValue(PCCOR_SIGNATURE &typePtr, { if(n) appendStr(out," "); char *pch; - _gcvt_s(str,64,*((double*)dataPtr), 17); + sprintf_s(str, 64, "%.*g", 17, *((double*)dataPtr)); double df = strtod(str, &pch); // Must compare as underlying bytes, not floating point otherwise optimizer will // try to enregister and compare 80-bit precision number with 64-bit precision number!!!! @@ -2605,7 +2605,7 @@ void DumpDefaultValue(mdToken tok, __inout __nullterminated char* szString, void case ELEMENT_TYPE_R4: { char szf[32]; - _gcvt_s(szf,32,MDDV.m_fltValue, 8); + sprintf_s(szf, 32, "%.*g", 8, (double)MDDV.m_fltValue); float df = (float)atof(szf); // Must compare as underlying bytes, not floating point otherwise optimizer will // try to enregister and compare 80-bit precision number with 32-bit precision number!!!! 
@@ -2619,7 +2619,7 @@ void DumpDefaultValue(mdToken tok, __inout __nullterminated char* szString, void case ELEMENT_TYPE_R8: { char szf[32], *pch; - _gcvt_s(szf,32,MDDV.m_dblValue, 17); + sprintf_s(szf, 32, "%.*g", 17, MDDV.m_dblValue); double df = strtod(szf, &pch); //atof(szf); szf[31]=0; // Must compare as underlying bytes, not floating point otherwise optimizer will diff --git a/src/coreclr/ildasm/dis.cpp b/src/coreclr/ildasm/dis.cpp index 21fc8c8679028..2ad1ecd2d200a 100644 --- a/src/coreclr/ildasm/dis.cpp +++ b/src/coreclr/ildasm/dis.cpp @@ -1573,7 +1573,7 @@ BOOL Disassemble(IMDInternalImport *pImport, BYTE *ILHeader, void *GUICookie, md if(f==0.0) strcpy_s(szf,32,((v>>24)==0)? "0.0" : "-0.0"); else - _gcvt_s(szf,32,(double)f, 8); + sprintf_s(szf, 32, "%.*g", 8, (double)f); float fd = (float)atof(szf); // Must compare as underlying bytes, not floating point otherwise optimizer will // try to enregister and compare 80-bit precision number with 32-bit precision number!!!! @@ -1612,7 +1612,7 @@ BOOL Disassemble(IMDInternalImport *pImport, BYTE *ILHeader, void *GUICookie, md if(d==0.0) strcpy_s(szf,32,((v>>56)==0)? "0.0" : "-0.0"); else - _gcvt_s(szf,32,d, 17); + sprintf_s(szf, 32, "%.*g", 17, d); double df = strtod(szf, &pch); //atof(szf); // Must compare as underlying bytes, not floating point otherwise optimizer will // try to enregister and compare 80-bit precision number with 64-bit precision number!!!! diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index 6dbcbe04cb7b8..e9f7f5b1b5144 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -3897,9 +3897,6 @@ typedef int errno_t; PALIMPORT DLLEXPORT errno_t __cdecl memcpy_s(void *, size_t, const void *, size_t) THROW_DECL; PALIMPORT errno_t __cdecl memmove_s(void *, size_t, const void *, size_t); -PALIMPORT DLLEXPORT int __cdecl strcasecmp(const char *, const char *); -PALIMPORT char * __cdecl _gcvt_s(char *, int, double, int); -PALIMPORT int __cdecl __iscsym(int); PALIMPORT DLLEXPORT int __cdecl _wcsicmp(const WCHAR *, const WCHAR*); PALIMPORT int __cdecl _wcsnicmp(const WCHAR *, const WCHAR *, size_t); PALIMPORT DLLEXPORT int __cdecl _vsnprintf_s(char *, size_t, size_t, const char *, va_list); diff --git a/src/coreclr/pal/src/CMakeLists.txt b/src/coreclr/pal/src/CMakeLists.txt index 6db25a1fc722e..cb3693655dcad 100644 --- a/src/coreclr/pal/src/CMakeLists.txt +++ b/src/coreclr/pal/src/CMakeLists.txt @@ -129,7 +129,6 @@ if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND (CLR_CMAKE_HOST_ARCH_AMD64 OR CLR_CM endif() set(SOURCES - cruntime/misc.cpp cruntime/wchar.cpp debug/debug.cpp exception/seh.cpp diff --git a/src/coreclr/pal/src/cruntime/misc.cpp b/src/coreclr/pal/src/cruntime/misc.cpp deleted file mode 100644 index 5b66dfec17d48..0000000000000 --- a/src/coreclr/pal/src/cruntime/misc.cpp +++ /dev/null @@ -1,103 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*++ - - - -Module Name: - - cruntime/misc.cpp - -Abstract: - - Implementation of C runtime functions that don't fit anywhere else. - - - ---*/ - -#include "pal/thread.hpp" -#include "pal/threadsusp.hpp" -#include "pal/palinternal.h" -#include "pal/dbgmsg.h" -#include "pal/misc.h" - -#include -#include -#include -#include - -#if defined(_DEBUG) -#include -#endif //defined(_DEBUG) - -SET_DEFAULT_DEBUG_CHANNEL(CRT); - -/*++ -Function: - _gcvt_s - -See MSDN doc. 
---*/ -char * -__cdecl -_gcvt_s( char * buffer, int iSize, double value, int digits ) -{ - PERF_ENTRY(_gcvt); - ENTRY( "_gcvt( value:%f digits=%d, buffer=%p )\n", value, digits, buffer ); - - if ( !buffer ) - { - ERROR( "buffer was an invalid pointer.\n" ); - } - - switch ( digits ) - { - case 7 : - /* Fall through */ - case 8 : - /* Fall through */ - case 15 : - /* Fall through */ - case 17 : - - sprintf_s( buffer, iSize, "%.*g", digits, value ); - break; - - default : - ASSERT( "Only the digits 7, 8, 15, and 17 are valid.\n" ); - *buffer = '\0'; - } - - LOGEXIT( "_gcvt returns %p (%s)\n", buffer , buffer ); - PERF_EXIT(_gcvt); - return buffer; -} - - -/*++ -Function : - - __iscsym - -See MSDN for more details. ---*/ -int -__cdecl -__iscsym( int c ) -{ - PERF_ENTRY(__iscsym); - ENTRY( "__iscsym( c=%d )\n", c ); - - if ( isalnum( c ) || c == '_' ) - { - LOGEXIT( "__iscsym returning 1\n" ); - PERF_EXIT(__iscsym); - return 1; - } - - LOGEXIT( "__iscsym returning 0\n" ); - PERF_EXIT(__iscsym); - return 0; -} diff --git a/src/coreclr/pal/tests/palsuite/CMakeLists.txt b/src/coreclr/pal/tests/palsuite/CMakeLists.txt index 0b8d0993b0c06..3d5dc9a749089 100644 --- a/src/coreclr/pal/tests/palsuite/CMakeLists.txt +++ b/src/coreclr/pal/tests/palsuite/CMakeLists.txt @@ -168,7 +168,6 @@ add_executable_clr(paltests c_runtime/_wfopen/test6/test6.cpp c_runtime/_wfopen/test7/test7.cpp c_runtime/_wtoi/test1/test1.cpp - c_runtime/__iscsym/test1/__iscsym.cpp #debug_api/DebugBreak/test1/test1.cpp debug_api/OutputDebugStringA/test1/helper.cpp debug_api/OutputDebugStringA/test1/test1.cpp diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/__iscsym/test1/__iscsym.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/__iscsym/test1/__iscsym.cpp deleted file mode 100644 index 9244c5f0a32e2..0000000000000 --- a/src/coreclr/pal/tests/palsuite/c_runtime/__iscsym/test1/__iscsym.cpp +++ /dev/null @@ -1,92 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -/*============================================================= -** -** Source: __iscsym.c -** -** Purpose: Positive test the __iscsym API. 
-** Call __iscsym to letter, digit and underscore -** -** -**============================================================*/ -#include - -PALTEST(c_runtime___iscsym_test1_paltest_iscsym_test1, "c_runtime/__iscsym/test1/paltest_iscsym_test1") -{ - int err; - int index; - char non_letter_set[]= - {'~','`','!','@','#','$','%','^','&','*','(',')',')', - '-','+','=','|','\\',';',':','"','\'','<','>', - ',','.','?','/','\0'}; - char errBuffer[200]; - - /*Initialize the PAL environment*/ - err = PAL_Initialize(argc, argv); - if(0 != err) - { - return FAIL; - } - - /*To check if the parameter passed in is a character*/ - for(index = 'a'; index <= 'z'; index++) - { - err = __iscsym(index); - if(0 == err) - { - Fail("\n__iscsym failed to recognize a " - "lower-case letter:%c!\n", index); - } - } - - /*To check if the parameter passed in is a character*/ - for(index = 'A'; index <= 'Z'; index++) - { - err = __iscsym(index); - if(0 == err) - { - Fail("\n__iscsym failed to recognize an " - "upper-case letter: %c!\n", index); - } - } - - /*To check if the parameter passed in is a digit*/ - for(index = '0'; index <= '9'; index++) - { - err = __iscsym(index); - if(0 == err) - { - Fail("\n__iscsym failed to recognize a digit %c!\n", - index); - } - } - - /*To check if the parameter passed in is a underscore*/ - err = __iscsym('_'); - if(0 == err) - { - Fail("\n__iscsym failed to recognize an underscore!\n"); - } - - memset(errBuffer, 0, 200); - - for(index = 0; non_letter_set[index]; index++) - { - err = __iscsym(non_letter_set[index]); - if(0 != err) - { - strncat(errBuffer, &non_letter_set[index], 1); - strcat(errBuffer, ", "); - } - } - - if(strlen(errBuffer) > 0) - { - Fail("\n__iscsym failed to identify the characters '%s' " - "as not letters, digits " - "or underscores\n", errBuffer); - } - PAL_Terminate(); - return PASS; -} diff --git a/src/coreclr/pal/tests/palsuite/compilableTests.txt b/src/coreclr/pal/tests/palsuite/compilableTests.txt index 987fe6fcd9e65..4d865fc63417e 100644 --- a/src/coreclr/pal/tests/palsuite/compilableTests.txt +++ b/src/coreclr/pal/tests/palsuite/compilableTests.txt @@ -110,7 +110,6 @@ c_runtime/_wfopen/test5/paltest_wfopen_test5 c_runtime/_wfopen/test6/paltest_wfopen_test6 c_runtime/_wfopen/test7/paltest_wfopen_test7 c_runtime/_wtoi/test1/paltest_wtoi_test1 -c_runtime/__iscsym/test1/paltest_iscsym_test1 debug_api/OutputDebugStringA/test1/paltest_outputdebugstringa_test1 debug_api/OutputDebugStringW/test1/paltest_outputdebugstringw_test1 exception_handling/RaiseException/test1/paltest_raiseexception_test1 diff --git a/src/coreclr/pal/tests/palsuite/paltestlist.txt b/src/coreclr/pal/tests/palsuite/paltestlist.txt index 8274b4a4e873b..e141789e71f06 100644 --- a/src/coreclr/pal/tests/palsuite/paltestlist.txt +++ b/src/coreclr/pal/tests/palsuite/paltestlist.txt @@ -99,7 +99,6 @@ c_runtime/_wfopen/test5/paltest_wfopen_test5 c_runtime/_wfopen/test6/paltest_wfopen_test6 c_runtime/_wfopen/test7/paltest_wfopen_test7 c_runtime/_wtoi/test1/paltest_wtoi_test1 -c_runtime/__iscsym/test1/paltest_iscsym_test1 debug_api/OutputDebugStringW/test1/paltest_outputdebugstringw_test1 exception_handling/RaiseException/test1/paltest_raiseexception_test1 exception_handling/RaiseException/test2/paltest_raiseexception_test2 From 54d2c8eb7d7a7ccb909c08d9446f85fe68a93bde Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 20 Feb 2024 21:52:39 +0000 Subject: [PATCH 20/60] Fix more build failures --- src/coreclr/debug/ee/funceval.cpp | 2 +- src/coreclr/inc/loaderheap.h | 2 +- 
src/coreclr/jit/utils.cpp | 2 +- src/coreclr/pal/inc/pal.h | 5 ----- src/coreclr/vm/common.h | 2 +- 5 files changed, 4 insertions(+), 9 deletions(-) diff --git a/src/coreclr/debug/ee/funceval.cpp b/src/coreclr/debug/ee/funceval.cpp index 7844edbe8b306..a7e888452c781 100644 --- a/src/coreclr/debug/ee/funceval.cpp +++ b/src/coreclr/debug/ee/funceval.cpp @@ -2806,7 +2806,7 @@ void PackArgumentArray(DebuggerEval *pDE, #ifdef FEATURE_HFA // The buffer for HFAs has to be always ENREGISTERED_RETURNTYPE_MAXSIZE - size = max(size, ENREGISTERED_RETURNTYPE_MAXSIZE); + size = max(size, (unsigned)ENREGISTERED_RETURNTYPE_MAXSIZE); #endif BYTE * pTemp = new (interopsafe) BYTE[ALIGN_UP(sizeof(ValueClassInfo), 8) + size]; diff --git a/src/coreclr/inc/loaderheap.h b/src/coreclr/inc/loaderheap.h index 216668315cbff..b155d0188b84e 100644 --- a/src/coreclr/inc/loaderheap.h +++ b/src/coreclr/inc/loaderheap.h @@ -158,7 +158,7 @@ struct LoaderHeapEvent; inline UINT32 GetStubCodePageSize() { #if defined(TARGET_ARM64) && defined(TARGET_UNIX) - return max(16*1024, GetOsPageSize()); + return max(16*1024u, GetOsPageSize()); #elif defined(TARGET_ARM) return 4096; // ARM is special as the 32bit instruction set does not easily permit a 16KB offset #else diff --git a/src/coreclr/jit/utils.cpp b/src/coreclr/jit/utils.cpp index aed8cda7c24df..86393a4f8cb97 100644 --- a/src/coreclr/jit/utils.cpp +++ b/src/coreclr/jit/utils.cpp @@ -4037,7 +4037,7 @@ T GetSignedMagic(T denom, int* shift /*out*/) UT t; T result_magic; - absDenom = abs(denom); + absDenom = std::abs(denom); t = two_nminus1 + (UT(denom) >> bits_minus_1); absNc = t - 1 - (t % absDenom); // absolute value of nc p = bits_minus_1; // initialize p diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index e9f7f5b1b5144..05af682992d17 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -3973,11 +3973,6 @@ unsigned int __cdecl _rotl(unsigned int value, int shift) } #endif // !__has_builtin(_rotl) -// On 64 bit unix, make the long an int. -#ifdef HOST_64BIT -#define _lrotl _rotl -#endif - #if !__has_builtin(_rotr) /*++ diff --git a/src/coreclr/vm/common.h b/src/coreclr/vm/common.h index a51626dfaf320..8b8ff9e842b3a 100644 --- a/src/coreclr/vm/common.h +++ b/src/coreclr/vm/common.h @@ -66,7 +66,7 @@ #include #include #include -#include +#include #include #include #include From 90ad1fee6f9b5beacc9e3ead2025abb7b44d53ac Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Fri, 23 Feb 2024 12:11:47 -0800 Subject: [PATCH 21/60] Fix various build failures. 
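A note on the pattern used in these fixes (an illustrative sketch, not part of the diff; the function and constant names below are made up): with the PAL no longer supplying its own min/max/abs definitions, these call sites resolve against the standard C++ templates or equivalent same-type signatures, which deduce one common type for both arguments. Mixed int/unsigned or int/size_t calls therefore stop compiling, and plain abs() may bind to the C int overload for wider arguments, so the fixes cast the literal to the other operand's type and call std::abs explicitly:

    #include <algorithm>
    #include <cstdlib>
    #include <cstdio>

    static size_t ComputeReserve(size_t current)
    {
        // return std::max(0x400, current);       // error: conflicting deduction from (int, size_t)
        return std::max((size_t)0x400, current);  // same-type arguments, matching the jitinterface.cpp fix
    }

    static long long Magnitude(long long displacement)
    {
        // Plain ::abs may select the int overload and truncate here;
        // std::abs provides the long long overload.
        return std::abs(displacement);
    }

    int main()
    {
        std::printf("%zu %lld\n", ComputeReserve(0x200), Magnitude(-5LL));
        return 0;
    }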
--- eng/native/configurecompiler.cmake | 1 + src/coreclr/jit/codegenarm64.cpp | 2 +- src/coreclr/jit/emitarm.cpp | 2 +- src/coreclr/vm/jitinterface.cpp | 2 +- 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/eng/native/configurecompiler.cmake b/eng/native/configurecompiler.cmake index 4d23294367b7d..a4e7b4b007e85 100644 --- a/eng/native/configurecompiler.cmake +++ b/eng/native/configurecompiler.cmake @@ -30,6 +30,7 @@ if (CLR_CMAKE_HOST_UNIX) add_compile_options(-glldb) else() add_compile_options(-g) + add_compile_options(-Wno-error=conversion-null) endif() endif() diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index 4587bace1697a..bc6c96a56aa04 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -432,7 +432,7 @@ void CodeGen::genStackPointerAdjustment(ssize_t spDelta, regNumber tmpReg, bool* { // spDelta is negative in the prolog, positive in the epilog, but we always tell the unwind codes the positive // value. - ssize_t spDeltaAbs = abs(spDelta); + ssize_t spDeltaAbs = std::abs(spDelta); unsigned unwindSpDelta = (unsigned)spDeltaAbs; assert((ssize_t)unwindSpDelta == spDeltaAbs); // make sure that it fits in a unsigned diff --git a/src/coreclr/jit/emitarm.cpp b/src/coreclr/jit/emitarm.cpp index 3fa92b60d0e5b..362e304273415 100644 --- a/src/coreclr/jit/emitarm.cpp +++ b/src/coreclr/jit/emitarm.cpp @@ -6504,7 +6504,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) code |= (immHi << 16); code |= immLo; - disp = abs(disp); + disp = std::abs(disp); assert((disp & 0x00fffffe) == disp); callInstrSize = SafeCvtAssert(emitOutput_Thumb2Instr(dst, code)); diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp index 5fc3f5d83acb2..0b71278d89caf 100644 --- a/src/coreclr/vm/jitinterface.cpp +++ b/src/coreclr/vm/jitinterface.cpp @@ -11468,7 +11468,7 @@ void CEEJitInfo::recordRelocation(void * location, // Keep track of conservative estimate of how much memory may be needed by jump stubs. We will use it to reserve extra memory // on retry to increase chances that the retry succeeds. - m_reserveForJumpStubs = max(0x400, m_reserveForJumpStubs + 2*BACK_TO_BACK_JUMP_ALLOCATE_SIZE); + m_reserveForJumpStubs = max((size_t)0x400, m_reserveForJumpStubs + 2*BACK_TO_BACK_JUMP_ALLOCATE_SIZE); if (jumpStubAddr == 0) { From a0244844a1f278880390af8bfc142acd0cc730d5 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 26 Feb 2024 14:18:10 -0800 Subject: [PATCH 22/60] Fix various build failures. 
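Background on the (TADDR)NULL comparisons introduced here and in the next few commits (a sketch only; the typedef below is a stand-in for the real definition): TADDR is an integer typedef, not a pointer, so comparing it against NULL converts the compiler's null-pointer constant to an integer. The PAL's old NULL was a plain 0 and the conversion was invisible; with the standard headers' NULL, GCC and Clang now diagnose it (the exact warning group depends on the compiler and on whether NULL appears in an assignment or a comparison). Writing the constant in the integer domain keeps the comparison exact and warning-clean:

    #include <cstdint>
    #include <cstddef>

    typedef uintptr_t TADDR;   // close enough to the CLR definition for illustration

    static bool HasEntries(TADDR pcEntries)
    {
        // return pcEntries != NULL;      // warns: NULL converted to a non-pointer type
        return pcEntries != (TADDR)NULL;  // equivalent to: pcEntries != 0
    }

    int main()
    {
        return HasEntries((TADDR)0) ? 1 : 0;
    }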
--- src/coreclr/inc/crtwrap.h | 2 ++ src/coreclr/inc/utilcode.h | 2 +- src/coreclr/jit/emitarm64.cpp | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/coreclr/inc/crtwrap.h b/src/coreclr/inc/crtwrap.h index 5c8b2e6e78bf7..bbf01e03ac602 100644 --- a/src/coreclr/inc/crtwrap.h +++ b/src/coreclr/inc/crtwrap.h @@ -16,7 +16,9 @@ #include #include "debugmacros.h" #include +#if !defined(CLR_CMAKE_HOST_APPLE) #include +#endif #include #include diff --git a/src/coreclr/inc/utilcode.h b/src/coreclr/inc/utilcode.h index 2e6fd534cf951..62d05c2403e57 100644 --- a/src/coreclr/inc/utilcode.h +++ b/src/coreclr/inc/utilcode.h @@ -1894,7 +1894,7 @@ class CHashTableAndData : public CHashTable ~CHashTableAndData() { WRAPPER_NO_CONTRACT; - if (m_pcEntries != NULL) + if ((PTR_VOID)m_pcEntries != NULL) MemMgr::Free((BYTE*)m_pcEntries, MemMgr::RoundSize(m_iEntries * m_iEntrySize)); } diff --git a/src/coreclr/jit/emitarm64.cpp b/src/coreclr/jit/emitarm64.cpp index dea80c05e6b8b..c76d29e77ee31 100644 --- a/src/coreclr/jit/emitarm64.cpp +++ b/src/coreclr/jit/emitarm64.cpp @@ -16239,7 +16239,7 @@ void emitter::emitIns_Call(EmitCallType callType, // Our stack level should be always greater than the bytes of arguments we push. Just // a sanity test. - assert((unsigned)abs(argSize) <= codeGen->genStackLevel); + assert((unsigned)std::abs(argSize) <= codeGen->genStackLevel); // Trim out any callee-trashed registers from the live set. regMaskTP savedSet = emitGetGCRegsSavedOrModified(methHnd); From d3aec5ea492ce41a59e86566c6ba84e9c767819e Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 26 Feb 2024 15:17:49 -0800 Subject: [PATCH 23/60] Fix define and fix more community-leg failures. --- src/coreclr/inc/crtwrap.h | 2 +- src/coreclr/inc/safemath.h | 4 ++++ src/coreclr/inc/utilcode.h | 4 ++-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/coreclr/inc/crtwrap.h b/src/coreclr/inc/crtwrap.h index bbf01e03ac602..8dd43d6a09d18 100644 --- a/src/coreclr/inc/crtwrap.h +++ b/src/coreclr/inc/crtwrap.h @@ -16,7 +16,7 @@ #include #include "debugmacros.h" #include -#if !defined(CLR_CMAKE_HOST_APPLE) +#if !defined(HOST_APPLE) #include #endif #include diff --git a/src/coreclr/inc/safemath.h b/src/coreclr/inc/safemath.h index 336d4b0b464e3..ff1fcbee78115 100644 --- a/src/coreclr/inc/safemath.h +++ b/src/coreclr/inc/safemath.h @@ -33,6 +33,10 @@ #include +#ifdef FEATURE_PAL +#include "pal_mstypes.h" +#endif // FEATURE_PAL + //================================================================== // Semantics: if val can be represented as the exact same value // when cast to Dst type, then FitsIn(val) will return true; diff --git a/src/coreclr/inc/utilcode.h b/src/coreclr/inc/utilcode.h index 62d05c2403e57..9f6630bf82824 100644 --- a/src/coreclr/inc/utilcode.h +++ b/src/coreclr/inc/utilcode.h @@ -1894,7 +1894,7 @@ class CHashTableAndData : public CHashTable ~CHashTableAndData() { WRAPPER_NO_CONTRACT; - if ((PTR_VOID)m_pcEntries != NULL) + if (m_pcEntries != (TADDR)NULL) MemMgr::Free((BYTE*)m_pcEntries, MemMgr::RoundSize(m_iEntries * m_iEntrySize)); } @@ -2074,7 +2074,7 @@ int CHashTableAndData::Grow() // 1 if successful, 0 if not. int iCurSize; // Current size in bytes. int iEntries; // New # of entries. - _ASSERTE(m_pcEntries != NULL); + _ASSERTE(m_pcEntries != (TADDR)NULL); _ASSERTE(m_iFree == UINT32_MAX); // Compute the current size and new # of entries. 
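A note ahead of the next commit's _alloca mapping in pal.h (an illustrative sketch, not part of any diff): memory returned by alloca lives only as long as the frame of the function that called it, so a forwarding helper would hand back storage that is already dead by the time the helper returns. That is why the portability shim has to be a macro that expands in the caller's own frame:

    #include <cstring>
    #include <cstdio>
    #include <alloca.h>   // alloca comes from <alloca.h> or <stdlib.h> on Unix-like systems

    // Wrong shape: the allocation would belong to the helper's frame and be
    // invalid as soon as it returns.
    //   inline void* alloca_wrapper(size_t n) { return alloca(n); }

    // Right shape: expand directly into the caller's frame.
    #define _alloca(x) alloca(x)

    int main()
    {
        char* buf = (char*)_alloca(16);   // valid until main returns
        std::strcpy(buf, "stack buffer");
        std::puts(buf);
        return 0;
    }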
From 27fd40030dff60cdd36c110320410c6c6bc1f0e2 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Thu, 7 Mar 2024 18:22:45 -0800 Subject: [PATCH 24/60] Various osx and gcc fixes --- src/coreclr/inc/allocacheck.h | 4 +++- src/coreclr/inc/utilcode.h | 2 ++ src/coreclr/pal/inc/pal.h | 7 +++++++ src/coreclr/tools/StressLogAnalyzer/util.h | 2 ++ src/coreclr/tools/superpmi/superpmi-shared/standardpch.h | 2 ++ 5 files changed, 16 insertions(+), 1 deletion(-) diff --git a/src/coreclr/inc/allocacheck.h b/src/coreclr/inc/allocacheck.h index ea7e6df316f01..7dba90e29a905 100644 --- a/src/coreclr/inc/allocacheck.h +++ b/src/coreclr/inc/allocacheck.h @@ -23,7 +23,9 @@ #ifndef AllocaCheck_h #define AllocaCheck_h -#include // for alloca itself +#if !defined(HOST_APPLE) +#include // for alloca itself +#endif #if defined(assert) && !defined(_ASSERTE) #define _ASSERTE assert diff --git a/src/coreclr/inc/utilcode.h b/src/coreclr/inc/utilcode.h index 9f6630bf82824..61b5952820be5 100644 --- a/src/coreclr/inc/utilcode.h +++ b/src/coreclr/inc/utilcode.h @@ -14,7 +14,9 @@ #include "winwrap.h" #include #include +#if !defined(HOST_APPLE) #include +#endif #include #include #include diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index 05af682992d17..e831f1b0e2461 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -49,6 +49,9 @@ Module Name: #include #include #include +#if !defined(HOST_APPLE) +#include +#endif #ifdef __cplusplus extern "C++" @@ -3940,6 +3943,10 @@ inline char* _strdup(const char* a) return strdup(a); } +// Define the MSVC implementation of the alloca concept. +// As this allocates on the current stack frame, use a macro instead of an inline function. +#define _alloca(x) alloca(x) + #ifdef __cplusplus extern "C++" { inline WCHAR *PAL_wcschr(WCHAR* S, WCHAR C) diff --git a/src/coreclr/tools/StressLogAnalyzer/util.h b/src/coreclr/tools/StressLogAnalyzer/util.h index 6999676c2e7cc..7d289e9121559 100644 --- a/src/coreclr/tools/StressLogAnalyzer/util.h +++ b/src/coreclr/tools/StressLogAnalyzer/util.h @@ -12,7 +12,9 @@ typedef void* CRITSEC_COOKIE; #define STRESS_LOG_ANALYZER +#if !defined(HOST_APPLE) #include +#endif #include "staticcontract.h" // This macro is used to standardize the wide character string literals between UNIX and Windows. 
diff --git a/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h b/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h index 4b23750b99431..1dd5f18b20be1 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h +++ b/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h @@ -51,7 +51,9 @@ #include #include #include +#if !defined(HOST_APPLE) #include +#endif #include #include #include From 5cfe4833938ae460599cc6b3ff6a5c0a3fca1217 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Fri, 8 Mar 2024 11:01:56 -0800 Subject: [PATCH 25/60] Define RC_INVOKED when preprocessing RC files and fix more NULL->TADDR/PCODE conversions for GCC --- eng/native/functions.cmake | 6 ++++++ src/coreclr/CMakeLists.txt | 2 +- src/coreclr/debug/inc/dbgipcevents.h | 2 +- src/coreclr/vm/codeman.cpp | 26 +++++++++++++------------- src/coreclr/vm/dllimportcallback.h | 4 ++-- src/coreclr/vm/virtualcallstub.h | 2 +- 6 files changed, 24 insertions(+), 18 deletions(-) diff --git a/eng/native/functions.cmake b/eng/native/functions.cmake index e10e008d775e4..d4a7e1bee92ed 100644 --- a/eng/native/functions.cmake +++ b/eng/native/functions.cmake @@ -220,6 +220,12 @@ endfunction(convert_to_absolute_path) function(preprocess_file inputFilename outputFilename) get_compile_definitions(PREPROCESS_DEFINITIONS) get_include_directories(PREPROCESS_INCLUDE_DIRECTORIES) + get_source_file_property(SOURCE_FILE_DEFINITIONS ${inputFilename} COMPILE_DEFINITIONS) + + foreach(DEFINITION IN LISTS SOURCE_FILE_DEFINITIONS) + list(APPEND PREPROCESS_DEFINITIONS -D${DEFINITION}) + endforeach() + if (MSVC) add_custom_command( OUTPUT ${outputFilename} diff --git a/src/coreclr/CMakeLists.txt b/src/coreclr/CMakeLists.txt index 79de28794d355..9e4405cd73fb5 100644 --- a/src/coreclr/CMakeLists.txt +++ b/src/coreclr/CMakeLists.txt @@ -212,7 +212,7 @@ if(CLR_CMAKE_HOST_UNIX) # given Windows .rc file. The target C++ file path is returned in the # variable specified by the TARGET_FILE parameter. function(build_resources SOURCE TARGET_NAME TARGET_FILE) - + set_source_files_properties(${SOURCE} PROPERTIES COMPILE_DEFINITIONS "RC_INVOKED") set(PREPROCESSED_SOURCE ${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}.rc.i) preprocess_file(${SOURCE} ${PREPROCESSED_SOURCE}) diff --git a/src/coreclr/debug/inc/dbgipcevents.h b/src/coreclr/debug/inc/dbgipcevents.h index 0eb393c37fce9..aa18cf328326b 100644 --- a/src/coreclr/debug/inc/dbgipcevents.h +++ b/src/coreclr/debug/inc/dbgipcevents.h @@ -768,7 +768,7 @@ class MSLAYOUT VMPTR_Base // // Operators to emulate Pointer semantics. 
// - bool IsNull() { SUPPORTS_DAC; return m_addr == NULL; } + bool IsNull() { SUPPORTS_DAC; return m_addr == (TADDR)NULL; } static VMPTR_This NullPtr() { diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index af0a97af19c69..55f68a749d995 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -3204,7 +3204,7 @@ JumpStubBlockHeader * EEJitManager::allocJumpStubBlock(MethodDesc* pMD, DWORD n CrstHolder ch(&m_CodeHeapCritSec); mem = (TADDR) allocCodeRaw(&requestInfo, sizeof(CodeHeader), blockSize, CODE_SIZE_ALIGN, &pCodeHeap); - if (mem == NULL) + if (mem == (TADDR)NULL) { _ASSERTE(!throwOnOutOfMemoryWithinRange); RETURN(NULL); @@ -3754,7 +3754,7 @@ static CodeHeader * GetCodeHeaderFromDebugInfoRequest(const DebugInfoRequest & r } CONTRACTL_END; TADDR address = (TADDR) request.GetStartAddress(); - _ASSERTE(address != NULL); + _ASSERTE(address != (TADDR)NULL); CodeHeader * pHeader = dac_cast(address & ~3) - 1; _ASSERTE(pHeader != NULL); @@ -3926,7 +3926,7 @@ BOOL EEJitManager::JitCodeToMethodInfo( return FALSE; TADDR start = dac_cast(pRangeSection->_pjit)->FindMethodCode(pRangeSection, currentPC); - if (start == NULL) + if (start == (TADDR)NULL) return FALSE; CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader)); @@ -3971,7 +3971,7 @@ StubCodeBlockKind EEJitManager::GetStubCodeBlockKind(RangeSection * pRangeSectio } TADDR start = dac_cast(pRangeSection->_pjit)->FindMethodCode(pRangeSection, currentPC); - if (start == NULL) + if (start == (TADDR)NULL) return STUB_CODE_BLOCK_NOCODE; CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader)); return pCHdr->IsStubCodeBlock() ? pCHdr->GetStubCodeBlockKind() : STUB_CODE_BLOCK_MANAGED; @@ -4426,7 +4426,7 @@ ExecutionManager::FindCodeRange(PCODE currentPC, ScanFlag scanFlag) SUPPORTS_DAC; } CONTRACTL_END; - if (currentPC == NULL) + if (currentPC == (PCODE)NULL) return NULL; if (scanFlag == ScanReaderLock) @@ -4464,7 +4464,7 @@ ExecutionManager::FindCodeRangeWithLock(PCODE currentPC) PCODE ExecutionManager::GetCodeStartAddress(PCODE currentPC) { WRAPPER_NO_CONTRACT; - _ASSERTE(currentPC != NULL); + _ASSERTE(currentPC != (PCODE)NULL); EECodeInfo codeInfo(currentPC); if (!codeInfo.IsValid()) @@ -4512,7 +4512,7 @@ BOOL ExecutionManager::IsManagedCode(PCODE currentPC) GC_NOTRIGGER; } CONTRACTL_END; - if (currentPC == NULL) + if (currentPC == (PCODE)NULL) return FALSE; if (GetScanFlags() == ScanReaderLock) @@ -4598,7 +4598,7 @@ BOOL ExecutionManager::IsManagedCodeWorker(PCODE currentPC, RangeSectionLockStat // but on we could also be in a stub, so we check for that // as well and we don't consider stub to be real managed code. TADDR start = dac_cast(pRS->_pjit)->FindMethodCode(pRS, currentPC); - if (start == NULL) + if (start == (TADDR)NULL) return FALSE; CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader)); if (!pCHdr->IsStubCodeBlock()) @@ -5005,7 +5005,7 @@ PCODE ExecutionManager::jumpStub(MethodDesc* pMD, PCODE target, POSTCONDITION((RETVAL != NULL) || !throwOnOutOfMemoryWithinRange); } CONTRACT_END; - PCODE jumpStub = NULL; + PCODE jumpStub = (PCODE)NULL; if (pLoaderAllocator == NULL) { @@ -5055,7 +5055,7 @@ PCODE ExecutionManager::jumpStub(MethodDesc* pMD, PCODE target, { jumpStub = i->m_jumpStub; - _ASSERTE(jumpStub != NULL); + _ASSERTE(jumpStub != (PCODE)NULL); // Is the matching entry with the requested range? 
if (((TADDR)loAddr <= jumpStub) && (jumpStub <= (TADDR)hiAddr)) @@ -5067,10 +5067,10 @@ PCODE ExecutionManager::jumpStub(MethodDesc* pMD, PCODE target, // If we get here we need to create a new jump stub // add or change the jump stub table to point at the new one jumpStub = getNextJumpStub(pMD, target, loAddr, hiAddr, pLoaderAllocator, throwOnOutOfMemoryWithinRange); // this statement can throw - if (jumpStub == NULL) + if (jumpStub == (PCODE)NULL) { _ASSERTE(!throwOnOutOfMemoryWithinRange); - RETURN(NULL); + RETURN((PCODE)NULL); } _ASSERTE(((TADDR)loAddr <= jumpStub) && (jumpStub <= (TADDR)hiAddr)); @@ -5170,7 +5170,7 @@ PCODE ExecutionManager::getNextJumpStub(MethodDesc* pMD, PCODE target, if (curBlock == NULL) { _ASSERTE(!throwOnOutOfMemoryWithinRange); - RETURN(NULL); + RETURN((PCODE)NULL); } curBlockWriterHolder.AssignExecutableWriterHolder(curBlock, sizeof(JumpStubBlockHeader) + ((size_t) (curBlock->m_used + 1) * BACK_TO_BACK_JUMP_ALLOCATE_SIZE)); diff --git a/src/coreclr/vm/dllimportcallback.h b/src/coreclr/vm/dllimportcallback.h index fb2214a8c18d5..3589ecee130ea 100644 --- a/src/coreclr/vm/dllimportcallback.h +++ b/src/coreclr/vm/dllimportcallback.h @@ -185,7 +185,7 @@ class UMEntryThunk uMThunkMarshInfoWriterHolder.GetRW()->RunTimeInit(); // Ensure that we have either the managed target or the delegate. - if (m_pObjectHandle == NULL && m_pManagedTarget == NULL) + if (m_pObjectHandle == NULL && m_pManagedTarget == (TADDR)NULL) m_pManagedTarget = m_pMD->GetMultiCallableAddrOfCode(); m_code.Encode(&pUMEntryThunkRX->m_code, (BYTE*)m_pUMThunkMarshInfo->GetExecStubEntryPoint(), pUMEntryThunkRX); @@ -223,7 +223,7 @@ class UMEntryThunk } else { - if (m_pManagedTarget != NULL) + if (m_pManagedTarget != (TADDR)NULL) { RETURN m_pManagedTarget; } diff --git a/src/coreclr/vm/virtualcallstub.h b/src/coreclr/vm/virtualcallstub.h index e6d89dcf50381..156353b6c1862 100644 --- a/src/coreclr/vm/virtualcallstub.h +++ b/src/coreclr/vm/virtualcallstub.h @@ -282,7 +282,7 @@ class VirtualCallStubManager : public StubManager m_counters(NULL), m_cur_counter_block(NULL), m_cur_counter_block_for_reclaim(NULL), - m_cur_counter_block_for_reclaim_index(NULL), + m_cur_counter_block_for_reclaim_index(0), m_pNext(NULL) { LIMITED_METHOD_CONTRACT; From d49ae0c2a7b44a3463eefee183e386e1d38c9a49 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 11 Mar 2024 13:36:38 -0700 Subject: [PATCH 26/60] Disable the NULL arithmetic and conversion warnings in CoreCLR to avoid making more NULL fixes than the CRT changes. 
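For reference, this is the class of code the suppressions keep compiling (an illustrative sketch; the real occurrences are TADDR/PCODE sites spread across the VM and DAC sources): with the compiler's own NULL, using it as an integer value or comparing it against an integer typedef triggers Clang's -Wnull-conversion and -Wnull-arithmetic, and GCC's counterparts (-Wconversion-null, -Wpointer-arith) show up in the follow-up commits:

    #include <cstdint>
    #include <cstddef>

    typedef uintptr_t PCODE;

    static PCODE PickTarget(PCODE primary, PCODE fallback)
    {
        PCODE target = NULL;       // null constant assigned to a non-pointer type
        if (primary != NULL)       // null constant compared with a non-pointer type
            target = primary;
        else
            target = fallback;
        return target;
    }

    int main()
    {
        return (int)PickTarget(0, 0);
    }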
--- eng/native/configurecompiler.cmake | 2 -- src/coreclr/CMakeLists.txt | 6 ++++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/eng/native/configurecompiler.cmake b/eng/native/configurecompiler.cmake index a4e7b4b007e85..38f80d584bd0d 100644 --- a/eng/native/configurecompiler.cmake +++ b/eng/native/configurecompiler.cmake @@ -26,11 +26,9 @@ if (CLR_CMAKE_HOST_UNIX) add_compile_options(-Wall) if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Wno-null-conversion) - add_compile_options(-Wno-null-arithmetic) add_compile_options(-glldb) else() add_compile_options(-g) - add_compile_options(-Wno-error=conversion-null) endif() endif() diff --git a/src/coreclr/CMakeLists.txt b/src/coreclr/CMakeLists.txt index 9e4405cd73fb5..cf6bc77c3d296 100644 --- a/src/coreclr/CMakeLists.txt +++ b/src/coreclr/CMakeLists.txt @@ -202,6 +202,12 @@ if(CLR_CMAKE_HOST_UNIX) add_subdirectory(debug/createdump) endif(CLR_CMAKE_HOST_OSX OR (CLR_CMAKE_HOST_LINUX AND NOT CLR_CMAKE_HOST_UNIX_X86 AND NOT CLR_CMAKE_HOST_ANDROID)) + # The CoreCLR PAL used to redefine NULL, which caused a number of null conversion and arithmetic + # warnings and errors to be suppressed. + # Suppress these warnings here to avoid breaking the build. + add_compile_options($<$:-Wno-null-arithmetic>) + add_compile_options($<$:-Wno-null-conversion>) + set (NATIVE_RESOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/nativeresources) include_directories(${NATIVE_RESOURCE_DIR}) set (PROCESS_RC_SCRIPT ${NATIVE_RESOURCE_DIR}/processrc.sh) From 75529ac3ba6e352837a40e67a2e2312399a52035 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 11 Mar 2024 13:38:12 -0700 Subject: [PATCH 27/60] One more malloc header fix. --- src/coreclr/inc/contract.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/coreclr/inc/contract.h b/src/coreclr/inc/contract.h index d4376d61da85d..fa4975e70f146 100644 --- a/src/coreclr/inc/contract.h +++ b/src/coreclr/inc/contract.h @@ -233,7 +233,9 @@ #include "specstrings.h" #include "clrtypes.h" -#include "malloc.h" +#if !defined(HOST_APPLE) +#include +#endif #include "check.h" #include "debugreturn.h" #include "staticcontract.h" From 83a4d197855298e575c6b9aae1ff2d194c0a4af6 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 11 Mar 2024 14:23:45 -0700 Subject: [PATCH 28/60] Various build fixes/adjustments --- src/coreclr/debug/ee/stdafx.h | 1 + src/coreclr/inc/daccess.h | 4 ++++ src/coreclr/inc/dacprivate.h | 2 +- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/coreclr/debug/ee/stdafx.h b/src/coreclr/debug/ee/stdafx.h index f81beacb25871..21ef5f0efa329 100644 --- a/src/coreclr/debug/ee/stdafx.h +++ b/src/coreclr/debug/ee/stdafx.h @@ -13,6 +13,7 @@ #include #include #include +#include #include diff --git a/src/coreclr/inc/daccess.h b/src/coreclr/inc/daccess.h index 1df04eef0778b..dc47041c99680 100644 --- a/src/coreclr/inc/daccess.h +++ b/src/coreclr/inc/daccess.h @@ -573,6 +573,10 @@ #include "crosscomp.h" #endif +#if !defined(HOST_WINDOWS) && !defined(NATIVEAOT) +#include +#endif + #include // Information stored in the DAC table of interest to the DAC implementation diff --git a/src/coreclr/inc/dacprivate.h b/src/coreclr/inc/dacprivate.h index e8d0be5aba07e..aa3837cd0eda0 100644 --- a/src/coreclr/inc/dacprivate.h +++ b/src/coreclr/inc/dacprivate.h @@ -467,7 +467,7 @@ struct MSLAYOUT DacpAssemblyData HRESULT Request(ISOSDacInterface *sos, CLRDATA_ADDRESS addr) { - return Request(sos, addr, NULL); + return Request(sos, addr, 0); } }; From 
2d195d1e1cb98ab554ec44a87d1f9926ed9afca5 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 11 Mar 2024 16:15:12 -0700 Subject: [PATCH 29/60] Further build fixes --- src/coreclr/CMakeLists.txt | 1 + src/coreclr/inc/daccess.h | 8 ++++---- src/coreclr/inc/dacprivate.h | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/coreclr/CMakeLists.txt b/src/coreclr/CMakeLists.txt index cf6bc77c3d296..c7e873f5d812a 100644 --- a/src/coreclr/CMakeLists.txt +++ b/src/coreclr/CMakeLists.txt @@ -207,6 +207,7 @@ if(CLR_CMAKE_HOST_UNIX) # Suppress these warnings here to avoid breaking the build. add_compile_options($<$:-Wno-null-arithmetic>) add_compile_options($<$:-Wno-null-conversion>) + add_compile_options($<$:-Wno-conversion-null>) set (NATIVE_RESOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/nativeresources) include_directories(${NATIVE_RESOURCE_DIR}) diff --git a/src/coreclr/inc/daccess.h b/src/coreclr/inc/daccess.h index dc47041c99680..abf4e6b63b800 100644 --- a/src/coreclr/inc/daccess.h +++ b/src/coreclr/inc/daccess.h @@ -561,6 +561,10 @@ #ifndef NATIVEAOT #include +#if !defined(HOST_WINDOWS) && !defined(NATIVEAOT) +#include +#endif + #include "switches.h" #include "safemath.h" #include "corerror.h" @@ -573,10 +577,6 @@ #include "crosscomp.h" #endif -#if !defined(HOST_WINDOWS) && !defined(NATIVEAOT) -#include -#endif - #include // Information stored in the DAC table of interest to the DAC implementation diff --git a/src/coreclr/inc/dacprivate.h b/src/coreclr/inc/dacprivate.h index aa3837cd0eda0..ae91e940ce22f 100644 --- a/src/coreclr/inc/dacprivate.h +++ b/src/coreclr/inc/dacprivate.h @@ -577,7 +577,7 @@ struct MSLAYOUT DacpMethodDescData { return sos->GetMethodDescData( addr, - NULL, // IP address + 0, // IP address this, 0, // cRejitData NULL, // rejitData[] From 110965d49a8821442004fbee2709a74f1eda4d90 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 11 Mar 2024 16:15:54 -0700 Subject: [PATCH 30/60] Apply format patch --- src/coreclr/jit/compiler.hpp | 3 ++- src/coreclr/jit/utils.h | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 0e0f03e11fc4f..46ada7bb5b420 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -2618,7 +2618,8 @@ inline #else int outGoingArgSpaceSize = 0; #endif - varOffset = outGoingArgSpaceSize + max(-varNum * (int)TARGET_POINTER_SIZE, (int)lvaGetMaxSpillTempSize()); + varOffset = + outGoingArgSpaceSize + max(-varNum * (int)TARGET_POINTER_SIZE, (int)lvaGetMaxSpillTempSize()); } else { diff --git a/src/coreclr/jit/utils.h b/src/coreclr/jit/utils.h index cd1c5436e2142..82af11924d9e3 100644 --- a/src/coreclr/jit/utils.h +++ b/src/coreclr/jit/utils.h @@ -160,13 +160,13 @@ int signum(T val) } } -template +template constexpr auto max(T&& t, U&& u) -> decltype(t > u ? t : u) { return t > u ? t : u; } -template +template constexpr auto min(T&& t, U&& u) -> decltype(t < u ? t : u) { return t < u ? 
t : u; From 141f08c5d49d647e650d2a0eec54d270a7becfb4 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 12 Mar 2024 00:19:20 +0000 Subject: [PATCH 31/60] Fix GCC build and continue to work on the linux-x86 build --- src/coreclr/CMakeLists.txt | 6 +++--- src/coreclr/utilcode/md5.cpp | 2 +- src/coreclr/vm/codeman.cpp | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/coreclr/CMakeLists.txt b/src/coreclr/CMakeLists.txt index c7e873f5d812a..96fa4f8408390 100644 --- a/src/coreclr/CMakeLists.txt +++ b/src/coreclr/CMakeLists.txt @@ -205,9 +205,9 @@ if(CLR_CMAKE_HOST_UNIX) # The CoreCLR PAL used to redefine NULL, which caused a number of null conversion and arithmetic # warnings and errors to be suppressed. # Suppress these warnings here to avoid breaking the build. - add_compile_options($<$:-Wno-null-arithmetic>) - add_compile_options($<$:-Wno-null-conversion>) - add_compile_options($<$:-Wno-conversion-null>) + add_compile_options($<$:-Wno-null-arithmetic>) + add_compile_options($<$:-Wno-conversion-null>) + add_compile_options($<$:-Wno-pointer-arith>) set (NATIVE_RESOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/nativeresources) include_directories(${NATIVE_RESOURCE_DIR}) diff --git a/src/coreclr/utilcode/md5.cpp b/src/coreclr/utilcode/md5.cpp index 7297114f21feb..cc86a48bedc2c 100644 --- a/src/coreclr/utilcode/md5.cpp +++ b/src/coreclr/utilcode/md5.cpp @@ -141,7 +141,7 @@ void MD5::GetHashValue(MD5HASHDATA* phash) // // but our compiler has an intrinsic! - #if (defined(HOST_X86) || defined(HOST_ARM)) && defined(TARGET_UNIX) + #if (defined(HOST_X86) || defined(HOST_ARM) || !defined(__clang__)) && defined(TARGET_UNIX) #define ROL(x, n) (((x) << (n)) | ((x) >> (32-(n)))) #define ROTATE_LEFT(x,n) (x) = ROL(x,n) #else diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index 55f68a749d995..3e1536dbf92c1 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -2828,7 +2828,7 @@ void EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, size_t reserveFo // the JIT can in turn 8-byte align the loop entry headers. 
else if ((g_pConfig->GenOptimizeType() != OPT_SIZE)) { - alignment = max(alignment, 8); + alignment = max(alignment, 8u); } #endif From ab910e60313e15ae023d91464e08ac70003e060d Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 12 Mar 2024 13:58:50 -0700 Subject: [PATCH 32/60] Ensure that we're doing an unsigned compare with the result of unsigned abs, not converting back to a signed compare (which would produce the incorrect result for SSIZE_T_MIN) --- src/coreclr/jit/emitarm64.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/coreclr/jit/emitarm64.cpp b/src/coreclr/jit/emitarm64.cpp index 216f17be2a427..78ce8ed21b39a 100644 --- a/src/coreclr/jit/emitarm64.cpp +++ b/src/coreclr/jit/emitarm64.cpp @@ -5857,7 +5857,7 @@ emitter::code_t emitter::emitInsCodeSve(instruction ins, insFormat fmt) // true if this 'imm' can be encoded as a input operand to an add instruction /*static*/ bool emitter::emitIns_valid_imm_for_add(INT64 imm, emitAttr size) { - if (unsigned_abs(imm) <= 0x0fff) + if (unsigned_abs(imm) <= 0x0fffULL) return true; else if (canEncodeWithShiftImmBy12(imm)) // Try the shifted by 12 encoding return true; @@ -7889,7 +7889,7 @@ void emitter::emitIns_R_I(instruction ins, assert(insOptsNone(opt)); assert(isGeneralRegister(reg)); - if (unsigned_abs(imm) <= 0x0fff) + if (unsigned_abs(imm) <= 0x0fffULL) { if (imm < 0) { @@ -10533,7 +10533,7 @@ void emitter::emitIns_R_R_I(instruction ins, reg2 = encodingSPtoZR(reg2); } - if (unsigned_abs(imm) <= 0x0fff) + if (unsigned_abs(imm) <= 0x0fffULL) { if (imm < 0) { From 2ae316073ba2671e6dc5349b35d4d061217785b6 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 12 Mar 2024 14:43:04 -0700 Subject: [PATCH 33/60] Remove malloc.h include except for where we're pulling it in for _alloca --- src/coreclr/inc/allocacheck.h | 11 +++++++++-- src/coreclr/inc/contract.h | 3 --- src/coreclr/inc/crtwrap.h | 3 --- src/coreclr/inc/utilcode.h | 3 --- src/coreclr/pal/inc/pal.h | 3 --- src/coreclr/tools/StressLogAnalyzer/StressLogDump.cpp | 4 ++++ src/coreclr/tools/StressLogAnalyzer/util.h | 3 --- .../tools/superpmi/superpmi-shared/standardpch.h | 3 --- 8 files changed, 13 insertions(+), 20 deletions(-) diff --git a/src/coreclr/inc/allocacheck.h b/src/coreclr/inc/allocacheck.h index 7dba90e29a905..1c4f0a5849713 100644 --- a/src/coreclr/inc/allocacheck.h +++ b/src/coreclr/inc/allocacheck.h @@ -23,9 +23,16 @@ #ifndef AllocaCheck_h #define AllocaCheck_h -#if !defined(HOST_APPLE) + +#if defined(HOST_WINDOWS) #include // for alloca itself -#endif +#else +#if defined(__has_include) +#if __has_include() +#include +#endif // __has_include(alloca.h) +#endif // defined(__has_include) +#endif // defined(HOST_WINDOWS) #if defined(assert) && !defined(_ASSERTE) #define _ASSERTE assert diff --git a/src/coreclr/inc/contract.h b/src/coreclr/inc/contract.h index fa4975e70f146..257ac41aebba9 100644 --- a/src/coreclr/inc/contract.h +++ b/src/coreclr/inc/contract.h @@ -233,9 +233,6 @@ #include "specstrings.h" #include "clrtypes.h" -#if !defined(HOST_APPLE) -#include -#endif #include "check.h" #include "debugreturn.h" #include "staticcontract.h" diff --git a/src/coreclr/inc/crtwrap.h b/src/coreclr/inc/crtwrap.h index 8dd43d6a09d18..59b68d7d46694 100644 --- a/src/coreclr/inc/crtwrap.h +++ b/src/coreclr/inc/crtwrap.h @@ -16,9 +16,6 @@ #include #include "debugmacros.h" #include -#if !defined(HOST_APPLE) -#include -#endif #include #include diff --git a/src/coreclr/inc/utilcode.h b/src/coreclr/inc/utilcode.h index 61b5952820be5..a32e6a3c83ab6 
100644 --- a/src/coreclr/inc/utilcode.h +++ b/src/coreclr/inc/utilcode.h @@ -14,9 +14,6 @@ #include "winwrap.h" #include #include -#if !defined(HOST_APPLE) -#include -#endif #include #include #include diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index e831f1b0e2461..3c50163ae8d31 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -49,9 +49,6 @@ Module Name: #include #include #include -#if !defined(HOST_APPLE) -#include -#endif #ifdef __cplusplus extern "C++" diff --git a/src/coreclr/tools/StressLogAnalyzer/StressLogDump.cpp b/src/coreclr/tools/StressLogAnalyzer/StressLogDump.cpp index 49bf662c21dde..eadc27e2557c2 100644 --- a/src/coreclr/tools/StressLogAnalyzer/StressLogDump.cpp +++ b/src/coreclr/tools/StressLogAnalyzer/StressLogDump.cpp @@ -24,6 +24,10 @@ class MapViewHolder #include "../../../inc/stresslog.h" #include "StressMsgReader.h" +#ifdef HOST_WINDOWS +#include +#endif + void GcHistClear(); void GcHistAddLog(LPCSTR msg, StressMsgReader stressMsg); diff --git a/src/coreclr/tools/StressLogAnalyzer/util.h b/src/coreclr/tools/StressLogAnalyzer/util.h index 7d289e9121559..752509277a770 100644 --- a/src/coreclr/tools/StressLogAnalyzer/util.h +++ b/src/coreclr/tools/StressLogAnalyzer/util.h @@ -12,9 +12,6 @@ typedef void* CRITSEC_COOKIE; #define STRESS_LOG_ANALYZER -#if !defined(HOST_APPLE) -#include -#endif #include "staticcontract.h" // This macro is used to standardize the wide character string literals between UNIX and Windows. diff --git a/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h b/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h index 1dd5f18b20be1..3d12965de4f8f 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h +++ b/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h @@ -51,9 +51,6 @@ #include #include #include -#if !defined(HOST_APPLE) -#include -#endif #include #include #include From 2e45f861376b4bdd17f59b8a7b40f36f7f8e3cc6 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 12 Mar 2024 14:46:08 -0700 Subject: [PATCH 34/60] Undo unsigned largestAlignmentRequirement --- src/coreclr/vm/methodtablebuilder.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/coreclr/vm/methodtablebuilder.cpp b/src/coreclr/vm/methodtablebuilder.cpp index ab8980f76d91b..cfcb1bb3e78c4 100644 --- a/src/coreclr/vm/methodtablebuilder.cpp +++ b/src/coreclr/vm/methodtablebuilder.cpp @@ -8329,7 +8329,7 @@ VOID MethodTableBuilder::PlaceInstanceFields(MethodTable ** pByValueClassCach // Place by value class fields last // Update the number of GC pointer series // Calculate largest alignment requirement - unsigned int largestAlignmentRequirement = 1; + int largestAlignmentRequirement = 1; for (i = 0; i < bmtEnumFields->dwNumInstanceFields; i++) { if (pFieldDescList[i].IsByValue()) @@ -8340,14 +8340,14 @@ VOID MethodTableBuilder::PlaceInstanceFields(MethodTable ** pByValueClassCach if (pByValueMT->GetNumInstanceFieldBytes() >= DATA_ALIGNMENT) { dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, DATA_ALIGNMENT); - largestAlignmentRequirement = max(largestAlignmentRequirement, (unsigned int)DATA_ALIGNMENT); + largestAlignmentRequirement = max(largestAlignmentRequirement, DATA_ALIGNMENT); } else #elif defined(FEATURE_64BIT_ALIGNMENT) if (pByValueMT->RequiresAlign8()) { dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, 8); - largestAlignmentRequirement = max(largestAlignmentRequirement, 8u); + largestAlignmentRequirement = 
max(largestAlignmentRequirement, 8); } else #endif // FEATURE_64BIT_ALIGNMENT @@ -8356,13 +8356,13 @@ VOID MethodTableBuilder::PlaceInstanceFields(MethodTable ** pByValueClassCach // this field type has GC pointers in it, which need to be pointer-size aligned // so do this if it has not been done already dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, TARGET_POINTER_SIZE); - largestAlignmentRequirement = max(largestAlignmentRequirement, (unsigned int)TARGET_POINTER_SIZE); + largestAlignmentRequirement = max(largestAlignmentRequirement, TARGET_POINTER_SIZE); containsGCPointers = true; } else { int fieldAlignmentRequirement = pByValueMT->GetFieldAlignmentRequirement(); - largestAlignmentRequirement = max(largestAlignmentRequirement, (unsigned int)fieldAlignmentRequirement); + largestAlignmentRequirement = max(largestAlignmentRequirement, fieldAlignmentRequirement); dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, fieldAlignmentRequirement); } @@ -8385,7 +8385,7 @@ VOID MethodTableBuilder::PlaceInstanceFields(MethodTable ** pByValueClassCach // non-value-type fields always require pointer alignment // This does not account for types that are marked IsAlign8Candidate due to 8-byte fields // but that is explicitly handled when we calculate the final alignment for the type. - largestAlignmentRequirement = max(largestAlignmentRequirement, (unsigned int)TARGET_POINTER_SIZE); + largestAlignmentRequirement = max(largestAlignmentRequirement, TARGET_POINTER_SIZE); if (!pFieldDescList[i].IsObjRef()) { @@ -8419,7 +8419,7 @@ VOID MethodTableBuilder::PlaceInstanceFields(MethodTable ** pByValueClassCach else #endif // FEATURE_64BIT_ALIGNMENT if (dwNumInstanceFieldBytes > TARGET_POINTER_SIZE) { - minAlign = containsGCPointers ? TARGET_POINTER_SIZE : (unsigned)largestAlignmentRequirement; + minAlign = (unsigned)(containsGCPointers ? TARGET_POINTER_SIZE : largestAlignmentRequirement); } else { minAlign = 1; From a11db1d561cb3d9db402a3b63fa924598502c780 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 12 Mar 2024 16:17:46 -0700 Subject: [PATCH 35/60] Explicitly use the C++ abs signature as we did in the rest of the jit --- src/coreclr/jit/jit.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/coreclr/jit/jit.h b/src/coreclr/jit/jit.h index 1df8c034d0c1d..2c09afc6e1d8c 100644 --- a/src/coreclr/jit/jit.h +++ b/src/coreclr/jit/jit.h @@ -697,19 +697,19 @@ inline unsigned int roundUp(unsigned size, unsigned mult) inline unsigned int unsigned_abs(int x) { - return ((unsigned int)abs(x)); + return ((unsigned int)std::abs(x)); } #ifdef TARGET_64BIT inline size_t unsigned_abs(ssize_t x) { - return ((size_t)abs((__int64)x)); + return ((size_t)std::abs((__int64)x)); } #ifdef __APPLE__ inline size_t unsigned_abs(__int64 x) { - return ((size_t)abs(x)); + return ((size_t)std::abs(x)); } #endif // __APPLE__ #endif // TARGET_64BIT From cf9efb274fac6c45e74481618e8525f4b1aa7403 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Wed, 13 Mar 2024 10:49:18 -0700 Subject: [PATCH 36/60] Remove constant manipulation as that wasn't the problem and add missing include for Windows debug build. 
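A sketch of why the ULL suffixes from the earlier emitarm64 change are being dropped here (illustrative, not part of the diff): unsigned_abs already yields an unsigned 64-bit value, and the usual arithmetic conversions promote the small int literal to that unsigned type before the comparison, so the test was never performed in the signed domain in the first place:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // unsigned_abs of the most negative 64-bit value
        uint64_t magnitude = 0x8000000000000000ull;

        // The literal 0x0fff has type int, but it is converted to uint64_t
        // before the comparison, so both spellings behave identically.
        std::printf("%d %d\n", magnitude <= 0x0fff ? 1 : 0,
                               magnitude <= 0x0fffULL ? 1 : 0);   // prints: 0 0
        return 0;
    }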
--- src/coreclr/jit/compiler.cpp | 3 + src/coreclr/jit/emitarm64.cpp | 154 +++++++++++++++++----------------- 2 files changed, 80 insertions(+), 77 deletions(-) diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index bdfe28dfbf813..0d93d7f5c8fd7 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -22,6 +22,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #include "stacklevelsetter.h" #include "patchpointinfo.h" #include "jitstd/algorithm.h" +#ifdef HOST_WINDOWS +#include +#endif extern ICorJitHost* g_jitHost; diff --git a/src/coreclr/jit/emitarm64.cpp b/src/coreclr/jit/emitarm64.cpp index 984a700abaa11..b285f31eccc96 100644 --- a/src/coreclr/jit/emitarm64.cpp +++ b/src/coreclr/jit/emitarm64.cpp @@ -4424,7 +4424,7 @@ emitter::code_t emitter::emitInsCodeSve(instruction ins, insFormat fmt) const static code_t insCodes2[] = { - #define INST1(id, nm, info, fmt, e1 ) + #define INST1(id, nm, info, fmt, e1 ) #define INST2(id, nm, info, fmt, e1, e2 ) e2, #define INST3(id, nm, info, fmt, e1, e2, e3 ) e2, #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) e2, @@ -4440,8 +4440,8 @@ emitter::code_t emitter::emitInsCodeSve(instruction ins, insFormat fmt) const static code_t insCodes3[] = { - #define INST1(id, nm, info, fmt, e1 ) - #define INST2(id, nm, info, fmt, e1, e2 ) + #define INST1(id, nm, info, fmt, e1 ) + #define INST2(id, nm, info, fmt, e1, e2 ) #define INST3(id, nm, info, fmt, e1, e2, e3 ) e3, #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) e3, #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) e3, @@ -4456,9 +4456,9 @@ emitter::code_t emitter::emitInsCodeSve(instruction ins, insFormat fmt) const static code_t insCodes4[] = { - #define INST1(id, nm, info, fmt, e1 ) - #define INST2(id, nm, info, fmt, e1, e2 ) - #define INST3(id, nm, info, fmt, e1, e2, e3 ) + #define INST1(id, nm, info, fmt, e1 ) + #define INST2(id, nm, info, fmt, e1, e2 ) + #define INST3(id, nm, info, fmt, e1, e2, e3 ) #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) e4, #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) e4, #define INST6(id, nm, info, fmt, e1, e2, e3, e4, e5, e6 ) e4, @@ -4472,10 +4472,10 @@ emitter::code_t emitter::emitInsCodeSve(instruction ins, insFormat fmt) const static code_t insCodes5[] = { - #define INST1(id, nm, info, fmt, e1 ) - #define INST2(id, nm, info, fmt, e1, e2 ) - #define INST3(id, nm, info, fmt, e1, e2, e3 ) - #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) + #define INST1(id, nm, info, fmt, e1 ) + #define INST2(id, nm, info, fmt, e1, e2 ) + #define INST3(id, nm, info, fmt, e1, e2, e3 ) + #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) e5, #define INST6(id, nm, info, fmt, e1, e2, e3, e4, e5, e6 ) e5, #define INST7(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7 ) e5, @@ -4488,11 +4488,11 @@ emitter::code_t emitter::emitInsCodeSve(instruction ins, insFormat fmt) const static code_t insCodes6[] = { - #define INST1(id, nm, info, fmt, e1 ) - #define INST2(id, nm, info, fmt, e1, e2 ) - #define INST3(id, nm, info, fmt, e1, e2, e3 ) - #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) - #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) + #define INST1(id, nm, info, fmt, e1 ) + #define INST2(id, nm, info, fmt, e1, e2 ) + #define INST3(id, nm, info, fmt, e1, e2, e3 ) + #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) + #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) #define INST6(id, nm, info, fmt, e1, e2, e3, e4, e5, e6 ) e6, #define 
INST7(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7 ) e6, #define INST8(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8 ) e6, @@ -4504,12 +4504,12 @@ emitter::code_t emitter::emitInsCodeSve(instruction ins, insFormat fmt) const static code_t insCodes7[] = { - #define INST1(id, nm, info, fmt, e1 ) - #define INST2(id, nm, info, fmt, e1, e2 ) - #define INST3(id, nm, info, fmt, e1, e2, e3 ) - #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) - #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) - #define INST6(id, nm, info, fmt, e1, e2, e3, e4, e5, e6 ) + #define INST1(id, nm, info, fmt, e1 ) + #define INST2(id, nm, info, fmt, e1, e2 ) + #define INST3(id, nm, info, fmt, e1, e2, e3 ) + #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) + #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) + #define INST6(id, nm, info, fmt, e1, e2, e3, e4, e5, e6 ) #define INST7(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7 ) e7, #define INST8(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8 ) e7, #define INST9(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9 ) e7, @@ -4520,13 +4520,13 @@ emitter::code_t emitter::emitInsCodeSve(instruction ins, insFormat fmt) const static code_t insCodes8[] = { - #define INST1(id, nm, info, fmt, e1 ) - #define INST2(id, nm, info, fmt, e1, e2 ) - #define INST3(id, nm, info, fmt, e1, e2, e3 ) - #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) - #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) - #define INST6(id, nm, info, fmt, e1, e2, e3, e4, e5, e6 ) - #define INST7(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7 ) + #define INST1(id, nm, info, fmt, e1 ) + #define INST2(id, nm, info, fmt, e1, e2 ) + #define INST3(id, nm, info, fmt, e1, e2, e3 ) + #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) + #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) + #define INST6(id, nm, info, fmt, e1, e2, e3, e4, e5, e6 ) + #define INST7(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7 ) #define INST8(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8 ) e8, #define INST9(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9 ) e8, #define INST11(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11 ) e8, @@ -4536,14 +4536,14 @@ emitter::code_t emitter::emitInsCodeSve(instruction ins, insFormat fmt) const static code_t insCodes9[] = { - #define INST1(id, nm, info, fmt, e1 ) - #define INST2(id, nm, info, fmt, e1, e2 ) - #define INST3(id, nm, info, fmt, e1, e2, e3 ) - #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) - #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) - #define INST6(id, nm, info, fmt, e1, e2, e3, e4, e5, e6 ) - #define INST7(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7 ) - #define INST8(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8 ) + #define INST1(id, nm, info, fmt, e1 ) + #define INST2(id, nm, info, fmt, e1, e2 ) + #define INST3(id, nm, info, fmt, e1, e2, e3 ) + #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) + #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) + #define INST6(id, nm, info, fmt, e1, e2, e3, e4, e5, e6 ) + #define INST7(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7 ) + #define INST8(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8 ) #define INST9(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9 ) e9, #define INST11(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11 ) e9, #define INST13(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13 ) e9, @@ -4552,15 +4552,15 @@ emitter::code_t emitter::emitInsCodeSve(instruction ins, insFormat fmt) const static code_t insCodes10[] = { - #define 
INST1(id, nm, info, fmt, e1 ) - #define INST2(id, nm, info, fmt, e1, e2 ) - #define INST3(id, nm, info, fmt, e1, e2, e3 ) - #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) - #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) - #define INST6(id, nm, info, fmt, e1, e2, e3, e4, e5, e6 ) - #define INST7(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7 ) - #define INST8(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8 ) - #define INST9(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9 ) + #define INST1(id, nm, info, fmt, e1 ) + #define INST2(id, nm, info, fmt, e1, e2 ) + #define INST3(id, nm, info, fmt, e1, e2, e3 ) + #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) + #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) + #define INST6(id, nm, info, fmt, e1, e2, e3, e4, e5, e6 ) + #define INST7(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7 ) + #define INST8(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8 ) + #define INST9(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9 ) #define INST11(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11 ) e10, #define INST13(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13 ) e10, #include "instrsarm64sve.h" @@ -4568,15 +4568,15 @@ emitter::code_t emitter::emitInsCodeSve(instruction ins, insFormat fmt) const static code_t insCodes11[] = { - #define INST1(id, nm, info, fmt, e1 ) - #define INST2(id, nm, info, fmt, e1, e2 ) - #define INST3(id, nm, info, fmt, e1, e2, e3 ) - #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) - #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) - #define INST6(id, nm, info, fmt, e1, e2, e3, e4, e5, e6 ) - #define INST7(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7 ) - #define INST8(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8 ) - #define INST9(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9 ) + #define INST1(id, nm, info, fmt, e1 ) + #define INST2(id, nm, info, fmt, e1, e2 ) + #define INST3(id, nm, info, fmt, e1, e2, e3 ) + #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) + #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) + #define INST6(id, nm, info, fmt, e1, e2, e3, e4, e5, e6 ) + #define INST7(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7 ) + #define INST8(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8 ) + #define INST9(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9 ) #define INST11(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11 ) e11, #define INST13(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13 ) e11, #include "instrsarm64sve.h" @@ -4584,32 +4584,32 @@ emitter::code_t emitter::emitInsCodeSve(instruction ins, insFormat fmt) const static code_t insCodes12[] = { - #define INST1(id, nm, info, fmt, e1 ) - #define INST2(id, nm, info, fmt, e1, e2 ) - #define INST3(id, nm, info, fmt, e1, e2, e3 ) - #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) - #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) - #define INST6(id, nm, info, fmt, e1, e2, e3, e4, e5, e6 ) - #define INST7(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7 ) - #define INST8(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8 ) - #define INST9(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9 ) - #define INST11(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11 ) + #define INST1(id, nm, info, fmt, e1 ) + #define INST2(id, nm, info, fmt, e1, e2 ) + #define INST3(id, nm, info, fmt, e1, e2, e3 ) + #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) + #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) + #define INST6(id, nm, info, fmt, 
e1, e2, e3, e4, e5, e6 ) + #define INST7(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7 ) + #define INST8(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8 ) + #define INST9(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9 ) + #define INST11(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11 ) #define INST13(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13 ) e12, #include "instrsarm64sve.h" }; const static code_t insCodes13[] = { - #define INST1(id, nm, info, fmt, e1 ) - #define INST2(id, nm, info, fmt, e1, e2 ) - #define INST3(id, nm, info, fmt, e1, e2, e3 ) - #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) - #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) - #define INST6(id, nm, info, fmt, e1, e2, e3, e4, e5, e6 ) - #define INST7(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7 ) - #define INST8(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8 ) - #define INST9(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9 ) - #define INST11(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11 ) + #define INST1(id, nm, info, fmt, e1 ) + #define INST2(id, nm, info, fmt, e1, e2 ) + #define INST3(id, nm, info, fmt, e1, e2, e3 ) + #define INST4(id, nm, info, fmt, e1, e2, e3, e4 ) + #define INST5(id, nm, info, fmt, e1, e2, e3, e4, e5 ) + #define INST6(id, nm, info, fmt, e1, e2, e3, e4, e5, e6 ) + #define INST7(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7 ) + #define INST8(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8 ) + #define INST9(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9 ) + #define INST11(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11 ) #define INST13(id, nm, info, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13 ) e13, #include "instrsarm64sve.h" }; @@ -5897,7 +5897,7 @@ emitter::code_t emitter::emitInsCodeSve(instruction ins, insFormat fmt) // true if this 'imm' can be encoded as a input operand to an add instruction /*static*/ bool emitter::emitIns_valid_imm_for_add(INT64 imm, emitAttr size) { - if (unsigned_abs(imm) <= 0x0fffULL) + if (unsigned_abs(imm) <= 0x0fff) return true; else if (canEncodeWithShiftImmBy12(imm)) // Try the shifted by 12 encoding return true; @@ -7988,7 +7988,7 @@ void emitter::emitIns_R_I(instruction ins, assert(insOptsNone(opt)); assert(isGeneralRegister(reg)); - if (unsigned_abs(imm) <= 0x0fffULL) + if (unsigned_abs(imm) <= 0x0fff) { if (imm < 0) { @@ -10786,7 +10786,7 @@ void emitter::emitIns_R_R_I(instruction ins, reg2 = encodingSPtoZR(reg2); } - if (unsigned_abs(imm) <= 0x0fffULL) + if (unsigned_abs(imm) <= 0x0fff) { if (imm < 0) { From eb0180d679b829e055af78fb2dd5f707d11159b8 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Wed, 13 Mar 2024 11:42:58 -0700 Subject: [PATCH 37/60] Include malloc.h on windows for the whole JIT --- src/coreclr/jit/compiler.cpp | 3 --- src/coreclr/jit/jitpch.h | 3 +++ 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index 0d93d7f5c8fd7..bdfe28dfbf813 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -22,9 +22,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #include "stacklevelsetter.h" #include "patchpointinfo.h" #include "jitstd/algorithm.h" -#ifdef HOST_WINDOWS -#include -#endif extern ICorJitHost* g_jitHost; diff --git a/src/coreclr/jit/jitpch.h b/src/coreclr/jit/jitpch.h index af86bdd6d8c9c..001c3fa69b248 100644 --- a/src/coreclr/jit/jitpch.h +++ b/src/coreclr/jit/jitpch.h @@ -14,6 
+14,9 @@ #include #include #include +#ifdef HOST_WINDOWS +#include +#endif // Don't allow using the windows.h #defines for the BitScan* APIs. Using the #defines means our // `BitOperations::BitScan*` functions have their name mapped, which is confusing and messes up From fb30172dfc51fd3331791a7a741c40515b18a3c5 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Thu, 14 Mar 2024 11:14:08 -0700 Subject: [PATCH 38/60] Fix PAL tests and update pal test script's usage information. --- .../pal/tests/palsuite/c_runtime/wcstoul/test5/test5.cpp | 8 ++++---- .../palsuite/debug_api/OutputDebugStringA/test1/test1.cpp | 3 --- src/coreclr/pal/tests/palsuite/runpaltests.sh | 2 +- .../threading/CriticalSectionFunctions/test8/test8.cpp | 1 + .../threading/WaitForMultipleObjectsEx/test6/test6.cpp | 1 + 5 files changed, 7 insertions(+), 8 deletions(-) diff --git a/src/coreclr/pal/tests/palsuite/c_runtime/wcstoul/test5/test5.cpp b/src/coreclr/pal/tests/palsuite/c_runtime/wcstoul/test5/test5.cpp index 2ffab4b9de051..428a5f24caa66 100644 --- a/src/coreclr/pal/tests/palsuite/c_runtime/wcstoul/test5/test5.cpp +++ b/src/coreclr/pal/tests/palsuite/c_runtime/wcstoul/test5/test5.cpp @@ -32,9 +32,9 @@ PALTEST(c_runtime_wcstoul_test5_paltest_wcstoul_test5, "c_runtime/wcstoul/test5/ errno = 0; l = wcstoul(overstr, &end, 10); - if (l != _UI32_MAX) + if (l != UINT32_MAX) { - Fail("ERROR: Expected wcstoul to return %u, got %u\n", _UI32_MAX, l); + Fail("ERROR: Expected wcstoul to return %u, got %u\n", UINT32_MAX, l); } if (end != overstr + 10) { @@ -49,9 +49,9 @@ PALTEST(c_runtime_wcstoul_test5_paltest_wcstoul_test5, "c_runtime/wcstoul/test5/ errno = 0; l = wcstoul(understr, &end, 10); - if (l != _UI32_MAX) + if (l != UINT32_MAX) { - Fail("ERROR: Expected wcstoul to return %u, got %u\n", _UI32_MAX, l); + Fail("ERROR: Expected wcstoul to return %u, got %u\n", UINT32_MAX, l); } if (end != understr + 2) { diff --git a/src/coreclr/pal/tests/palsuite/debug_api/OutputDebugStringA/test1/test1.cpp b/src/coreclr/pal/tests/palsuite/debug_api/OutputDebugStringA/test1/test1.cpp index 98f0a1b95b0a3..a8f55d7f9c04c 100644 --- a/src/coreclr/pal/tests/palsuite/debug_api/OutputDebugStringA/test1/test1.cpp +++ b/src/coreclr/pal/tests/palsuite/debug_api/OutputDebugStringA/test1/test1.cpp @@ -44,13 +44,10 @@ PALTEST(debug_api_OutputDebugStringA_test1_paltest_outputdebugstringa_test1, "de FALSE, 0, NULL, NULL, &si, &pi)) { DWORD dwError = GetLastError(); - free(name); Fail("ERROR: CreateProcess failed to load executable 'helper'. " "GetLastError() returned %d.\n", dwError); } - free(name); - /* This is the main loop. It exits when the process which is being debugged is finished executing. 
*/ diff --git a/src/coreclr/pal/tests/palsuite/runpaltests.sh b/src/coreclr/pal/tests/palsuite/runpaltests.sh index c10930e2acc39..39c492212bc88 100755 --- a/src/coreclr/pal/tests/palsuite/runpaltests.sh +++ b/src/coreclr/pal/tests/palsuite/runpaltests.sh @@ -9,7 +9,7 @@ then echo "runpaltests.sh [] []" echo echo "For example:" - echo "runpaltests.sh /projectk/build/debug" + echo "runpaltests.sh artifacts/bin/coreclr/linux.x64.Debug/paltests/" echo exit 1 fi diff --git a/src/coreclr/pal/tests/palsuite/threading/CriticalSectionFunctions/test8/test8.cpp b/src/coreclr/pal/tests/palsuite/threading/CriticalSectionFunctions/test8/test8.cpp index 24f22afa456ce..8081b69109a9a 100644 --- a/src/coreclr/pal/tests/palsuite/threading/CriticalSectionFunctions/test8/test8.cpp +++ b/src/coreclr/pal/tests/palsuite/threading/CriticalSectionFunctions/test8/test8.cpp @@ -12,6 +12,7 @@ ** **===================================================================*/ #include +#include #define MAX_THREAD_COUNT 128 #define DEFAULT_THREAD_COUNT 10 diff --git a/src/coreclr/pal/tests/palsuite/threading/WaitForMultipleObjectsEx/test6/test6.cpp b/src/coreclr/pal/tests/palsuite/threading/WaitForMultipleObjectsEx/test6/test6.cpp index 80ecbaa2016c0..6ac838bd24b04 100644 --- a/src/coreclr/pal/tests/palsuite/threading/WaitForMultipleObjectsEx/test6/test6.cpp +++ b/src/coreclr/pal/tests/palsuite/threading/WaitForMultipleObjectsEx/test6/test6.cpp @@ -12,6 +12,7 @@ **=========================================================*/ #include +#include #define MAX_COUNT 10000 #define MAX_THREADS 256 From 93c34c40e2c473ba4ebfd8c17bc5b87f832da4da Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Thu, 14 Mar 2024 11:45:22 -0700 Subject: [PATCH 39/60] Remove PAL_STDCPP_COMPAT and remove min/max macro in the GC space and use the better-inference implementation from the JIT in the GC as well. 
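For reviewers, the macro replacement uses the same two-type-parameter helper shape
the JIT already relies on. The sketch below restates the definitions added to
gc.cpp in this patch (they move to gcpriv.h later in the series); the
<typename T, typename U> parameter lists are filled in explicitly here. The return
type is deduced from the conditional expression, so operands of different types
still compile; later patches in the series add explicit casts at call sites where
signed/unsigned or differing-width comparisons would otherwise produce warnings
that the build treats as errors.

    #include <stddef.h>

    template <typename T, typename U>
    auto max(T&& t, U&& u) -> decltype(t > u ? t : u)
    {
        return t > u ? t : u;
    }

    template <typename T, typename U>
    auto min(T&& t, U&& u) -> decltype(t < u ? t : u)
    {
        return t < u ? t : u;
    }

    int main()
    {
        size_t bytes = 100;
        // The (size_t) cast mirrors the pattern used at call sites later in
        // the series to silence signed/unsigned comparison warnings.
        return min(bytes, (size_t)4096) == bytes ? 0 : 1;
    }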
--- src/coreclr/debug/createdump/CMakeLists.txt | 2 - src/coreclr/debug/dbgutil/CMakeLists.txt | 2 - src/coreclr/debug/debug-pal/CMakeLists.txt | 2 - src/coreclr/gc/env/gcenv.base.h | 8 --- src/coreclr/gc/gc.cpp | 18 ++++--- src/coreclr/gc/vxsort/defs.h | 2 - src/coreclr/inc/daccess.h | 4 +- src/coreclr/nativeaot/Runtime/CMakeLists.txt | 1 - src/coreclr/nativeaot/Runtime/threadstore.cpp | 2 +- src/coreclr/pal/inc/pal_mstypes.h | 52 ++----------------- src/coreclr/pal/inc/rt/sal.h | 4 -- src/coreclr/pal/inc/rt/specstrings.h | 2 - src/coreclr/pal/inc/rt/specstrings_undef.h | 2 - .../dummyprovider/CMakeLists.txt | 1 - .../lttngprovider/CMakeLists.txt | 1 - src/coreclr/vm/vars.hpp | 40 -------------- 16 files changed, 17 insertions(+), 126 deletions(-) diff --git a/src/coreclr/debug/createdump/CMakeLists.txt b/src/coreclr/debug/createdump/CMakeLists.txt index 71e5b78b08e52..3c72b8a0fa42b 100644 --- a/src/coreclr/debug/createdump/CMakeLists.txt +++ b/src/coreclr/debug/createdump/CMakeLists.txt @@ -56,8 +56,6 @@ else(CLR_CMAKE_HOST_WIN32) endif(CLR_CMAKE_HOST_OSX) endif (CORECLR_SET_RPATH) - add_definitions(-DPAL_STDCPP_COMPAT) - # This is so we can include "version.c" include_directories(${CMAKE_BINARY_DIR}) diff --git a/src/coreclr/debug/dbgutil/CMakeLists.txt b/src/coreclr/debug/dbgutil/CMakeLists.txt index 2d8e02b07fc7f..0ad223630a583 100644 --- a/src/coreclr/debug/dbgutil/CMakeLists.txt +++ b/src/coreclr/debug/dbgutil/CMakeLists.txt @@ -9,8 +9,6 @@ if(CLR_CMAKE_HOST_WIN32 OR CLR_CMAKE_HOST_OSX) include_directories(${CLR_DIR}/inc/llvm) endif(CLR_CMAKE_HOST_WIN32 OR CLR_CMAKE_HOST_OSX) -add_definitions(-DPAL_STDCPP_COMPAT) - if(CLR_CMAKE_TARGET_LINUX_MUSL) add_definitions(-DTARGET_LINUX_MUSL) endif(CLR_CMAKE_TARGET_LINUX_MUSL) diff --git a/src/coreclr/debug/debug-pal/CMakeLists.txt b/src/coreclr/debug/debug-pal/CMakeLists.txt index baa11c163dffe..adc8efacab4ae 100644 --- a/src/coreclr/debug/debug-pal/CMakeLists.txt +++ b/src/coreclr/debug/debug-pal/CMakeLists.txt @@ -2,8 +2,6 @@ include_directories(../inc) include_directories(../../pal/inc) include_directories(${EP_GENERATED_HEADER_PATH}) -add_definitions(-DPAL_STDCPP_COMPAT) - set(SHARED_EVENTPIPE_SOURCE_PATH ${CLR_SRC_NATIVE_DIR}/eventpipe) add_definitions(-DFEATURE_CORECLR) add_definitions(-DFEATURE_PERFTRACING) diff --git a/src/coreclr/gc/env/gcenv.base.h b/src/coreclr/gc/env/gcenv.base.h index dccf7e12fc7b5..587c5d7caeb09 100644 --- a/src/coreclr/gc/env/gcenv.base.h +++ b/src/coreclr/gc/env/gcenv.base.h @@ -100,14 +100,6 @@ inline HRESULT HRESULT_FROM_WIN32(unsigned long x) #define ZeroMemory(Destination,Length) memset((Destination),0,(Length)) -#ifndef min -#define min(a,b) (((a) < (b)) ? (a) : (b)) -#endif - -#ifndef max -#define max(a,b) (((a) > (b)) ? (a) : (b)) -#endif - #define C_ASSERT(cond) static_assert( cond, #cond ) #define UNREFERENCED_PARAMETER(P) (void)(P) diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index a4d5e58cc8ff2..6c4b6b0daa8ac 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -51,13 +51,17 @@ class gc_rand uint64_t gc_rand::x = 0; -// Define min/max as macros -#ifndef min -#define min(_a, _b) ((_a) < (_b) ? (_a) : (_b)) -#endif -#ifndef max -#define max(_a, _b) ((_a) < (_b) ? (_b) : (_a)) -#endif +template +auto max(T&& t, U&& u) -> decltype(t > u ? t : u) +{ + return t > u ? t : u; +} + +template +auto min(T&& t, U&& u) -> decltype(t < u ? t : u) +{ + return t < u ? 
t : u; +} #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) BOOL bgc_heap_walk_for_etw_p = FALSE; diff --git a/src/coreclr/gc/vxsort/defs.h b/src/coreclr/gc/vxsort/defs.h index 13c02d97d4b15..d6373a21ad269 100644 --- a/src/coreclr/gc/vxsort/defs.h +++ b/src/coreclr/gc/vxsort/defs.h @@ -45,8 +45,6 @@ #define NOINLINE __attribute__((noinline)) #endif -#undef max -#undef min using std::max; using std::min; #endif // VXSORT_DEFS_H diff --git a/src/coreclr/inc/daccess.h b/src/coreclr/inc/daccess.h index 768411e95a0d2..ef6af6f2be034 100644 --- a/src/coreclr/inc/daccess.h +++ b/src/coreclr/inc/daccess.h @@ -561,7 +561,7 @@ #ifndef NATIVEAOT #include -#if !defined(HOST_WINDOWS) && !defined(NATIVEAOT) +#if !defined(HOST_WINDOWS) #include #endif @@ -573,9 +573,7 @@ #define DACCESS_TABLE_SYMBOL "g_dacTable" #include -#ifndef PAL_STDCPP_COMPAT #include "crosscomp.h" -#endif #include diff --git a/src/coreclr/nativeaot/Runtime/CMakeLists.txt b/src/coreclr/nativeaot/Runtime/CMakeLists.txt index cb9a63c197377..cd4483e2fc1d0 100644 --- a/src/coreclr/nativeaot/Runtime/CMakeLists.txt +++ b/src/coreclr/nativeaot/Runtime/CMakeLists.txt @@ -131,7 +131,6 @@ else() include_directories(unix) # sal.h, pshpack/poppack.h - add_definitions(-DPAL_STDCPP_COMPAT) include_directories(../../pal/inc/rt) include(CheckIncludeFiles) diff --git a/src/coreclr/nativeaot/Runtime/threadstore.cpp b/src/coreclr/nativeaot/Runtime/threadstore.cpp index 259d07e7ab0bb..e04effee0000a 100644 --- a/src/coreclr/nativeaot/Runtime/threadstore.cpp +++ b/src/coreclr/nativeaot/Runtime/threadstore.cpp @@ -230,7 +230,7 @@ void SpinWait(int iteration, int usecLimit) int64_t ticksPerSecond = PalQueryPerformanceFrequency(); int64_t endTicks = startTicks + (usecLimit * ticksPerSecond) / 1000000; - int l = min((unsigned)iteration, 30); + int l = (int)min((unsigned)iteration, 30u); for (int i = 0; i < l; i++) { for (int j = 0; j < (1 << i); j++) diff --git a/src/coreclr/pal/inc/pal_mstypes.h b/src/coreclr/pal/inc/pal_mstypes.h index 457d6e2f59468..2a873c2aa046a 100644 --- a/src/coreclr/pal/inc/pal_mstypes.h +++ b/src/coreclr/pal/inc/pal_mstypes.h @@ -64,13 +64,10 @@ extern "C" { #define _cdecl #define CDECL -// On ARM __fastcall is ignored and causes a compile error -#if !defined(PAL_STDCPP_COMPAT) || defined(__arm__) -# undef __fastcall -# undef _fastcall -# define __fastcall -# define _fastcall -#endif // !defined(PAL_STDCPP_COMPAT) || defined(__arm__) +#undef __fastcall +#undef _fastcall +#define __fastcall +#define _fastcall #endif // !defined(__i386__) @@ -208,7 +205,6 @@ extern "C" { #endif // _MSC_VER -#ifndef PAL_STDCPP_COMPAT #ifndef _MSC_VER @@ -217,7 +213,6 @@ typedef long double LONG_DOUBLE; #endif #endif // _MSC_VER -#endif // !PAL_STDCPP_COMPAT typedef void VOID; @@ -553,49 +548,10 @@ static_assert(sizeof(SSIZE_T) == sizeof(void*), "SSIZE_T should be pointer sized #define SSIZE_T_MIN (ssize_t)I64(0x8000000000000000) #endif -#ifndef PAL_STDCPP_COMPAT -#ifdef HOST_64BIT -typedef unsigned long size_t; -typedef long ssize_t; -typedef long ptrdiff_t; -#else // !HOST_64BIT -typedef unsigned int size_t; -typedef int ptrdiff_t; -#endif // !HOST_64BIT -#endif // !PAL_STDCPP_COMPAT -#define _SIZE_T_DEFINED - typedef LONG_PTR LPARAM; -#define _PTRDIFF_T_DEFINED -#ifdef _MINGW_ -// We need to define _PTRDIFF_T to make sure ptrdiff_t doesn't get defined -// again by system headers - but only for MinGW. 
-#define _PTRDIFF_T -#endif - typedef char16_t WCHAR; -#ifndef PAL_STDCPP_COMPAT - -#if defined(__linux__) -#ifdef HOST_64BIT -typedef long int intptr_t; -typedef unsigned long int uintptr_t; -#else // !HOST_64BIT -typedef int intptr_t; -typedef unsigned int uintptr_t; -#endif // !HOST_64BIT -#else -typedef long int intptr_t; -typedef unsigned long int uintptr_t; -#endif - -#endif // PAL_STDCPP_COMPAT - -#define _INTPTR_T_DEFINED -#define _UINTPTR_T_DEFINED - typedef DWORD LCID; typedef PDWORD PLCID; typedef WORD LANGID; diff --git a/src/coreclr/pal/inc/rt/sal.h b/src/coreclr/pal/inc/rt/sal.h index b28a1ef8d1a8c..9d461e8050f57 100644 --- a/src/coreclr/pal/inc/rt/sal.h +++ b/src/coreclr/pal/inc/rt/sal.h @@ -2405,10 +2405,8 @@ extern "C" { Annotates a pointer p. States that pointer p is never null or maybe null. */ -#ifndef PAL_STDCPP_COMPAT #define __notnull _Notnull_impl_ #define __maybenull _Maybenull_impl_ -#endif // !PAL_STDCPP_COMPAT /* __readonly l @@ -2595,10 +2593,8 @@ extern "C" { #else // ][ -#ifndef PAL_STDCPP_COMPAT #define __notnull #define __deref -#endif // !PAL_STDCPP_COMPAT #define __maybenull #define __readonly #define __notreadonly diff --git a/src/coreclr/pal/inc/rt/specstrings.h b/src/coreclr/pal/inc/rt/specstrings.h index 21a40d91a0dd5..1cccb42e1554d 100644 --- a/src/coreclr/pal/inc/rt/specstrings.h +++ b/src/coreclr/pal/inc/rt/specstrings.h @@ -309,11 +309,9 @@ __ANNOTATION(SAL_failureDefault(enum __SAL_failureKind)); __byte_readableTo((expr) ? (size) : (size) * 2) #define __post_invalid _Post_ __notvalid /* integer related macros */ -#ifndef PAL_STDCPP_COMPAT #define __allocator __inner_allocator #define __deallocate(kind) _Pre_ __notnull __post_invalid #define __deallocate_opt(kind) _Pre_ __maybenull __post_invalid -#endif #define __bound __inner_bound #define __range(lb,ub) __inner_range(lb,ub) #define __in_bound _Pre_ __inner_bound diff --git a/src/coreclr/pal/inc/rt/specstrings_undef.h b/src/coreclr/pal/inc/rt/specstrings_undef.h index b6c5e28072ab1..f3afd76a6818b 100644 --- a/src/coreclr/pal/inc/rt/specstrings_undef.h +++ b/src/coreclr/pal/inc/rt/specstrings_undef.h @@ -5,10 +5,8 @@ */ -#ifndef PAL_STDCPP_COMPAT #undef __in #undef __out -#endif // !PAL_STDCPP_COMPAT #undef _At_ #undef _Deref_out_ diff --git a/src/coreclr/pal/src/eventprovider/dummyprovider/CMakeLists.txt b/src/coreclr/pal/src/eventprovider/dummyprovider/CMakeLists.txt index e0105865f9aec..09986597b7c1b 100644 --- a/src/coreclr/pal/src/eventprovider/dummyprovider/CMakeLists.txt +++ b/src/coreclr/pal/src/eventprovider/dummyprovider/CMakeLists.txt @@ -24,7 +24,6 @@ foreach(DUMMY_PROVIDER_FILE ${DUMMY_PROVIDER_OUTPUT}) list(APPEND DUMMY_PROVIDER_SOURCES ${DUMMY_PROVIDER_FILE}) endforeach() -add_definitions(-DPAL_STDCPP_COMPAT=1) include_directories(${COREPAL_SOURCE_DIR}/inc/rt) include_directories(${CMAKE_CURRENT_BINARY_DIR}/dummy) diff --git a/src/coreclr/pal/src/eventprovider/lttngprovider/CMakeLists.txt b/src/coreclr/pal/src/eventprovider/lttngprovider/CMakeLists.txt index d116c0095ea55..40f65bf171142 100644 --- a/src/coreclr/pal/src/eventprovider/lttngprovider/CMakeLists.txt +++ b/src/coreclr/pal/src/eventprovider/lttngprovider/CMakeLists.txt @@ -30,7 +30,6 @@ foreach(LTTNG_PROVIDER_FILE ${LTTNG_PROVIDER_OUTPUT}) endif() endforeach() -add_definitions(-DPAL_STDCPP_COMPAT=1) include_directories(${COREPAL_SOURCE_DIR}/inc/rt) include_directories(${CMAKE_CURRENT_BINARY_DIR}/lttng) diff --git a/src/coreclr/vm/vars.hpp b/src/coreclr/vm/vars.hpp index 65712d031512d..b080bb0c60f8e 100644 --- 
a/src/coreclr/vm/vars.hpp +++ b/src/coreclr/vm/vars.hpp @@ -16,46 +16,6 @@ typedef DPTR(SLOT) PTR_SLOT; typedef LPVOID DictionaryEntry; -/* Define the implementation dependent size types */ - -#ifndef _INTPTR_T_DEFINED -#ifdef HOST_64BIT -typedef __int64 intptr_t; -#else -typedef int intptr_t; -#endif -#define _INTPTR_T_DEFINED -#endif - -#ifndef _UINTPTR_T_DEFINED -#ifdef HOST_64BIT -typedef unsigned __int64 uintptr_t; -#else -typedef unsigned int uintptr_t; -#endif -#define _UINTPTR_T_DEFINED -#endif - -#ifndef _PTRDIFF_T_DEFINED -#ifdef HOST_64BIT -typedef __int64 ptrdiff_t; -#else -typedef int ptrdiff_t; -#endif -#define _PTRDIFF_T_DEFINED -#endif - - -#ifndef _SIZE_T_DEFINED -#ifdef HOST_64BIT -typedef unsigned __int64 size_t; -#else -typedef unsigned int size_t; -#endif -#define _SIZE_T_DEFINED -#endif - - #include "util.hpp" #include #include From 1a0ee6ce7120d6b2c7e7026d4323befbba97a8f8 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Thu, 14 Mar 2024 13:08:52 -0700 Subject: [PATCH 40/60] Various build fixes and PR feedback --- src/coreclr/gc/sample/GCSample.cpp | 2 +- src/coreclr/pal/inc/pal_mstypes.h | 2 -- src/coreclr/pal/inc/rt/safecrt.h | 10 ---------- 3 files changed, 1 insertion(+), 13 deletions(-) diff --git a/src/coreclr/gc/sample/GCSample.cpp b/src/coreclr/gc/sample/GCSample.cpp index 41e275035b914..0f2afc7c20a71 100644 --- a/src/coreclr/gc/sample/GCSample.cpp +++ b/src/coreclr/gc/sample/GCSample.cpp @@ -176,7 +176,7 @@ int __cdecl main(int argc, char* argv[]) // GC expects the size of ObjHeader (extra void*) to be included in the size. baseSize = baseSize + sizeof(ObjHeader); // Add padding as necessary. GC requires the object size to be at least MIN_OBJECT_SIZE. - My_MethodTable.m_MT.m_baseSize = max(baseSize, MIN_OBJECT_SIZE); + My_MethodTable.m_MT.m_baseSize = max(baseSize, (uint32_t)MIN_OBJECT_SIZE); My_MethodTable.m_MT.m_componentSize = 0; // Array component size My_MethodTable.m_MT.m_flags = MTFlag_ContainsPointers; diff --git a/src/coreclr/pal/inc/pal_mstypes.h b/src/coreclr/pal/inc/pal_mstypes.h index 2a873c2aa046a..b4e04840d4837 100644 --- a/src/coreclr/pal/inc/pal_mstypes.h +++ b/src/coreclr/pal/inc/pal_mstypes.h @@ -64,8 +64,6 @@ extern "C" { #define _cdecl #define CDECL -#undef __fastcall -#undef _fastcall #define __fastcall #define _fastcall diff --git a/src/coreclr/pal/inc/rt/safecrt.h b/src/coreclr/pal/inc/rt/safecrt.h index 7abc7271b0709..df31623d903c8 100644 --- a/src/coreclr/pal/inc/rt/safecrt.h +++ b/src/coreclr/pal/inc/rt/safecrt.h @@ -95,16 +95,6 @@ #endif #endif -/* uintptr_t */ -#if !defined(_UINTPTR_T_DEFINED) -#if defined(HOST_64BIT) -typedef unsigned __int64 uintptr_t; -#else -typedef _W64 unsigned int uintptr_t; -#endif -#define _UINTPTR_T_DEFINED -#endif - #ifdef __GNUC__ #define SAFECRT_DEPRECATED __attribute__((deprecated)) #else From 57ed663cc55d2e42faf4ad314b58ed537794d2fd Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Thu, 14 Mar 2024 14:20:11 -0700 Subject: [PATCH 41/60] Fix build errors --- src/coreclr/gc/gc.cpp | 30 +++++++++++++++--------------- src/coreclr/pal/inc/strsafe.h | 9 --------- 2 files changed, 15 insertions(+), 24 deletions(-) diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index 6c4b6b0daa8ac..a8d3e9bd204f4 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -6998,7 +6998,7 @@ void gc_heap::gc_thread_function () dynamic_heap_count_data_t::sample& sample = dynamic_heap_count_data.samples[dynamic_heap_count_data.sample_index]; wait_time = min (wait_time, 
(uint32_t)(sample.elapsed_between_gcs / 1000 / 3)); - wait_time = max (wait_time, 1); + wait_time = max (wait_time, 1u); dprintf (6666, ("gc#0 thread waiting for %d ms (betwen GCs %I64d)", wait_time, sample.elapsed_between_gcs)); } @@ -13922,7 +13922,7 @@ uint32_t adjust_heaps_hard_limit_worker (uint32_t nhp, size_t limit) size_t aligned_limit = align_on_segment_hard_limit (limit); uint32_t nhp_oh = (uint32_t)(aligned_limit / min_segment_size_hard_limit); nhp = min (nhp_oh, nhp); - return (max (nhp, 1)); + return (max (nhp, 1u)); } uint32_t gc_heap::adjust_heaps_hard_limit (uint32_t nhp) @@ -14326,7 +14326,7 @@ gc_heap::init_semi_shared() #endif //!USE_REGIONS #ifdef MULTIPLE_HEAPS - mark_list_size = min (100*1024, max (8192, soh_segment_size/(2*10*32))); + mark_list_size = min (100*1024u, max (8192u, soh_segment_size/(2*10*32))); #ifdef DYNAMIC_HEAP_COUNT if (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes) { @@ -14348,7 +14348,7 @@ gc_heap::init_semi_shared() } #else //MULTIPLE_HEAPS - mark_list_size = min(100*1024, max (8192, soh_segment_size/(64*32))); + mark_list_size = min(100*1024u, max (8192u, soh_segment_size/(64*32))); g_mark_list_total_size = mark_list_size; g_mark_list = make_mark_list (mark_list_size); @@ -14470,7 +14470,7 @@ gc_heap::init_semi_shared() if (bgc_tuning::enable_fl_tuning && (current_memory_load < bgc_tuning::memory_load_goal)) { uint32_t distance_to_goal = bgc_tuning::memory_load_goal - current_memory_load; - bgc_tuning::stepping_interval = max (distance_to_goal / 10, 1); + bgc_tuning::stepping_interval = max (distance_to_goal / 10, 1u); bgc_tuning::last_stepping_mem_load = current_memory_load; bgc_tuning::last_stepping_bgc_count = 0; dprintf (BGC_TUNING_LOG, ("current ml: %d, %d to goal, interval: %d", @@ -21841,7 +21841,7 @@ size_t gc_heap::min_reclaim_fragmentation_threshold (uint32_t num_heaps) inline uint64_t gc_heap::min_high_fragmentation_threshold(uint64_t available_mem, uint32_t num_heaps) { - return min (available_mem, (256*1024*1024)) / num_heaps; + return min (available_mem, (256*1024*1024u)) / num_heaps; } enum { @@ -22102,7 +22102,7 @@ size_t gc_heap::exponential_smoothing (int gen, size_t collection_count, size_t { // to avoid spikes in mem usage due to short terms fluctuations in survivorship, // apply some smoothing. - size_t smoothing = min(3, collection_count); + size_t smoothing = min(3u, collection_count); size_t desired_total = desired_per_heap * n_heaps; size_t new_smoothed_desired_total = desired_total / smoothing + ((smoothed_desired_total[gen] / smoothing) * (smoothing - 1)); @@ -43333,7 +43333,7 @@ void gc_heap::init_static_data() size_t gen0_max_size = #ifdef MULTIPLE_HEAPS - max (6*1024*1024, min ( Align(soh_segment_size/2), 200*1024*1024)); + max (6*1024*1024u, min ( Align(soh_segment_size/2), 200*1024*1024u)); #else //MULTIPLE_HEAPS ( #ifdef BACKGROUND_GC @@ -47190,7 +47190,7 @@ enable_no_gc_region_callback_status gc_heap::enable_no_gc_callback(NoGCRegionCal soh_withheld_budget = soh_withheld_budget / gc_heap::n_heaps; loh_withheld_budget = loh_withheld_budget / gc_heap::n_heaps; #endif - soh_withheld_budget = max(soh_withheld_budget, 1); + soh_withheld_budget = max(soh_withheld_budget, 1u); soh_withheld_budget = Align(soh_withheld_budget, get_alignment_constant (TRUE)); loh_withheld_budget = Align(loh_withheld_budget, get_alignment_constant (FALSE)); #ifdef MULTIPLE_HEAPS @@ -48455,7 +48455,7 @@ HRESULT GCHeap::Initialize() nhp = ((nhp_from_config == 0) ? 
g_num_active_processors : nhp_from_config); - nhp = min (nhp, MAX_SUPPORTED_CPUS); + nhp = min (nhp, (uint32_t)MAX_SUPPORTED_CPUS); gc_heap::gc_thread_no_affinitize_p = (gc_heap::heap_hard_limit ? !affinity_config_specified_p : (GCConfig::GetNoAffinitize() != 0)); @@ -51239,11 +51239,11 @@ size_t gc_heap::get_gen0_min_size() #ifdef SERVER_GC // performance data seems to indicate halving the size results // in optimal perf. Ask for adjusted gen0 size. - gen0size = max(GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE),(256*1024)); + gen0size = max(GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE),(256*1024u)); // if gen0 size is too large given the available memory, reduce it. // Get true cache size, as we don't want to reduce below this. - size_t trueSize = max(GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE),(256*1024)); + size_t trueSize = max(GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE),(256*1024u)); dprintf (1, ("cache: %zd-%zd", GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE), GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE))); @@ -52784,7 +52784,7 @@ bool gc_heap::compute_memory_settings(bool is_initialization, uint32_t& nhp, uin if (is_initialization) #endif //USE_REGIONS { - heap_hard_limit = (size_t)max ((20 * 1024 * 1024), physical_mem_for_gc); + heap_hard_limit = (size_t)max ((20ull * 1024 * 1024), physical_mem_for_gc); } } } @@ -52832,8 +52832,8 @@ bool gc_heap::compute_memory_settings(bool is_initialization, uint32_t& nhp, uin uint32_t highmem_th_from_config = (uint32_t)GCConfig::GetGCHighMemPercent(); if (highmem_th_from_config) { - high_memory_load_th = min (99, highmem_th_from_config); - v_high_memory_load_th = min (99, (highmem_th_from_config + 7)); + high_memory_load_th = min (99u, highmem_th_from_config); + v_high_memory_load_th = min (99u, (highmem_th_from_config + 7)); #ifdef FEATURE_EVENT_TRACE high_mem_percent_from_config = highmem_th_from_config; #endif //FEATURE_EVENT_TRACE diff --git a/src/coreclr/pal/inc/strsafe.h b/src/coreclr/pal/inc/strsafe.h index b69feb73c2512..b833526e61777 100644 --- a/src/coreclr/pal/inc/strsafe.h +++ b/src/coreclr/pal/inc/strsafe.h @@ -27,15 +27,6 @@ #include // for memset #include // for va_start, etc. -#ifndef _SIZE_T_DEFINED -#ifdef HOST_64BIT -typedef unsigned __int64 size_t; -#else -typedef __w64 unsigned int size_t; -#endif // !HOST_64BIT -#define _SIZE_T_DEFINED -#endif // !_SIZE_T_DEFINED - #ifndef SUCCEEDED #define SUCCEEDED(hr) ((HRESULT)(hr) >= 0) #endif From 705bf6889c94346e4b1d765229e89c702fae3f7c Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Fri, 15 Mar 2024 12:09:57 -0700 Subject: [PATCH 42/60] Set NOMINMAX globally and move the CoreCLR and NativeAOT builds to never use min/max macros, even on Windows. 
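Background, as a minimal sketch rather than part of the diff: unless NOMINMAX is
defined before <windows.h>, windows.h defines function-style min/max macros, so a
call written std::min(a, b) is macro-expanded into text that does not parse, and
an unqualified min(a, b) silently binds to the macro instead of the template
helpers this series introduces. Supplying NOMINMAX at the build level (the
add_compile_definitions calls below) keeps every translation unit consistent:

    // Illustration only, assuming a Windows translation unit.
    #define NOMINMAX            // normally supplied by the build, not per file
    #include <windows.h>
    #include <algorithm>

    DWORD Smaller(DWORD a, DWORD b)
    {
        // With the windows.h min macro in scope, the next line would expand to
        // std::(((a) < (b)) ? (a) : (b)) and fail to compile.
        return std::min(a, b);
    }

    int main()
    {
        return (int)Smaller(3, 5);  // exits with 3
    }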
--- src/coreclr/clrdefinitions.cmake | 1 + src/coreclr/debug/di/rsthread.cpp | 6 +++--- src/coreclr/debug/di/rstype.cpp | 2 +- src/coreclr/gc/env/gcenv.base.h | 1 - src/coreclr/gc/gc.cpp | 18 +++--------------- src/coreclr/gc/gcpriv.h | 18 +++++++++++++++--- src/coreclr/gc/sample/CMakeLists.txt | 1 + src/coreclr/gc/sample/GCSample.vcxproj | 6 +++--- src/coreclr/gc/windows/gcenv.windows.cpp | 10 +++++----- src/coreclr/hosts/coreshim/CoreShim.h | 1 - src/coreclr/ildasm/ildasmpch.h | 1 - src/coreclr/inc/contract.inl | 6 +++--- src/coreclr/inc/utilcode.h | 1 + src/coreclr/jit/jitpch.h | 1 - src/coreclr/md/compiler/import.cpp | 2 +- src/coreclr/md/enc/rwutil.cpp | 4 ++-- src/coreclr/nativeaot/CMakeLists.txt | 1 + .../superpmi/superpmi-shared/standardpch.h | 1 - src/coreclr/utilcode/stgpool.cpp | 6 +++--- src/coreclr/utilcode/util.cpp | 2 +- src/coreclr/utilcode/utsem.cpp | 2 +- src/coreclr/vm/.vscode/c_cpp_properties.json | 1 + src/coreclr/vm/ceemain.cpp | 2 +- src/coreclr/vm/classhash.cpp | 4 ++-- src/coreclr/vm/classlayoutinfo.cpp | 2 +- src/coreclr/vm/eetwain.cpp | 4 ++-- src/coreclr/vm/methodtablebuilder.cpp | 2 +- src/coreclr/vm/proftoeeinterfaceimpl.cpp | 2 +- src/coreclr/vm/stringliteralmap.cpp | 2 +- 29 files changed, 55 insertions(+), 55 deletions(-) diff --git a/src/coreclr/clrdefinitions.cmake b/src/coreclr/clrdefinitions.cmake index 2ffcfb00c0c05..1ff7266369539 100644 --- a/src/coreclr/clrdefinitions.cmake +++ b/src/coreclr/clrdefinitions.cmake @@ -53,6 +53,7 @@ if(CLR_CMAKE_HOST_WIN32) add_definitions(-D_WIN32_WINNT=0x0602) add_definitions(-DWIN32_LEAN_AND_MEAN) add_definitions(-D_CRT_SECURE_NO_WARNINGS) + add_compile_definitions(NOMINMAX) endif(CLR_CMAKE_HOST_WIN32) if (NOT (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX)) diff --git a/src/coreclr/debug/di/rsthread.cpp b/src/coreclr/debug/di/rsthread.cpp index 3c5024fc80fab..307da3f6d1da1 100644 --- a/src/coreclr/debug/di/rsthread.cpp +++ b/src/coreclr/debug/di/rsthread.cpp @@ -5122,7 +5122,7 @@ HRESULT CordbValueEnum::Next(ULONG celt, ICorDebugValue *values[], ULONG *pceltF HRESULT hr = S_OK; - int iMax = min( m_iMax, m_iCurrent+celt); + int iMax = (int)min( (ULONG)m_iMax, m_iCurrent+celt); int i; for (i = m_iCurrent; i< iMax;i++) { @@ -8229,7 +8229,7 @@ HRESULT CordbJITILFrame::FabricateNativeInfo(DWORD dwIndex, #else // STACK_GROWS_UP_ON_ARGS_WALK m_rgNVI[i].loc.vlFixedVarArg.vlfvOffset = (unsigned)(rpCur - m_FirstArgAddr); - rpCur += max(cbType, cbArchitectureMin); + rpCur += max((ULONG)cbType, cbArchitectureMin); AlignAddressForType(pArgType, rpCur); #endif @@ -10877,7 +10877,7 @@ HRESULT CordbCodeEnum::Next(ULONG celt, ICorDebugCode *values[], ULONG *pceltFet HRESULT hr = S_OK; - int iMax = min( m_iMax, m_iCurrent+celt); + int iMax = (int)min( (ULONG)m_iMax, m_iCurrent+celt); int i; for (i = m_iCurrent; i < iMax; i++) diff --git a/src/coreclr/debug/di/rstype.cpp b/src/coreclr/debug/di/rstype.cpp index 45ccd44be6565..ae686064e96cb 100644 --- a/src/coreclr/debug/di/rstype.cpp +++ b/src/coreclr/debug/di/rstype.cpp @@ -2898,7 +2898,7 @@ HRESULT CordbTypeEnum::Next(ULONG celt, ICorDebugType *values[], ULONG *pceltFet HRESULT hr = S_OK; - int iMax = min( m_iMax, m_iCurrent+celt); + int iMax = (int)min( (ULONG)m_iMax, m_iCurrent+celt); int i; for (i = m_iCurrent; i < iMax; i++) diff --git a/src/coreclr/gc/env/gcenv.base.h b/src/coreclr/gc/env/gcenv.base.h index 587c5d7caeb09..1abad9e43d17e 100644 --- a/src/coreclr/gc/env/gcenv.base.h +++ b/src/coreclr/gc/env/gcenv.base.h @@ -390,7 +390,6 @@ typedef struct _PROCESSOR_NUMBER { 
uint8_t Number; uint8_t Reserved; } PROCESSOR_NUMBER, *PPROCESSOR_NUMBER; - #endif // _INC_WINDOWS // ----------------------------------------------------------------------------------------------------------- diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index a8d3e9bd204f4..8362f5ea17435 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -51,18 +51,6 @@ class gc_rand uint64_t gc_rand::x = 0; -template -auto max(T&& t, U&& u) -> decltype(t > u ? t : u) -{ - return t > u ? t : u; -} - -template -auto min(T&& t, U&& u) -> decltype(t < u ? t : u) -{ - return t < u ? t : u; -} - #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) BOOL bgc_heap_walk_for_etw_p = FALSE; #endif //BACKGROUND_GC && FEATURE_EVENT_TRACE @@ -7048,7 +7036,7 @@ void gc_heap::gc_thread_function () } // wait till the threads that should have gone idle at least reached the place where they are about to wait on the idle event. - if ((gc_heap::dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes) && + if ((gc_heap::dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes) && (n_heaps != dynamic_heap_count_data.last_n_heaps)) { int spin_count = 1024; @@ -48604,7 +48592,7 @@ HRESULT GCHeap::Initialize() /* * Allocation requests less than loh_size_threshold will be allocated on the small object heap. * - * An object cannot span more than one region and regions in small object heap are of the same size - gc_region_size. + * An object cannot span more than one region and regions in small object heap are of the same size - gc_region_size. * However, the space available for actual allocations is reduced by the following implementation details - * * 1.) heap_segment_mem is set to the new pages + sizeof(aligned_plug_and_gap) in make_heap_segment. @@ -48620,7 +48608,7 @@ HRESULT GCHeap::Initialize() #ifdef FEATURE_STRUCTALIGN /* * The above assumed FEATURE_STRUCTALIGN is not turned on for platforms where USE_REGIONS is supported, otherwise it is possible - * that the allocation size is inflated by ComputeMaxStructAlignPad in GCHeap::Alloc and we have to compute an upper bound of that + * that the allocation size is inflated by ComputeMaxStructAlignPad in GCHeap::Alloc and we have to compute an upper bound of that * function. * * Note that ComputeMaxStructAlignPad is defined to be 0 if FEATURE_STRUCTALIGN is turned off. diff --git a/src/coreclr/gc/gcpriv.h b/src/coreclr/gc/gcpriv.h index 788cbff9f5e50..0e44cf8aa12ef 100644 --- a/src/coreclr/gc/gcpriv.h +++ b/src/coreclr/gc/gcpriv.h @@ -3345,8 +3345,8 @@ class gc_heap size_t new_current_total_committed); #ifdef USE_REGIONS - PER_HEAP_ISOLATED_METHOD void compute_committed_bytes(size_t& total_committed, size_t& committed_decommit, size_t& committed_free, - size_t& committed_bookkeeping, size_t& new_current_total_committed, size_t& new_current_total_committed_bookkeeping, + PER_HEAP_ISOLATED_METHOD void compute_committed_bytes(size_t& total_committed, size_t& committed_decommit, size_t& committed_free, + size_t& committed_bookkeeping, size_t& new_current_total_committed, size_t& new_current_total_committed_bookkeeping, size_t* new_committed_by_oh); #endif @@ -4226,7 +4226,7 @@ class gc_heap #ifdef DYNAMIC_HEAP_COUNT // Sample collection - - // + // // For every GC, we collect the msl wait time + GC pause duration info and use both to calculate the // throughput cost percentage. We will also be using the wait time and the GC pause duration separately // for other purposes in the future. 
@@ -5961,3 +5961,15 @@ class card_marking_enumerator #else #define THIS_ARG #endif // FEATURE_CARD_MARKING_STEALING + +template +auto max(T&& t, U&& u) -> decltype(t > u ? t : u) +{ + return t > u ? t : u; +} + +template +auto min(T&& t, U&& u) -> decltype(t < u ? t : u) +{ + return t < u ? t : u; +} diff --git a/src/coreclr/gc/sample/CMakeLists.txt b/src/coreclr/gc/sample/CMakeLists.txt index 94a736e8c8126..1f297fd231332 100644 --- a/src/coreclr/gc/sample/CMakeLists.txt +++ b/src/coreclr/gc/sample/CMakeLists.txt @@ -53,6 +53,7 @@ if(CLR_CMAKE_TARGET_WIN32) list(APPEND SOURCES ../windows/gcenv.windows.cpp) add_definitions(-DUNICODE) + add_compile_definitions(NOMINMAX) else() list(APPEND SOURCES ../gcenv.unix.cpp) diff --git a/src/coreclr/gc/sample/GCSample.vcxproj b/src/coreclr/gc/sample/GCSample.vcxproj index 6e33738d18d0d..0b7e657b35f80 100644 --- a/src/coreclr/gc/sample/GCSample.vcxproj +++ b/src/coreclr/gc/sample/GCSample.vcxproj @@ -51,7 +51,7 @@ Use Level3 Disabled - WIN32;HOST_X86;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) + WIN32;HOST_X86;NOMINMAX;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true common.h .;..;..\env @@ -68,7 +68,7 @@ MaxSpeed true true - WIN32;HOST_X86;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) + WIN32;HOST_X86;NOMINMAX;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true .;..;..\env @@ -109,4 +109,4 @@ - \ No newline at end of file + diff --git a/src/coreclr/gc/windows/gcenv.windows.cpp b/src/coreclr/gc/windows/gcenv.windows.cpp index 0aae8e035bbb4..c9138d537bb5b 100644 --- a/src/coreclr/gc/windows/gcenv.windows.cpp +++ b/src/coreclr/gc/windows/gcenv.windows.cpp @@ -290,8 +290,8 @@ static size_t GetRestrictedPhysicalMemoryLimit() (job_process_memory_limit != (size_t)UINTPTR_MAX) || (job_workingset_limit != (size_t)UINTPTR_MAX)) { - job_physical_memory_limit = min (job_memory_limit, job_process_memory_limit); - job_physical_memory_limit = min (job_physical_memory_limit, job_workingset_limit); + job_physical_memory_limit = std::min (job_memory_limit, job_process_memory_limit); + job_physical_memory_limit = std::min (job_physical_memory_limit, job_workingset_limit); MEMORYSTATUSEX ms; ::GetProcessMemoryLoad(&ms); @@ -299,7 +299,7 @@ static size_t GetRestrictedPhysicalMemoryLimit() total_physical = ms.ullAvailPhys; // A sanity check in case someone set a larger limit than there is actual physical memory. 
- job_physical_memory_limit = (size_t) min (job_physical_memory_limit, ms.ullTotalPhys); + job_physical_memory_limit = (size_t) std::min (job_physical_memory_limit, ms.ullTotalPhys); } } } @@ -1139,7 +1139,7 @@ bool GCToOSInterface::GetNumaInfo(uint16_t* total_nodes, uint32_t* max_procs_per mask &= mask - 1; } - currentProcsOnNode = max(currentProcsOnNode, procsOnNode); + currentProcsOnNode = std::max(currentProcsOnNode, procsOnNode); } *max_procs_per_node = currentProcsOnNode; *total_nodes = (uint16_t)g_nNodes; @@ -1163,7 +1163,7 @@ bool GCToOSInterface::GetCPUGroupInfo(uint16_t* total_groups, uint32_t* max_proc DWORD currentProcsInGroup = 0; for (WORD i = 0; i < g_nGroups; i++) { - currentProcsInGroup = max(currentProcsInGroup, g_CPUGroupInfoArray[i].nr_active); + currentProcsInGroup = std::max(currentProcsInGroup, (DWORD)g_CPUGroupInfoArray[i].nr_active); } *max_procs_per_group = currentProcsInGroup; return true; diff --git a/src/coreclr/hosts/coreshim/CoreShim.h b/src/coreclr/hosts/coreshim/CoreShim.h index 97b630bdb9e19..9be052926ec57 100644 --- a/src/coreclr/hosts/coreshim/CoreShim.h +++ b/src/coreclr/hosts/coreshim/CoreShim.h @@ -5,7 +5,6 @@ #define _CORESHIM_H_ // Platform -#define NOMINMAX #include #include diff --git a/src/coreclr/ildasm/ildasmpch.h b/src/coreclr/ildasm/ildasmpch.h index 219e27f0ba182..5bb192dd14e10 100644 --- a/src/coreclr/ildasm/ildasmpch.h +++ b/src/coreclr/ildasm/ildasmpch.h @@ -6,7 +6,6 @@ #define OEMRESOURCE #define INITGUID -#define NOMINMAX #include #include diff --git a/src/coreclr/inc/contract.inl b/src/coreclr/inc/contract.inl index 1578e87161df4..211b6b5a1d701 100644 --- a/src/coreclr/inc/contract.inl +++ b/src/coreclr/inc/contract.inl @@ -352,7 +352,7 @@ inline void DbgStateLockData::LockTaken(DbgStateLockType dbgStateLockType, // Remember as many of these new entrances in m_rgTakenLockInfos as we can for (UINT i = cCombinedLocks; - i < min (ARRAY_SIZE(m_rgTakenLockInfos), (size_t)(cCombinedLocks + cTakes)); + i < std::min (ARRAY_SIZE(m_rgTakenLockInfos), (size_t)(cCombinedLocks + cTakes)); i++) { m_rgTakenLockInfos[i].m_pvLock = pvLock; @@ -377,7 +377,7 @@ inline void DbgStateLockData::LockReleased(DbgStateLockType dbgStateLockType, UI // If lock count is within range of our m_rgTakenLockInfos buffer size, then // make sure we're releasing locks in reverse order of how we took them for (UINT i = cCombinedLocks - cReleases; - i < min (ARRAY_SIZE(m_rgTakenLockInfos), (size_t)cCombinedLocks); + i < std::min (ARRAY_SIZE(m_rgTakenLockInfos), (size_t)cCombinedLocks); i++) { if (m_rgTakenLockInfos[i].m_pvLock != pvLock) @@ -443,7 +443,7 @@ inline BOOL DbgStateLockState::IsLockRetaken(void * pvLock) // m_cLocksEnteringCannotRetakeLock records the number of locks that were taken // when CANNOT_RETAKE_LOCK contract was constructed. 
for (UINT i = 0; - i < min(ARRAY_SIZE(m_pLockData->m_rgTakenLockInfos), (size_t)m_cLocksEnteringCannotRetakeLock); + i < std::min(ARRAY_SIZE(m_pLockData->m_rgTakenLockInfos), (size_t)m_cLocksEnteringCannotRetakeLock); ++i) { if (m_pLockData->m_rgTakenLockInfos[i].m_pvLock == pvLock) diff --git a/src/coreclr/inc/utilcode.h b/src/coreclr/inc/utilcode.h index a32e6a3c83ab6..a0fb42e6fb9f2 100644 --- a/src/coreclr/inc/utilcode.h +++ b/src/coreclr/inc/utilcode.h @@ -29,6 +29,7 @@ #include "new.hpp" #include +#include #include "contract.h" diff --git a/src/coreclr/jit/jitpch.h b/src/coreclr/jit/jitpch.h index 001c3fa69b248..07f6ae6631cab 100644 --- a/src/coreclr/jit/jitpch.h +++ b/src/coreclr/jit/jitpch.h @@ -1,7 +1,6 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -#define NOMINMAX #include #include #include diff --git a/src/coreclr/md/compiler/import.cpp b/src/coreclr/md/compiler/import.cpp index 9c7d4c5a01b8c..060d3261af6ab 100644 --- a/src/coreclr/md/compiler/import.cpp +++ b/src/coreclr/md/compiler/import.cpp @@ -2172,7 +2172,7 @@ STDMETHODIMP RegMeta::GetUserString( // S_OK or error. memcpy( wszString, userString.GetDataPointer(), - min(userString.GetSize(), cbStringSize)); + min((ULONG)userString.GetSize(), cbStringSize)); if (cbStringSize < userString.GetSize()) { if ((wszString != NULL) && (cchStringSize > 0)) diff --git a/src/coreclr/md/enc/rwutil.cpp b/src/coreclr/md/enc/rwutil.cpp index 69ad55f571c3e..a828249fea6b2 100644 --- a/src/coreclr/md/enc/rwutil.cpp +++ b/src/coreclr/md/enc/rwutil.cpp @@ -230,7 +230,7 @@ HRESULT HENUMInternal::EnumWithCount( } // we can only fill the minimum of what caller asked for or what we have left - cTokens = min ( (pEnum->u.m_ulEnd - pEnum->u.m_ulCur), cMax); + cTokens = min ( (ULONG)(pEnum->u.m_ulEnd - pEnum->u.m_ulCur), cMax); if (pEnum->m_EnumType == MDSimpleEnum) { @@ -296,7 +296,7 @@ HRESULT HENUMInternal::EnumWithCount( _ASSERTE(! 
((pEnum->u.m_ulEnd - pEnum->u.m_ulCur) % 2) ); // we can only fill the minimum of what caller asked for or what we have left - cTokens = min ( (pEnum->u.m_ulEnd - pEnum->u.m_ulCur), cMax * 2); + cTokens = min ( (ULONG)(pEnum->u.m_ulEnd - pEnum->u.m_ulCur), cMax * 2); // get the embedded dynamic array TOKENLIST *pdalist = (TOKENLIST *)&(pEnum->m_cursor); diff --git a/src/coreclr/nativeaot/CMakeLists.txt b/src/coreclr/nativeaot/CMakeLists.txt index 4b10cefe57387..74ee982ab9805 100644 --- a/src/coreclr/nativeaot/CMakeLists.txt +++ b/src/coreclr/nativeaot/CMakeLists.txt @@ -1,5 +1,6 @@ if(WIN32) add_definitions(-DUNICODE=1) + add_compile_definitions(NOMINMAX) endif (WIN32) if(MSVC) diff --git a/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h b/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h index 3d12965de4f8f..4a9f434763e40 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h +++ b/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h @@ -12,7 +12,6 @@ #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif // WIN32_LEAN_AND_MEAN -#define NOMINMAX #include #ifdef INTERNAL_BUILD diff --git a/src/coreclr/utilcode/stgpool.cpp b/src/coreclr/utilcode/stgpool.cpp index dd1858be84d70..e7aebc55d6ca9 100644 --- a/src/coreclr/utilcode/stgpool.cpp +++ b/src/coreclr/utilcode/stgpool.cpp @@ -1938,7 +1938,7 @@ CInMemoryStream::CopyTo( _ASSERTE(cb.QuadPart <= UINT32_MAX); ULONG cbTotal = min(static_cast(cb.QuadPart), m_cbSize - m_cbCurrent); - ULONG cbRead=min(1024u, cbTotal); + ULONG cbRead=min((ULONG)1024, cbTotal); CQuickBytes rBuf; void *pBuf = rBuf.AllocNoThrow(cbRead); if (pBuf == 0) @@ -2061,7 +2061,7 @@ CGrowableStream::CGrowableStream(float multiplicativeGrowthRate, DWORD additiveG m_multiplicativeGrowthRate = min(max(1.0F, multiplicativeGrowthRate), 2.0F); _ASSERTE(additiveGrowthRate >= 1); - m_additiveGrowthRate = max(1u, additiveGrowthRate); + m_additiveGrowthRate = max((DWORD)1, additiveGrowthRate); } // CGrowableStream::CGrowableStream #ifndef DACCESS_COMPILE @@ -2115,7 +2115,7 @@ HRESULT CGrowableStream::EnsureCapacity(DWORD newLogicalSize) multSize = (DWORD)multSizeF; } - DWORD newBufferSize = max(max(newLogicalSize, multSize), addSize.Value()); + DWORD newBufferSize = max(max(newLogicalSize, multSize), (DWORD)addSize.Value()); char *tmp = new (nothrow) char[newBufferSize]; if(tmp == NULL) diff --git a/src/coreclr/utilcode/util.cpp b/src/coreclr/utilcode/util.cpp index 13668d244135e..1819f38e0a438 100644 --- a/src/coreclr/utilcode/util.cpp +++ b/src/coreclr/utilcode/util.cpp @@ -816,7 +816,7 @@ DWORD LCM(DWORD u, DWORD v) DWORD currentProcsInGroup = 0; for (WORD i = 0; i < m_nGroups; i++) { - currentProcsInGroup = max(currentProcsInGroup, m_CPUGroupInfoArray[i].nr_active); + currentProcsInGroup = max(currentProcsInGroup, (DWORD)m_CPUGroupInfoArray[i].nr_active); } *max_procs_per_group = currentProcsInGroup; return true; diff --git a/src/coreclr/utilcode/utsem.cpp b/src/coreclr/utilcode/utsem.cpp index d7f1bc04326e9..e8e786cc3af91 100644 --- a/src/coreclr/utilcode/utsem.cpp +++ b/src/coreclr/utilcode/utsem.cpp @@ -84,7 +84,7 @@ SpinConstants g_SpinConstants = { inline void InitializeSpinConstants_NoHost() { - g_SpinConstants.dwMaximumDuration = max(2u, g_SystemInfo.dwNumberOfProcessors) * 20000; + g_SpinConstants.dwMaximumDuration = max((DWORD)2, g_SystemInfo.dwNumberOfProcessors) * 20000; } #else //!SELF_NO_HOST diff --git a/src/coreclr/vm/.vscode/c_cpp_properties.json b/src/coreclr/vm/.vscode/c_cpp_properties.json index 
d8abc20bf0bcb..4192e236e3548 100644 --- a/src/coreclr/vm/.vscode/c_cpp_properties.json +++ b/src/coreclr/vm/.vscode/c_cpp_properties.json @@ -31,6 +31,7 @@ "_UNICODE", "_WIN32", "_WIN32_WINNT=0x0602", + "NOMINMAX", "HOST_64BIT", "AMD64", "HOST_64BIT=1", diff --git a/src/coreclr/vm/ceemain.cpp b/src/coreclr/vm/ceemain.cpp index b5df6dd84286f..62773c94a3097 100644 --- a/src/coreclr/vm/ceemain.cpp +++ b/src/coreclr/vm/ceemain.cpp @@ -933,7 +933,7 @@ void EEStartupHelper() // retrieve configured max size for the mini-metadata buffer (defaults to 64KB) g_MiniMetaDataBuffMaxSize = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MiniMdBufferCapacity); // align up to GetOsPageSize(), with a maximum of 1 MB - g_MiniMetaDataBuffMaxSize = (DWORD) min(ALIGN_UP(g_MiniMetaDataBuffMaxSize, GetOsPageSize()), 1024u * 1024u); + g_MiniMetaDataBuffMaxSize = (DWORD) min(ALIGN_UP(g_MiniMetaDataBuffMaxSize, GetOsPageSize()), (DWORD)(1024 * 1024)); // allocate the buffer. this is never touched while the process is running, so it doesn't // contribute to the process' working set. it is needed only as a "shadow" for a mini-metadata // buffer that will be set up and reported / updated in the Watson process (the diff --git a/src/coreclr/vm/classhash.cpp b/src/coreclr/vm/classhash.cpp index a0eb8eb89c313..1a2af02a05741 100644 --- a/src/coreclr/vm/classhash.cpp +++ b/src/coreclr/vm/classhash.cpp @@ -234,7 +234,7 @@ VOID EEClassHashTable::ConstructKeyFromData(PTR_EEClassHashEntry pEntry, // IN #endif // If IsCaseInsensitiveTable() is true for the hash table, strings passed to the ConstructKeyCallback instance - // will be dynamically allocated. This is to prevent wasting bytes in the Loader Heap. Thusly, it is important + // will be dynamically allocated. This is to prevent wasting bytes in the Loader Heap. Thusly, it is important // to note that in this case, the lifetime of Key is bounded by the lifetime of the single call to UseKeys, and // will be freed when that function returns. @@ -452,7 +452,7 @@ EEClassHashTable *EEClassHashTable::MakeCaseInsensitiveTable(Module *pModule, Al // Allocate the table and verify that we actually got one. EEClassHashTable * pCaseInsTable = EEClassHashTable::Create(pModule, - max(BaseGetElementCount() / 2, 11u), + max(BaseGetElementCount() / 2, (DWORD)11), this, pamTracker); diff --git a/src/coreclr/vm/classlayoutinfo.cpp b/src/coreclr/vm/classlayoutinfo.cpp index 943e29f88a5b8..8336f89066032 100644 --- a/src/coreclr/vm/classlayoutinfo.cpp +++ b/src/coreclr/vm/classlayoutinfo.cpp @@ -198,7 +198,7 @@ namespace COMPlusThrowOM(); // size must be large enough to accommodate layout. If not, we use the layout size instead. - calcTotalSize = max(classSize, calcTotalSize); + calcTotalSize = max((uint32_t)classSize, calcTotalSize); } else { diff --git a/src/coreclr/vm/eetwain.cpp b/src/coreclr/vm/eetwain.cpp index 545bdf7f72102..54a2d12c7d497 100644 --- a/src/coreclr/vm/eetwain.cpp +++ b/src/coreclr/vm/eetwain.cpp @@ -430,7 +430,7 @@ HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx, { // This is an explicit (not special) var, so add its varNumber + 1 to our // max count ("+1" because varNumber is zero-based). 
- oldNumVars = max(oldNumVars, unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) + varNumber + 1); + oldNumVars = max(oldNumVars, (unsigned int)(unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) + varNumber + 1)); } } @@ -484,7 +484,7 @@ HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx, { // This is an explicit (not special) var, so add its varNumber + 1 to our // max count ("+1" because varNumber is zero-based). - newNumVars = max(newNumVars, unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) + varNumber + 1); + newNumVars = max(newNumVars, (unsigned int)(unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) + varNumber + 1)); } } diff --git a/src/coreclr/vm/methodtablebuilder.cpp b/src/coreclr/vm/methodtablebuilder.cpp index cfcb1bb3e78c4..a765830c06d6e 100644 --- a/src/coreclr/vm/methodtablebuilder.cpp +++ b/src/coreclr/vm/methodtablebuilder.cpp @@ -8427,7 +8427,7 @@ VOID MethodTableBuilder::PlaceInstanceFields(MethodTable ** pByValueClassCach minAlign *= 2; } - if (minAlign != min(dwNumInstanceFieldBytes, (unsigned int)TARGET_POINTER_SIZE)) + if (minAlign != min(dwNumInstanceFieldBytes, (DWORD)TARGET_POINTER_SIZE)) { EnsureOptionalFieldsAreAllocated(GetHalfBakedClass(), m_pAllocMemTracker, GetLoaderAllocator()->GetLowFrequencyHeap()); GetHalfBakedClass()->GetOptionalFields()->m_requiredFieldAlignment = (BYTE)minAlign; diff --git a/src/coreclr/vm/proftoeeinterfaceimpl.cpp b/src/coreclr/vm/proftoeeinterfaceimpl.cpp index d85dc93f95092..4fbefe3c31e6a 100644 --- a/src/coreclr/vm/proftoeeinterfaceimpl.cpp +++ b/src/coreclr/vm/proftoeeinterfaceimpl.cpp @@ -5645,7 +5645,7 @@ HRESULT ProfToEEInterfaceImpl::GetAssemblyInfo(AssemblyID assemblyId, if ((NULL != szName) && (cchName > 0)) { - wcsncpy_s(szName, cchName, name.GetUnicode(), min(nameLength, cchName - 1)); + wcsncpy_s(szName, cchName, name.GetUnicode(), min((size_t)nameLength, (size_t)(cchName - 1))); } if (NULL != pcchName) diff --git a/src/coreclr/vm/stringliteralmap.cpp b/src/coreclr/vm/stringliteralmap.cpp index ac37fd48acfca..55d2267f02c8c 100644 --- a/src/coreclr/vm/stringliteralmap.cpp +++ b/src/coreclr/vm/stringliteralmap.cpp @@ -442,7 +442,7 @@ static void LogStringLiteral(_In_z_ const char* action, EEStringData *pStringDat STATIC_CONTRACT_FORBID_FAULT; ULONG length = pStringData->GetCharCount(); - length = min(length, 128u); + length = min(length, (ULONG)128); WCHAR *szString = (WCHAR *)_alloca((length + 1) * sizeof(WCHAR)); memcpyNoGCRefs((void*)szString, (void*)pStringData->GetStringBuffer(), length * sizeof(WCHAR)); szString[length] = '\0'; From 12a61438a3de05e984efa22e3c72a0f6255c5d57 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Fri, 15 Mar 2024 14:17:05 -0700 Subject: [PATCH 43/60] Move standard headers to the top to avoid having the "debug return" macro interfering with those includes. Fix some pointer assignments now that this puts NULL in scope earlier. 
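A rough illustration of the ordering hazard (the macro below is a hypothetical
stand-in; the real "debug return" macro lives in the contracts machinery and is
more involved): once a macro named return is in effect, it expands inside the
bodies of inline functions in any standard header included afterwards and
interferes with them, so the standard headers have to be included first, before
the macro exists:

    #include <algorithm>   // standard headers first, before any macro redefinition
    #include <memory>

    #define return if (0) {} else return   // hypothetical debug-return stand-in

    int main()
    {
        return 0;          // the macro now expands only in our own code
    }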
--- src/coreclr/inc/utilcode.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/coreclr/inc/utilcode.h b/src/coreclr/inc/utilcode.h index a0fb42e6fb9f2..55713550aedc4 100644 --- a/src/coreclr/inc/utilcode.h +++ b/src/coreclr/inc/utilcode.h @@ -10,13 +10,16 @@ #ifndef __UtilCode_h__ #define __UtilCode_h__ +#include +#include +#include +#include + #include "crtwrap.h" #include "winwrap.h" #include -#include #include #include -#include #include "clrtypes.h" #include "safewrap.h" #include "volatile.h" @@ -28,9 +31,6 @@ #include "safemath.h" #include "new.hpp" -#include -#include - #include "contract.h" #include @@ -220,7 +220,7 @@ typedef LPSTR LPUTF8; #define MAKE_UTF8PTR_FROMWIDE_NOTHROW(ptrname, widestr) \ CQuickBytes __qb##ptrname; \ int __l##ptrname = (int)u16_strlen(widestr); \ - LPUTF8 ptrname = 0; \ + LPUTF8 ptrname = NULL; \ if (__l##ptrname <= MAKE_MAX_LENGTH) { \ __l##ptrname = (int)((__l##ptrname + 1) * 2 * sizeof(char)); \ ptrname = (LPUTF8) __qb##ptrname.AllocNoThrow(__l##ptrname); \ @@ -236,12 +236,12 @@ typedef LPSTR LPUTF8; if (WszWideCharToMultiByte(CP_UTF8, 0, widestr, -1, ptrname, __lsize##ptrname, NULL, NULL) != 0) { \ ptrname[__l##ptrname] = 0; \ } else { \ - ptrname = 0; \ + ptrname = NULL; \ } \ } \ } \ else { \ - ptrname = 0; \ + ptrname = NULL; \ } \ } \ } \ @@ -251,7 +251,7 @@ typedef LPSTR LPUTF8; #define MAKE_WIDEPTR_FROMUTF8N_NOTHROW(ptrname, utf8str, n8chrs) \ CQuickBytes __qb##ptrname; \ int __l##ptrname; \ - LPWSTR ptrname = 0; \ + LPWSTR ptrname = NULL; \ __l##ptrname = WszMultiByteToWideChar(CP_UTF8, 0, utf8str, n8chrs, 0, 0); \ if (__l##ptrname <= MAKE_MAX_LENGTH) { \ ptrname = (LPWSTR) __qb##ptrname.AllocNoThrow((__l##ptrname+1)*sizeof(WCHAR)); \ @@ -259,7 +259,7 @@ typedef LPSTR LPUTF8; if (WszMultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, utf8str, n8chrs, ptrname, __l##ptrname) != 0) { \ ptrname[__l##ptrname] = 0; \ } else { \ - ptrname = 0; \ + ptrname = NULL; \ } \ } \ } From a68637656b2ce07245adcf4930590bb210b8f1d4 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Fri, 15 Mar 2024 14:26:58 -0700 Subject: [PATCH 44/60] Use `set_property` to append the RC_INVOKED define instead of overwriting all source-file-level compiler definitions. --- src/coreclr/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/coreclr/CMakeLists.txt b/src/coreclr/CMakeLists.txt index baa99d2a72d6e..aaf4005aa7394 100644 --- a/src/coreclr/CMakeLists.txt +++ b/src/coreclr/CMakeLists.txt @@ -219,7 +219,7 @@ if(CLR_CMAKE_HOST_UNIX) # given Windows .rc file. The target C++ file path is returned in the # variable specified by the TARGET_FILE parameter. 
function(build_resources SOURCE TARGET_NAME TARGET_FILE) - set_source_files_properties(${SOURCE} PROPERTIES COMPILE_DEFINITIONS "RC_INVOKED") + set_property(SOURCE ${SOURCE} APPEND PROPERTY COMPILE_DEFINITIONS "RC_INVOKED") set(PREPROCESSED_SOURCE ${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}.rc.i) preprocess_file(${SOURCE} ${PREPROCESSED_SOURCE}) From cef5418f63d9396b1d2e3b9e4e20c386e87ba3ef Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Fri, 15 Mar 2024 14:37:00 -0700 Subject: [PATCH 45/60] Undef __fastcall to fix FreeBSD --- src/coreclr/pal/inc/pal_mstypes.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/coreclr/pal/inc/pal_mstypes.h b/src/coreclr/pal/inc/pal_mstypes.h index b4e04840d4837..d59103002d18a 100644 --- a/src/coreclr/pal/inc/pal_mstypes.h +++ b/src/coreclr/pal/inc/pal_mstypes.h @@ -64,6 +64,13 @@ extern "C" { #define _cdecl #define CDECL +// Some platforms (such as FreeBSD) define the __fastcall macro +// on all targets, even when using it will fail. +// Undefine it here so we can use it on all platforms without error. +#ifdef __fastcall +#undef __fastcall +#endif + #define __fastcall #define _fastcall From 283f2548d4146f567360e405530ff61b783f49f7 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Fri, 15 Mar 2024 14:42:23 -0700 Subject: [PATCH 46/60] Add more min/max casts to fix GCC build. --- src/coreclr/gc/gc.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index d40b3dce40c29..bea24e0426781 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -13570,7 +13570,7 @@ void gc_heap::distribute_free_regions() if (ephemeral_elapsed >= DECOMMIT_TIME_STEP_MILLISECONDS) { gc_last_ephemeral_decommit_time = dd_time_clock (dd0); - size_t decommit_step_milliseconds = min (ephemeral_elapsed, (10*1000)); + size_t decommit_step_milliseconds = min (ephemeral_elapsed, (size_t)(10*1000)); decommit_step (decommit_step_milliseconds); } @@ -22415,7 +22415,7 @@ void gc_heap::gc1() if (alloc_contexts_used >= 1) { allocation_quantum = Align (min ((size_t)CLR_SIZE, - (size_t)max (1024, get_new_allocation (0) / (2 * alloc_contexts_used))), + (size_t)max ((size_t)1024, get_new_allocation (0) / (2 * alloc_contexts_used))), get_alignment_constant(FALSE)); dprintf (3, ("New allocation quantum: %zd(0x%zx)", allocation_quantum, allocation_quantum)); } @@ -43357,14 +43357,14 @@ void gc_heap::init_static_data() size_t gen0_max_size = #ifdef MULTIPLE_HEAPS - max (6*1024*1024u, min ( Align(soh_segment_size/2), 200*1024*1024u)); + max ((size_t)6*1024*1024u, min ( Align(soh_segment_size/2), (size_t)200*1024*1024u)); #else //MULTIPLE_HEAPS ( #ifdef BACKGROUND_GC gc_can_use_concurrent ? 6*1024*1024 : #endif //BACKGROUND_GC - max (6*1024*1024, min ( Align(soh_segment_size/2), 200*1024*1024)) + max ((size_t)6*1024*1024, min ( Align(soh_segment_size/2), (size_t)200*1024*1024)) ); #endif //MULTIPLE_HEAPS @@ -51270,11 +51270,11 @@ size_t gc_heap::get_gen0_min_size() #ifdef SERVER_GC // performance data seems to indicate halving the size results // in optimal perf. Ask for adjusted gen0 size. - gen0size = max(GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE),(256*1024u)); + gen0size = max(GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE), (size_t)(256*1024u)); // if gen0 size is too large given the available memory, reduce it. // Get true cache size, as we don't want to reduce below this. 
- size_t trueSize = max(GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE),(256*1024u)); + size_t trueSize = max(GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE), (size_t)(256*1024u)); dprintf (1, ("cache: %zd-%zd", GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE), GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE))); @@ -51282,8 +51282,8 @@ size_t gc_heap::get_gen0_min_size() int n_heaps = gc_heap::n_heaps; #else //SERVER_GC size_t trueSize = GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE); - gen0size = max((4*trueSize/5),(256*1024)); - trueSize = max(trueSize, (256*1024)); + gen0size = max((4*trueSize/5),(size_t)(256*1024)); + trueSize = max(trueSize, (size_t)(256*1024)); int n_heaps = 1; #endif //SERVER_GC From ee084eca2bb6222916a4738b5e61dfec14c89b57 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Fri, 15 Mar 2024 15:05:00 -0700 Subject: [PATCH 47/60] Add more casts to fix GCC and windows x86 builds --- src/coreclr/gc/gc.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index bea24e0426781..408fe66506647 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -28580,7 +28580,7 @@ BOOL gc_heap::background_process_mark_overflow (BOOL concurrent_p) if (grow_mark_array_p) { // Try to grow the array. - size_t new_size = max (MARK_STACK_INITIAL_LENGTH, 2*background_mark_stack_array_length); + size_t new_size = max ((size_t)MARK_STACK_INITIAL_LENGTH, 2*background_mark_stack_array_length); if ((new_size * sizeof(mark)) > 100*1024) { @@ -42440,8 +42440,8 @@ BOOL gc_heap::best_fit (size_t free_space, #endif // SEG_REUSE_STATS if (free_space_items) { - max_free_space_items = min (MAX_NUM_FREE_SPACES, free_space_items * 2); - max_free_space_items = max (max_free_space_items, MIN_NUM_FREE_SPACES); + max_free_space_items = min ((size_t)MAX_NUM_FREE_SPACES, free_space_items * 2); + max_free_space_items = max (max_free_space_items, (size_t)MIN_NUM_FREE_SPACES); } else { From aa54b3fbe73d6363564b4e5e096b1959de11a223 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Fri, 15 Mar 2024 16:56:38 -0700 Subject: [PATCH 48/60] More min/max fixes... --- src/coreclr/debug/daccess/daccess.cpp | 2 +- src/coreclr/gc/gc.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/coreclr/debug/daccess/daccess.cpp b/src/coreclr/debug/daccess/daccess.cpp index e79dab808def3..d9a47429d5b3a 100644 --- a/src/coreclr/debug/daccess/daccess.cpp +++ b/src/coreclr/debug/daccess/daccess.cpp @@ -5789,7 +5789,7 @@ ClrDataAccess::RawGetMethodName( SIZE_T maxPrecodeSize = sizeof(StubPrecode); #ifdef HAS_THISPTR_RETBUF_PRECODE - maxPrecodeSize = max(maxPrecodeSize, sizeof(ThisPtrRetBufPrecode)); + maxPrecodeSize = max((size_t)maxPrecodeSize, sizeof(ThisPtrRetBufPrecode)); #endif for (SIZE_T i = 0; i < maxPrecodeSize / PRECODE_ALIGNMENT; i++) diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index 408fe66506647..b44bd932946f3 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -28920,7 +28920,7 @@ BOOL gc_heap::process_mark_overflow(int condemned_gen_number) overflow_p = TRUE; // Try to grow the array. 
size_t new_size = - max (MARK_STACK_INITIAL_LENGTH, 2*mark_stack_array_length); + max ((size_t)MARK_STACK_INITIAL_LENGTH, 2*mark_stack_array_length); if ((new_size * sizeof(mark)) > 100*1024) { @@ -51291,7 +51291,7 @@ size_t gc_heap::get_gen0_min_size() if (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes) { // if we are asked to be stingy with memory, limit gen 0 size - gen0size = min (gen0size, (4*1024*1024)); + gen0size = min (gen0size, (size_t)(4*1024*1024)); } #endif //DYNAMIC_HEAP_COUNT From 03b42c618c7adf63ee11bdecdbf9e79b27ced923 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 18 Mar 2024 10:25:45 -0700 Subject: [PATCH 49/60] More signedness mismatch fixes in gc. --- src/coreclr/debug/di/rsthread.cpp | 4 ++-- src/coreclr/gc/gc.cpp | 10 +++++----- src/coreclr/gc/windows/gcenv.windows.cpp | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/coreclr/debug/di/rsthread.cpp b/src/coreclr/debug/di/rsthread.cpp index 307da3f6d1da1..1f455dad376d1 100644 --- a/src/coreclr/debug/di/rsthread.cpp +++ b/src/coreclr/debug/di/rsthread.cpp @@ -8186,7 +8186,7 @@ HRESULT CordbJITILFrame::FabricateNativeInfo(DWORD dwIndex, // first argument, but thereafter we have to decrement it // before getting the variable's location from it. So increment // it here to be consistent later. - rpCur += max(cbType, cbArchitectureMin); + rpCur += max((ULONG)cbType, cbArchitectureMin); #endif // Grab the IL code's function's method signature so we can see if it's static. @@ -8219,7 +8219,7 @@ HRESULT CordbJITILFrame::FabricateNativeInfo(DWORD dwIndex, IfFailThrow(pArgType->GetUnboxedObjectSize(&cbType)); #if defined(TARGET_X86) // STACK_GROWS_DOWN_ON_ARGS_WALK - rpCur -= max(cbType, cbArchitectureMin); + rpCur -= max((ULONG)cbType, cbArchitectureMin); m_rgNVI[i].loc.vlFixedVarArg.vlfvOffset = (unsigned)(m_FirstArgAddr - rpCur); diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index b44bd932946f3..737f612c4e889 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -42672,8 +42672,8 @@ BOOL gc_heap::can_expand_into_p (heap_segment* seg, size_t min_free_size, size_t memcpy (ordered_free_space_indices, saved_ordered_free_space_indices, sizeof(ordered_free_space_indices)); - max_free_space_items = max (MIN_NUM_FREE_SPACES, free_space_items * 3 / 2); - max_free_space_items = min (MAX_NUM_FREE_SPACES, max_free_space_items); + max_free_space_items = max ((size_t)MIN_NUM_FREE_SPACES, free_space_items * 3 / 2); + max_free_space_items = min ((size_t)MAX_NUM_FREE_SPACES, max_free_space_items); dprintf (SEG_REUSE_LOG_0, ("could fit! %zd free spaces, %zd max", free_space_items, max_free_space_items)); } @@ -43394,14 +43394,14 @@ void gc_heap::init_static_data() // TODO: gen0_max_size has a 200mb cap; gen1_max_size should also have a cap. size_t gen1_max_size = (size_t) #ifdef MULTIPLE_HEAPS - max (6*1024*1024, Align(soh_segment_size/2)); + max (6*1024u, Align(soh_segment_size/2)); #else //MULTIPLE_HEAPS ( #ifdef BACKGROUND_GC gc_can_use_concurrent ? 
6*1024*1024 : #endif //BACKGROUND_GC - max (6*1024*1024, Align(soh_segment_size/2)) + max (6*1024*1024u, Align(soh_segment_size/2)) ); #endif //MULTIPLE_HEAPS @@ -44234,7 +44234,7 @@ void gc_heap::decommit_ephemeral_segment_pages() // we do a max of DECOMMIT_SIZE_PER_MILLISECOND per millisecond of elapsed time since the last GC // we limit the elapsed time to 10 seconds to avoid spending too much time decommitting - ptrdiff_t max_decommit_size = min (ephemeral_elapsed, (10*1000)) * DECOMMIT_SIZE_PER_MILLISECOND; + ptrdiff_t max_decommit_size = min (ephemeral_elapsed, (10*1000u)) * DECOMMIT_SIZE_PER_MILLISECOND; decommit_size = min (decommit_size, max_decommit_size); slack_space = heap_segment_committed (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment) - decommit_size; diff --git a/src/coreclr/gc/windows/gcenv.windows.cpp b/src/coreclr/gc/windows/gcenv.windows.cpp index c9138d537bb5b..608751dd169af 100644 --- a/src/coreclr/gc/windows/gcenv.windows.cpp +++ b/src/coreclr/gc/windows/gcenv.windows.cpp @@ -299,7 +299,7 @@ static size_t GetRestrictedPhysicalMemoryLimit() total_physical = ms.ullAvailPhys; // A sanity check in case someone set a larger limit than there is actual physical memory. - job_physical_memory_limit = (size_t) std::min (job_physical_memory_limit, ms.ullTotalPhys); + job_physical_memory_limit = (size_t) std::min (job_physical_memory_limit, (size_t)ms.ullTotalPhys); } } } From 80c56dae83423c1a0501529289ceb54c2e486450 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 1 Apr 2024 16:33:20 -0700 Subject: [PATCH 50/60] Remove USE_STL --- src/coreclr/inc/random.h | 4 ++-- src/coreclr/pal/CMakeLists.txt | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/coreclr/inc/random.h b/src/coreclr/inc/random.h index 0bd2164cbb109..adc16e536c394 100644 --- a/src/coreclr/inc/random.h +++ b/src/coreclr/inc/random.h @@ -27,7 +27,7 @@ // Forbid the use of srand()/rand(), as these are globally shared facilities and our use of them would // interfere with native user code in the same process. This override is not compatible with stl headers. 
// -#if !defined(DO_NOT_DISABLE_RAND) && !defined(USE_STL) +#if !defined(DO_NOT_DISABLE_RAND) #ifdef srand #undef srand @@ -39,7 +39,7 @@ #endif #define rand Do_not_use_rand -#endif //!DO_NOT_DISABLE_RAND && !USE_STL +#endif //!DO_NOT_DISABLE_RAND class CLRRandom diff --git a/src/coreclr/pal/CMakeLists.txt b/src/coreclr/pal/CMakeLists.txt index 4509e9fc0f8b5..9213941ba6da0 100644 --- a/src/coreclr/pal/CMakeLists.txt +++ b/src/coreclr/pal/CMakeLists.txt @@ -7,7 +7,6 @@ include_directories(${COREPAL_SOURCE_DIR}/src) include_directories(${COREPAL_SOURCE_DIR}/../inc) add_compile_options(-fexceptions) -add_definitions(-DUSE_STL) add_subdirectory(src) add_subdirectory(tests) From 0092a9282c5e5b74bf6dafe08d48435bdd57a088 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 1 Apr 2024 16:33:36 -0700 Subject: [PATCH 51/60] PR feedback --- src/coreclr/gc/gc.cpp | 8 ++++---- src/coreclr/nativeaot/Runtime/threadstore.cpp | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index 737f612c4e889..4e955079b81ab 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -43357,7 +43357,7 @@ void gc_heap::init_static_data() size_t gen0_max_size = #ifdef MULTIPLE_HEAPS - max ((size_t)6*1024*1024u, min ( Align(soh_segment_size/2), (size_t)200*1024*1024u)); + max ((size_t)6*1024*1024u, min ( Align(soh_segment_size/2), (size_t)200*1024*1024)); #else //MULTIPLE_HEAPS ( #ifdef BACKGROUND_GC @@ -43394,7 +43394,7 @@ void gc_heap::init_static_data() // TODO: gen0_max_size has a 200mb cap; gen1_max_size should also have a cap. size_t gen1_max_size = (size_t) #ifdef MULTIPLE_HEAPS - max (6*1024u, Align(soh_segment_size/2)); + max (6*1024 * 1024u, Align(soh_segment_size/2)); #else //MULTIPLE_HEAPS ( #ifdef BACKGROUND_GC @@ -51270,11 +51270,11 @@ size_t gc_heap::get_gen0_min_size() #ifdef SERVER_GC // performance data seems to indicate halving the size results // in optimal perf. Ask for adjusted gen0 size. - gen0size = max(GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE), (size_t)(256*1024u)); + gen0size = max(GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE), (size_t)(256*1024)); // if gen0 size is too large given the available memory, reduce it. // Get true cache size, as we don't want to reduce below this. - size_t trueSize = max(GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE), (size_t)(256*1024u)); + size_t trueSize = max(GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE), (size_t)(256*1024)); dprintf (1, ("cache: %zd-%zd", GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE), GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE))); diff --git a/src/coreclr/nativeaot/Runtime/threadstore.cpp b/src/coreclr/nativeaot/Runtime/threadstore.cpp index e04effee0000a..d94ef850a1c9b 100644 --- a/src/coreclr/nativeaot/Runtime/threadstore.cpp +++ b/src/coreclr/nativeaot/Runtime/threadstore.cpp @@ -230,7 +230,7 @@ void SpinWait(int iteration, int usecLimit) int64_t ticksPerSecond = PalQueryPerformanceFrequency(); int64_t endTicks = startTicks + (usecLimit * ticksPerSecond) / 1000000; - int l = (int)min((unsigned)iteration, 30u); + int l = iteration >= 0 ? 
min(iteration, 30): 30; for (int i = 0; i < l; i++) { for (int j = 0; j < (1 << i); j++) From 93be569793cdee8b2833c9e89cfddf7666c04f39 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 1 Apr 2024 16:38:00 -0700 Subject: [PATCH 52/60] Remove custom min/max from SuperPMI --- .../superpmi/superpmi-shared/methodcontext.cpp | 2 +- .../superpmi/superpmi-shared/spmidumphelper.cpp | 2 +- .../superpmi/superpmi-shared/spmidumphelper.h | 2 +- .../tools/superpmi/superpmi-shared/standardpch.h | 14 +++----------- 4 files changed, 6 insertions(+), 14 deletions(-) diff --git a/src/coreclr/tools/superpmi/superpmi-shared/methodcontext.cpp b/src/coreclr/tools/superpmi/superpmi-shared/methodcontext.cpp index 70c5c627bd404..4033b820bd1a5 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/methodcontext.cpp +++ b/src/coreclr/tools/superpmi/superpmi-shared/methodcontext.cpp @@ -6632,7 +6632,7 @@ size_t MethodContext::repPrint( size_t bytesWritten = 0; if ((buffer != nullptr) && (bufferSize > 0)) { - bytesWritten = min(bufferSize - 1, res.stringBufferSize); + bytesWritten = min(bufferSize - 1, (size_t)res.stringBufferSize); if (bytesWritten > 0) { // The "full buffer" check above ensures this given that diff --git a/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.cpp b/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.cpp index 725f52cbcc74c..4a2d55c1dbb23 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.cpp +++ b/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.cpp @@ -93,7 +93,7 @@ void SpmiDumpHelper::FormatHandleArray(char*& pbuf, int& sizeOfBuffer, const Den sizeOfBuffer -= cch; const unsigned int maxHandleArrayDisplayElems = 5; // Don't display more than this. - const unsigned int handleArrayDisplayElems = min(maxHandleArrayDisplayElems, count); + const unsigned int handleArrayDisplayElems = min(maxHandleArrayDisplayElems, (unsigned int)count); bool first = true; for (DWORD i = startIndex; i < startIndex + handleArrayDisplayElems; i++) diff --git a/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.h b/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.h index 4dc1f28991a75..8ecff161dddc1 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.h +++ b/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.h @@ -111,7 +111,7 @@ inline std::string SpmiDumpHelper::DumpPSig( sizeOfBuffer -= cch; const unsigned int maxSigDisplayBytes = 25; // Don't display more than this. - const unsigned int sigDisplayBytes = min(maxSigDisplayBytes, cbSig); + const unsigned int sigDisplayBytes = min(maxSigDisplayBytes, (size_t)cbSig); // TODO: display character representation of the types? diff --git a/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h b/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h index 4a9f434763e40..9b926556fdbb6 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h +++ b/src/coreclr/tools/superpmi/superpmi-shared/standardpch.h @@ -63,17 +63,6 @@ #include #include -template -constexpr auto max(T&& t, U&& u) -> decltype(t > u ? t : u) -{ - return t > u ? t : u; -} - -template -constexpr auto min(T&& t, U&& u) -> decltype(t < u ? t : u) -{ - return t < u ? 
t : u; -} #ifdef USE_MSVCDIS #define DISLIB @@ -128,6 +117,9 @@ static inline void __debugbreak() } #endif +using std::min; +using std::max; + #include #endif // STANDARDPCH_H From 5dddd14f76589b8a52d0064c9fd21f339d522207 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 1 Apr 2024 16:43:17 -0700 Subject: [PATCH 53/60] Remove custom min/max from the JIT --- src/coreclr/jit/fgopt.cpp | 4 ++-- src/coreclr/jit/gentree.cpp | 2 +- src/coreclr/jit/jitpch.h | 4 ++++ src/coreclr/jit/lclvars.cpp | 2 +- src/coreclr/jit/loopcloning.cpp | 2 +- src/coreclr/jit/utils.h | 12 ------------ 6 files changed, 9 insertions(+), 17 deletions(-) diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 8fcfdfc62c72b..7c24245f517ac 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -2482,7 +2482,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // weight_t targetWeight = target->bbWeight; weight_t blockWeight = block->bbWeight; - target->setBBProfileWeight(max(0, targetWeight - blockWeight)); + target->setBBProfileWeight(max(0.0, targetWeight - blockWeight)); JITDUMP("Decreased " FMT_BB " profile weight from " FMT_WT " to " FMT_WT "\n", target->bbNum, targetWeight, target->bbWeight); } @@ -3065,7 +3065,7 @@ bool Compiler::fgOptimizeSwitchJumps() blockToTargetEdge->setEdgeWeights(blockToTargetWeight, blockToTargetWeight, dominantTarget); blockToTargetEdge->setLikelihood(fraction); blockToNewBlockEdge->setEdgeWeights(blockToNewBlockWeight, blockToNewBlockWeight, block); - blockToNewBlockEdge->setLikelihood(max(0, 1.0 - fraction)); + blockToNewBlockEdge->setLikelihood(max(0.0, 1.0 - fraction)); // There may be other switch cases that lead to this same block, but there's just // one edge in the flowgraph. So we need to subtract off the profile data that now flows diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 2ea7e6b235f11..b609ed2c6e111 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -5769,7 +5769,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) { // Store to an enregistered local. costEx = op1->GetCostEx(); - costSz = max(3, op1->GetCostSz()); // 3 is an estimate for a reg-reg move. + costSz = max(3, (int)op1->GetCostSz()); // 3 is an estimate for a reg-reg move. goto DONE; } diff --git a/src/coreclr/jit/jitpch.h b/src/coreclr/jit/jitpch.h index 07f6ae6631cab..ebb18e5177dfe 100644 --- a/src/coreclr/jit/jitpch.h +++ b/src/coreclr/jit/jitpch.h @@ -16,6 +16,10 @@ #ifdef HOST_WINDOWS #include #endif +#include + +using std::min; +using std::max; // Don't allow using the windows.h #defines for the BitScan* APIs. Using the #defines means our // `BitOperations::BitScan*` functions have their name mapped, which is confusing and messes up diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index da69cb59432f2..931d82106b830 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -586,7 +586,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, un const unsigned argSigLen = info.compMethodInfo->args.numArgs; // We will process at most takeArgs arguments from the signature after skipping skipArgs arguments - const int64_t numUserArgs = min(takeArgs, (argSigLen - (int64_t)skipArgs)); + const int64_t numUserArgs = min((int64_t)takeArgs, (argSigLen - (int64_t)skipArgs)); // If there are no user args or less than skipArgs args, return here since there's no work to do. 
if (numUserArgs <= 0) diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index 5f51c77eb2384..1c8824668a56a 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -2080,7 +2080,7 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex FlowEdge* const falseEdge = fgAddRefPred(fastPreheader, condLast); condLast->SetFalseEdge(falseEdge); FlowEdge* const trueEdge = condLast->GetTrueEdge(); - falseEdge->setLikelihood(max(0, 1.0 - trueEdge->getLikelihood())); + falseEdge->setLikelihood(max(0.0, 1.0 - trueEdge->getLikelihood())); } //------------------------------------------------------------------------- diff --git a/src/coreclr/jit/utils.h b/src/coreclr/jit/utils.h index 82af11924d9e3..549922ad15840 100644 --- a/src/coreclr/jit/utils.h +++ b/src/coreclr/jit/utils.h @@ -160,18 +160,6 @@ int signum(T val) } } -template -constexpr auto max(T&& t, U&& u) -> decltype(t > u ? t : u) -{ - return t > u ? t : u; -} - -template -constexpr auto min(T&& t, U&& u) -> decltype(t < u ? t : u) -{ - return t < u ? t : u; -} - #if defined(DEBUG) // ConfigMethodRange describes a set of methods, specified via their From f9647e59820f43fa2f36699d22577f139a8adf92 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 1 Apr 2024 16:52:13 -0700 Subject: [PATCH 54/60] Remove min/max in the GC --- src/coreclr/gc/gc.cpp | 26 +++++++++++++------------- src/coreclr/gc/gcpriv.h | 13 ++----------- 2 files changed, 15 insertions(+), 24 deletions(-) diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index 4e955079b81ab..f3689f50a3025 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -3113,7 +3113,7 @@ void gc_history_global::print() uint32_t limit_time_to_uint32 (uint64_t time) { - time = min (time, UINT32_MAX); + time = min (time, (uint64_t)UINT32_MAX); return (uint32_t)time; } @@ -12191,7 +12191,7 @@ void gc_heap::clear_region_demoted (heap_segment* region) int gc_heap::get_plan_gen_num (int gen_number) { - return ((settings.promotion) ? min ((gen_number + 1), max_generation) : gen_number); + return ((settings.promotion) ? min ((gen_number + 1), (int)max_generation) : gen_number); } uint8_t* gc_heap::get_uoh_start_object (heap_segment* region, generation* gen) @@ -21859,13 +21859,13 @@ size_t gc_heap::min_reclaim_fragmentation_threshold (uint32_t num_heaps) dprintf (GTC_LOG, ("min av: %zd, 10%% gen2: %zd, 3%% mem: %zd", min_mem_based_on_available, ten_percent_size, three_percent_mem)); #endif //SIMPLE_DPRINTF - return (size_t)(min (min_mem_based_on_available, min (ten_percent_size, three_percent_mem))); + return (size_t)(min ((uint64_t)min_mem_based_on_available, min ((uint64_t)ten_percent_size, three_percent_mem))); } inline uint64_t gc_heap::min_high_fragmentation_threshold(uint64_t available_mem, uint32_t num_heaps) { - return min (available_mem, (256*1024*1024u)) / num_heaps; + return min (available_mem, (uint64_t)(256*1024*1024)) / num_heaps; } enum { @@ -22235,7 +22235,7 @@ void gc_heap::gc1() } //adjust the allocation size from the pinned quantities. 
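For context on the helper deletions above (SuperPMI's standardpch.h and the JIT's utils.h, with the GC's copy going away in this patch): the old templates took two independent type parameters and derived the result type from the conditional expression, so mixed-type calls were accepted and the usual arithmetic conversions were applied silently. The sketch below reconstructs that shape for illustration; it is not a verbatim copy of the deleted code.

    #include <algorithm>

    // Reconstruction of the deleted helper's shape: any pair of comparable
    // types is accepted, and the result type comes from the ternary operator.
    template <class T, class U>
    constexpr auto old_style_max(T&& t, U&& u) -> decltype(t > u ? t : u)
    {
        return t > u ? t : u;
    }

    int main()
    {
        int      a = -1;
        unsigned b = 2;

        // The old helper converts 'a' to unsigned and yields UINT_MAX, not 2.
        auto surprising = old_style_max(a, b);

        // std::max forces the caller to pick a type, so the conversion becomes
        // visible at the call site, which is what the added casts accomplish.
        unsigned explicit_form = std::max((unsigned)a, b);

        return (surprising == explicit_form) ? 0 : 1;
    }
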
- for (int gen_number = 0; gen_number <= min (max_generation,n+1); gen_number++) + for (int gen_number = 0; gen_number <= min ((int)max_generation,n+1); gen_number++) { generation* gn = generation_of (gen_number); if (settings.compaction) @@ -29223,7 +29223,7 @@ BOOL gc_heap::decide_on_promotion_surv (size_t threshold) { gc_heap* hp = pGenGCHeap; #endif //MULTIPLE_HEAPS - dynamic_data* dd = hp->dynamic_data_of (min ((settings.condemned_generation + 1), max_generation)); + dynamic_data* dd = hp->dynamic_data_of (min ((int)(settings.condemned_generation + 1), (int)max_generation)); size_t older_gen_size = dd_current_size (dd) + (dd_desired_allocation (dd) - dd_new_allocation (dd)); size_t promoted = hp->total_promoted_bytes; @@ -32532,7 +32532,7 @@ void gc_heap::plan_phase (int condemned_gen_number) if ((condemned_gen_number < max_generation)) { - older_gen = generation_of (min (max_generation, 1 + condemned_gen_number)); + older_gen = generation_of (min ((int)max_generation, 1 + condemned_gen_number)); generation_allocator (older_gen)->copy_to_alloc_list (r_free_list); r_free_list_space = generation_free_list_space (older_gen); @@ -34103,7 +34103,7 @@ void gc_heap::plan_phase (int condemned_gen_number) { reset_pinned_queue_bos(); #ifndef USE_REGIONS - unsigned int gen_number = min (max_generation, 1 + condemned_gen_number); + unsigned int gen_number = (unsigned int)min ((int)max_generation, 1 + condemned_gen_number); generation* gen = generation_of (gen_number); uint8_t* low = generation_allocation_start (generation_of (gen_number-1)); uint8_t* high = heap_segment_allocated (ephemeral_heap_segment); @@ -43548,7 +43548,7 @@ size_t gc_heap::desired_new_allocation (dynamic_data* dd, } else { - new_size = (size_t) min (max ( (f * current_size), min_gc_size), max_size); + new_size = (size_t) min (max ( (size_t)(f * current_size), min_gc_size), max_size); } assert ((new_size >= current_size) || (new_size == max_size)); @@ -43620,7 +43620,7 @@ size_t gc_heap::desired_new_allocation (dynamic_data* dd, size_t survivors = out; cst = float (survivors) / float (dd_begin_data_size (dd)); f = surv_to_growth (cst, limit, max_limit); - new_allocation = (size_t) min (max ((f * (survivors)), min_gc_size), max_size); + new_allocation = (size_t) min (max ((size_t)(f * (survivors)), min_gc_size), max_size); new_allocation = linear_allocation_model (allocation_fraction, new_allocation, dd_desired_allocation (dd), time_since_previous_collection_secs); @@ -49817,7 +49817,7 @@ GCHeap::GarbageCollect (int generation, bool low_memory_p, int mode) gc_heap* hpt = 0; #endif //MULTIPLE_HEAPS - generation = (generation < 0) ? max_generation : min (generation, max_generation); + generation = (generation < 0) ? max_generation : min (generation, (int)max_generation); dynamic_data* dd = hpt->dynamic_data_of (generation); #ifdef BACKGROUND_GC @@ -49915,7 +49915,7 @@ size_t GCHeap::GarbageCollectTry (int generation, BOOL low_memory_p, int mode) { int gen = (generation < 0) ? 
- max_generation : min (generation, max_generation); + max_generation : min (generation, (int)max_generation); gc_reason reason = reason_empty; @@ -51912,7 +51912,7 @@ CFinalize::UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p) // it was promoted or not if (gen_0_empty_p) { - for (int i = min (gen+1, max_generation); i > 0; i--) + for (int i = min (gen+1, (int)max_generation); i > 0; i--) { m_FillPointers [gen_segment(i)] = m_FillPointers [gen_segment(i-1)]; } diff --git a/src/coreclr/gc/gcpriv.h b/src/coreclr/gc/gcpriv.h index 0e44cf8aa12ef..0f56e63a8a77c 100644 --- a/src/coreclr/gc/gcpriv.h +++ b/src/coreclr/gc/gcpriv.h @@ -5962,14 +5962,5 @@ class card_marking_enumerator #define THIS_ARG #endif // FEATURE_CARD_MARKING_STEALING -template -auto max(T&& t, U&& u) -> decltype(t > u ? t : u) -{ - return t > u ? t : u; -} - -template -auto min(T&& t, U&& u) -> decltype(t < u ? t : u) -{ - return t < u ? t : u; -} +using std::min; +using std::max; From 25c9976e9208f81bef50668df120240654a7b7bd Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 2 Apr 2024 09:46:58 -0700 Subject: [PATCH 55/60] Fix more cases that only pop in 64-bit windows builds. --- src/coreclr/gc/gc.cpp | 32 +++++++++---------- src/coreclr/jit/hashbv.cpp | 2 +- .../superpmi/superpmi-shared/spmidumphelper.h | 4 +-- src/coreclr/vm/eetwain.cpp | 4 +-- 4 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index f3689f50a3025..17025f9602c29 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -12330,7 +12330,7 @@ void gc_heap::init_heap_segment (heap_segment* seg, gc_heap* hp #endif //MULTIPLE_HEAPS #ifdef USE_REGIONS - int gen_num_for_region = min (gen_num, max_generation); + int gen_num_for_region = min (gen_num, (int)max_generation); set_region_gen_num (seg, gen_num_for_region); heap_segment_plan_gen_num (seg) = gen_num_for_region; heap_segment_swept_in_plan (seg) = false; @@ -13350,7 +13350,7 @@ void gc_heap::distribute_free_regions() const int i = 0; const int n_heaps = 1; #endif //MULTIPLE_HEAPS - ptrdiff_t budget_gen = max (hp->estimate_gen_growth (gen), 0); + ptrdiff_t budget_gen = max (hp->estimate_gen_growth (gen), (ptrdiff_t)0); int kind = gen >= loh_generation; size_t budget_gen_in_region_units = (budget_gen + (region_size[kind] - 1)) / region_size[kind]; dprintf (REGIONS_LOG, ("h%2d gen %d has an estimated growth of %zd bytes (%zd regions)", i, gen, budget_gen, budget_gen_in_region_units)); @@ -14350,7 +14350,7 @@ gc_heap::init_semi_shared() #endif //!USE_REGIONS #ifdef MULTIPLE_HEAPS - mark_list_size = min (100*1024u, max (8192u, soh_segment_size/(2*10*32))); + mark_list_size = min ((size_t)100*1024, max ((size_t)8192, soh_segment_size/(2*10*32))); #ifdef DYNAMIC_HEAP_COUNT if (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes) { @@ -14372,7 +14372,7 @@ gc_heap::init_semi_shared() } #else //MULTIPLE_HEAPS - mark_list_size = min(100*1024u, max (8192u, soh_segment_size/(64*32))); + mark_list_size = min((size_t)100*1024, max ((size_t)8192, soh_segment_size/(64*32))); g_mark_list_total_size = mark_list_size; g_mark_list = make_mark_list (mark_list_size); @@ -22126,7 +22126,7 @@ size_t gc_heap::exponential_smoothing (int gen, size_t collection_count, size_t { // to avoid spikes in mem usage due to short terms fluctuations in survivorship, // apply some smoothing. 
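The comment above gives the intent; a worked instance of this hunk's update may help: the function keeps an exponential moving average of the desired budget, new = desired/n + previous*(n-1)/n, with n capped at 3 (or at the collection count while it is still below 3). The numbers below are hypothetical and not taken from the GC.

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        size_t n        = 3;           // smoothing window, as in the hunk above
        size_t previous = 90 * 1024;   // hypothetical previous smoothed budget
        size_t desired  = 120 * 1024;  // hypothetical newly computed budget

        // Mirrors desired_total / smoothing +
        //         (smoothed_desired_total / smoothing) * (smoothing - 1)
        size_t smoothed = desired / n + (previous / n) * (n - 1);
        printf("%zu\n", smoothed);     // 40960 + 61440 = 102400
        return 0;
    }
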
- size_t smoothing = min(3u, collection_count); + size_t smoothing = min((size_t)3, collection_count); size_t desired_total = desired_per_heap * n_heaps; size_t new_smoothed_desired_total = desired_total / smoothing + ((smoothed_desired_total[gen] / smoothing) * (smoothing - 1)); @@ -29299,7 +29299,7 @@ void gc_heap::verify_region_to_generation_map() } size_t region_index_start = get_basic_region_index_for_address (get_region_start (region)); size_t region_index_end = get_basic_region_index_for_address (heap_segment_reserved (region)); - int gen_num = min (gen_number, soh_gen2); + int gen_num = min (gen_number, (int)soh_gen2); assert (gen_num == heap_segment_gen_num (region)); int plan_gen_num = heap_segment_plan_gen_num (region); bool is_demoted = (region->flags & heap_segment_flags_demoted) != 0; @@ -43394,14 +43394,14 @@ void gc_heap::init_static_data() // TODO: gen0_max_size has a 200mb cap; gen1_max_size should also have a cap. size_t gen1_max_size = (size_t) #ifdef MULTIPLE_HEAPS - max (6*1024 * 1024u, Align(soh_segment_size/2)); + max ((size_t)6*1024 * 1024, Align(soh_segment_size/2)); #else //MULTIPLE_HEAPS ( #ifdef BACKGROUND_GC gc_can_use_concurrent ? 6*1024*1024 : #endif //BACKGROUND_GC - max (6*1024*1024u, Align(soh_segment_size/2)) + max ((size_t)6*1024*1024, Align(soh_segment_size/2)) ); #endif //MULTIPLE_HEAPS @@ -43715,9 +43715,9 @@ size_t gc_heap::generation_plan_size (int gen_number) return result; #else //USE_REGIONS if (0 == gen_number) - return max((heap_segment_plan_allocated (ephemeral_heap_segment) - + return (size_t)max((heap_segment_plan_allocated (ephemeral_heap_segment) - generation_plan_allocation_start (generation_of (gen_number))), - (int)Align (min_obj_size)); + (ptrdiff_t)Align (min_obj_size)); else { generation* gen = generation_of (gen_number); @@ -43766,9 +43766,9 @@ size_t gc_heap::generation_size (int gen_number) return result; #else //USE_REGIONS if (0 == gen_number) - return max((heap_segment_allocated (ephemeral_heap_segment) - + return (size_t)max((heap_segment_allocated (ephemeral_heap_segment) - generation_allocation_start (generation_of (gen_number))), - (int)Align (min_obj_size)); + (ptrdiff_t)Align (min_obj_size)); else { generation* gen = generation_of (gen_number); @@ -44185,7 +44185,7 @@ void gc_heap::decommit_ephemeral_segment_pages() dynamic_data* dd0 = dynamic_data_of (0); ptrdiff_t desired_allocation = dd_new_allocation (dd0) + - max (estimate_gen_growth (soh_gen1), 0) + + max (estimate_gen_growth (soh_gen1), (ptrdiff_t)0) + loh_size_threshold; size_t slack_space = @@ -44234,7 +44234,7 @@ void gc_heap::decommit_ephemeral_segment_pages() // we do a max of DECOMMIT_SIZE_PER_MILLISECOND per millisecond of elapsed time since the last GC // we limit the elapsed time to 10 seconds to avoid spending too much time decommitting - ptrdiff_t max_decommit_size = min (ephemeral_elapsed, (10*1000u)) * DECOMMIT_SIZE_PER_MILLISECOND; + ptrdiff_t max_decommit_size = min (ephemeral_elapsed, (size_t)(10*1000)) * DECOMMIT_SIZE_PER_MILLISECOND; decommit_size = min (decommit_size, max_decommit_size); slack_space = heap_segment_committed (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment) - decommit_size; @@ -47214,7 +47214,7 @@ enable_no_gc_region_callback_status gc_heap::enable_no_gc_callback(NoGCRegionCal soh_withheld_budget = soh_withheld_budget / gc_heap::n_heaps; loh_withheld_budget = loh_withheld_budget / gc_heap::n_heaps; #endif - soh_withheld_budget = max(soh_withheld_budget, 1u); + soh_withheld_budget = 
max(soh_withheld_budget, (size_t)1); soh_withheld_budget = Align(soh_withheld_budget, get_alignment_constant (TRUE)); loh_withheld_budget = Align(loh_withheld_budget, get_alignment_constant (FALSE)); #ifdef MULTIPLE_HEAPS @@ -47629,7 +47629,7 @@ void gc_heap::verify_regions (int gen_number, bool can_verify_gen_num, bool can_ } if (can_verify_gen_num) { - if (heap_segment_gen_num (seg_in_gen) != min (gen_number, max_generation)) + if (heap_segment_gen_num (seg_in_gen) != min (gen_number, (int)max_generation)) { dprintf (REGIONS_LOG, ("h%d gen%d region %p(%p) gen is %d!", heap_number, gen_number, seg_in_gen, heap_segment_mem (seg_in_gen), diff --git a/src/coreclr/jit/hashbv.cpp b/src/coreclr/jit/hashbv.cpp index 6a8667af1d754..3a648d4dfe7fa 100644 --- a/src/coreclr/jit/hashbv.cpp +++ b/src/coreclr/jit/hashbv.cpp @@ -824,7 +824,7 @@ void hashBv::setAll(indexType numToSet) for (unsigned int i = 0; i < numToSet; i += BITS_PER_NODE) { hashBvNode* node = getOrAddNodeForIndex(i); - indexType bits_to_set = min((unsigned int)BITS_PER_NODE, numToSet - i); + indexType bits_to_set = min((indexType)BITS_PER_NODE, numToSet - i); node->setLowest(bits_to_set); } } diff --git a/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.h b/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.h index 8ecff161dddc1..b989fb50d1c46 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.h +++ b/src/coreclr/tools/superpmi/superpmi-shared/spmidumphelper.h @@ -110,8 +110,8 @@ inline std::string SpmiDumpHelper::DumpPSig( pbuf += cch; sizeOfBuffer -= cch; - const unsigned int maxSigDisplayBytes = 25; // Don't display more than this. - const unsigned int sigDisplayBytes = min(maxSigDisplayBytes, (size_t)cbSig); + const size_t maxSigDisplayBytes = 25; // Don't display more than this. + const size_t sigDisplayBytes = min(maxSigDisplayBytes, (size_t)cbSig); // TODO: display character representation of the types? diff --git a/src/coreclr/vm/eetwain.cpp b/src/coreclr/vm/eetwain.cpp index 54a2d12c7d497..b5f1658e13319 100644 --- a/src/coreclr/vm/eetwain.cpp +++ b/src/coreclr/vm/eetwain.cpp @@ -430,7 +430,7 @@ HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx, { // This is an explicit (not special) var, so add its varNumber + 1 to our // max count ("+1" because varNumber is zero-based). - oldNumVars = max(oldNumVars, (unsigned int)(unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) + varNumber + 1)); + oldNumVars = max(oldNumVars, (unsigned)(unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) + varNumber + 1)); } } @@ -484,7 +484,7 @@ HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx, { // This is an explicit (not special) var, so add its varNumber + 1 to our // max count ("+1" because varNumber is zero-based). - newNumVars = max(newNumVars, (unsigned int)(unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) + varNumber + 1)); + newNumVars = max(newNumVars, (unsigned)(unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) + varNumber + 1)); } } From 7315bebbcafb993e6dd95c6f86d4796be1b4dab1 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 2 Apr 2024 17:03:41 +0000 Subject: [PATCH 56/60] Fix failures on linux-x64 and when using a compiler that can actually enforce C++11 compliance vs C++14 compliance. 
--- src/coreclr/gc/gc.cpp | 2 +- src/coreclr/jit/codegencommon.cpp | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index 17025f9602c29..7572af9688c1f 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -52815,7 +52815,7 @@ bool gc_heap::compute_memory_settings(bool is_initialization, uint32_t& nhp, uin if (is_initialization) #endif //USE_REGIONS { - heap_hard_limit = (size_t)max ((20ull * 1024 * 1024), physical_mem_for_gc); + heap_hard_limit = (size_t)max ((uint64_t)(20 * 1024 * 1024), physical_mem_for_gc); } } } diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 3cf41984ec197..f21350a8a0775 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -2838,6 +2838,12 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX * assigned location, in the function prolog. */ +// std::max isn't constexpr until C++14 and we're still on C++11 +constexpr size_t const_max(size_t a, size_t b) +{ + return a > b ? a : b; +} + #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function @@ -2931,7 +2937,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere bool circular; // true if this register participates in a circular dependency loop. bool hfaConflict; // arg is part of an HFA that will end up in the same register // but in a different slot (eg arg in s3 = v3.s[0], needs to end up in v3.s[3]) - } regArgTab[max(MAX_REG_ARG + 1, MAX_FLOAT_REG_ARG)] = {}; + } regArgTab[const_max(MAX_REG_ARG + 1, MAX_FLOAT_REG_ARG)] = {}; unsigned varNum; LclVarDsc* varDsc; From e71df2d7a876fc60c499edc43e974c872bea1a34 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 2 Apr 2024 16:21:42 -0700 Subject: [PATCH 57/60] Fix build --- src/coreclr/gc/gc.cpp | 2 +- src/coreclr/jit/targetarm64.cpp | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index 40fb34e98f700..a10fc24c076e1 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -43809,7 +43809,7 @@ size_t gc_heap::trim_youngest_desired (uint32_t memory_load, } else { - size_t total_max_allocation = max (mem_one_percent, total_min_allocation); + size_t total_max_allocation = max ((size_t)mem_one_percent, total_min_allocation); return min (total_new_allocation, total_max_allocation); } } diff --git a/src/coreclr/jit/targetarm64.cpp b/src/coreclr/jit/targetarm64.cpp index 6b8e0d84afb04..4d4d6ae12aa06 100644 --- a/src/coreclr/jit/targetarm64.cpp +++ b/src/coreclr/jit/targetarm64.cpp @@ -84,9 +84,10 @@ ABIPassingInformation Arm64Classifier::Classify(Compiler* comp, } else { - unsigned alignment = compAppleArm64Abi() ? min(elemSize, (unsigned)TARGET_POINTER_SIZE) : TARGET_POINTER_SIZE; - m_stackArgSize = roundUp(m_stackArgSize, alignment); - info = ABIPassingInformation::FromSegment(comp, ABIPassingSegment::OnStack(m_stackArgSize, 0, + unsigned alignment = + compAppleArm64Abi() ? min(elemSize, (unsigned)TARGET_POINTER_SIZE) : TARGET_POINTER_SIZE; + m_stackArgSize = roundUp(m_stackArgSize, alignment); + info = ABIPassingInformation::FromSegment(comp, ABIPassingSegment::OnStack(m_stackArgSize, 0, structLayout->GetSize())); m_stackArgSize += roundUp(structLayout->GetSize(), alignment); // After passing any float value on the stack, we should not enregister more float values. 
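One more note on the codegencommon.cpp change in the patch above: the regArgTab array bound has to be a constant expression, and std::max only became constexpr in C++14, hence the small hand-written const_max helper. The standalone sketch below shows the same pattern; kIntSlots and kFloatSlots are invented stand-ins for MAX_REG_ARG + 1 and MAX_FLOAT_REG_ARG, not the JIT's real values.

    #include <cstddef>

    // Same shape as the helper in codegencommon.cpp: usable in constant
    // expressions even under C++11, unlike std::max.
    constexpr size_t const_max(size_t a, size_t b)
    {
        return a > b ? a : b;
    }

    constexpr size_t kIntSlots   = 9; // hypothetical register counts
    constexpr size_t kFloatSlots = 8;

    int regArgTable[const_max(kIntSlots, kFloatSlots)] = {};

    int main()
    {
        static_assert(sizeof(regArgTable) / sizeof(regArgTable[0]) == 9,
                      "const_max must be usable as an array bound");
        return 0;
    }
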
From 140313aead80dd51d83720d105b35432235eacc7 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Wed, 3 Apr 2024 14:36:04 -0700 Subject: [PATCH 58/60] PR feedback --- src/coreclr/gc/gc.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index a10fc24c076e1..617b2efd29e5f 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -43316,7 +43316,7 @@ void gc_heap::init_static_data() size_t gen0_max_size = #ifdef MULTIPLE_HEAPS - max ((size_t)6*1024*1024u, min ( Align(soh_segment_size/2), (size_t)200*1024*1024)); + max ((size_t)6*1024*1024, min ( Align(soh_segment_size/2), (size_t)200*1024*1024)); #else //MULTIPLE_HEAPS ( #ifdef BACKGROUND_GC @@ -43353,7 +43353,7 @@ void gc_heap::init_static_data() // TODO: gen0_max_size has a 200mb cap; gen1_max_size should also have a cap. size_t gen1_max_size = (size_t) #ifdef MULTIPLE_HEAPS - max ((size_t)6*1024 * 1024, Align(soh_segment_size/2)); + max ((size_t)6*1024*1024, Align(soh_segment_size/2)); #else //MULTIPLE_HEAPS ( #ifdef BACKGROUND_GC From fca86d3015c329cb75c7d565dc47ed805cb8ad0f Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Wed, 3 Apr 2024 16:14:32 -0700 Subject: [PATCH 59/60] Convert (TADDR)NULL to (TADDR)0 in places where (TADDR)NULL was not already in use before this PR. --- src/coreclr/debug/inc/dbgipcevents.h | 2 +- src/coreclr/vm/codeman.cpp | 10 +++++----- src/coreclr/vm/dllimportcallback.h | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/coreclr/debug/inc/dbgipcevents.h b/src/coreclr/debug/inc/dbgipcevents.h index aa18cf328326b..1545aa2808370 100644 --- a/src/coreclr/debug/inc/dbgipcevents.h +++ b/src/coreclr/debug/inc/dbgipcevents.h @@ -768,7 +768,7 @@ class MSLAYOUT VMPTR_Base // // Operators to emulate Pointer semantics. // - bool IsNull() { SUPPORTS_DAC; return m_addr == (TADDR)NULL; } + bool IsNull() { SUPPORTS_DAC; return m_addr == (TADDR)0; } static VMPTR_This NullPtr() { diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index b6ffe31c8314c..facf2527feae7 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -3217,7 +3217,7 @@ JumpStubBlockHeader * EEJitManager::allocJumpStubBlock(MethodDesc* pMD, DWORD n CrstHolder ch(&m_CodeHeapCritSec); mem = (TADDR) allocCodeRaw(&requestInfo, sizeof(CodeHeader), blockSize, CODE_SIZE_ALIGN, &pCodeHeap); - if (mem == (TADDR)NULL) + if (mem == (TADDR)0) { _ASSERTE(!throwOnOutOfMemoryWithinRange); RETURN(NULL); @@ -3767,7 +3767,7 @@ static CodeHeader * GetCodeHeaderFromDebugInfoRequest(const DebugInfoRequest & r } CONTRACTL_END; TADDR address = (TADDR) request.GetStartAddress(); - _ASSERTE(address != (TADDR)NULL); + _ASSERTE(address != (TADDR)0); CodeHeader * pHeader = dac_cast(address & ~3) - 1; _ASSERTE(pHeader != NULL); @@ -3937,7 +3937,7 @@ BOOL EEJitManager::JitCodeToMethodInfo( return FALSE; TADDR start = dac_cast(pRangeSection->_pjit)->FindMethodCode(pRangeSection, currentPC); - if (start == (TADDR)NULL) + if (start == (TADDR)0) return FALSE; CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader)); @@ -3982,7 +3982,7 @@ StubCodeBlockKind EEJitManager::GetStubCodeBlockKind(RangeSection * pRangeSectio } TADDR start = dac_cast(pRangeSection->_pjit)->FindMethodCode(pRangeSection, currentPC); - if (start == (TADDR)NULL) + if (start == (TADDR)0) return STUB_CODE_BLOCK_NOCODE; CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader)); return pCHdr->IsStubCodeBlock() ? 
pCHdr->GetStubCodeBlockKind() : STUB_CODE_BLOCK_MANAGED; @@ -4609,7 +4609,7 @@ BOOL ExecutionManager::IsManagedCodeWorker(PCODE currentPC, RangeSectionLockStat // but on we could also be in a stub, so we check for that // as well and we don't consider stub to be real managed code. TADDR start = dac_cast(pRS->_pjit)->FindMethodCode(pRS, currentPC); - if (start == (TADDR)NULL) + if (start == (TADDR)0) return FALSE; CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader)); if (!pCHdr->IsStubCodeBlock()) diff --git a/src/coreclr/vm/dllimportcallback.h b/src/coreclr/vm/dllimportcallback.h index 3589ecee130ea..ac2f2e93cdfd0 100644 --- a/src/coreclr/vm/dllimportcallback.h +++ b/src/coreclr/vm/dllimportcallback.h @@ -185,7 +185,7 @@ class UMEntryThunk uMThunkMarshInfoWriterHolder.GetRW()->RunTimeInit(); // Ensure that we have either the managed target or the delegate. - if (m_pObjectHandle == NULL && m_pManagedTarget == (TADDR)NULL) + if (m_pObjectHandle == NULL && m_pManagedTarget == (TADDR)0) m_pManagedTarget = m_pMD->GetMultiCallableAddrOfCode(); m_code.Encode(&pUMEntryThunkRX->m_code, (BYTE*)m_pUMThunkMarshInfo->GetExecStubEntryPoint(), pUMEntryThunkRX); @@ -223,7 +223,7 @@ class UMEntryThunk } else { - if (m_pManagedTarget != (TADDR)NULL) + if (m_pManagedTarget != (TADDR)0) { RETURN m_pManagedTarget; } From 3701d491b0886a266cb97940bc3eb8ad1553f99f Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Wed, 3 Apr 2024 16:27:43 -0700 Subject: [PATCH 60/60] Merge dotnet/runtime into jkoritzinsky/misc-pal --- eng/formatting/download-tools.ps1 | 14 +- eng/formatting/download-tools.sh | 21 +- eng/pipelines/common/xplat-setup.yml | 4 +- .../coreclr/templates/format-job.yml | 3 +- eng/pipelines/runtime-official.yml | 36 +- src/coreclr/gc/gc.cpp | 2 +- src/coreclr/jit/.clang-format | 119 ++- src/coreclr/jit/_typeinfo.h | 19 +- src/coreclr/jit/abi.h | 7 +- src/coreclr/jit/alloc.cpp | 5 +- src/coreclr/jit/alloc.h | 13 +- src/coreclr/jit/arraystack.h | 3 +- src/coreclr/jit/assertionprop.cpp | 24 +- src/coreclr/jit/bitset.h | 13 +- src/coreclr/jit/bitsetasshortlong.h | 54 +- src/coreclr/jit/bitsetasuint64.h | 4 +- src/coreclr/jit/bitsetasuint64inclass.h | 7 +- src/coreclr/jit/block.cpp | 8 +- src/coreclr/jit/block.h | 160 ++-- src/coreclr/jit/blockset.h | 9 +- src/coreclr/jit/buildstring.cpp | 4 +- src/coreclr/jit/codegen.h | 155 +-- src/coreclr/jit/codegenarm.cpp | 10 +- src/coreclr/jit/codegenarm64.cpp | 16 +- src/coreclr/jit/codegenarm64test.cpp | 8 +- src/coreclr/jit/codegenarmarch.cpp | 92 +- src/coreclr/jit/codegencommon.cpp | 40 +- src/coreclr/jit/codegeninterface.h | 16 +- src/coreclr/jit/codegenlinear.cpp | 11 +- src/coreclr/jit/codegenloongarch64.cpp | 46 +- src/coreclr/jit/codegenriscv64.cpp | 46 +- src/coreclr/jit/codegenxarch.cpp | 56 +- src/coreclr/jit/compiler.cpp | 228 ++--- src/coreclr/jit/compiler.h | 387 ++++---- src/coreclr/jit/compiler.hpp | 40 +- src/coreclr/jit/compilerbitsettraits.h | 4 +- src/coreclr/jit/copyprop.cpp | 4 +- src/coreclr/jit/debuginfo.h | 18 +- src/coreclr/jit/decomposelongs.h | 5 +- src/coreclr/jit/disasm.cpp | 34 +- src/coreclr/jit/ee_il_dll.cpp | 8 +- src/coreclr/jit/ee_il_dll.hpp | 4 +- src/coreclr/jit/eeinterface.cpp | 43 +- src/coreclr/jit/emit.cpp | 152 +-- src/coreclr/jit/emit.h | 399 ++++---- src/coreclr/jit/emitarm.cpp | 42 +- src/coreclr/jit/emitarm.h | 4 +- src/coreclr/jit/emitarm64.cpp | 128 +-- src/coreclr/jit/emitarm64.h | 158 ++-- src/coreclr/jit/emitarm64sve.cpp | 358 +++---- src/coreclr/jit/emitloongarch64.cpp | 34 +- 
src/coreclr/jit/emitloongarch64.h | 16 +- src/coreclr/jit/emitpub.h | 36 +- src/coreclr/jit/emitriscv64.cpp | 28 +- src/coreclr/jit/emitriscv64.h | 24 +- src/coreclr/jit/emitxarch.cpp | 118 +-- src/coreclr/jit/emitxarch.h | 50 +- src/coreclr/jit/error.cpp | 4 +- src/coreclr/jit/fgbasic.cpp | 27 +- src/coreclr/jit/fgdiagnostic.cpp | 49 +- src/coreclr/jit/fgehopt.cpp | 8 +- src/coreclr/jit/fginline.cpp | 127 +-- src/coreclr/jit/fgopt.cpp | 38 +- src/coreclr/jit/fgprofile.cpp | 76 +- src/coreclr/jit/fgprofilesynthesis.h | 3 +- src/coreclr/jit/flowgraph.cpp | 35 +- src/coreclr/jit/forwardsub.cpp | 8 +- src/coreclr/jit/gcencode.cpp | 41 +- src/coreclr/jit/gcinfo.cpp | 3 +- src/coreclr/jit/gentree.cpp | 165 ++-- src/coreclr/jit/gentree.h | 517 ++++++---- src/coreclr/jit/gschecks.cpp | 3 +- src/coreclr/jit/hashbv.cpp | 4 +- src/coreclr/jit/hashbv.h | 31 +- src/coreclr/jit/helperexpansion.cpp | 12 +- src/coreclr/jit/host.h | 6 +- src/coreclr/jit/hostallocator.h | 2 +- src/coreclr/jit/hwintrinsic.cpp | 2 +- src/coreclr/jit/hwintrinsic.h | 25 +- src/coreclr/jit/hwintrinsiccodegenarm64.cpp | 5 +- src/coreclr/jit/hwintrinsiccodegenxarch.cpp | 30 +- src/coreclr/jit/hwintrinsicxarch.cpp | 14 +- src/coreclr/jit/importer.cpp | 85 +- src/coreclr/jit/importercalls.cpp | 886 +++++++++--------- src/coreclr/jit/importervectorization.cpp | 12 +- src/coreclr/jit/indirectcalltransformer.cpp | 16 +- src/coreclr/jit/inductionvariableopts.cpp | 7 +- src/coreclr/jit/inline.cpp | 2 +- src/coreclr/jit/inline.h | 42 +- src/coreclr/jit/inlinepolicy.cpp | 25 +- src/coreclr/jit/inlinepolicy.h | 39 +- src/coreclr/jit/instr.cpp | 12 +- src/coreclr/jit/instrsarm.h | 2 +- src/coreclr/jit/instrsarm64.h | 2 +- src/coreclr/jit/instrsloongarch64.h | 2 +- src/coreclr/jit/instrsxarch.h | 2 +- src/coreclr/jit/jit.h | 101 +- src/coreclr/jit/jitconfig.cpp | 4 +- src/coreclr/jit/jitconfig.h | 8 +- src/coreclr/jit/jitconfigvalues.h | 6 +- src/coreclr/jit/jitee.h | 6 +- src/coreclr/jit/jiteh.cpp | 20 +- src/coreclr/jit/jiteh.h | 7 +- src/coreclr/jit/jitexpandarray.h | 9 +- src/coreclr/jit/jitgcinfo.h | 23 +- src/coreclr/jit/jithashtable.h | 40 +- src/coreclr/jit/jitpch.h | 2 +- src/coreclr/jit/layout.cpp | 8 +- src/coreclr/jit/layout.h | 5 +- src/coreclr/jit/lclmorph.cpp | 12 +- src/coreclr/jit/lclvars.cpp | 109 ++- src/coreclr/jit/likelyclass.cpp | 8 +- src/coreclr/jit/lir.cpp | 47 +- src/coreclr/jit/lir.h | 22 +- src/coreclr/jit/liveness.cpp | 32 +- src/coreclr/jit/loopcloning.cpp | 4 +- src/coreclr/jit/loopcloning.h | 75 +- src/coreclr/jit/lower.cpp | 28 +- src/coreclr/jit/lower.h | 136 +-- src/coreclr/jit/lowerarmarch.cpp | 2 +- src/coreclr/jit/lowerxarch.cpp | 40 +- src/coreclr/jit/lsra.cpp | 131 ++- src/coreclr/jit/lsra.h | 348 ++++--- src/coreclr/jit/lsraarmarch.cpp | 34 +- src/coreclr/jit/lsrabuild.cpp | 179 ++-- src/coreclr/jit/lsraxarch.cpp | 38 +- src/coreclr/jit/morph.cpp | 102 +- src/coreclr/jit/morphblock.cpp | 10 +- src/coreclr/jit/objectalloc.cpp | 10 +- src/coreclr/jit/objectalloc.h | 30 +- src/coreclr/jit/optcse.cpp | 34 +- src/coreclr/jit/optcse.h | 24 +- src/coreclr/jit/optimizebools.cpp | 8 +- src/coreclr/jit/optimizer.cpp | 37 +- src/coreclr/jit/patchpoint.cpp | 4 +- src/coreclr/jit/phase.h | 17 +- src/coreclr/jit/promotion.cpp | 24 +- src/coreclr/jit/promotion.h | 71 +- src/coreclr/jit/promotiondecomposition.cpp | 8 +- src/coreclr/jit/rangecheck.cpp | 11 +- src/coreclr/jit/rangecheck.h | 29 +- src/coreclr/jit/rationalize.cpp | 3 +- src/coreclr/jit/rationalize.h | 3 +- src/coreclr/jit/redundantbranchopts.cpp | 4 +- 
src/coreclr/jit/regset.cpp | 10 +- src/coreclr/jit/regset.h | 10 +- src/coreclr/jit/scev.cpp | 8 +- src/coreclr/jit/scev.h | 34 +- src/coreclr/jit/scopeinfo.cpp | 14 +- src/coreclr/jit/sideeffects.cpp | 24 +- src/coreclr/jit/sideeffects.h | 3 +- src/coreclr/jit/simd.h | 18 +- src/coreclr/jit/simdashwintrinsic.cpp | 6 +- src/coreclr/jit/sm.cpp | 4 +- src/coreclr/jit/smallhash.h | 26 +- src/coreclr/jit/smcommon.h | 2 +- src/coreclr/jit/smopenum.h | 3 +- src/coreclr/jit/ssabuilder.cpp | 5 +- src/coreclr/jit/ssabuilder.h | 2 +- src/coreclr/jit/ssarenamestate.cpp | 5 +- src/coreclr/jit/ssarenamestate.h | 7 +- src/coreclr/jit/stacklevelsetter.h | 4 +- src/coreclr/jit/switchrecognition.cpp | 2 +- src/coreclr/jit/target.h | 39 +- src/coreclr/jit/targetamd64.cpp | 6 +- src/coreclr/jit/targetarm.cpp | 3 +- src/coreclr/jit/targetarm64.cpp | 6 +- src/coreclr/jit/targetx86.cpp | 3 +- src/coreclr/jit/treelifeupdater.cpp | 2 +- src/coreclr/jit/typelist.h | 2 +- src/coreclr/jit/unwind.cpp | 2 +- src/coreclr/jit/unwind.h | 70 +- src/coreclr/jit/unwindamd64.cpp | 2 +- src/coreclr/jit/unwindarm64.cpp | 6 +- src/coreclr/jit/unwindarmarch.cpp | 37 +- src/coreclr/jit/unwindloongarch64.cpp | 12 +- src/coreclr/jit/unwindriscv64.cpp | 2 +- src/coreclr/jit/utils.cpp | 19 +- src/coreclr/jit/utils.h | 44 +- src/coreclr/jit/valuenum.cpp | 40 +- src/coreclr/jit/valuenum.h | 111 ++- src/coreclr/jit/valuenumtype.h | 4 +- src/coreclr/jit/varset.h | 2 +- src/coreclr/jit/vartype.h | 12 +- 184 files changed, 4578 insertions(+), 3662 deletions(-) diff --git a/eng/formatting/download-tools.ps1 b/eng/formatting/download-tools.ps1 index 603a015c5e5f4..62d518bb11a8b 100644 --- a/eng/formatting/download-tools.ps1 +++ b/eng/formatting/download-tools.ps1 @@ -8,17 +8,25 @@ function DownloadClangTool { $downloadOutputPath ) - $baseUri = "https://clrjit.blob.core.windows.net/clang-tools/windows" + $clangVersion = "17.0.6" + $clangToolsRootUrl = "https://clrjit2.blob.core.windows.net/clang-tools" + $clangPlatform = "windows-x64" + + $toolUrl = "$clangToolsRootUrl/$clangVersion/$clangPlatform/$toolName.exe" + $targetPath = "$downloadOutputPath\$toolName.exe" if (-not $(ls $downloadOutputPath | Where-Object { $_.Name -eq "$toolName.exe" })) { Retry({ - Write-Output "Downloading '$baseUri/$toolName.exe'" + Write-Output "Downloading '$toolUrl' to '$targetPath'" # Pass -PassThru as otherwise Invoke-WebRequest leaves a corrupted file if the download fails. With -PassThru the download is buffered first. 
# -UseBasicParsing is necessary for older PowerShells when Internet Explorer might not be installed/configured - $null = Invoke-WebRequest -Uri "$baseUri/$toolName.exe" -OutFile $(Join-Path $downloadOutputPath -ChildPath "$toolName.exe") -PassThru -UseBasicParsing + $null = Invoke-WebRequest -Uri "$toolUrl" -OutFile $(Join-Path $downloadOutputPath -ChildPath "$toolName.exe") -PassThru -UseBasicParsing }) } + else { + Write-Output "Found '$targetPath'" + } } $downloadPathFolder = Split-Path $PSScriptRoot -Parent | Split-Path -Parent | Join-Path -ChildPath "artifacts" | Join-Path -ChildPath "tools" diff --git a/eng/formatting/download-tools.sh b/eng/formatting/download-tools.sh index 44459dbc885ba..023ed55ed6e00 100755 --- a/eng/formatting/download-tools.sh +++ b/eng/formatting/download-tools.sh @@ -15,21 +15,22 @@ done scriptroot="$( cd -P "$( dirname "$source" )" && pwd )" function DownloadClangTool { - targetPlatform=$(dotnet --info |grep RID:) - targetPlatform=${targetPlatform##*RID:* } - echo "dotnet RID: ${targetPlatform}" + + clangVersion="17.0.6" + clangToolsRootUrl="https://clrjit2.blob.core.windows.net/clang-tools" + + clangPlatform="$(dotnet --info | grep 'RID:')" + clangPlatform="${clangPlatform##*RID:* }" + echo "dotnet RID: ${clangPlatform}" # override common RIDs with compatible version so we don't need to upload binaries for each RID - case $targetPlatform in - osx.*-x64) - targetPlatform=osx.10.15-x64 - ;; + case $clangPlatform in ubuntu.*-x64) - targetPlatform=ubuntu.18.04-x64 - ;; + clangPlatform=linux-x64 + ;; esac - toolUrl=https://clrjit.blob.core.windows.net/clang-tools/${targetPlatform}/$1 + toolUrl="${clangToolsRootUrl}/${clangVersion}/${clangPlatform}/$1" toolOutput=$2/$1 echo "Downloading $1 from ${toolUrl} to ${toolOutput}" diff --git a/eng/pipelines/common/xplat-setup.yml b/eng/pipelines/common/xplat-setup.yml index 068a506262c00..743f6a42531bc 100644 --- a/eng/pipelines/common/xplat-setup.yml +++ b/eng/pipelines/common/xplat-setup.yml @@ -181,12 +181,12 @@ jobs: # Official Build Windows Pool ${{ if and(or(eq(parameters.osGroup, 'windows'), eq(parameters.jobParameters.hostedOs, 'windows')), ne(variables['System.TeamProject'], 'public')) }}: name: $(DncEngInternalBuildPool) - demands: ImageOverride -equals windows.vs2022preview.amd64 + demands: ImageOverride -equals windows.vs2022.amd64 # Public Windows Build Pool ${{ if and(or(eq(parameters.osGroup, 'windows'), eq(parameters.jobParameters.hostedOs, 'windows')), eq(variables['System.TeamProject'], 'public')) }}: name: $(DncEngPublicBuildPool) - demands: ImageOverride -equals windows.vs2022preview.amd64.open + demands: ImageOverride -equals windows.vs2022.amd64.open ${{ if eq(parameters.helixQueuesTemplate, '') }}: diff --git a/eng/pipelines/coreclr/templates/format-job.yml b/eng/pipelines/coreclr/templates/format-job.yml index 7850b9b43ec46..a4d5181fd6b5b 100644 --- a/eng/pipelines/coreclr/templates/format-job.yml +++ b/eng/pipelines/coreclr/templates/format-job.yml @@ -48,8 +48,7 @@ jobs: displayName: 'Install .NET SDK' inputs: packageType: 'sdk' - version: '6.x' - includePreviewVersions: true + version: '8.x' installationPath: $(Agent.ToolsDirectory)/dotnet - script: $(PythonSetupScript) diff --git a/eng/pipelines/runtime-official.yml b/eng/pipelines/runtime-official.yml index feb6b016d46c7..b8e74630b856e 100644 --- a/eng/pipelines/runtime-official.yml +++ b/eng/pipelines/runtime-official.yml @@ -65,6 +65,7 @@ extends: buildConfig: release platforms: - windows_x64 + - windows_x86 - windows_arm64 jobParameters: 
templatePath: 'templates-official' @@ -89,41 +90,6 @@ extends: parameters: name: $(osGroup)$(osSubgroup)_$(archType) - - # - # Build CoreCLR runtime packs - # Windows x86 - # No NativeAOT as NativeAOT is not supported on x86 - # Sign diagnostic files after native build - # - - template: /eng/pipelines/common/platform-matrix.yml - parameters: - jobTemplate: /eng/pipelines/common/global-build-job.yml - buildConfig: release - platforms: - - windows_x86 - jobParameters: - templatePath: 'templates-official' - buildArgs: -s clr.runtime+clr.alljits -c $(_BuildConfig) /bl:$(Build.SourcesDirectory)/artifacts/logs/$(_BuildConfig)/CoreClrNativeBuild.binlog - nameSuffix: CoreCLR - isOfficialBuild: ${{ variables.isOfficialBuild }} - timeoutInMinutes: 120 - postBuildSteps: - - template: /eng/pipelines/coreclr/templates/sign-diagnostic-files.yml - parameters: - basePath: $(Build.SourcesDirectory)/artifacts/bin/coreclr - isOfficialBuild: ${{ variables.isOfficialBuild }} - timeoutInMinutes: 30 - # Now that we've signed the diagnostic files, do the rest of the build. - - template: /eng/pipelines/common/templates/global-build-step.yml - parameters: - buildArgs: -s clr.corelib+clr.nativecorelib+clr.tools+clr.packages+libs+host+packs -c $(_BuildConfig) - displayName: Build managed CoreCLR components, all libraries, hosts, and packs - - # Upload the results. - - template: /eng/pipelines/common/upload-intermediate-artifacts-step.yml - parameters: - name: $(osGroup)$(osSubgroup)_$(archType) # # Build CoreCLR runtime packs # Mac x64/arm64 diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index 53fb942aa9669..e43047cf6e113 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -48838,7 +48838,7 @@ HRESULT GCHeap::Initialize() gc_heap::dynamic_heap_count_data.inc_recheck_threshold = 5; gc_heap::dynamic_heap_count_data.dec_failure_recheck_threshold = 5; // This should really be set as part of computing static data and should take conserve_mem_setting into consideration. 
- gc_heap::dynamic_heap_count_data.max_gen0_new_allocation = min (dd_max_size (gc_heap::g_heaps[0]->dynamic_data_of (0)), (64 * 1024 * 1024)); + gc_heap::dynamic_heap_count_data.max_gen0_new_allocation = min (dd_max_size (gc_heap::g_heaps[0]->dynamic_data_of (0)), (size_t)(64 * 1024 * 1024)); gc_heap::dynamic_heap_count_data.min_gen0_new_allocation = dd_min_size (gc_heap::g_heaps[0]->dynamic_data_of (0)); dprintf (6666, ("datas max gen0 budget %Id, min %Id", diff --git a/src/coreclr/jit/.clang-format b/src/coreclr/jit/.clang-format index 1e3930f7379d1..307b1d7128bdf 100644 --- a/src/coreclr/jit/.clang-format +++ b/src/coreclr/jit/.clang-format @@ -1,80 +1,131 @@ --- -Language: Cpp +Language: Cpp AccessModifierOffset: -4 AlignAfterOpenBracket: Align -AlignConsecutiveAssignments: true -AlignConsecutiveDeclarations: true -AlignEscapedNewlinesLeft: false -AlignOperands: true -AlignTrailingComments: true + +AlignConsecutiveAssignments: + Enabled: true + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + PadOperators: true + +AlignConsecutiveBitFields: + Enabled: true + AcrossEmptyLines: false + AcrossComments: false + +AlignConsecutiveDeclarations: + Enabled: true + AcrossEmptyLines: false + AcrossComments: false + +AlignConsecutiveMacros: + Enabled: true + AcrossEmptyLines: false + AcrossComments: false + +AlignEscapedNewlines: Right +AlignOperands: true + +AlignTrailingComments: + Kind: Always + OverEmptyLines: 0 + +AllowAllArgumentsOnNextLine: true AllowAllParametersOfDeclarationOnNextLine: true -AllowShortBlocksOnASingleLine: false +AllowShortBlocksOnASingleLine: Never AllowShortCaseLabelsOnASingleLine: false -AllowShortFunctionsOnASingleLine: Empty -AllowShortIfStatementsOnASingleLine: false +AllowShortEnumsOnASingleLine: false +AllowShortFunctionsOnASingleLine: None +AllowShortIfStatementsOnASingleLine: Never +AllowShortLambdasOnASingleLine: Empty AllowShortLoopsOnASingleLine: false AlwaysBreakAfterDefinitionReturnType: None AlwaysBreakBeforeMultilineStrings: false AlwaysBreakTemplateDeclarations: true BinPackArguments: true BinPackParameters: false + +BreakBeforeBraces: Custom BraceWrapping: - AfterClass: true - AfterControlStatement: true - AfterEnum: false - AfterFunction: true - AfterNamespace: false - AfterObjCDeclaration: false - AfterStruct: true - AfterUnion: true - BeforeCatch: true - BeforeElse: true - IndentBraces: false + AfterCaseLabel: true + AfterClass: true + AfterControlStatement: Always + AfterEnum: true + AfterFunction: true + AfterNamespace: true + AfterStruct: true + AfterUnion: true + AfterExternBlock: true + BeforeCatch: true + BeforeElse: true + BeforeLambdaBody: false + BeforeWhile: false + IndentBraces: false + SplitEmptyFunction: true + SplitEmptyRecord: true + SplitEmptyNamespace: true + BreakBeforeBinaryOperators: None -BreakBeforeBraces: Allman BreakBeforeTernaryOperators: true -BreakConstructorInitializersBeforeComma: true -ColumnLimit: 120 -CommentPragmas: '^ IWYU pragma:' -ConstructorInitializerAllOnOneLineOrOnePerLine: true +BreakConstructorInitializers: BeforeComma +BreakInheritanceList: BeforeComma +BreakStringLiterals: false + +ColumnLimit: 120 +CommentPragmas: '^ IWYU pragma:' +CompactNamespaces: false ConstructorInitializerIndentWidth: 4 ContinuationIndentWidth: 4 Cpp11BracedListStyle: true DerivePointerAlignment: false -DisableFormat: false +DisableFormat: false + +EmptyLineAfterAccessModifier: Leave +EmptyLineBeforeAccessModifier: Leave ExperimentalAutoDetectBinPacking: false -ForEachMacros: [ ] +ForEachMacros: [ ] 
+IndentAccessModifiers: false +IndentCaseBlocks: false IndentCaseLabels: true -IndentWidth: 4 +IndentExternBlock: false +IndentGotoLabels: true +IndentPPDirectives: None +IndentWidth: 4 IndentWrappedFunctionNames: false + +InsertNewlineAtEOF: true KeepEmptyLinesAtTheStartOfBlocks: true +LambdaBodyIndentation: OuterScope MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 NamespaceIndentation: None -ObjCBlockIndentWidth: 2 -ObjCSpaceAfterProperty: false -ObjCSpaceBeforeProtocolList: true + PenaltyBreakBeforeFirstCallParameter: 400 PenaltyBreakComment: 50 PenaltyBreakFirstLessLess: 500 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 100000 + PointerAlignment: Left ReflowComments: true -SortIncludes: false +SortIncludes: Never + SpaceAfterCStyleCast: false SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesBeforeTrailingComments: 1 -SpacesInAngles: false +SpacesInAngles: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false -Standard: Cpp11 + +Standard: Latest TabWidth: 4 UseTab: Never ... diff --git a/src/coreclr/jit/_typeinfo.h b/src/coreclr/jit/_typeinfo.h index 42526eeb8de4b..9285535b5531c 100644 --- a/src/coreclr/jit/_typeinfo.h +++ b/src/coreclr/jit/_typeinfo.h @@ -41,25 +41,34 @@ class typeInfo private: var_types m_type; - union { + union + { CORINFO_CLASS_HANDLE m_cls; // Valid, but not always available, for TYP_REFs. methodPointerInfo* m_methodPointerInfo; // Valid only for function pointers. }; public: - typeInfo() : m_type(TYP_UNDEF), m_cls(NO_CLASS_HANDLE) + typeInfo() + : m_type(TYP_UNDEF) + , m_cls(NO_CLASS_HANDLE) { } - typeInfo(var_types type) : m_type(type), m_cls(NO_CLASS_HANDLE) + typeInfo(var_types type) + : m_type(type) + , m_cls(NO_CLASS_HANDLE) { } - typeInfo(CORINFO_CLASS_HANDLE cls) : m_type(TYP_REF), m_cls(cls) + typeInfo(CORINFO_CLASS_HANDLE cls) + : m_type(TYP_REF) + , m_cls(cls) { } - typeInfo(methodPointerInfo* methodPointerInfo) : m_type(TYP_I_IMPL), m_methodPointerInfo(methodPointerInfo) + typeInfo(methodPointerInfo* methodPointerInfo) + : m_type(TYP_I_IMPL) + , m_methodPointerInfo(methodPointerInfo) { assert(methodPointerInfo != nullptr); assert(methodPointerInfo->m_token.hMethod != nullptr); diff --git a/src/coreclr/jit/abi.h b/src/coreclr/jit/abi.h index 27e53c27efc7e..82ec58b5d807f 100644 --- a/src/coreclr/jit/abi.h +++ b/src/coreclr/jit/abi.h @@ -63,7 +63,9 @@ class RegisterQueue unsigned int m_index = 0; public: - RegisterQueue(const regNumber* regs, unsigned int numRegs) : m_regs(regs), m_numRegs(numRegs) + RegisterQueue(const regNumber* regs, unsigned int numRegs) + : m_regs(regs) + , m_numRegs(numRegs) { } @@ -187,7 +189,8 @@ class SwiftABIClassifier PlatformClassifier m_classifier; public: - SwiftABIClassifier(const ClassifierInfo& info) : m_classifier(info) + SwiftABIClassifier(const ClassifierInfo& info) + : m_classifier(info) { } diff --git a/src/coreclr/jit/alloc.cpp b/src/coreclr/jit/alloc.cpp index d9fc96458849b..2fcb3f877418b 100644 --- a/src/coreclr/jit/alloc.cpp +++ b/src/coreclr/jit/alloc.cpp @@ -42,7 +42,10 @@ size_t ArenaAllocator::getDefaultPageSize() // ArenaAllocator::ArenaAllocator: // Default-constructs an arena allocator. 
ArenaAllocator::ArenaAllocator() - : m_firstPage(nullptr), m_lastPage(nullptr), m_nextFreeByte(nullptr), m_lastFreeByte(nullptr) + : m_firstPage(nullptr) + , m_lastPage(nullptr) + , m_nextFreeByte(nullptr) + , m_lastFreeByte(nullptr) { #if MEASURE_MEM_ALLOC memset(&m_stats, 0, sizeof(m_stats)); diff --git a/src/coreclr/jit/alloc.h b/src/coreclr/jit/alloc.h index cb3da79232f8b..8899b87ad3552 100644 --- a/src/coreclr/jit/alloc.h +++ b/src/coreclr/jit/alloc.h @@ -22,9 +22,9 @@ enum CompMemKind class ArenaAllocator { private: - ArenaAllocator(const ArenaAllocator& other) = delete; + ArenaAllocator(const ArenaAllocator& other) = delete; ArenaAllocator& operator=(const ArenaAllocator& other) = delete; - ArenaAllocator& operator=(ArenaAllocator&& other) = delete; + ArenaAllocator& operator=(ArenaAllocator&& other) = delete; struct PageDescriptor { @@ -52,7 +52,7 @@ class ArenaAllocator void* allocateNewPage(size_t size); static void* allocateHostMemory(size_t size, size_t* pActualSize); - static void freeHostMemory(void* block, size_t size); + static void freeHostMemory(void* block, size_t size); #if MEASURE_MEM_ALLOC struct MemStats @@ -125,8 +125,8 @@ class ArenaAllocator public: MemStatsAllocator* getMemStatsAllocator(CompMemKind kind); - void finishMemStats(); - void dumpMemStats(FILE* file); + void finishMemStats(); + void dumpMemStats(FILE* file); static void dumpMaxMemStats(FILE* file); static void dumpAggregateMemStats(FILE* file); @@ -276,7 +276,8 @@ class CompIAllocator : public IAllocator char m_zeroLenAllocTarg; public: - CompIAllocator(CompAllocator alloc) : m_alloc(alloc) + CompIAllocator(CompAllocator alloc) + : m_alloc(alloc) { } diff --git a/src/coreclr/jit/arraystack.h b/src/coreclr/jit/arraystack.h index 83a43c9432ba0..5d8a697a3820d 100644 --- a/src/coreclr/jit/arraystack.h +++ b/src/coreclr/jit/arraystack.h @@ -10,7 +10,8 @@ class ArrayStack static const int builtinSize = 8; public: - explicit ArrayStack(CompAllocator alloc, int initialCapacity = builtinSize) : m_alloc(alloc) + explicit ArrayStack(CompAllocator alloc, int initialCapacity = builtinSize) + : m_alloc(alloc) { if (initialCapacity > builtinSize) { diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp index e9670d21df2cf..ebcb101663a30 100644 --- a/src/coreclr/jit/assertionprop.cpp +++ b/src/coreclr/jit/assertionprop.cpp @@ -1524,9 +1524,8 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1, assertion.op1.lcl.ssaNum = op1->AsLclVarCommon()->GetSsaNum(); assert((assertion.op1.lcl.ssaNum == SsaConfig::RESERVED_SSA_NUM) || - (assertion.op1.vn == - vnStore->VNConservativeNormalValue( - lvaGetDesc(lclNum)->GetPerSsaData(assertion.op1.lcl.ssaNum)->m_vnPair))); + (assertion.op1.vn == vnStore->VNConservativeNormalValue( + lvaGetDesc(lclNum)->GetPerSsaData(assertion.op1.lcl.ssaNum)->m_vnPair))); ssize_t cnsValue = 0; GenTreeFlags iconFlags = GTF_EMPTY; @@ -2770,7 +2769,7 @@ GenTree* Compiler::optVNBasedFoldExpr(BasicBlock* block, GenTree* parent, GenTre case GT_CALL: return optVNBasedFoldExpr_Call(block, parent, tree->AsCall()); - // We can add more VN-based foldings here. + // We can add more VN-based foldings here. 
default: break; @@ -3325,7 +3324,7 @@ bool Compiler::optIsProfitableToSubstitute(GenTree* dest, BasicBlock* destBlock, // GenTree* Compiler::optConstantAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, - Statement* stmt DEBUGARG(AssertionIndex index)) + Statement* stmt DEBUGARG(AssertionIndex index)) { const unsigned lclNum = tree->GetLclNum(); @@ -3580,7 +3579,7 @@ bool Compiler::optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVar // GenTree* Compiler::optCopyAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, - Statement* stmt DEBUGARG(AssertionIndex index)) + Statement* stmt DEBUGARG(AssertionIndex index)) { const AssertionDsc::AssertionDscOp1& op1 = curAssertion->op1; const AssertionDsc::AssertionDscOp2& op2 = curAssertion->op2; @@ -4529,8 +4528,9 @@ GenTree* Compiler::optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, Gen { printf("\nVN relop based copy assertion prop in " FMT_BB ":\n", compCurBB->bbNum); printf("Assertion index=#%02u: V%02d.%02d %s V%02d.%02d\n", index, op1->AsLclVar()->GetLclNum(), - op1->AsLclVar()->GetSsaNum(), (curAssertion->assertionKind == OAK_EQUAL) ? "==" : "!=", - op2->AsLclVar()->GetLclNum(), op2->AsLclVar()->GetSsaNum()); + op1->AsLclVar()->GetSsaNum(), + (curAssertion->assertionKind == OAK_EQUAL) ? "==" : "!=", op2->AsLclVar()->GetLclNum(), + op2->AsLclVar()->GetSsaNum()); gtDispTree(tree, nullptr, nullptr, true); } #endif @@ -4824,7 +4824,7 @@ GenTree* Compiler::optAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* tr // If both VN and assertion table yield a matching assertion, "pVnBased" // is only set and the return value is "NO_ASSERTION_INDEX." // -bool Compiler::optAssertionIsNonNull(GenTree* op, +bool Compiler::optAssertionIsNonNull(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex)) { @@ -4871,7 +4871,7 @@ bool Compiler::optAssertionIsNonNull(GenTree* op, // Return Value: // index of assertion, or NO_ASSERTION_INDEX // -AssertionIndex Compiler::optAssertionIsNonNullInternal(GenTree* op, +AssertionIndex Compiler::optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased)) { @@ -6283,7 +6283,9 @@ struct VNAssertionPropVisitorInfo Statement* stmt; BasicBlock* block; VNAssertionPropVisitorInfo(Compiler* pThis, BasicBlock* block, Statement* stmt) - : pThis(pThis), stmt(stmt), block(block) + : pThis(pThis) + , stmt(stmt) + , block(block) { } }; diff --git a/src/coreclr/jit/bitset.h b/src/coreclr/jit/bitset.h index b34d1f04b85f1..6f1e3d8dcd0db 100644 --- a/src/coreclr/jit/bitset.h +++ b/src/coreclr/jit/bitset.h @@ -59,7 +59,10 @@ class BitSetSupport FILE* OpOutputFile; public: - BitSetOpCounter(const char* fileName) : TotalOps(0), m_fileName(fileName), OpOutputFile(nullptr) + BitSetOpCounter(const char* fileName) + : TotalOps(0) + , m_fileName(fileName) + , OpOutputFile(nullptr) { for (unsigned i = 0; i < BSOP_NUMOPS; i++) { @@ -435,7 +438,9 @@ class BitSetOpsWithCounter Env m_env; public: - Iter(Env env, BitSetValueArgType bs) : m_iter(env, bs), m_env(env) + Iter(Env env, BitSetValueArgType bs) + : m_iter(env, bs) + , m_env(env) { } @@ -449,8 +454,8 @@ class BitSetOpsWithCounter // We define symbolic names for the various bitset implementations available, to allow choices between them. 
-#define BSUInt64 0 -#define BSShortLong 1 +#define BSUInt64 0 +#define BSShortLong 1 #define BSUInt64Class 2 /*****************************************************************************/ diff --git a/src/coreclr/jit/bitsetasshortlong.h b/src/coreclr/jit/bitsetasshortlong.h index 2ef293820fd26..006f66fc178dc 100644 --- a/src/coreclr/jit/bitsetasshortlong.h +++ b/src/coreclr/jit/bitsetasshortlong.h @@ -32,36 +32,36 @@ class BitSetOps m_bs; public: - BitSetUint64ValueRetType(const BitSetUint64& bs) : m_bs(bs) + BitSetUint64ValueRetType(const BitSetUint64& bs) + : m_bs(bs) { } }; @@ -451,7 +452,9 @@ class BitSetOps, unsigned m_bitNum; public: - Iter(Env env, const BitSetUint64& bs) : m_bits(bs.m_bits), m_bitNum(0) + Iter(Env env, const BitSetUint64& bs) + : m_bits(bs.m_bits) + , m_bitNum(0) { } diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 6eea265871c04..6cde9e0e93d8b 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -34,7 +34,7 @@ unsigned BasicBlock::s_nMaxTrees; FlowEdge* ShuffleHelper(unsigned hash, FlowEdge* res) { FlowEdge* head = res; - for (FlowEdge *prev = nullptr; res != nullptr; prev = res, res = res->getNextPredEdge()) + for (FlowEdge* prev = nullptr; res != nullptr; prev = res, res = res->getNextPredEdge()) { unsigned blkHash = (hash ^ (res->getSourceBlock()->bbNum << 16) ^ res->getSourceBlock()->bbNum); if (((blkHash % 1879) & 1) && prev != nullptr) @@ -140,7 +140,8 @@ void FlowEdge::addLikelihood(weight_t addedLikelihood) // comp - Compiler instance // block - The block whose successors are to be iterated // -AllSuccessorEnumerator::AllSuccessorEnumerator(Compiler* comp, BasicBlock* block) : m_block(block) +AllSuccessorEnumerator::AllSuccessorEnumerator(Compiler* comp, BasicBlock* block) + : m_block(block) { m_numSuccs = 0; block->VisitAllSuccs(comp, [this](BasicBlock* succ) { @@ -1891,7 +1892,8 @@ BBswtDesc::BBswtDesc(Compiler* comp, const BBswtDesc* other) // comp - compiler instance // other - existing descriptor to copy // -BBehfDesc::BBehfDesc(Compiler* comp, const BBehfDesc* other) : bbeCount(other->bbeCount) +BBehfDesc::BBehfDesc(Compiler* comp, const BBehfDesc* other) + : bbeCount(other->bbeCount) { // Allocate and fill in a new dst tab // diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 68f41e3610173..16321157664a5 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -162,7 +162,8 @@ class MemoryKindIterator int value; public: - explicit inline MemoryKindIterator(int val) : value(val) + explicit inline MemoryKindIterator(int val) + : value(val) { } inline MemoryKindIterator& operator++() @@ -244,7 +245,8 @@ class PredEdgeList }; public: - PredEdgeList(FlowEdge* pred) : m_begin(pred) + PredEdgeList(FlowEdge* pred) + : m_begin(pred) { } @@ -297,7 +299,8 @@ class PredBlockList }; public: - PredBlockList(FlowEdge* pred) : m_begin(pred) + PredBlockList(FlowEdge* pred) + : m_begin(pred) { } @@ -322,7 +325,8 @@ class BBArrayIterator FlowEdge* const* m_edgeEntry; public: - BBArrayIterator(FlowEdge* const* edgeEntry) : m_edgeEntry(edgeEntry) + BBArrayIterator(FlowEdge* const* edgeEntry) + : m_edgeEntry(edgeEntry) { } @@ -351,7 +355,8 @@ class FlowEdgeArrayIterator FlowEdge* const* m_edgeEntry; public: - FlowEdgeArrayIterator(FlowEdge* const* edgeEntry) : m_edgeEntry(edgeEntry) + FlowEdgeArrayIterator(FlowEdge* const* edgeEntry) + : m_edgeEntry(edgeEntry) { } @@ -727,7 +732,8 @@ struct BasicBlock : private LIR::Range BBKinds bbKind; // jump (if any) at the end of this block /* The following 
union describes the jump target(s) of this block */ - union { + union + { unsigned bbTargetOffs; // PC offset (temporary only) FlowEdge* bbTargetEdge; // successor edge for block kinds with only one successor (BBJ_ALWAYS, etc) FlowEdge* bbTrueEdge; // BBJ_COND successor edge when its condition is true (alias for bbTargetEdge) @@ -1165,11 +1171,11 @@ struct BasicBlock : private LIR::Range } #ifdef DEBUG - void dspFlags() const; // Print the flags - unsigned dspPreds() const; // Print the predecessors (bbPreds) - void dspSuccs(Compiler* compiler); // Print the successors. The 'compiler' argument determines whether EH - // regions are printed: see NumSucc() for details. - void dspKind() const; // Print the block jump kind (e.g., BBJ_ALWAYS, BBJ_COND, etc.). + void dspFlags() const; // Print the flags + unsigned dspPreds() const; // Print the predecessors (bbPreds) + void dspSuccs(Compiler* compiler); // Print the successors. The 'compiler' argument determines whether EH + // regions are printed: see NumSucc() for details. + void dspKind() const; // Print the block jump kind (e.g., BBJ_ALWAYS, BBJ_COND, etc.). // Print a simple basic block header for various output, including a list of predecessors and successors. void dspBlockHeader(Compiler* compiler, bool showKind = true, bool showFlags = false, bool showPreds = true); @@ -1177,11 +1183,11 @@ struct BasicBlock : private LIR::Range const char* dspToString(int blockNumPadding = 0) const; #endif // DEBUG -#define BB_UNITY_WEIGHT 100.0 // how much a normal execute once block weighs -#define BB_UNITY_WEIGHT_UNSIGNED 100 // how much a normal execute once block weighs -#define BB_LOOP_WEIGHT_SCALE 8.0 // synthetic profile scale factor for loops -#define BB_ZERO_WEIGHT 0.0 -#define BB_MAX_WEIGHT FLT_MAX // maximum finite weight -- needs rethinking. +#define BB_UNITY_WEIGHT 100.0 // how much a normal execute once block weighs +#define BB_UNITY_WEIGHT_UNSIGNED 100 // how much a normal execute once block weighs +#define BB_LOOP_WEIGHT_SCALE 8.0 // synthetic profile scale factor for loops +#define BB_ZERO_WEIGHT 0.0 +#define BB_MAX_WEIGHT FLT_MAX // maximum finite weight -- needs rethinking. 
weight_t bbWeight; // The dynamic execution weight of this block @@ -1402,12 +1408,14 @@ struct BasicBlock : private LIR::Range #define NO_BASE_TMP UINT_MAX // base# to use when we have none - union { + union + { unsigned bbStkTempsIn; // base# for input stack temps int bbCountSchemaIndex; // schema index for count instrumentation }; - union { + union + { unsigned bbStkTempsOut; // base# for output stack temps int bbHistogramSchemaIndex; // schema index for histogram instrumentation }; @@ -1527,11 +1535,11 @@ struct BasicBlock : private LIR::Range bool hasEHBoundaryOut() const; // Some non-zero value that will not collide with real tokens for bbCatchTyp -#define BBCT_NONE 0x00000000 -#define BBCT_FAULT 0xFFFFFFFC -#define BBCT_FINALLY 0xFFFFFFFD -#define BBCT_FILTER 0xFFFFFFFE -#define BBCT_FILTER_HANDLER 0xFFFFFFFF +#define BBCT_NONE 0x00000000 +#define BBCT_FAULT 0xFFFFFFFC +#define BBCT_FINALLY 0xFFFFFFFD +#define BBCT_FILTER 0xFFFFFFFE +#define BBCT_FILTER_HANDLER 0xFFFFFFFF #define handlerGetsXcptnObj(hndTyp) ((hndTyp) != BBCT_NONE && (hndTyp) != BBCT_FAULT && (hndTyp) != BBCT_FINALLY) // TODO-Cleanup: Get rid of bbStkDepth and use bbStackDepthOnEntry() instead @@ -1574,7 +1582,8 @@ struct BasicBlock : private LIR::Range void ensurePredListOrder(Compiler* compiler); void reorderPredList(Compiler* compiler); - union { + union + { BasicBlock* bbIDom; // Represent the closest dominator to this block (called the Immediate // Dominator) used to compute the dominance tree. FlowEdge* bbLastPred; // Used early on by fgLinkBasicBlock/fgAddRefPred @@ -1623,7 +1632,9 @@ struct BasicBlock : private LIR::Range return m_ssaNum; } - MemoryPhiArg(unsigned ssaNum, MemoryPhiArg* nextArg = nullptr) : m_ssaNum(ssaNum), m_nextArg(nextArg) + MemoryPhiArg(unsigned ssaNum, MemoryPhiArg* nextArg = nullptr) + : m_ssaNum(ssaNum) + , m_nextArg(nextArg) { } @@ -1649,18 +1660,21 @@ struct BasicBlock : private LIR::Range * thus we can union them since the two operations are completely disjunct. 
*/ - union { + union + { EXPSET_TP bbCseGen; // CSEs computed by block ASSERT_TP bbAssertionGen; // assertions created by block (global prop) ASSERT_TP bbAssertionOutIfTrue; // assertions available on exit along true/jump edge (BBJ_COND, local prop) }; - union { + union + { EXPSET_TP bbCseIn; // CSEs available on entry ASSERT_TP bbAssertionIn; // assertions available on entry (global prop) }; - union { + union + { EXPSET_TP bbCseOut; // CSEs available on exit ASSERT_TP bbAssertionOut; // assertions available on exit (global prop, local prop & !BBJ_COND) ASSERT_TP bbAssertionOutIfFalse; // assertions available on exit along false/next edge (BBJ_COND, local prop) @@ -1668,7 +1682,7 @@ struct BasicBlock : private LIR::Range void* bbEmitCookie; -//------------------------------------------------------------------------- + //------------------------------------------------------------------------- #if MEASURE_BLOCK_SIZE static size_t s_Size; @@ -1703,8 +1717,8 @@ struct BasicBlock : private LIR::Range unsigned bbID; #endif // DEBUG - unsigned bbStackDepthOnEntry() const; - void bbSetStack(StackEntry* stack); + unsigned bbStackDepthOnEntry() const; + void bbSetStack(StackEntry* stack); StackEntry* bbStackOnEntry() const; // "bbNum" is one-based (for unknown reasons); it is sometimes useful to have the corresponding @@ -1754,7 +1768,10 @@ struct BasicBlock : private LIR::Range Statement* FirstNonPhiDef() const; Statement* FirstNonPhiDefOrCatchArgStore() const; - BasicBlock() : bbStmtList(nullptr), bbLiveIn(VarSetOps::UninitVal()), bbLiveOut(VarSetOps::UninitVal()) + BasicBlock() + : bbStmtList(nullptr) + , bbLiveIn(VarSetOps::UninitVal()) + , bbLiveOut(VarSetOps::UninitVal()) { } @@ -1766,7 +1783,9 @@ struct BasicBlock : private LIR::Range BasicBlock* m_block; public: - Successors(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block) + Successors(Compiler* comp, BasicBlock* block) + : m_comp(comp) + , m_block(block) { } @@ -1777,11 +1796,15 @@ struct BasicBlock : private LIR::Range TPosition m_pos; public: - iterator(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block), m_pos(comp, block) + iterator(Compiler* comp, BasicBlock* block) + : m_comp(comp) + , m_block(block) + , m_pos(comp, block) { } - iterator() : m_pos() + iterator() + : m_pos() { } @@ -1854,7 +1877,8 @@ struct BasicBlock : private LIR::Range class BBSuccList : private SuccList { public: - BBSuccList(const BasicBlock* block) : SuccList(block) + BBSuccList(const BasicBlock* block) + : SuccList(block) { } @@ -1876,7 +1900,8 @@ struct BasicBlock : private LIR::Range class BBSuccEdgeList : private SuccList { public: - BBSuccEdgeList(const BasicBlock* block) : SuccList(block) + BBSuccEdgeList(const BasicBlock* block) + : SuccList(block) { } @@ -1912,7 +1937,9 @@ struct BasicBlock : private LIR::Range public: iterator(Compiler* comp, BasicBlock* block, unsigned succNum) - : m_comp(comp), m_block(block), m_succNum(succNum) + : m_comp(comp) + , m_block(block) + , m_succNum(succNum) { } @@ -1937,7 +1964,9 @@ struct BasicBlock : private LIR::Range }; public: - BBCompilerSuccList(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block) + BBCompilerSuccList(Compiler* comp, BasicBlock* block) + : m_comp(comp) + , m_block(block) { } @@ -1973,7 +2002,9 @@ struct BasicBlock : private LIR::Range public: iterator(Compiler* comp, BasicBlock* block, unsigned succNum) - : m_comp(comp), m_block(block), m_succNum(succNum) + : m_comp(comp) + , m_block(block) + , m_succNum(succNum) { } @@ -1998,7 +2029,9 @@ struct 
BasicBlock : private LIR::Range }; public: - BBCompilerSuccEdgeList(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block) + BBCompilerSuccEdgeList(Compiler* comp, BasicBlock* block) + : m_comp(comp) + , m_block(block) { } @@ -2108,7 +2141,8 @@ class BasicBlockIterator BasicBlock* m_block; public: - BasicBlockIterator(BasicBlock* block) : m_block(block) + BasicBlockIterator(BasicBlock* block) + : m_block(block) { } @@ -2144,7 +2178,8 @@ class BasicBlockSimpleList BasicBlock* m_begin; public: - BasicBlockSimpleList(BasicBlock* begin) : m_begin(begin) + BasicBlockSimpleList(BasicBlock* begin) + : m_begin(begin) { } @@ -2174,7 +2209,9 @@ class BasicBlockRangeList BasicBlock* m_end; public: - BasicBlockRangeList(BasicBlock* begin, BasicBlock* end) : m_begin(begin), m_end(end) + BasicBlockRangeList(BasicBlock* begin, BasicBlock* end) + : m_begin(begin) + , m_end(end) { assert(begin != nullptr); assert(end != nullptr); @@ -2214,7 +2251,9 @@ struct BBswtDesc bool bbsHasDefault; // true if last switch case is a default case bool bbsHasDominantCase; // true if switch has a dominant case - BBswtDesc() : bbsHasDefault(true), bbsHasDominantCase(false) + BBswtDesc() + : bbsHasDefault(true) + , bbsHasDominantCase(false) { } @@ -2241,7 +2280,8 @@ struct BBswtDesc // BBSwitchTargetList out-of-class-declaration implementations (here due to C++ ordering requirements). // -inline BBSwitchTargetList::BBSwitchTargetList(BBswtDesc* bbsDesc) : m_bbsDesc(bbsDesc) +inline BBSwitchTargetList::BBSwitchTargetList(BBswtDesc* bbsDesc) + : m_bbsDesc(bbsDesc) { assert(m_bbsDesc != nullptr); assert(m_bbsDesc->bbsDstTab != nullptr); @@ -2264,7 +2304,9 @@ struct BBehfDesc FlowEdge** bbeSuccs; // array of `FlowEdge*` pointing to BBJ_EHFINALLYRET block successors unsigned bbeCount; // size of `bbeSuccs` array - BBehfDesc() : bbeSuccs(nullptr), bbeCount(0) + BBehfDesc() + : bbeSuccs(nullptr) + , bbeCount(0) { } @@ -2274,7 +2316,8 @@ struct BBehfDesc // BBEhfSuccList out-of-class-declaration implementations (here due to C++ ordering requirements). // -inline BBEhfSuccList::BBEhfSuccList(BBehfDesc* bbeDesc) : m_bbeDesc(bbeDesc) +inline BBEhfSuccList::BBEhfSuccList(BBehfDesc* bbeDesc) + : m_bbeDesc(bbeDesc) { assert(m_bbeDesc != nullptr); assert((m_bbeDesc->bbeSuccs != nullptr) || (m_bbeDesc->bbeCount == 0)); @@ -2373,11 +2416,15 @@ struct BasicBlockList BasicBlockList* next; // The next BasicBlock in the list, nullptr for end of list. BasicBlock* block; // The BasicBlock of interest. - BasicBlockList() : next(nullptr), block(nullptr) + BasicBlockList() + : next(nullptr) + , block(nullptr) { } - BasicBlockList(BasicBlock* blk, BasicBlockList* rest) : next(rest), block(blk) + BasicBlockList(BasicBlock* blk, BasicBlockList* rest) + : next(rest) + , block(blk) { } }; @@ -2403,7 +2450,8 @@ inline BasicBlock* BBArrayIterator::operator*() const // Pred list iterator implementations (that are required to be defined after the declaration of BasicBlock and FlowEdge) -inline PredEdgeList::iterator::iterator(FlowEdge* pred) : m_pred(pred) +inline PredEdgeList::iterator::iterator(FlowEdge* pred) + : m_pred(pred) { #ifdef DEBUG m_next = (m_pred == nullptr) ? 
nullptr : m_pred->getNextPredEdge(); @@ -2425,7 +2473,8 @@ inline PredEdgeList::iterator& PredEdgeList::iterator::operator++() } template -inline PredBlockList::iterator::iterator(FlowEdge* pred) : m_pred(pred) +inline PredBlockList::iterator::iterator(FlowEdge* pred) + : m_pred(pred) { bool initNextPointer = allowEdits; INDEBUG(initNextPointer = true); @@ -2435,13 +2484,13 @@ inline PredBlockList::iterator::iterator(FlowEdge* pred) : m_pred(pr } } -template +template inline BasicBlock* PredBlockList::iterator::operator*() const { return m_pred->getSourceBlock(); } -template +template inline typename PredBlockList::iterator& PredBlockList::iterator::operator++() { if (allowEdits) @@ -2480,7 +2529,8 @@ void* emitCodeGetCookie(const BasicBlock* block); class AllSuccessorEnumerator { BasicBlock* m_block; - union { + union + { // We store up to 4 successors inline in the enumerator. For ASP.NET // and libraries.pmi this is enough in 99.7% of cases. BasicBlock* m_successors[4]; diff --git a/src/coreclr/jit/blockset.h b/src/coreclr/jit/blockset.h index 83de7a5dad1e5..f69e1e59ace32 100644 --- a/src/coreclr/jit/blockset.h +++ b/src/coreclr/jit/blockset.h @@ -24,10 +24,11 @@ #include "compilerbitsettraits.h" #include "bitsetasshortlong.h" -class BlockSetOps : public BitSetOps +class BlockSetOps + : public BitSetOps { public: // Specialize BlockSetOps::MakeFull(). Since we number basic blocks from one, we remove bit zero from diff --git a/src/coreclr/jit/buildstring.cpp b/src/coreclr/jit/buildstring.cpp index f432fec47475f..3f0222ad2649a 100644 --- a/src/coreclr/jit/buildstring.cpp +++ b/src/coreclr/jit/buildstring.cpp @@ -1,9 +1,9 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. 
-#define STRINGIFY(L) #L +#define STRINGIFY(L) #L #define MAKESTRING(M, L) M(L) -#define STRINGIZE(X) MAKESTRING(STRINGIFY, X) +#define STRINGIZE(X) MAKESTRING(STRINGIFY, X) #if defined(__clang__) #define BUILD_COMPILER \ diff --git a/src/coreclr/jit/codegen.h b/src/coreclr/jit/codegen.h index d63e0809b6269..0ab8a81d89ef9 100644 --- a/src/coreclr/jit/codegen.h +++ b/src/coreclr/jit/codegen.h @@ -101,7 +101,7 @@ class CodeGen final : public CodeGenInterface } } - static GenTreeIndir indirForm(var_types type, GenTree* base); + static GenTreeIndir indirForm(var_types type, GenTree* base); static GenTreeStoreInd storeIndirForm(var_types type, GenTree* base, GenTree* data); GenTreeIntCon intForm(var_types type, ssize_t value); @@ -177,8 +177,8 @@ class CodeGen final : public CodeGenInterface #ifdef JIT32_GCENCODER void* genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr)); - void* genCreateAndStoreGCInfoJIT32(unsigned codeSize, - unsigned prologSize, + void* genCreateAndStoreGCInfoJIT32(unsigned codeSize, + unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr)); #else // !JIT32_GCENCODER void genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr)); @@ -206,7 +206,7 @@ class CodeGen final : public CodeGenInterface unsigned genCurDispOffset; static const char* genInsName(instruction ins); - const char* genInsDisplayName(emitter::instrDesc* id); + const char* genInsDisplayName(emitter::instrDesc* id); static const char* genSizeStr(emitAttr size); @@ -317,11 +317,17 @@ class CodeGen final : public CodeGenInterface regNumber reg2; bool useSaveNextPair; - RegPair(regNumber reg1) : reg1(reg1), reg2(REG_NA), useSaveNextPair(false) + RegPair(regNumber reg1) + : reg1(reg1) + , reg2(REG_NA) + , useSaveNextPair(false) { } - RegPair(regNumber reg1, regNumber reg2) : reg1(reg1), reg2(reg2), useSaveNextPair(false) + RegPair(regNumber reg1, regNumber reg2) + : reg1(reg1) + , reg2(reg2) + , useSaveNextPair(false) { assert(reg2 == REG_NEXT(reg1)); } @@ -364,8 +370,8 @@ class CodeGen final : public CodeGenInterface bool genStackPointerAdjustment(ssize_t spAdjustment, regNumber tmpReg); - void genPushFltRegs(regMaskTP regMask); - void genPopFltRegs(regMaskTP regMask); + void genPushFltRegs(regMaskTP regMask); + void genPopFltRegs(regMaskTP regMask); regMaskTP genStackAllocRegisterMask(unsigned frameSize, regMaskTP maskCalleeSavedFloat); regMaskTP genJmpCallArgMask(); @@ -679,17 +685,17 @@ class CodeGen final : public CodeGenInterface void genSinglePush(); void genSinglePop(); regMaskTP genPushRegs(regMaskTP regs, regMaskTP* byrefRegs, regMaskTP* noRefRegs); - void genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefRegs); - -/* -XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -XX XX -XX Debugging Support XX -XX XX -XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -*/ + void genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefRegs); + + /* + XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + XX XX + XX Debugging Support XX + XX XX + XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + */ #ifdef DEBUG void genIPmappingDisp(unsigned mappingNum, const IPmappingDsc* ipMapping); @@ -939,7 +945,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void genCompareFloat(GenTree* treeNode); void genCompareInt(GenTree* treeNode); #ifdef TARGET_XARCH - bool genCanAvoidEmittingCompareAgainstZero(GenTree* tree, var_types opType); + bool genCanAvoidEmittingCompareAgainstZero(GenTree* tree, var_types opType); GenTree* genTryFindFlagsConsumer(GenTree* flagsProducer, GenCondition** condition); #endif @@ -1112,12 +1118,12 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void genSpillLocal(unsigned varNum, var_types type, GenTreeLclVar* lclNode, regNumber regNum); void genUnspillLocal( unsigned varNum, var_types type, GenTreeLclVar* lclNode, regNumber regNum, bool reSpill, bool isLastUse); - void genUnspillRegIfNeeded(GenTree* tree); - void genUnspillRegIfNeeded(GenTree* tree, unsigned multiRegIndex); + void genUnspillRegIfNeeded(GenTree* tree); + void genUnspillRegIfNeeded(GenTree* tree, unsigned multiRegIndex); regNumber genConsumeReg(GenTree* tree); regNumber genConsumeReg(GenTree* tree, unsigned multiRegIndex); - void genCopyRegIfNeeded(GenTree* tree, regNumber needReg); - void genConsumeRegAndCopy(GenTree* tree, regNumber needReg); + void genCopyRegIfNeeded(GenTree* tree, regNumber needReg); + void genConsumeRegAndCopy(GenTree* tree, regNumber needReg); void genConsumeIfReg(GenTree* tree) { @@ -1127,15 +1133,15 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX } } - void genRegCopy(GenTree* tree); + void genRegCopy(GenTree* tree); regNumber genRegCopy(GenTree* tree, unsigned multiRegIndex); - void genTransferRegGCState(regNumber dst, regNumber src); - void genConsumeAddress(GenTree* addr); - void genConsumeAddrMode(GenTreeAddrMode* mode); - void genSetBlockSize(GenTreeBlk* blkNode, regNumber sizeReg); - void genConsumeBlockSrc(GenTreeBlk* blkNode); - void genSetBlockSrc(GenTreeBlk* blkNode, regNumber srcReg); - void genConsumeBlockOp(GenTreeBlk* blkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg); + void genTransferRegGCState(regNumber dst, regNumber src); + void genConsumeAddress(GenTree* addr); + void genConsumeAddrMode(GenTreeAddrMode* mode); + void genSetBlockSize(GenTreeBlk* blkNode, regNumber sizeReg); + void genConsumeBlockSrc(GenTreeBlk* blkNode); + void genSetBlockSrc(GenTreeBlk* blkNode, regNumber srcReg); + void genConsumeBlockOp(GenTreeBlk* blkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg); #ifdef FEATURE_PUT_STRUCT_ARG_STK void genConsumePutStructArgStk(GenTreePutArgStk* putArgStkNode, @@ -1243,10 +1249,10 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX unsigned genMove4IfNeeded(unsigned size, regNumber tmpReg, GenTree* src, unsigned offset); unsigned genMove2IfNeeded(unsigned size, regNumber tmpReg, GenTree* src, unsigned offset); unsigned genMove1IfNeeded(unsigned size, regNumber tmpReg, GenTree* src, unsigned offset); - void genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset); - void genStoreRegToStackArg(var_types type, regNumber reg, int offset); - void genStructPutArgRepMovs(GenTreePutArgStk* putArgStkNode); - void genStructPutArgUnroll(GenTreePutArgStk* putArgStkNode); + void genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset); + 
void genStoreRegToStackArg(var_types type, regNumber reg, int offset); + void genStructPutArgRepMovs(GenTreePutArgStk* putArgStkNode); + void genStructPutArgUnroll(GenTreePutArgStk* putArgStkNode); #ifdef TARGET_X86 void genStructPutArgPush(GenTreePutArgStk* putArgStkNode); #else @@ -1254,13 +1260,13 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif #endif // FEATURE_PUT_STRUCT_ARG_STK - void genCodeForStoreBlk(GenTreeBlk* storeBlkNode); - void genCodeForInitBlkLoop(GenTreeBlk* initBlkNode); - void genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode); - void genCodeForInitBlkUnroll(GenTreeBlk* initBlkNode); + void genCodeForStoreBlk(GenTreeBlk* storeBlkNode); + void genCodeForInitBlkLoop(GenTreeBlk* initBlkNode); + void genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode); + void genCodeForInitBlkUnroll(GenTreeBlk* initBlkNode); unsigned genEmitJumpTable(GenTree* treeNode, bool relativeAddr); - void genJumpTable(GenTree* tree); - void genTableBasedSwitch(GenTree* tree); + void genJumpTable(GenTree* tree); + void genTableBasedSwitch(GenTree* tree); #if defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) instruction genGetInsForOper(GenTree* treeNode); #else @@ -1270,13 +1276,13 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX regNumber targetReg, GenTreeIndir* indir, bool* needsBarrier); - bool genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data); - GenTree* getCallTarget(const GenTreeCall* call, CORINFO_METHOD_HANDLE* methHnd); - regNumber getCallIndirectionCellReg(GenTreeCall* call); - void genCall(GenTreeCall* call); - void genCallInstruction(GenTreeCall* call X86_ARG(target_ssize_t stackArgBytes)); - void genDefinePendingCallLabel(GenTreeCall* call); - void genJmpMethod(GenTree* jmp); + bool genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data); + GenTree* getCallTarget(const GenTreeCall* call, CORINFO_METHOD_HANDLE* methHnd); + regNumber getCallIndirectionCellReg(GenTreeCall* call); + void genCall(GenTreeCall* call); + void genCallInstruction(GenTreeCall* call X86_ARG(target_ssize_t stackArgBytes)); + void genDefinePendingCallLabel(GenTreeCall* call); + void genJmpMethod(GenTree* jmp); BasicBlock* genCallFinally(BasicBlock* block); #if defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) // TODO: refactor for LA. 
@@ -1318,13 +1324,13 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void genReturn(GenTree* treeNode); #ifdef TARGET_XARCH - void genStackPointerConstantAdjustment(ssize_t spDelta, bool trackSpAdjustments); - void genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, bool trackSpAdjustments); + void genStackPointerConstantAdjustment(ssize_t spDelta, bool trackSpAdjustments); + void genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, bool trackSpAdjustments); target_ssize_t genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, bool trackSpAdjustments); - void genStackPointerDynamicAdjustmentWithProbe(regNumber regSpDelta); + void genStackPointerDynamicAdjustmentWithProbe(regNumber regSpDelta); #else // !TARGET_XARCH - void genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp); - void genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp); + void genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp); + void genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp); target_ssize_t genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, regNumber regTmp); #endif // !TARGET_XARCH @@ -1358,8 +1364,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #ifdef DEBUG GenTree* lastConsumedNode; - void genNumberOperandUse(GenTree* const operand, int& useNum) const; - void genCheckConsumeNode(GenTree* const node); + void genNumberOperandUse(GenTree* const operand, int& useNum) const; + void genCheckConsumeNode(GenTree* const node); #else // !DEBUG inline void genCheckConsumeNode(GenTree* treeNode) { @@ -1437,7 +1443,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #if defined(TARGET_XARCH) - enum class OperandKind{ + enum class OperandKind + { ClsVar, // [CLS_VAR_ADDR] - "C" in the emitter. Local, // [Local or spill temp + offset] - "S" in the emitter. Indir, // [base+index*scale+disp] - "A" in the emitter. 
@@ -1448,7 +1455,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX class OperandDesc { OperandKind m_kind; - union { + union + { struct { CORINFO_FIELD_HANDLE m_fieldHnd; @@ -1476,30 +1484,45 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX }; public: - OperandDesc(CORINFO_FIELD_HANDLE fieldHnd) : m_kind(OperandKind::ClsVar), m_fieldHnd(fieldHnd) + OperandDesc(CORINFO_FIELD_HANDLE fieldHnd) + : m_kind(OperandKind::ClsVar) + , m_fieldHnd(fieldHnd) { } - OperandDesc(int varNum, uint16_t offset) : m_kind(OperandKind::Local), m_varNum(varNum), m_offset(offset) + OperandDesc(int varNum, uint16_t offset) + : m_kind(OperandKind::Local) + , m_varNum(varNum) + , m_offset(offset) { } OperandDesc(GenTreeIndir* indir) - : m_kind(OperandKind::Indir), m_addr(indir->Addr()), m_indir(indir), m_indirType(indir->TypeGet()) + : m_kind(OperandKind::Indir) + , m_addr(indir->Addr()) + , m_indir(indir) + , m_indirType(indir->TypeGet()) { } OperandDesc(var_types indirType, GenTree* addr) - : m_kind(OperandKind::Indir), m_addr(addr), m_indir(nullptr), m_indirType(indirType) + : m_kind(OperandKind::Indir) + , m_addr(addr) + , m_indir(nullptr) + , m_indirType(indirType) { } OperandDesc(ssize_t immediate, bool immediateNeedsReloc) - : m_kind(OperandKind::Imm), m_immediate(immediate), m_immediateNeedsReloc(immediateNeedsReloc) + : m_kind(OperandKind::Imm) + , m_immediate(immediate) + , m_immediateNeedsReloc(immediateNeedsReloc) { } - OperandDesc(regNumber reg) : m_kind(OperandKind::Reg), m_reg(reg) + OperandDesc(regNumber reg) + : m_kind(OperandKind::Reg) + , m_reg(reg) { } @@ -1689,7 +1712,9 @@ class CodeGenPhase final : public Phase { public: CodeGenPhase(CodeGen* _codeGen, Phases _phase, void (CodeGen::*_action)()) - : Phase(_codeGen->GetCompiler(), _phase), codeGen(_codeGen), action(_action) + : Phase(_codeGen->GetCompiler(), _phase) + , codeGen(_codeGen) + , action(_action) { } diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp index 8cf3ac32b3a32..65ba1bf5913c6 100644 --- a/src/coreclr/jit/codegenarm.cpp +++ b/src/coreclr/jit/codegenarm.cpp @@ -173,9 +173,9 @@ void CodeGen::genEHCatchRet(BasicBlock* block) //------------------------------------------------------------------------ // instGen_Set_Reg_To_Imm: Move an immediate value into an integer register. 
// -void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, - regNumber reg, - ssize_t imm, +void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, + regNumber reg, + ssize_t imm, insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { // reg cannot be a FP register @@ -1651,7 +1651,7 @@ void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, callTargetReg, // ireg REG_NA, 0, 0, // xreg, xmul, disp false // isJump - ); + ); } else { @@ -1660,7 +1660,7 @@ void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur, DebugInfo(), REG_NA, REG_NA, 0, 0, /* ilOffset, ireg, xreg, xmul, disp */ false /* isJump */ - ); + ); } regSet.verifyRegistersUsed(RBM_CALLEE_TRASH); diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index bbd48a0e9127f..ca9ab73224d7a 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -1884,8 +1884,8 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo() if (compiler->lvaPSPSym != BAD_VAR_NUM) { - if (CallerSP_to_PSP_slot_delta != - compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)) // for debugging + if (CallerSP_to_PSP_slot_delta != compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)) // for + // debugging { printf("lvaGetCallerSPRelativeOffset(lvaPSPSym): %d\n", compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)); @@ -2216,9 +2216,9 @@ void CodeGen::genEHCatchRet(BasicBlock* block) // move an immediate value into an integer register -void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, - regNumber reg, - ssize_t imm, +void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, + regNumber reg, + ssize_t imm, insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { // reg cannot be a FP register @@ -5130,7 +5130,7 @@ void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, gcInfo.gcRegByrefSetCur, DebugInfo(), callTarget, /* ireg */ REG_NA, 0, 0, /* xreg, xmul, disp */ false /* isJump */ - ); + ); regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper); regSet.verifyRegistersUsed(killMask); @@ -5739,8 +5739,8 @@ void CodeGen::genCodeForBfiz(GenTreeOp* tree) GenTree* castOp = cast->CastOp(); genConsumeRegs(castOp); - unsigned srcBits = varTypeIsSmall(cast->CastToType()) ? genTypeSize(cast->CastToType()) * BITS_PER_BYTE - : genTypeSize(castOp) * BITS_PER_BYTE; + unsigned srcBits = varTypeIsSmall(cast->CastToType()) ? genTypeSize(cast->CastToType()) * BITS_PER_BYTE + : genTypeSize(castOp) * BITS_PER_BYTE; const bool isUnsigned = cast->IsUnsigned() || varTypeIsUnsigned(cast->CastToType()); GetEmitter()->emitIns_R_R_I_I(isUnsigned ? 
INS_ubfiz : INS_sbfiz, size, tree->GetRegNum(), castOp->GetRegNum(), (int)shiftByImm, (int)srcBits); diff --git a/src/coreclr/jit/codegenarm64test.cpp b/src/coreclr/jit/codegenarm64test.cpp index 750daa569613f..52633ed6733e6 100644 --- a/src/coreclr/jit/codegenarm64test.cpp +++ b/src/coreclr/jit/codegenarm64test.cpp @@ -4932,16 +4932,16 @@ void CodeGen::genArm64EmitterUnitTestsSve() INS_OPTS_SCALABLE_B); /* SEL .B, , .B, .B */ // IF_SVE_CZ_4A_A - theEmitter->emitIns_R_R(INS_sve_movs, EA_SCALABLE, REG_P0, REG_P15, - INS_OPTS_SCALABLE_B); /* MOVS .B, .B */ + theEmitter->emitIns_R_R(INS_sve_movs, EA_SCALABLE, REG_P0, REG_P15, INS_OPTS_SCALABLE_B); /* MOVS .B, .B + */ // IF_SVE_CZ_4A_K theEmitter->emitIns_R_R_R(INS_sve_mov, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE); /* MOV .B, /M, .B */ // IF_SVE_CZ_4A_L - theEmitter->emitIns_R_R(INS_sve_mov, EA_SCALABLE, REG_P0, REG_P15, - INS_OPTS_SCALABLE_B); /* MOV .B, .B */ + theEmitter->emitIns_R_R(INS_sve_mov, EA_SCALABLE, REG_P0, REG_P15, INS_OPTS_SCALABLE_B); /* MOV .B, .B + */ // IF_SVE_DA_4A theEmitter->emitIns_R_R_R_R(INS_sve_brkpa, EA_SCALABLE, REG_P0, REG_P1, REG_P10, REG_P15, diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp index 1f550660b7798..965b72721aaaa 100644 --- a/src/coreclr/jit/codegenarmarch.cpp +++ b/src/coreclr/jit/codegenarmarch.cpp @@ -727,8 +727,8 @@ void CodeGen::genIntrinsic(GenTreeIntrinsic* treeNode) break; #if defined(FEATURE_SIMD) - // The handling is a bit more complex so genSimdUpperSave/Restore - // handles genConsumeOperands and genProduceReg + // The handling is a bit more complex so genSimdUpperSave/Restore + // handles genConsumeOperands and genProduceReg case NI_SIMD_UpperRestore: { @@ -861,7 +861,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) emit->emitIns_S_R(storeIns, storeAttr, REG_ZR, varNumOut, argOffsetOut); #else // !TARGET_ARM64 - // There is no zero register on ARM32 + // There is no zero register on ARM32 unreached(); #endif // !TARGET_ARM64 } @@ -1018,9 +1018,9 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) nextIndex += 2; } #else // TARGET_ARM - // For a >= 4 byte sizes we will generate a ldr and str instruction each loop - // ldr r2, [r0] - // str r2, [sp, #16] + // For a >= 4 byte sizes we will generate a ldr and str instruction each loop + // ldr r2, [r0] + // str r2, [sp, #16] while (remainingSize >= TARGET_POINTER_SIZE) { var_types type = layout->GetGCPtrType(nextIndex); @@ -1812,7 +1812,7 @@ instruction CodeGen::genGetVolatileLdStIns(instruction currentIns, assert(!addrIsInReg); switch (currentIns) { - // Loads + // Loads case INS_ldrb: return INS_ldapurb; @@ -1823,7 +1823,7 @@ instruction CodeGen::genGetVolatileLdStIns(instruction currentIns, case INS_ldr: return INS_ldapur; - // Stores + // Stores case INS_strb: return INS_stlurb; @@ -1855,7 +1855,7 @@ instruction CodeGen::genGetVolatileLdStIns(instruction currentIns, const bool hasRcpc1 = compiler->compOpportunisticallyDependsOn(InstructionSet_Rcpc); switch (currentIns) { - // Loads + // Loads case INS_ldrb: return hasRcpc1 ? INS_ldaprb : INS_ldarb; @@ -1866,7 +1866,7 @@ instruction CodeGen::genGetVolatileLdStIns(instruction currentIns, case INS_ldr: return hasRcpc1 ? 
INS_ldapr : INS_ldar; - // Stores + // Stores case INS_strb: return INS_stlrb; @@ -2060,7 +2060,10 @@ class ProducingStreamBaseInstrs { public: ProducingStreamBaseInstrs(regNumber intReg1, regNumber intReg2, regNumber addrReg, emitter* emitter) - : intReg1(intReg1), intReg2(intReg2), addrReg(addrReg), emitter(emitter) + : intReg1(intReg1) + , intReg2(intReg2) + , addrReg(addrReg) + , emitter(emitter) { } @@ -2121,7 +2124,11 @@ class ProducingStream { public: ProducingStream(regNumber intReg1, regNumber simdReg1, regNumber simdReg2, regNumber addrReg, emitter* emitter) - : intReg1(intReg1), simdReg1(simdReg1), simdReg2(simdReg2), addrReg(addrReg), emitter(emitter) + : intReg1(intReg1) + , simdReg1(simdReg1) + , simdReg2(simdReg2) + , addrReg(addrReg) + , emitter(emitter) { } @@ -2244,7 +2251,9 @@ class BlockUnrollHelper class InitBlockUnrollHelper { public: - InitBlockUnrollHelper(int dstOffset, unsigned byteCount) : dstStartOffset(dstOffset), byteCount(byteCount) + InitBlockUnrollHelper(int dstOffset, unsigned byteCount) + : dstStartOffset(dstOffset) + , byteCount(byteCount) { } @@ -2373,7 +2382,9 @@ class CopyBlockUnrollHelper { public: CopyBlockUnrollHelper(int srcOffset, int dstOffset, unsigned byteCount) - : srcStartOffset(srcOffset), dstStartOffset(dstOffset), byteCount(byteCount) + : srcStartOffset(srcOffset) + , dstStartOffset(dstOffset) + , byteCount(byteCount) { } @@ -3432,13 +3443,13 @@ void CodeGen::genCall(GenTreeCall* call) else #endif // TARGET_ARM if (varTypeUsesFloatArgReg(returnType)) - { - returnReg = REG_FLOATRET; - } - else - { - returnReg = REG_INTRET; - } + { + returnReg = REG_FLOATRET; + } + else + { + returnReg = REG_INTRET; + } if (call->GetRegNum() != returnReg) { @@ -3694,19 +3705,19 @@ void CodeGen::genCallInstruction(GenTreeCall* call) else #endif // FEATURE_READYTORUN if (call->gtCallType == CT_HELPER) - { - CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd); - noway_assert(helperNum != CORINFO_HELP_UNDEF); + { + CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd); + noway_assert(helperNum != CORINFO_HELP_UNDEF); - void* pAddr = nullptr; - addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr); - assert(pAddr == nullptr); - } - else - { - // Direct call to a non-virtual user function. - addr = call->gtDirectCallAddress; - } + void* pAddr = nullptr; + addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr); + assert(pAddr == nullptr); + } + else + { + // Direct call to a non-virtual user function. + addr = call->gtDirectCallAddress; + } assert(addr != nullptr); @@ -4372,8 +4383,8 @@ void CodeGen::genFloatToFloatCast(GenTree* treeNode) //------------------------------------------------------------------------ // genCreateAndStoreGCInfo: Create and record GC Info for the function. 
// -void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, - unsigned prologSize, +void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, + unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr)) { IAllocator* allowZeroAlloc = new (compiler, CMK_GC) CompIAllocator(compiler->getAllocatorGC()); @@ -5529,9 +5540,8 @@ void CodeGen::genFnEpilog(BasicBlock* block) compiler->unwindSetFrameReg(REG_SAVED_LOCALLOC_SP, 0); } - if (jmpEpilog || - genStackAllocRegisterMask(compiler->compLclFrameSize, regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED) == - RBM_NONE) + if (jmpEpilog || genStackAllocRegisterMask(compiler->compLclFrameSize, + regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED) == RBM_NONE) { genFreeLclFrame(compiler->compLclFrameSize, &unwindStarted); } @@ -5603,9 +5613,9 @@ void CodeGen::genFnEpilog(BasicBlock* block) #if !FEATURE_FASTTAILCALL noway_assert(jmpNode->gtOper == GT_JMP); #else // FEATURE_FASTTAILCALL - // armarch - // If jmpNode is GT_JMP then gtNext must be null. - // If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts. + // armarch + // If jmpNode is GT_JMP then gtNext must be null. + // If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts. noway_assert((jmpNode->gtOper != GT_JMP) || (jmpNode->gtNext == nullptr)); // Could either be a "jmp method" or "fast tail call" implemented as epilog+jmp diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index a36c8d92dcac1..5c733c5457d94 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -65,7 +65,10 @@ CodeGenInterface* getCodeGenerator(Compiler* comp) // CodeGen constructor CodeGenInterface::CodeGenInterface(Compiler* theCompiler) - : gcInfo(theCompiler), regSet(theCompiler, gcInfo), compiler(theCompiler), treeLifeUpdater(nullptr) + : gcInfo(theCompiler) + , regSet(theCompiler, gcInfo) + , compiler(theCompiler) + , treeLifeUpdater(nullptr) { } @@ -84,7 +87,8 @@ void CodeGenInterface::CopyRegisterInfo() /*****************************************************************************/ -CodeGen::CodeGen(Compiler* theCompiler) : CodeGenInterface(theCompiler) +CodeGen::CodeGen(Compiler* theCompiler) + : CodeGenInterface(theCompiler) { #if defined(TARGET_XARCH) negBitmaskFlt = nullptr; @@ -1873,7 +1877,7 @@ void CodeGen::genGenerateMachineCode() (compiler->compCodeOpt() != Compiler::SMALL_CODE) && !compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) #endif - ); + ); /* Now generate code for the function */ genCodeForBBlist(); @@ -3205,9 +3209,9 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere #ifdef TARGET_X86 noway_assert(varDsc->lvType == TYP_STRUCT); #else // !TARGET_X86 - // For LSRA, it may not be in regArgMaskLive if it has a zero - // refcnt. This is in contrast with the non-LSRA case in which all - // non-tracked args are assumed live on entry. + // For LSRA, it may not be in regArgMaskLive if it has a zero + // refcnt. This is in contrast with the non-LSRA case in which all + // non-tracked args are assumed live on entry. 
noway_assert((varDsc->lvRefCnt() == 0) || (varDsc->lvType == TYP_STRUCT) || (varDsc->IsAddressExposed() && compiler->info.compIsVarArgs) || (varDsc->IsAddressExposed() && compiler->opts.compUseSoftFP)); @@ -4137,8 +4141,8 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere int nextArgNum = argNum + i; LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + i); regNumber nextRegNum = genMapRegArgNumToRegNum(nextArgNum, regArgTab[nextArgNum].type, - compiler->info.compCallConv); - destRegNum = fieldVarDsc->GetRegNum(); + compiler->info.compCallConv); + destRegNum = fieldVarDsc->GetRegNum(); noway_assert(regArgTab[nextArgNum].varNum == varNum); noway_assert(genIsValidFloatReg(nextRegNum)); noway_assert(genIsValidFloatReg(destRegNum)); @@ -4221,7 +4225,7 @@ void CodeGen::genEnregisterIncomingStackArgs() regNumber tmp_reg = REG_NA; #endif - for (LclVarDsc *varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++) + for (LclVarDsc* varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++) { /* Is this variable a parameter? */ @@ -4295,7 +4299,7 @@ void CodeGen::genEnregisterIncomingStackArgs() } } } -#else // !TARGET_LOONGARCH64 +#else // !TARGET_LOONGARCH64 GetEmitter()->emitIns_R_S(ins_Load(regType), emitTypeSize(regType), regNum, varNum, 0); #endif // !TARGET_LOONGARCH64 @@ -5348,7 +5352,7 @@ void CodeGen::genFinalizeFrame() } noway_assert((regSet.rsGetModifiedRegsMask() & ~okRegs) == 0); #else // !TARGET_AMD64 && !TARGET_ARM64 - // On x86 we save all callee saved regs so the saved reg area size is consistent + // On x86 we save all callee saved regs so the saved reg area size is consistent regSet.rsSetRegsModified(RBM_INT_CALLEE_SAVED & ~RBM_FPBASE); #endif // !TARGET_AMD64 && !TARGET_ARM64 } @@ -6077,7 +6081,7 @@ void CodeGen::genFnProlog() } #endif // TARGET_AMD64 -//------------------------------------------------------------------------- + //------------------------------------------------------------------------- #ifdef TARGET_ARM if (compiler->compLocallocUsed) @@ -6103,11 +6107,11 @@ void CodeGen::genFnProlog() #endif // TARGET_AMD64 compiler->unwindEndProlog(); -//------------------------------------------------------------------------- -// -// This is the end of the OS-reported prolog for purposes of unwinding -// -//------------------------------------------------------------------------- + //------------------------------------------------------------------------- + // + // This is the end of the OS-reported prolog for purposes of unwinding + // + //------------------------------------------------------------------------- #ifdef TARGET_ARM if (needToEstablishFP) @@ -8494,7 +8498,7 @@ void CodeGen::genPoisonFrame(regMaskTP regLiveIn) bool fpBased; int addr = compiler->lvaFrameAddress((int)varNum, &fpBased); #else - int addr = 0; + int addr = 0; #endif int end = addr + (int)size; for (int offs = addr; offs < end;) diff --git a/src/coreclr/jit/codegeninterface.h b/src/coreclr/jit/codegeninterface.h index 63954adc6ffbb..ef87ccca85870 100644 --- a/src/coreclr/jit/codegeninterface.h +++ b/src/coreclr/jit/codegeninterface.h @@ -165,8 +165,8 @@ class CodeGenInterface TreeLifeUpdater* treeLifeUpdater; public: - bool genUseOptimizedWriteBarriers(GCInfo::WriteBarrierForm wbf); - bool genUseOptimizedWriteBarriers(GenTreeStoreInd* store); + bool genUseOptimizedWriteBarriers(GCInfo::WriteBarrierForm wbf); + bool genUseOptimizedWriteBarriers(GenTreeStoreInd* store); CorInfoHelpFunc 
genWriteBarrierHelperForWriteBarrierForm(GCInfo::WriteBarrierForm wbf); #ifdef DEBUG @@ -442,7 +442,8 @@ class CodeGenInterface { siVarLocType vlType; - union { + union + { // VLT_REG/VLT_REG_FP -- Any pointer-sized enregistered value (TYP_INT, TYP_REF, etc) // eg. EAX // VLT_REG_BYREF -- the specified register contains the address of the variable @@ -627,7 +628,9 @@ class CodeGenInterface VariableLiveRange(CodeGenInterface::siVarLoc varLocation, emitLocation startEmitLocation, emitLocation endEmitLocation) - : m_StartEmitLocation(startEmitLocation), m_EndEmitLocation(endEmitLocation), m_VarLocation(varLocation) + : m_StartEmitLocation(startEmitLocation) + , m_EndEmitLocation(endEmitLocation) + , m_VarLocation(varLocation) { } @@ -675,7 +678,8 @@ class CodeGenInterface public: LiveRangeDumper(const LiveRangeList* liveRanges) - : m_startingLiveRange(liveRanges->end()), m_hasLiveRangesToDump(false){}; + : m_startingLiveRange(liveRanges->end()) + , m_hasLiveRangesToDump(false){}; // Make the dumper point to the last "VariableLiveRange" opened or nullptr if all are closed void resetDumper(const LiveRangeList* list); @@ -756,7 +760,7 @@ class CodeGenInterface LiveRangeList* getLiveRangesForVarForBody(unsigned int varNum) const; LiveRangeList* getLiveRangesForVarForProlog(unsigned int varNum) const; - size_t getLiveRangesCount() const; + size_t getLiveRangesCount() const; // For parameters locations on prolog void psiStartVariableLiveRange(CodeGenInterface::siVarLoc varLocation, unsigned int varNum); diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index 5e05f1b081982..2d8a2093454f8 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -157,7 +157,8 @@ void CodeGen::genCodeForBBlist() genMarkLabelsForCodegen(); assert(!compiler->fgFirstBBScratch || - compiler->fgFirstBB == compiler->fgFirstBBScratch); // compiler->fgFirstBBScratch has to be first. + compiler->fgFirstBB == compiler->fgFirstBBScratch); // compiler->fgFirstBBScratch + // has to be first. /* Initialize structures used in the block list iteration */ genInitialize(); @@ -622,7 +623,7 @@ void CodeGen::genCodeForBBlist() case BBJ_THROW: case BBJ_CALLFINALLY: case BBJ_EHCATCHRET: - // We're going to generate more code below anyway, so no need for the NOP. + // We're going to generate more code below anyway, so no need for the NOP. case BBJ_RETURN: case BBJ_EHFINALLYRET: @@ -633,7 +634,7 @@ void CodeGen::genCodeForBBlist() case BBJ_COND: case BBJ_SWITCH: - // These can't have a call as the last instruction! + // These can't have a call as the last instruction! default: noway_assert(!"Unexpected bbKind"); @@ -1868,8 +1869,8 @@ void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk, unsigned outArg var_types type = use.GetType(); unsigned thisFieldOffset = argOffset + use.GetOffset(); -// Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing -// argument area. + // Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing + // argument area. 
#if defined(FEATURE_SIMD) if (type == TYP_SIMD12) diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index a99199aedc634..ec27d2ff8ab4d 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -1225,9 +1225,9 @@ void CodeGen::genFnEpilog(BasicBlock* block) #if !FEATURE_FASTTAILCALL noway_assert(jmpNode->gtOper == GT_JMP); #else // FEATURE_FASTTAILCALL - // armarch - // If jmpNode is GT_JMP then gtNext must be null. - // If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts. + // armarch + // If jmpNode is GT_JMP then gtNext must be null. + // If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts. noway_assert((jmpNode->gtOper != GT_JMP) || (jmpNode->gtNext == nullptr)); // Could either be a "jmp method" or "fast tail call" implemented as epilog+jmp @@ -1573,9 +1573,9 @@ void CodeGen::genEHCatchRet(BasicBlock* block) } // move an immediate value into an integer register -void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, - regNumber reg, - ssize_t imm, +void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, + regNumber reg, + ssize_t imm, insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { emitter* emit = GetEmitter(); @@ -3334,7 +3334,7 @@ void CodeGen::genCodeForReturnTrap(GenTreeOp* tree) callTarget, /* ireg */ REG_NA, 0, 0, /* xreg, xmul, disp */ false /* isJump */ - ); + ); regMaskTP killMask = compiler->compHelperCallKillSet(CORINFO_HELP_STOP_FOR_GC); regSet.verifyRegistersUsed(killMask); @@ -4398,7 +4398,7 @@ void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, callTarget, /* ireg */ REG_NA, 0, 0, /* xreg, xmul, disp */ false /* isJump */ - ); + ); regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper); regSet.verifyRegistersUsed(killMask); @@ -6648,19 +6648,19 @@ void CodeGen::genCallInstruction(GenTreeCall* call) else #endif // FEATURE_READYTORUN if (call->gtCallType == CT_HELPER) - { - CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd); - noway_assert(helperNum != CORINFO_HELP_UNDEF); + { + CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd); + noway_assert(helperNum != CORINFO_HELP_UNDEF); - void* pAddr = nullptr; - addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr); - assert(pAddr == nullptr); - } - else - { - // Direct call to a non-virtual user function. - addr = call->gtDirectCallAddress; - } + void* pAddr = nullptr; + addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr); + assert(pAddr == nullptr); + } + else + { + // Direct call to a non-virtual user function. + addr = call->gtDirectCallAddress; + } assert(addr != nullptr); @@ -7093,8 +7093,8 @@ void CodeGen::genFloatToFloatCast(GenTree* treeNode) //------------------------------------------------------------------------ // genCreateAndStoreGCInfo: Create and record GC Info for the function. 
// -void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, - unsigned prologSize, +void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, + unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr)) { IAllocator* allowZeroAlloc = new (compiler, CMK_GC) CompIAllocator(compiler->getAllocatorGC()); @@ -7615,7 +7615,7 @@ inline void CodeGen::genJumpToThrowHlpBlk_la( callTarget, /* ireg */ REG_NA, 0, 0, /* xreg, xmul, disp */ false /* isJump */ - ); + ); regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)(compiler->acdHelper(codeKind))); regSet.verifyRegistersUsed(killMask); diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index 87745fabe3e04..1d48582c6c316 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -1212,9 +1212,9 @@ void CodeGen::genFnEpilog(BasicBlock* block) #if !FEATURE_FASTTAILCALL noway_assert(jmpNode->gtOper == GT_JMP); #else // FEATURE_FASTTAILCALL - // armarch - // If jmpNode is GT_JMP then gtNext must be null. - // If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts. + // armarch + // If jmpNode is GT_JMP then gtNext must be null. + // If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts. noway_assert((jmpNode->gtOper != GT_JMP) || (jmpNode->gtNext == nullptr)); // Could either be a "jmp method" or "fast tail call" implemented as epilog+jmp @@ -1534,9 +1534,9 @@ void CodeGen::genEHCatchRet(BasicBlock* block) } // move an immediate value into an integer register -void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, - regNumber reg, - ssize_t imm, +void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, + regNumber reg, + ssize_t imm, insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { emitter* emit = GetEmitter(); @@ -3338,7 +3338,7 @@ void CodeGen::genCodeForReturnTrap(GenTreeOp* tree) callTarget, /* ireg */ REG_NA, 0, 0, /* xreg, xmul, disp */ false /* isJump */ - ); + ); regMaskTP killMask = compiler->compHelperCallKillSet(CORINFO_HELP_STOP_FOR_GC); regSet.verifyRegistersUsed(killMask); @@ -4359,7 +4359,7 @@ void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, callTarget, /* ireg */ REG_NA, 0, 0, /* xreg, xmul, disp */ false /* isJump */ - ); + ); regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper); regSet.verifyRegistersUsed(killMask); @@ -6724,19 +6724,19 @@ void CodeGen::genCallInstruction(GenTreeCall* call) else #endif // FEATURE_READYTORUN if (call->gtCallType == CT_HELPER) - { - CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd); - noway_assert(helperNum != CORINFO_HELP_UNDEF); + { + CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd); + noway_assert(helperNum != CORINFO_HELP_UNDEF); - void* pAddr = nullptr; - addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr); - assert(pAddr == nullptr); - } - else - { - // Direct call to a non-virtual user function. - addr = call->gtDirectCallAddress; - } + void* pAddr = nullptr; + addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr); + assert(pAddr == nullptr); + } + else + { + // Direct call to a non-virtual user function. + addr = call->gtDirectCallAddress; + } assert(addr != nullptr); @@ -7163,8 +7163,8 @@ void CodeGen::genFloatToFloatCast(GenTree* treeNode) //------------------------------------------------------------------------ // genCreateAndStoreGCInfo: Create and record GC Info for the function. 
// -void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, - unsigned prologSize, +void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, + unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr)) { IAllocator* allowZeroAlloc = new (compiler, CMK_GC) CompIAllocator(compiler->getAllocatorGC()); @@ -7683,7 +7683,7 @@ void CodeGen::genJumpToThrowHlpBlk_la( callTarget, /* ireg */ REG_NA, 0, 0, /* xreg, xmul, disp */ false /* isJump */ - ); + ); regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)(compiler->acdHelper(codeKind))); regSet.verifyRegistersUsed(killMask); diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 314346300b005..ede5df1bea39d 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -156,9 +156,9 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg) regGSCheck = REG_EAX; regMaskGSCheck = RBM_EAX; #else // !TARGET_X86 - // Jmp calls: specify method handle using which JIT queries VM for its entry point - // address and hence it can neither be a VSD call nor PInvoke calli with cookie - // parameter. Therefore, in case of jmp calls it is safe to use R11. + // Jmp calls: specify method handle using which JIT queries VM for its entry point + // address and hence it can neither be a VSD call nor PInvoke calli with cookie + // parameter. Therefore, in case of jmp calls it is safe to use R11. regGSCheck = REG_R11; #endif // !TARGET_X86 } @@ -387,9 +387,9 @@ void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block) // Move an immediate value into an integer register -void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, - regNumber reg, - ssize_t imm, +void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, + regNumber reg, + ssize_t imm, insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { // reg cannot be a FP register @@ -2158,8 +2158,8 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) // The last slot is reserved for ICodeManager::FixContext(ppEndRegion) unsigned filterEndOffsetSlotOffs; - PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) > - TARGET_POINTER_SIZE); // below doesn't underflow. + PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) > TARGET_POINTER_SIZE); // below doesn't + // underflow. filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE); @@ -6093,13 +6093,13 @@ void CodeGen::genCall(GenTreeCall* call) else #endif // TARGET_X86 if (varTypeIsFloating(returnType)) - { - returnReg = REG_FLOATRET; - } - else - { - returnReg = REG_INTRET; - } + { + returnReg = REG_FLOATRET; + } + else + { + returnReg = REG_INTRET; + } inst_Mov(returnType, call->GetRegNum(), returnReg, /* canSkip */ true); } @@ -8040,8 +8040,8 @@ void CodeGen::genIntrinsic(GenTreeIntrinsic* treeNode) } #if defined(FEATURE_SIMD) - // The handling is a bit more complex so genSimdUpperSave/Restore - // handles genConsumeOperands and genProduceReg + // The handling is a bit more complex so genSimdUpperSave/Restore + // handles genConsumeOperands and genProduceReg case NI_SIMD_UpperRestore: { @@ -8111,7 +8111,7 @@ unsigned CodeGen::getBaseVarForPutArgStk(GenTree* treeNode) #ifdef UNIX_AMD64_ABI assert(!varDsc->lvIsRegArg && varDsc->GetArgReg() == REG_STK); #else // !UNIX_AMD64_ABI - // On Windows this assert is always true. The first argument will always be in REG_ARG_0 or REG_FLTARG_0. + // On Windows this assert is always true. The first argument will always be in REG_ARG_0 or REG_FLTARG_0. 
assert(varDsc->lvIsRegArg && (varDsc->GetArgReg() == REG_ARG_0 || varDsc->GetArgReg() == REG_FLTARG_0)); #endif // !UNIX_AMD64_ABI #endif // !DEBUG @@ -8584,7 +8584,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* putArgStk) unsigned argOffset = putArgStk->getArgOffset(); #ifdef DEBUG - CallArg* callArg = putArgStk->gtCall->gtArgs.FindByNode(putArgStk); + CallArg* callArg = putArgStk->gtCall->gtArgs.FindByNode(putArgStk); assert(callArg != nullptr); assert(argOffset == callArg->AbiInfo.ByteOffset); #endif @@ -8837,8 +8837,8 @@ CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigne } #ifdef JIT32_GCENCODER -void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, - unsigned prologSize, +void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, + unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr)) { BYTE headerBuf[64]; @@ -9240,8 +9240,8 @@ void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed) unsigned saveStackLvl2 = genStackLevel; -// Important note: when you change enter probe layout, you must also update SKIP_ENTER_PROF_CALLBACK() -// for x86 stack unwinding + // Important note: when you change enter probe layout, you must also update SKIP_ENTER_PROF_CALLBACK() + // for x86 stack unwinding #if defined(UNIX_X86_ABI) // Manually align the stack to be 16-byte aligned. This is similar to CodeGen::genAlignStackBeforeCall() @@ -10203,7 +10203,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) // do an LEA to "pop off" the frame allocation. needLea = true; #else // !TARGET_AMD64 - // We will just generate "mov esp, ebp" and be done with it. + // We will just generate "mov esp, ebp" and be done with it. needMovEspEbp = true; #endif // !TARGET_AMD64 } @@ -10949,8 +10949,8 @@ void CodeGen::genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNu assert(i == alignmentLoBlkSize); } #else // !defined(TARGET_AMD64) - // While we aren't aligning the start, we still want to - // zero anything that is not in a 16 byte chunk at end + // While we aren't aligning the start, we still want to + // zero anything that is not in a 16 byte chunk at end int alignmentBlkSize = blkSize & -XMM_REGSIZE_BYTES; int alignmentHiBlkSize = blkSize - alignmentBlkSize; int alignedLclHi = untrLclLo + alignmentBlkSize; @@ -11129,8 +11129,8 @@ void CodeGen::genPreserveCalleeSavedFltRegs(unsigned lclFrameSize) assert((offset % 16) == 0); instruction copyIns = ins_Copy(TYP_FLOAT); #else // !TARGET_AMD64 - unsigned offset = lclFrameSize - XMM_REGSIZE_BYTES; - instruction copyIns = INS_movupd; + unsigned offset = lclFrameSize - XMM_REGSIZE_BYTES; + instruction copyIns = INS_movupd; #endif // !TARGET_AMD64 for (regNumber reg = REG_FLT_CALLEE_SAVED_FIRST; regMask != RBM_NONE; reg = REG_NEXT(reg)) diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index 2e5f82c3daf38..10dc0ef5f07e5 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -645,11 +645,11 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, // have a struct that is larger than that. 
// if (structSize <= MAX_PASS_SINGLEREG_BYTES) - { - // We set the "primitive" useType based upon the structSize - // and also examine the clsHnd to see if it is an HFA of count one - useType = getPrimitiveTypeForStruct(structSize, clsHnd, isVarArg); - } + { + // We set the "primitive" useType based upon the structSize + // and also examine the clsHnd to see if it is an HFA of count one + useType = getPrimitiveTypeForStruct(structSize, clsHnd, isVarArg); + } #else if (isTrivialPointerSizedStruct(clsHnd)) { @@ -1157,11 +1157,15 @@ struct FileLine unsigned m_line; char* m_condStr; - FileLine() : m_file(nullptr), m_line(0), m_condStr(nullptr) + FileLine() + : m_file(nullptr) + , m_line(0) + , m_condStr(nullptr) { } - FileLine(const char* file, unsigned line, const char* condStr) : m_line(line) + FileLine(const char* file, unsigned line, const char* condStr) + : m_line(line) { size_t newSize = (strlen(file) + 1) * sizeof(char); m_file = HostAllocator::getHostAllocator().allocate(newSize); @@ -1200,7 +1204,7 @@ struct FileLine }; typedef JitHashTable FileLineToCountMap; -FileLineToCountMap* NowayAssertMap; +FileLineToCountMap* NowayAssertMap; void Compiler::RecordNowayAssert(const char* filename, unsigned line, const char* condStr) { @@ -1233,7 +1237,8 @@ struct NowayAssertCountMap size_t count; FileLine fl; - NowayAssertCountMap() : count(0) + NowayAssertCountMap() + : count(0) { } @@ -2026,8 +2031,8 @@ void Compiler::compDone() #endif // LATE_DISASM } -void* Compiler::compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ - void** ppIndirection) /* OUT */ +void* Compiler::compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ + void** ppIndirection) /* OUT */ { void* addr; @@ -3406,11 +3411,10 @@ void Compiler::compInitOptions(JitFlags* jitFlags) printf("OPTIONS: OSR variant with entry point 0x%x\n", info.compILEntry); } - printf("OPTIONS: compCodeOpt = %s\n", - (opts.compCodeOpt == BLENDED_CODE) - ? "BLENDED_CODE" - : (opts.compCodeOpt == SMALL_CODE) ? "SMALL_CODE" - : (opts.compCodeOpt == FAST_CODE) ? "FAST_CODE" : "UNKNOWN_CODE"); + printf("OPTIONS: compCodeOpt = %s\n", (opts.compCodeOpt == BLENDED_CODE) ? "BLENDED_CODE" + : (opts.compCodeOpt == SMALL_CODE) ? "SMALL_CODE" + : (opts.compCodeOpt == FAST_CODE) ? 
"FAST_CODE" + : "UNKNOWN_CODE"); printf("OPTIONS: compDbgCode = %s\n", dspBool(opts.compDbgCode)); printf("OPTIONS: compDbgInfo = %s\n", dspBool(opts.compDbgInfo)); @@ -4011,8 +4015,9 @@ void Compiler::compSetOptimizationLevel() } if (theMinOptsValue == true) { - JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count " - "%3d,%3d for method %s\n", + JITLOG((LL_INFO10000, + "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count " + "%3d,%3d for method %s\n", info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); if (JitConfig.JitBreakOnMinOpts() != 0) { @@ -4793,7 +4798,9 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl { // Tail merge // - DoPhase(this, PHASE_HEAD_TAIL_MERGE, [this]() { return fgHeadTailMerge(true); }); + DoPhase(this, PHASE_HEAD_TAIL_MERGE, [this]() { + return fgHeadTailMerge(true); + }); // Merge common throw blocks // @@ -4864,7 +4871,6 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl DoPhase(this, PHASE_MORPH_GLOBAL, &Compiler::fgMorphBlocks); auto postMorphPhase = [this]() { - // Fix any LclVar annotations on discarded struct promotion temps for implicit by-ref args fgMarkDemotedImplicitByRefArgs(); lvaRefCountState = RCS_INVALID; @@ -4919,7 +4925,9 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl // Second pass of tail merge // - DoPhase(this, PHASE_HEAD_TAIL_MERGE2, [this]() { return fgHeadTailMerge(false); }); + DoPhase(this, PHASE_HEAD_TAIL_MERGE2, [this]() { + return fgHeadTailMerge(false); + }); // Canonicalize entry to give a unique dominator tree root // @@ -5274,7 +5282,9 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl // Now that lowering is completed we can proceed to perform register allocation // - auto linearScanPhase = [this]() { m_pLinearScan->doLinearScan(); }; + auto linearScanPhase = [this]() { + m_pLinearScan->doLinearScan(); + }; DoPhase(this, PHASE_LINEAR_SCAN, linearScanPhase); // Copied from rpPredictRegUse() @@ -6171,12 +6181,12 @@ int Compiler::compCompile(CORINFO_MODULE_HANDLE classPtr, // We need to assume, by default, that all flags coming from the VM are invalid. instructionSetFlags.Reset(); -// We then add each available instruction set for the target architecture provided -// that the corresponding JitConfig switch hasn't explicitly asked for it to be -// disabled. This allows us to default to "everything" supported for altjit scenarios -// while also still allowing instruction set opt-out providing users with the ability -// to, for example, see and debug ARM64 codegen for any desired CPU configuration without -// needing to have the hardware in question. + // We then add each available instruction set for the target architecture provided + // that the corresponding JitConfig switch hasn't explicitly asked for it to be + // disabled. This allows us to default to "everything" supported for altjit scenarios + // while also still allowing instruction set opt-out providing users with the ability + // to, for example, see and debug ARM64 codegen for any desired CPU configuration without + // needing to have the hardware in question. 
#if defined(TARGET_ARM64) if (JitConfig.EnableHWIntrinsic() != 0) @@ -7949,112 +7959,105 @@ int jitNativeCode(CORINFO_METHOD_HANDLE methodHnd, #endif param.result = result; - setErrorTrap(compHnd, Param*, pParamOuter, ¶m) - { - setErrorTrap(nullptr, Param*, pParam, pParamOuter) - { - if (pParam->inlineInfo) - { - // Lazily create the inlinee compiler object - if (pParam->inlineInfo->InlinerCompiler->InlineeCompiler == nullptr) - { - pParam->inlineInfo->InlinerCompiler->InlineeCompiler = - (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); - } + setErrorTrap(compHnd, Param*, pParamOuter, ¶m){setErrorTrap(nullptr, Param*, pParam, pParamOuter){ + if (pParam->inlineInfo){// Lazily create the inlinee compiler object + if (pParam->inlineInfo->InlinerCompiler->InlineeCompiler == nullptr){ + pParam->inlineInfo->InlinerCompiler->InlineeCompiler = + (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); +} - // Use the inlinee compiler object - pParam->pComp = pParam->inlineInfo->InlinerCompiler->InlineeCompiler; +// Use the inlinee compiler object +pParam->pComp = pParam->inlineInfo->InlinerCompiler->InlineeCompiler; #ifdef DEBUG // memset(pParam->pComp, 0xEE, sizeof(Compiler)); #endif - } - else - { - // Allocate create the inliner compiler object - pParam->pComp = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); - } +} +else +{ + // Allocate create the inliner compiler object + pParam->pComp = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); +} #if MEASURE_CLRAPI_CALLS - pParam->wrapCLR = WrapICorJitInfo::makeOne(pParam->pAlloc, pParam->pComp, pParam->compHnd); +pParam->wrapCLR = WrapICorJitInfo::makeOne(pParam->pAlloc, pParam->pComp, pParam->compHnd); #endif - // push this compiler on the stack (TLS) - pParam->pComp->prevCompiler = JitTls::GetCompiler(); - JitTls::SetCompiler(pParam->pComp); +// push this compiler on the stack (TLS) +pParam->pComp->prevCompiler = JitTls::GetCompiler(); +JitTls::SetCompiler(pParam->pComp); // PREFIX_ASSUME gets turned into ASSERT_CHECK and we cannot have it here #if defined(_PREFAST_) || defined(_PREFIX_) - PREFIX_ASSUME(pParam->pComp != NULL); +PREFIX_ASSUME(pParam->pComp != NULL); #else - assert(pParam->pComp != nullptr); +assert(pParam->pComp != nullptr); #endif - pParam->pComp->compInit(pParam->pAlloc, pParam->methodHnd, pParam->compHnd, pParam->methodInfo, - pParam->inlineInfo); +pParam->pComp->compInit(pParam->pAlloc, pParam->methodHnd, pParam->compHnd, pParam->methodInfo, pParam->inlineInfo); #ifdef DEBUG - pParam->pComp->jitFallbackCompile = pParam->jitFallbackCompile; +pParam->pComp->jitFallbackCompile = pParam->jitFallbackCompile; #endif - // Now generate the code - pParam->result = pParam->pComp->compCompile(pParam->classPtr, pParam->methodCodePtr, pParam->methodCodeSize, - pParam->compileFlags); - } - finallyErrorTrap() - { - Compiler* pCompiler = pParamOuter->pComp; +// Now generate the code +pParam->result = + pParam->pComp->compCompile(pParam->classPtr, pParam->methodCodePtr, pParam->methodCodeSize, pParam->compileFlags); +} +finallyErrorTrap() +{ + Compiler* pCompiler = pParamOuter->pComp; - // If OOM is thrown when allocating memory for a pComp, we will end up here. - // For this case, pComp and also pCompiler will be a nullptr - // - if (pCompiler != nullptr) - { - pCompiler->info.compCode = nullptr; + // If OOM is thrown when allocating memory for a pComp, we will end up here. 
+ // For this case, pComp and also pCompiler will be a nullptr + // + if (pCompiler != nullptr) + { + pCompiler->info.compCode = nullptr; - // pop the compiler off the TLS stack only if it was linked above - assert(JitTls::GetCompiler() == pCompiler); - JitTls::SetCompiler(pCompiler->prevCompiler); - } + // pop the compiler off the TLS stack only if it was linked above + assert(JitTls::GetCompiler() == pCompiler); + JitTls::SetCompiler(pCompiler->prevCompiler); + } - if (pParamOuter->inlineInfo == nullptr) - { - // Free up the allocator we were using - pParamOuter->pAlloc->destroy(); - } - } - endErrorTrap() + if (pParamOuter->inlineInfo == nullptr) + { + // Free up the allocator we were using + pParamOuter->pAlloc->destroy(); } - impJitErrorTrap() +} +endErrorTrap() +} +impJitErrorTrap() +{ + // If we were looking at an inlinee.... + if (inlineInfo != nullptr) { - // If we were looking at an inlinee.... - if (inlineInfo != nullptr) - { - // Note that we failed to compile the inlinee, and that - // there's no point trying to inline it again anywhere else. - inlineInfo->inlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); - } - param.result = __errc; + // Note that we failed to compile the inlinee, and that + // there's no point trying to inline it again anywhere else. + inlineInfo->inlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); } - endErrorTrap() + param.result = __errc; +} +endErrorTrap() - result = param.result; + result = param.result; - if (!inlineInfo && - (result == CORJIT_INTERNALERROR || result == CORJIT_RECOVERABLEERROR || result == CORJIT_IMPLLIMITATION) && - !jitFallbackCompile) - { - // If we failed the JIT, reattempt with debuggable code. - jitFallbackCompile = true; +if (!inlineInfo && + (result == CORJIT_INTERNALERROR || result == CORJIT_RECOVERABLEERROR || result == CORJIT_IMPLLIMITATION) && + !jitFallbackCompile) +{ + // If we failed the JIT, reattempt with debuggable code. + jitFallbackCompile = true; - // Update the flags for 'safer' code generation. - compileFlags->Set(JitFlags::JIT_FLAG_MIN_OPT); - compileFlags->Clear(JitFlags::JIT_FLAG_SIZE_OPT); - compileFlags->Clear(JitFlags::JIT_FLAG_SPEED_OPT); + // Update the flags for 'safer' code generation. 
+ compileFlags->Set(JitFlags::JIT_FLAG_MIN_OPT); + compileFlags->Clear(JitFlags::JIT_FLAG_SIZE_OPT); + compileFlags->Clear(JitFlags::JIT_FLAG_SPEED_OPT); - goto START; - } + goto START; +} - return result; +return result; } #if defined(UNIX_AMD64_ABI) @@ -8805,8 +8808,9 @@ void CompTimeSummaryInfo::Print(FILE* f) double pslop_pct = 100.0 * m_total.m_parentPhaseEndSlop * 1000.0 / countsPerSec / totTime_ms; if (pslop_pct >= 1.0) { - fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " - "%3.1f%% of total.\n\n", + fprintf(f, + "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " + "%3.1f%% of total.\n\n", m_total.m_parentPhaseEndSlop / 1000000.0, pslop_pct); } } @@ -8846,8 +8850,9 @@ void CompTimeSummaryInfo::Print(FILE* f) double fslop_ms = m_filtered.m_parentPhaseEndSlop * 1000.0 / countsPerSec; if (fslop_ms > 1.0) { - fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " - "%3.1f%% of total.\n\n", + fprintf(f, + "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " + "%3.1f%% of total.\n\n", m_filtered.m_parentPhaseEndSlop / 1000000.0, fslop_ms); } } @@ -8945,7 +8950,8 @@ void CompTimeSummaryInfo::Print(FILE* f) fprintf(f, "\n"); } -JitTimer::JitTimer(unsigned byteCodeSize) : m_info(byteCodeSize) +JitTimer::JitTimer(unsigned byteCodeSize) + : m_info(byteCodeSize) { #if MEASURE_CLRAPI_CALLS m_CLRcallInvokes = 0; @@ -9197,7 +9203,7 @@ void JitTimer::PrintCsvMethodStats(Compiler* comp) // for a DEBUG build (presumably not for the time info), just re-use it. const char* methName = comp->info.compFullName; #else - const char* methName = comp->eeGetMethodFullName(comp->info.compMethodHnd); + const char* methName = comp->eeGetMethodFullName(comp->info.compMethodHnd); #endif // Try and access the SPMI index to report in the data set. diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 83fdb8828bf15..30fedcd9cd56c 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -234,11 +234,13 @@ class LclSsaVarDsc { } - LclSsaVarDsc(BasicBlock* block) : m_block(block) + LclSsaVarDsc(BasicBlock* block) + : m_block(block) { } - LclSsaVarDsc(BasicBlock* block, GenTreeLclVarCommon* defNode) : m_block(block) + LclSsaVarDsc(BasicBlock* block, GenTreeLclVarCommon* defNode) + : m_block(block) { SetDefNode(defNode); } @@ -363,7 +365,10 @@ class SsaDefArray public: // Construct an empty SsaDefArray. - SsaDefArray() : m_array(nullptr), m_arraySize(0), m_count(0) + SsaDefArray() + : m_array(nullptr) + , m_arraySize(0) + , m_count(0) { } @@ -503,11 +508,11 @@ class LclVarDsc // note this only packs because var_types is a typedef of unsigned char var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF - unsigned char lvIsParam : 1; // is this a parameter? - unsigned char lvIsRegArg : 1; // is this an argument that was passed by register? + unsigned char lvIsParam : 1; // is this a parameter? + unsigned char lvIsRegArg : 1; // is this an argument that was passed by register? unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP) - unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame + unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame unsigned char lvRegister : 1; // assigned to live in a register? 
For RyuJIT backend, this is only set if the // variable is in the same register for the entire function. unsigned char lvTracked : 1; // is this a tracked variable? @@ -529,16 +534,16 @@ class LclVarDsc // We cannot reason reliably about the value of the variable. public: unsigned char lvDoNotEnregister : 1; // Do not enregister this variable. - unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects + unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects // struct promotion. unsigned char lvLiveInOutOfHndlr : 1; // The variable is live in or out of an exception handler, and therefore must // be on the stack (at least at those boundaries.) - unsigned char lvInSsa : 1; // The variable is in SSA form (set by SsaBuilder) - unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable. + unsigned char lvInSsa : 1; // The variable is in SSA form (set by SsaBuilder) + unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable. unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local. - unsigned char lvHasILStoreOp : 1; // there is at least one STLOC or STARG on this local + unsigned char lvHasILStoreOp : 1; // there is at least one STLOC or STARG on this local unsigned char lvHasMultipleILStoreOp : 1; // there is more than one STLOC on this local unsigned char lvIsTemp : 1; // Short-lifetime compiler temp @@ -553,13 +558,13 @@ class LclVarDsc #if defined(TARGET_LOONGARCH64) unsigned char lvIs4Field1 : 1; // Set if the 1st field is int or float within struct for LA-ABI64. unsigned char lvIs4Field2 : 1; // Set if the 2nd field is int or float within struct for LA-ABI64. - unsigned char lvIsSplit : 1; // Set if the argument is splited. + unsigned char lvIsSplit : 1; // Set if the argument is splited. #endif // defined(TARGET_LOONGARCH64) #if defined(TARGET_RISCV64) unsigned char lvIs4Field1 : 1; // Set if the 1st field is int or float within struct for RISCV64. unsigned char lvIs4Field2 : 1; // Set if the 2nd field is int or float within struct for RISCV64. - unsigned char lvIsSplit : 1; // Set if the argument is splited. + unsigned char lvIsSplit : 1; // Set if the argument is splited. #endif // defined(TARGET_RISCV64) unsigned char lvSingleDef : 1; // variable has a single def. Used to identify ref type locals that can get type @@ -588,7 +593,7 @@ class LclVarDsc unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long #endif #ifdef DEBUG - unsigned char lvKeepType : 1; // Don't change the type of this variable + unsigned char lvKeepType : 1; // Don't change the type of this variable unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one #endif unsigned char lvIsPtr : 1; // Might this be used in an address computation? 
(used by buffer overflow security @@ -643,8 +648,8 @@ class LclVarDsc #ifdef DEBUG unsigned char lvClassInfoUpdated : 1; // true if this var has updated class handle or exactness - unsigned char lvIsHoist : 1; // CSE temp for a hoisted tree - unsigned char lvIsMultiDefCSE : 1; // CSE temp for a multi-def CSE + unsigned char lvIsHoist : 1; // CSE temp for a hoisted tree + unsigned char lvIsMultiDefCSE : 1; // CSE temp for a multi-def CSE #endif unsigned char lvImplicitlyReferenced : 1; // true if there are non-IR references to this local (prolog, epilog, gc, @@ -669,7 +674,8 @@ class LclVarDsc unsigned char lvIsSpan : 1; // The local is a Span public: - union { + union + { unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct // local. For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to point to the @@ -889,7 +895,7 @@ class LclVarDsc assert(_lvRegNum == reg); } -///////////////////// + ///////////////////// #if defined(TARGET_64BIT) @@ -1075,13 +1081,13 @@ class LclVarDsc public: unsigned short lvRefCnt(RefCountState state = RCS_NORMAL) const; - void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL); - void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL); - void incLvRefCntSaturating(unsigned short delta, RefCountState state = RCS_NORMAL); + void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL); + void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL); + void incLvRefCntSaturating(unsigned short delta, RefCountState state = RCS_NORMAL); weight_t lvRefCntWtd(RefCountState state = RCS_NORMAL) const; - void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL); - void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL); + void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL); + void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL); private: int lvStkOffs; // stack offset of home in bytes. 
@@ -1334,7 +1340,8 @@ class IntegralRange IntegralRange() = default; IntegralRange(SymbolicIntegerValue lowerBound, SymbolicIntegerValue upperBound) - : m_lowerBound(lowerBound), m_upperBound(upperBound) + : m_lowerBound(lowerBound) + , m_upperBound(upperBound) { assert(lowerBound <= upperBound); } @@ -1366,7 +1373,7 @@ class IntegralRange return (m_lowerBound == other.m_lowerBound) && (m_upperBound == other.m_upperBound); } - static int64_t SymbolicToRealValue(SymbolicIntegerValue value); + static int64_t SymbolicToRealValue(SymbolicIntegerValue value); static SymbolicIntegerValue LowerBoundForType(var_types type); static SymbolicIntegerValue UpperBoundForType(var_types type); @@ -1422,7 +1429,10 @@ class TempDsc var_types tdType; public: - TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType) + TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) + : tdNum(_tdNum) + , tdSize((BYTE)_tdSize) + , tdType(_tdType) { #ifdef DEBUG // temps must have a negative number (so they have a different number from all local variables) @@ -1486,9 +1496,9 @@ enum class PhaseStatus : unsigned class LinearScanInterface { public: - virtual PhaseStatus doLinearScan() = 0; - virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0; - virtual bool willEnregisterLocalVars() const = 0; + virtual PhaseStatus doLinearScan() = 0; + virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0; + virtual bool willEnregisterLocalVars() const = 0; #if TRACK_LSRA_STATS virtual void dumpLsraStatsCsv(FILE* file) = 0; virtual void dumpLsraStatsSummary(FILE* file) = 0; @@ -7436,23 +7446,23 @@ class Compiler typedef JitHashTable, GenTree*> LocalNumberToNullCheckTreeMap; - GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block)); - GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth); - GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind); - GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); - bool optDoEarlyPropForBlock(BasicBlock* block); + GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block)); + GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth); + GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind); + GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); + bool optDoEarlyPropForBlock(BasicBlock* block); bool optDoEarlyPropForFunc(); PhaseStatus optEarlyProp(); - bool optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); - GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); - bool optIsNullCheckFoldingLegal(GenTree* tree, - GenTree* nullCheckTree, - GenTree** nullCheckParent, - Statement** nullCheckStmt); - bool optCanMoveNullCheckPastTree(GenTree* tree, - unsigned nullCheckLclNum, - bool isInsideTry, - bool checkSideEffectSummary); + bool optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); + GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); + bool optIsNullCheckFoldingLegal(GenTree* tree, + GenTree* nullCheckTree, + GenTree** nullCheckParent, + Statement** nullCheckStmt); + bool optCanMoveNullCheckPastTree(GenTree* tree, + unsigned nullCheckLclNum, + bool isInsideTry, + bool checkSideEffectSummary); #if DEBUG void 
optCheckFlagsAreSet(unsigned methodFlag, const char* methodFlagStr, @@ -7463,30 +7473,30 @@ class Compiler #endif PhaseStatus optInductionVariables(); - bool optCanSinkWidenedIV(unsigned lclNum, FlowGraphNaturalLoop* loop); - bool optIsIVWideningProfitable(unsigned lclNum, - BasicBlock* initBlock, - bool initedToConstant, - FlowGraphNaturalLoop* loop, - ArrayStack& ivUses); - void optBestEffortReplaceNarrowIVUses( - unsigned lclNum, unsigned ssaNum, unsigned newLclNum, BasicBlock* block, Statement* firstStmt); + bool optCanSinkWidenedIV(unsigned lclNum, FlowGraphNaturalLoop* loop); + bool optIsIVWideningProfitable(unsigned lclNum, + BasicBlock* initBlock, + bool initedToConstant, + FlowGraphNaturalLoop* loop, + ArrayStack& ivUses); + void optBestEffortReplaceNarrowIVUses( + unsigned lclNum, unsigned ssaNum, unsigned newLclNum, BasicBlock* block, Statement* firstStmt); void optReplaceWidenedIV(unsigned lclNum, unsigned ssaNum, unsigned newLclNum, Statement* stmt); void optSinkWidenedIV(unsigned lclNum, unsigned newLclNum, FlowGraphNaturalLoop* loop); // Redundant branch opts // - PhaseStatus optRedundantBranches(); - bool optRedundantRelop(BasicBlock* const block); - bool optRedundantBranch(BasicBlock* const block); - bool optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop); - bool optJumpThreadPhi(BasicBlock* const block, GenTree* tree, ValueNum treeNormVN); - bool optJumpThreadCheck(BasicBlock* const block, BasicBlock* const domBlock); - bool optJumpThreadCore(JumpThreadInfo& jti); - bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock); + PhaseStatus optRedundantBranches(); + bool optRedundantRelop(BasicBlock* const block); + bool optRedundantBranch(BasicBlock* const block); + bool optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop); + bool optJumpThreadPhi(BasicBlock* const block, GenTree* tree, ValueNum treeNormVN); + bool optJumpThreadCheck(BasicBlock* const block, BasicBlock* const domBlock); + bool optJumpThreadCore(JumpThreadInfo& jti); + bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock); BitVecTraits* optReachableBitVecTraits; BitVec optReachableBitVec; - void optRelopImpliesRelop(RelopImplicationInfo* rii); + void optRelopImpliesRelop(RelopImplicationInfo* rii); /************************************************************************** * Value/Assertion propagation @@ -7553,7 +7563,8 @@ class Compiler { optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype ValueNum vn; - union { + union + { SsaVar lcl; ArrBnd bnd; }; @@ -7573,7 +7584,8 @@ class Compiler #endif FieldSeq* fieldSeq; }; - union { + union + { SsaVar lcl; IntVal u1; __int64 lconVal; @@ -7790,49 +7802,49 @@ class Compiler bool optCanPropSubRange; public: - void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); + void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); fgWalkResult optVNBasedFoldCurStmt(BasicBlock* block, Statement* stmt, GenTree* parent, GenTree* tree); - GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test); - GenTree* optVNBasedFoldConstExpr(BasicBlock* block, GenTree* parent, GenTree* tree); - GenTree* optVNBasedFoldExpr(BasicBlock* block, GenTree* parent, GenTree* tree); - GenTree* optVNBasedFoldExpr_Call(BasicBlock* block, GenTree* parent, GenTreeCall* call); - GenTree* optExtractSideEffListFromConst(GenTree* tree); + GenTree* 
optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test); + GenTree* optVNBasedFoldConstExpr(BasicBlock* block, GenTree* parent, GenTree* tree); + GenTree* optVNBasedFoldExpr(BasicBlock* block, GenTree* parent, GenTree* tree); + GenTree* optVNBasedFoldExpr_Call(BasicBlock* block, GenTree* parent, GenTreeCall* call); + GenTree* optExtractSideEffListFromConst(GenTree* tree); AssertionIndex GetAssertionCount() { return optAssertionCount; } - ASSERT_TP* bbJtrueAssertionOut; + ASSERT_TP* bbJtrueAssertionOut; typedef JitHashTable, ASSERT_TP> ValueNumToAssertsMap; - ValueNumToAssertsMap* optValueNumToAsserts; + ValueNumToAssertsMap* optValueNumToAsserts; // Assertion prop helpers. - ASSERT_TP& GetAssertionDep(unsigned lclNum); + ASSERT_TP& GetAssertionDep(unsigned lclNum); AssertionDsc* optGetAssertion(AssertionIndex assertIndex); - void optAssertionInit(bool isLocalProp); - void optAssertionTraitsInit(AssertionIndex assertionCount); - void optAssertionReset(AssertionIndex limit); - void optAssertionRemove(AssertionIndex index); + void optAssertionInit(bool isLocalProp); + void optAssertionTraitsInit(AssertionIndex assertionCount); + void optAssertionReset(AssertionIndex limit); + void optAssertionRemove(AssertionIndex index); // Assertion prop data flow functions. PhaseStatus optAssertionPropMain(); - Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt); - bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags); - ASSERT_TP* optInitAssertionDataflowFlags(); - ASSERT_TP* optComputeAssertionGen(); + Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt); + bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags); + ASSERT_TP* optInitAssertionDataflowFlags(); + ASSERT_TP* optComputeAssertionGen(); // Assertion Gen functions. 
- void optAssertionGen(GenTree* tree); + void optAssertionGen(GenTree* tree); AssertionIndex optAssertionGenCast(GenTreeCast* cast); AssertionIndex optAssertionGenPhiDefn(GenTree* tree); - AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree); - AssertionInfo optAssertionGenJtrue(GenTree* tree); + AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree); + AssertionInfo optAssertionGenJtrue(GenTree* tree); AssertionIndex optCreateJtrueAssertions(GenTree* op1, GenTree* op2, Compiler::optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFindComplementary(AssertionIndex assertionIndex); - void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index); + void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index); ValueNum optConservativeNormalVN(GenTree* tree); @@ -7853,9 +7865,9 @@ class Compiler GenTree* op2, bool helperCallArgs = false); - bool optAssertionVnInvolvesNan(AssertionDsc* assertion); + bool optAssertionVnInvolvesNan(AssertionDsc* assertion); AssertionIndex optAddAssertion(AssertionDsc* assertion); - void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index); + void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index); #ifdef DEBUG void optPrintVnAssertionMapping(); #endif @@ -7865,8 +7877,8 @@ class Compiler AssertionIndex optAssertionIsSubrange(GenTree* tree, IntegralRange range, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsSubtype(GenTree* tree, GenTree* methodTableArg, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased)); - bool optAssertionIsNonNull(GenTree* op, - ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex)); + bool optAssertionIsNonNull(GenTree* op, + ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex)); AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTree* op1, GenTree* op2); AssertionIndex optGlobalAssertionIsEqualOrNotEqualZero(ASSERT_VALARG_TP assertions, GenTree* op1); @@ -7874,15 +7886,15 @@ class Compiler optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions); // Assertion prop for lcl var functions. - bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc); + bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc); GenTree* optCopyAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, - Statement* stmt DEBUGARG(AssertionIndex index)); + Statement* stmt DEBUGARG(AssertionIndex index)); GenTree* optConstantAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, - Statement* stmt DEBUGARG(AssertionIndex index)); - bool optIsProfitableToSubstitute(GenTree* dest, BasicBlock* destBlock, GenTree* destParent, GenTree* value); - bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions); + Statement* stmt DEBUGARG(AssertionIndex index)); + bool optIsProfitableToSubstitute(GenTree* dest, BasicBlock* destBlock, GenTree* destParent, GenTree* value); + bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions); // Assertion propagation functions. 
GenTree* optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt, BasicBlock* block); @@ -7902,8 +7914,8 @@ class Compiler GenTree* optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Update(GenTree* newTree, GenTree* tree, Statement* stmt); GenTree* optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call); - bool optNonNullAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* indir); - bool optWriteBarrierAssertionProp_StoreInd(ASSERT_VALARG_TP assertions, GenTreeStoreInd* indir); + bool optNonNullAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* indir); + bool optWriteBarrierAssertionProp_StoreInd(ASSERT_VALARG_TP assertions, GenTreeStoreInd* indir); void optAssertionProp_RangeProperties(ASSERT_VALARG_TP assertions, GenTree* tree, @@ -7959,11 +7971,11 @@ class Compiler bool optReconstructArrIndex(GenTree* tree, ArrIndex* result); bool optIdentifyLoopOptInfo(FlowGraphNaturalLoop* loop, LoopCloneContext* context); static fgWalkPreFn optCanOptimizeByLoopCloningVisitor; - fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info); - bool optObtainLoopCloningOpts(LoopCloneContext* context); - bool optIsLoopClonable(FlowGraphNaturalLoop* loop, LoopCloneContext* context); - bool optCheckLoopCloningGDVTestProfitable(GenTreeOp* guard, LoopCloneVisitorInfo* info); - bool optIsHandleOrIndirOfHandle(GenTree* tree, GenTreeFlags handleType); + fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info); + bool optObtainLoopCloningOpts(LoopCloneContext* context); + bool optIsLoopClonable(FlowGraphNaturalLoop* loop, LoopCloneContext* context); + bool optCheckLoopCloningGDVTestProfitable(GenTreeOp* guard, LoopCloneVisitorInfo* info); + bool optIsHandleOrIndirOfHandle(GenTree* tree, GenTreeFlags handleType); static bool optLoopCloningEnabled(); @@ -8125,7 +8137,7 @@ class Compiler const char* eeGetClassName(CORINFO_CLASS_HANDLE clsHnd, char* buffer = nullptr, size_t bufferSize = 0); - void eePrintObjectDescription(const char* prefix, CORINFO_OBJECT_HANDLE handle); + void eePrintObjectDescription(const char* prefix, CORINFO_OBJECT_HANDLE handle); const char* eeGetShortClassName(CORINFO_CLASS_HANDLE clsHnd); #if defined(DEBUG) @@ -8134,12 +8146,12 @@ class Compiler unsigned compMethodHash(CORINFO_METHOD_HANDLE methodHandle); - var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); - var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned); + var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); + var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned); CORINFO_CLASS_HANDLE eeGetArgClass(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE list); CORINFO_CLASS_HANDLE eeGetClassFromContext(CORINFO_CONTEXT_HANDLE context); - unsigned eeGetArgSize(CorInfoType corInfoType, CORINFO_CLASS_HANDLE typeHnd); - static unsigned eeGetArgSizeAlignment(var_types type, bool isFloatHfa); + unsigned eeGetArgSize(CorInfoType corInfoType, CORINFO_CLASS_HANDLE typeHnd); + static unsigned eeGetArgSizeAlignment(var_types type, bool isFloatHfa); // VOM info, method sigs @@ -8267,7 +8279,7 @@ class Compiler unsigned eeBoundariesCount; ICorDebugInfo::OffsetMapping* eeBoundaries; // Boundaries to report to the EE - void eeSetLIcount(unsigned count); + void eeSetLIcount(unsigned count); void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, IPmappingDscKind kind, const 
ILLocation& loc); void eeSetLIdone(); @@ -8275,7 +8287,7 @@ class Compiler static void eeDispILOffs(IL_OFFSET offs); static void eeDispSourceMappingOffs(uint32_t offs); static void eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line); - void eeDispLineInfos(); + void eeDispLineInfos(); #endif // DEBUG // Debugging support - Local var info @@ -8290,7 +8302,7 @@ class Compiler UNATIVE_OFFSET endOffset; DWORD varNumber; CodeGenInterface::siVarLoc loc; - } * eeVars; + }* eeVars; void eeSetLVcount(unsigned count); void eeSetLVinfo(unsigned which, UNATIVE_OFFSET startOffs, @@ -8324,7 +8336,7 @@ class Compiler WORD eeGetRelocTypeHint(void* target); -// ICorStaticInfo wrapper functions + // ICorStaticInfo wrapper functions #if defined(UNIX_AMD64_ABI) #ifdef DEBUG @@ -8353,7 +8365,11 @@ class Compiler template bool eeRunFunctorWithSPMIErrorTrap(Functor f) { - return eeRunWithSPMIErrorTrap([](Functor* pf) { (*pf)(); }, &f); + return eeRunWithSPMIErrorTrap( + [](Functor* pf) { + (*pf)(); + }, + &f); } bool eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param); @@ -8361,7 +8377,7 @@ class Compiler // Utility functions static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper); - static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method); + static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method); enum StaticHelperReturnValue { @@ -8412,7 +8428,7 @@ class Compiler // structure and IL offset is needed only when generating debuggable code. Therefore // it is desirable to avoid memory size penalty in retail scenarios. typedef JitHashTable, DebugInfo> CallSiteDebugInfoTable; - CallSiteDebugInfoTable* genCallSite2DebugInfoMap; + CallSiteDebugInfoTable* genCallSite2DebugInfoMap; unsigned genReturnLocal; // Local number for the return value when applicable. BasicBlock* genReturnBB; // jumped to when not optimizing for speed. @@ -8446,11 +8462,11 @@ class Compiler return codeGen->doDoubleAlign(); } DWORD getCanDoubleAlign(); - bool shouldDoubleAlign(unsigned refCntStk, - unsigned refCntReg, - weight_t refCntWtdReg, - unsigned refCntStkParam, - weight_t refCntWtdStkDbl); + bool shouldDoubleAlign(unsigned refCntStk, + unsigned refCntReg, + weight_t refCntWtdReg, + unsigned refCntStkParam, + weight_t refCntWtdStkDbl); #endif // DOUBLE_ALIGN bool IsFullPtrRegMapRequired() @@ -8462,7 +8478,7 @@ class Compiler codeGen->SetFullPtrRegMapRequired(value); } -// Things that MAY belong either in CodeGen or CodeGenContext + // Things that MAY belong either in CodeGen or CodeGenContext #if defined(FEATURE_EH_FUNCLETS) FuncInfoDsc* compFuncInfos; @@ -8495,7 +8511,7 @@ class Compiler #endif // !FEATURE_EH_FUNCLETS FuncInfoDsc* funCurrentFunc(); - void funSetCurrentFunc(unsigned funcIdx); + void funSetCurrentFunc(unsigned funcIdx); FuncInfoDsc* funGetFunc(unsigned funcIdx); unsigned int funGetFuncIdx(BasicBlock* block); @@ -8518,15 +8534,15 @@ class Compiler // not all JIT Helper calls follow the standard ABI on the target architecture. 
regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper); -/* -XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -XX XX -XX UnwindInfo XX -XX XX -XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -*/ + /* + XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + XX XX + XX UnwindInfo XX + XX XX + XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + */ #if !defined(__GNUC__) #pragma region Unwind information @@ -8640,13 +8656,13 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #if defined(FEATURE_CFI_SUPPORT) short mapRegNumToDwarfReg(regNumber reg); - void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0); - void unwindPushPopCFI(regNumber reg); - void unwindBegPrologCFI(); - void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat); - void unwindAllocStackCFI(unsigned size); - void unwindSetFrameRegCFI(regNumber reg, unsigned offset); - void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode); + void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0); + void unwindPushPopCFI(regNumber reg); + void unwindBegPrologCFI(); + void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat); + void unwindAllocStackCFI(unsigned size); + void unwindSetFrameRegCFI(regNumber reg, unsigned offset); + void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #ifdef DEBUG void DumpCfiInfo(bool isHotCode, UNATIVE_OFFSET startOffset, @@ -8895,11 +8911,11 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX GenTree* impSIMDPopStack(); - void setLclRelatedToSIMDIntrinsic(GenTree* tree); - bool areFieldsContiguous(GenTreeIndir* op1, GenTreeIndir* op2); - bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second); - bool areArrayElementsContiguous(GenTree* op1, GenTree* op2); - bool areArgumentsContiguous(GenTree* op1, GenTree* op2); + void setLclRelatedToSIMDIntrinsic(GenTree* tree); + bool areFieldsContiguous(GenTreeIndir* op1, GenTreeIndir* op2); + bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second); + bool areArrayElementsContiguous(GenTree* op1, GenTree* op2); + bool areArgumentsContiguous(GenTree* op1, GenTree* op2); GenTree* CreateAddressNodeForSimdHWIntrinsicCreate(GenTree* tree, var_types simdBaseType, unsigned simdSize); // Get the size of the SIMD type in bytes @@ -9553,8 +9569,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX bool compSwitchedToMinOpts; // Codegen initially was Tier1/FullOpts but jit switched to MinOpts bool compSuppressedZeroInit; // There are vars with lvSuppressedZeroInit set -// NOTE: These values are only reliable after -// the importing is completely finished. + // NOTE: These values are only reliable after + // the importing is completely finished. #ifdef DEBUG // State information - which phases have completed? 
@@ -9642,11 +9658,11 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX uint32_t preferredVectorByteLength; #endif // TARGET_XARCH -// optimize maximally and/or favor speed over size? + // optimize maximally and/or favor speed over size? -#define DEFAULT_MIN_OPTS_CODE_SIZE 60000 -#define DEFAULT_MIN_OPTS_INSTR_COUNT 20000 -#define DEFAULT_MIN_OPTS_BB_COUNT 2000 +#define DEFAULT_MIN_OPTS_CODE_SIZE 60000 +#define DEFAULT_MIN_OPTS_INSTR_COUNT 20000 +#define DEFAULT_MIN_OPTS_BB_COUNT 2000 #define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000 #define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000 @@ -10067,7 +10083,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // DEBUG -// clang-format off + // clang-format off #define STRESS_MODES \ \ STRESS_MODE(NONE) \ @@ -10137,7 +10153,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX STRESS_MODES #undef STRESS_MODE }; -// clang-format on + // clang-format on #ifdef DEBUG static const LPCWSTR s_compStressModeNamesW[STRESS_COUNT + 1]; @@ -10147,8 +10163,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #define MAX_STRESS_WEIGHT 100 - bool compStressCompile(compStressArea stressArea, unsigned weightPercentage); - bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage); + bool compStressCompile(compStressArea stressArea, unsigned weightPercentage); + bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage); static unsigned compStressAreaHash(compStressArea area); #ifdef DEBUG @@ -10252,11 +10268,11 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // (2) the code is hot/cold split, and we issued less code than we expected // in the cold section (the hot section will always be padded out to compTotalHotCodeSize). - bool compIsStatic : 1; // Is the method static (no 'this' pointer)? - bool compIsVarArgs : 1; // Does the method have varargs parameters? - bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options? - bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback - bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic + bool compIsStatic : 1; // Is the method static (no 'this' pointer)? + bool compIsVarArgs : 1; // Does the method have varargs parameters? + bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options? + bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback + bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic bool compHasNextCallRetAddr : 1; // The NextCallReturnAddress intrinsic is used. var_types compRetType; // Return type of the method as declared in IL (including SIMD normalization) @@ -10376,7 +10392,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX } } #endif // TARGET_ARM64 - // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax. + // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax. 
CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) if (info.compCallConv != CorInfoCallConvExtension::Managed) @@ -10465,7 +10481,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX var_types TypeHandleToVarType(CORINFO_CLASS_HANDLE handle, ClassLayout** pLayout = nullptr); var_types TypeHandleToVarType(CorInfoType jitType, CORINFO_CLASS_HANDLE handle, ClassLayout** pLayout = nullptr); -//-------------------------- Global Compiler Data ------------------------------------ + //-------------------------- Global Compiler Data ------------------------------------ #ifdef DEBUG private: @@ -10573,8 +10589,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX //------------ Some utility functions -------------- - void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ - void** ppIndirection); /* OUT */ + void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ + void** ppIndirection); /* OUT */ // Several JIT/EE interface functions return a CorInfoType, and also return a // class handle as an out parameter if the type is a value class. Returns the @@ -10589,17 +10605,17 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void compDoComponentUnitTestsOnce(); #endif // DEBUG - int compCompile(CORINFO_MODULE_HANDLE classPtr, - void** methodCodePtr, - uint32_t* methodCodeSize, - JitFlags* compileFlags); + int compCompile(CORINFO_MODULE_HANDLE classPtr, + void** methodCodePtr, + uint32_t* methodCodeSize, + JitFlags* compileFlags); void compCompileFinish(); - int compCompileHelper(CORINFO_MODULE_HANDLE classPtr, - COMP_HANDLE compHnd, - CORINFO_METHOD_INFO* methodInfo, - void** methodCodePtr, - uint32_t* methodCodeSize, - JitFlags* compileFlag); + int compCompileHelper(CORINFO_MODULE_HANDLE classPtr, + COMP_HANDLE compHnd, + CORINFO_METHOD_INFO* methodInfo, + void** methodCodePtr, + uint32_t* methodCodeSize, + JitFlags* compileFlag); ArenaAllocator* compGetArenaAllocator(); @@ -10690,10 +10706,10 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX static unsigned char compGetJitDefaultFill(Compiler* comp); const char* compLocalVarName(unsigned varNum, unsigned offs); - VarName compVarName(regNumber reg, bool isFloatReg = false); + VarName compVarName(regNumber reg, bool isFloatReg = false); const char* compFPregVarName(unsigned fpReg, bool displayVar = false); - void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP); - void compDspSrcLinesByLineNum(unsigned line, bool seek = false); + void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP); + void compDspSrcLinesByLineNum(unsigned line, bool seek = false); #endif // DEBUG const char* compRegNameForSize(regNumber reg, size_t size); const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false); @@ -10865,8 +10881,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void verInitCurrentState(); void verResetCurrentState(BasicBlock* block, EntryState* currentState); - void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)); - void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)); + void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)); + void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)); typeInfo verMakeTypeInfoForLocal(unsigned lclNum); typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo typeInfo 
verMakeTypeInfo(CorInfoType ciType, @@ -10963,8 +10979,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk static fgWalkPreFn gsReplaceShadowParams; // Shadow param replacement tree-walk -#define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined. - // This can be overwritten by setting DOTNET_JITInlineSize env variable. +#define DEFAULT_MAX_INLINE_SIZE \ + 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined. + // This can be overwritten by setting DOTNET_JITInlineSize env variable. #define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined @@ -11148,7 +11165,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // defined(UNIX_AMD64_ABI) - void fgMorphMultiregStructArgs(GenTreeCall* call); + void fgMorphMultiregStructArgs(GenTreeCall* call); GenTree* fgMorphMultiregStructArg(CallArg* arg); bool killGCRefs(GenTree* tree); @@ -11303,7 +11320,9 @@ class GenTreeVisitor Compiler* m_compiler; ArrayStack m_ancestors; - GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack)) + GenTreeVisitor(Compiler* compiler) + : m_compiler(compiler) + , m_ancestors(compiler->getAllocator(CMK_ArrayStack)) { assert(compiler != nullptr); @@ -11726,7 +11745,8 @@ class DomTreeVisitor protected: Compiler* m_compiler; - DomTreeVisitor(Compiler* compiler) : m_compiler(compiler) + DomTreeVisitor(Compiler* compiler) + : m_compiler(compiler) { } @@ -11815,7 +11835,8 @@ class EHClauses EHblkDsc* m_ehDsc; public: - iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc) + iterator(EHblkDsc* ehDsc) + : m_ehDsc(ehDsc) { } @@ -11837,7 +11858,9 @@ class EHClauses }; public: - EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount) + EHClauses(Compiler* comp) + : m_begin(comp->compHndBBtab) + , m_end(comp->compHndBBtab + comp->compHndBBtabCount) { assert((m_begin != nullptr) || (m_begin == m_end)); } @@ -11874,7 +11897,9 @@ class StringPrinter public: StringPrinter(CompAllocator alloc, char* buffer = nullptr, size_t bufferMax = 0) - : m_alloc(alloc), m_buffer(buffer), m_bufferMax(bufferMax) + : m_alloc(alloc) + , m_buffer(buffer) + , m_bufferMax(bufferMax) { if ((m_buffer == nullptr) || (m_bufferMax == 0)) { diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 82e0c47b099af..b1329e88b0436 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -78,9 +78,9 @@ inline T genFindLowestBit(T value) } /***************************************************************************** -* -* Return true if the given value has exactly zero or one bits set. -*/ + * + * Return true if the given value has exactly zero or one bits set. + */ template inline bool genMaxOneBit(T value) @@ -89,9 +89,9 @@ inline bool genMaxOneBit(T value) } /***************************************************************************** -* -* Return true if the given value has exactly one bit set. -*/ + * + * Return true if the given value has exactly one bit set. 
+ */ template inline bool genExactlyOneBit(T value) @@ -280,7 +280,8 @@ class Counter : public Dumpable public: int64_t Value; - Counter(int64_t initialValue = 0) : Value(initialValue) + Counter(int64_t initialValue = 0) + : Value(initialValue) { } @@ -332,7 +333,8 @@ class Histogram : public Dumpable class NodeCounts : public Dumpable { public: - NodeCounts() : m_counts() + NodeCounts() + : m_counts() { } @@ -544,7 +546,7 @@ BasicBlockVisit BasicBlock::VisitEHEnclosedHandlerSecondPassSuccs(Compiler* comp // 3. As part of two pass EH, control may bypass filters and flow directly to // filter-handlers // -template +template static BasicBlockVisit VisitEHSuccs(Compiler* comp, BasicBlock* block, TFunc func) { if (!block->HasPotentialEHSuccs(comp)) @@ -1273,8 +1275,8 @@ inline Statement* Compiler::gtNewStmt(GenTree* expr, const DebugInfo& di) inline GenTree* Compiler::gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1) { assert((GenTree::OperKind(oper) & (GTK_UNOP | GTK_BINOP)) != 0); - assert((GenTree::OperKind(oper) & GTK_EXOP) == - 0); // Can't use this to construct any types that extend unary/binary operator. + assert((GenTree::OperKind(oper) & GTK_EXOP) == 0); // Can't use this to construct any types that extend unary/binary + // operator. assert(op1 != nullptr || oper == GT_RETFILT || (oper == GT_RETURN && type == TYP_VOID)); GenTree* node = new (this, oper) GenTreeOp(oper, type, op1, nullptr); @@ -1320,7 +1322,7 @@ inline GenTreeIntCon* Compiler::gtNewIconHandleNode(size_t value, GenTreeFlags f node = new (this, LargeOpOpcode()) GenTreeIntCon(gtGetTypeForIconFlags(flags), value, fields DEBUGARG(/*largeNode*/ true)); #else - node = new (this, GT_CNS_INT) GenTreeIntCon(gtGetTypeForIconFlags(flags), value, fields); + node = new (this, GT_CNS_INT) GenTreeIntCon(gtGetTypeForIconFlags(flags), value, fields); #endif node->gtFlags |= flags; return node; @@ -2520,8 +2522,8 @@ inline assert(varDsc->lvIsParam); #endif // UNIX_AMD64_ABI #else // !TARGET_AMD64 - // For other targets, a stack parameter that is enregistered or prespilled - // for profiling on ARM will have a stack location. + // For other targets, a stack parameter that is enregistered or prespilled + // for profiling on ARM will have a stack location. 
assert((varDsc->lvIsParam && !varDsc->lvIsRegArg) || isPrespilledArg); #endif // !TARGET_AMD64 } @@ -2609,7 +2611,7 @@ inline #ifdef TARGET_ARM varOffset = codeGen->genCallerSPtoInitialSPdelta() - codeGen->genCallerSPtoFPdelta(); #else - varOffset = -(codeGen->genTotalFrameSize()); + varOffset = -(codeGen->genTotalFrameSize()); #endif } } @@ -2663,7 +2665,7 @@ inline *pBaseReg = REG_SPBASE; } #else - *pFPbased = FPbased; + *pFPbased = FPbased; #endif return varOffset; @@ -4782,7 +4784,6 @@ unsigned Compiler::fgRunDfs(VisitPreorder visitPreorder, VisitPostorder visitPos ArrayStack blocks(getAllocator(CMK_DepthFirstSearch)); auto dfsFrom = [&](BasicBlock* firstBB) { - BitVecOps::AddElemD(&traits, visited, firstBB->bbNum); blocks.Emplace(this, firstBB); visitPreorder(firstBB, preOrderIndex++); @@ -4808,7 +4809,6 @@ unsigned Compiler::fgRunDfs(VisitPreorder visitPreorder, VisitPostorder visitPos visitPostorder(block, postOrderIndex++); } } - }; dfsFrom(fgFirstBB); @@ -4853,7 +4853,7 @@ template BasicBlockVisit FlowGraphNaturalLoop::VisitLoopBlocksReversePostOrder(TFunc func) { BitVecTraits traits(m_blocksSize, m_dfsTree->GetCompiler()); - bool result = BitVecOps::VisitBits(&traits, m_blocks, [=](unsigned index) { + bool result = BitVecOps::VisitBits(&traits, m_blocks, [=](unsigned index) { // head block rpo index = PostOrderCount - 1 - headPreOrderIndex // loop block rpo index = head block rpoIndex + index // loop block po index = PostOrderCount - 1 - loop block rpo index @@ -4885,7 +4885,7 @@ template BasicBlockVisit FlowGraphNaturalLoop::VisitLoopBlocksPostOrder(TFunc func) { BitVecTraits traits(m_blocksSize, m_dfsTree->GetCompiler()); - bool result = BitVecOps::VisitBitsReverse(&traits, m_blocks, [=](unsigned index) { + bool result = BitVecOps::VisitBitsReverse(&traits, m_blocks, [=](unsigned index) { unsigned poIndex = m_header->bbPostorderNum - index; assert(poIndex < m_dfsTree->GetPostOrderCount()); return func(m_dfsTree->GetPostOrder(poIndex)) == BasicBlockVisit::Continue; diff --git a/src/coreclr/jit/compilerbitsettraits.h b/src/coreclr/jit/compilerbitsettraits.h index 02223b1ecedfc..965ffac55465e 100644 --- a/src/coreclr/jit/compilerbitsettraits.h +++ b/src/coreclr/jit/compilerbitsettraits.h @@ -107,7 +107,9 @@ struct BitVecTraits Compiler* comp; public: - BitVecTraits(unsigned size, Compiler* comp) : size(size), comp(comp) + BitVecTraits(unsigned size, Compiler* comp) + : size(size) + , comp(comp) { const unsigned elemBits = 8 * sizeof(size_t); arraySize = roundUp(size, elemBits) / elemBits; diff --git a/src/coreclr/jit/copyprop.cpp b/src/coreclr/jit/copyprop.cpp index 90a593ef65b2f..142c745fc7c31 100644 --- a/src/coreclr/jit/copyprop.cpp +++ b/src/coreclr/jit/copyprop.cpp @@ -462,7 +462,9 @@ PhaseStatus Compiler::optVnCopyProp() public: CopyPropDomTreeVisitor(Compiler* compiler) - : DomTreeVisitor(compiler), m_curSsaName(compiler->getAllocator(CMK_CopyProp)), m_madeChanges(false) + : DomTreeVisitor(compiler) + , m_curSsaName(compiler->getAllocator(CMK_CopyProp)) + , m_madeChanges(false) { } diff --git a/src/coreclr/jit/debuginfo.h b/src/coreclr/jit/debuginfo.h index 3f628840765dc..72119b905c948 100644 --- a/src/coreclr/jit/debuginfo.h +++ b/src/coreclr/jit/debuginfo.h @@ -12,12 +12,17 @@ class InlineContext; class ILLocation { public: - ILLocation() : m_offset(BAD_IL_OFFSET), m_isStackEmpty(false), m_isCall(false) + ILLocation() + : m_offset(BAD_IL_OFFSET) + , m_isStackEmpty(false) + , m_isCall(false) { } ILLocation(IL_OFFSET offset, bool isStackEmpty, bool isCall) - : 
m_offset(offset), m_isStackEmpty(isStackEmpty), m_isCall(isCall) + : m_offset(offset) + , m_isStackEmpty(isStackEmpty) + , m_isCall(isCall) { } @@ -65,18 +70,21 @@ class ILLocation private: IL_OFFSET m_offset; bool m_isStackEmpty : 1; - bool m_isCall : 1; + bool m_isCall : 1; }; // Represents debug information about a statement. class DebugInfo { public: - DebugInfo() : m_inlineContext(nullptr) + DebugInfo() + : m_inlineContext(nullptr) { } - DebugInfo(InlineContext* inlineContext, ILLocation loc) : m_inlineContext(inlineContext), m_location(loc) + DebugInfo(InlineContext* inlineContext, ILLocation loc) + : m_inlineContext(inlineContext) + , m_location(loc) { } diff --git a/src/coreclr/jit/decomposelongs.h b/src/coreclr/jit/decomposelongs.h index b8ddc62107992..744061091e42b 100644 --- a/src/coreclr/jit/decomposelongs.h +++ b/src/coreclr/jit/decomposelongs.h @@ -18,7 +18,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX class DecomposeLongs { public: - DecomposeLongs(Compiler* compiler) : m_compiler(compiler) + DecomposeLongs(Compiler* compiler) + : m_compiler(compiler) { } @@ -72,7 +73,7 @@ class DecomposeLongs GenTree* RepresentOpAsLocalVar(GenTree* op, GenTree* user, GenTree** edge); GenTree* EnsureIntSized(GenTree* node, bool signExtend); - GenTree* StoreNodeToVar(LIR::Use& use); + GenTree* StoreNodeToVar(LIR::Use& use); static genTreeOps GetHiOper(genTreeOps oper); static genTreeOps GetLoOper(genTreeOps oper); diff --git a/src/coreclr/jit/disasm.cpp b/src/coreclr/jit/disasm.cpp index 2a49f9d8cb55c..bff93c85150a6 100644 --- a/src/coreclr/jit/disasm.cpp +++ b/src/coreclr/jit/disasm.cpp @@ -1,12 +1,12 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*********************************************************************** -* -* File: disasm.cpp -* -* This file handles disassembly for the "late disassembler". -* -***********************************************************************/ + * + * File: disasm.cpp + * + * This file handles disassembly for the "late disassembler". + * + ***********************************************************************/ #include "jitpch.h" #ifdef _MSC_VER @@ -23,7 +23,7 @@ FILE* g_disAsmFileCorDisTools; #endif // USE_COREDISTOOLS // Define DISASM_DEBUG to get verbose output of late disassembler inner workings. -//#define DISASM_DEBUG +// #define DISASM_DEBUG #ifdef DISASM_DEBUG #ifdef DEBUG #define DISASM_DUMP(...) 
\ @@ -96,12 +96,12 @@ typedef struct codeFix { codeFix* cfNext; unsigned cfFixup; -} * codeFixPtr; +}* codeFixPtr; typedef struct codeBlk { codeFix* cbFixupLst; -} * codeBlkPtr; +}* codeBlkPtr; #ifdef USE_MSVCDIS @@ -139,7 +139,7 @@ size_t DisAssembler::disCchAddrMember( switch (terminationType) { - // int disCallSize; + // int disCallSize; case DISX86::trmtaJmpShort: case DISX86::trmtaJmpCcShort: @@ -228,7 +228,7 @@ size_t DisAssembler::disCchAddrMember( switch (terminationType) { - // int disCallSize; + // int disCallSize; case DISARM64::TRMTA::trmtaBra: case DISARM64::TRMTA::trmtaBraCase: @@ -620,7 +620,7 @@ size_t DisAssembler::disCchRegRelMember( case DISX86::trmtaFallThrough: - /* some instructions like division have a TRAP termination type - ignore it */ + /* some instructions like division have a TRAP termination type - ignore it */ case DISX86::trmtaTrap: case DISX86::trmtaTrapCc: @@ -715,7 +715,7 @@ size_t DisAssembler::disCchRegRelMember( case DISARM64::TRMTA::trmtaFallThrough: - /* some instructions like division have a TRAP termination type - ignore it */ + /* some instructions like division have a TRAP termination type - ignore it */ case DISARM64::TRMTA::trmtaTrap: case DISARM64::TRMTA::trmtaTrapCc: @@ -1261,7 +1261,7 @@ void DisAssembler::DisasmBuffer(FILE* pfile, bool printit) #elif defined(TARGET_AMD64) pdis = DIS::PdisNew(DIS::distX8664); #elif defined(TARGET_ARM64) - pdis = DIS::PdisNew(DIS::distArm64); + pdis = DIS::PdisNew(DIS::distArm64); #else // TARGET* #error Unsupported or unset target architecture #endif @@ -1340,7 +1340,7 @@ void DisAssembler::DisasmBuffer(FILE* pfile, bool printit) #else false // Display code bytes? #endif - ); + ); ibCur += (unsigned)cb; } @@ -1680,7 +1680,7 @@ bool DisAssembler::InitCoredistoolsLibrary() s_disCoreDisToolsLibraryLoadSuccessful = true; // We made it! 
-// done initializing + // done initializing FinishedInitializing: InterlockedExchange(&s_disCoreDisToolsLibraryInitializing, 0); // unlock initialization @@ -1703,7 +1703,7 @@ bool DisAssembler::InitCoredistoolsDisasm() #if defined(TARGET_ARM64) coreDisTargetArchitecture = Target_Arm64; #elif defined(TARGET_ARM) - coreDisTargetArchitecture = Target_Thumb; + coreDisTargetArchitecture = Target_Thumb; #elif defined(TARGET_X86) coreDisTargetArchitecture = Target_X86; #elif defined(TARGET_AMD64) diff --git a/src/coreclr/jit/ee_il_dll.cpp b/src/coreclr/jit/ee_il_dll.cpp index cfe50b492bb32..b33e6eed17bbc 100644 --- a/src/coreclr/jit/ee_il_dll.cpp +++ b/src/coreclr/jit/ee_il_dll.cpp @@ -211,7 +211,9 @@ void SetJitTls(void* value) #if defined(DEBUG) -JitTls::JitTls(ICorJitInfo* jitInfo) : m_compiler(nullptr), m_logEnv(jitInfo) +JitTls::JitTls(ICorJitInfo* jitInfo) + : m_compiler(nullptr) + , m_logEnv(jitInfo) { m_next = reinterpret_cast(GetJitTls()); SetJitTls(this); @@ -1407,7 +1409,9 @@ bool Compiler::eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param) unsigned Compiler::eeTryGetClassSize(CORINFO_CLASS_HANDLE clsHnd) { unsigned classSize = UINT_MAX; - eeRunFunctorWithSPMIErrorTrap([&]() { classSize = info.compCompHnd->getClassSize(clsHnd); }); + eeRunFunctorWithSPMIErrorTrap([&]() { + classSize = info.compCompHnd->getClassSize(clsHnd); + }); return classSize; } diff --git a/src/coreclr/jit/ee_il_dll.hpp b/src/coreclr/jit/ee_il_dll.hpp index c3801d88292f5..d676ba8caa479 100644 --- a/src/coreclr/jit/ee_il_dll.hpp +++ b/src/coreclr/jit/ee_il_dll.hpp @@ -10,12 +10,12 @@ class CILJit : public ICorJitCompiler unsigned flags, /* IN */ uint8_t** nativeEntry, /* OUT */ uint32_t* nativeSizeOfCode /* OUT */ - ); + ); void ProcessShutdownWork(ICorStaticInfo* statInfo); void getVersionIdentifier(GUID* versionIdentifier /* OUT */ - ); + ); void setTargetOS(CORINFO_OS os); }; diff --git a/src/coreclr/jit/eeinterface.cpp b/src/coreclr/jit/eeinterface.cpp index 0578dee4109ef..a6552c2194294 100644 --- a/src/coreclr/jit/eeinterface.cpp +++ b/src/coreclr/jit/eeinterface.cpp @@ -210,7 +210,7 @@ void Compiler::eePrintTypeOrJitAlias(StringPrinter* printer, CORINFO_CLASS_HANDL } static const char* s_jitHelperNames[CORINFO_HELP_COUNT] = { -#define JITHELPER(code, pfnHelper, sig) #code, +#define JITHELPER(code, pfnHelper, sig) #code, #define DYNAMICJITHELPER(code, pfnHelper, sig) #code, #include "jithelpers.h" }; @@ -403,10 +403,9 @@ const char* Compiler::eeGetMethodFullName( CORINFO_SIG_INFO sig; eeGetMethodSig(hnd, &sig); eePrintMethod(&p, clsHnd, hnd, &sig, - /* includeClassInstantiation */ true, - /* includeMethodInstantiation */ true, - /* includeSignature */ true, includeReturnType, includeThisSpecifier); - + /* includeClassInstantiation */ true, + /* includeMethodInstantiation */ true, + /* includeSignature */ true, includeReturnType, includeThisSpecifier); }); if (success) @@ -475,13 +474,12 @@ const char* Compiler::eeGetMethodName(CORINFO_METHOD_HANDLE methHnd, char* buffe StringPrinter p(getAllocator(CMK_DebugOnly), buffer, bufferSize); bool success = eeRunFunctorWithSPMIErrorTrap([&]() { eePrintMethod(&p, NO_CLASS_HANDLE, methHnd, - /* sig */ nullptr, - /* includeClassInstantiation */ false, - /* includeMethodInstantiation */ false, - /* includeSignature */ false, - /* includeReturnType */ false, - /* includeThisSpecifier */ false); - + /* sig */ nullptr, + /* includeClassInstantiation */ false, + /* includeMethodInstantiation */ false, + /* includeSignature */ false, + /* includeReturnType */ 
false, + /* includeThisSpecifier */ false); }); if (!success) @@ -512,7 +510,9 @@ const char* Compiler::eeGetMethodName(CORINFO_METHOD_HANDLE methHnd, char* buffe const char* Compiler::eeGetFieldName(CORINFO_FIELD_HANDLE fldHnd, bool includeType, char* buffer, size_t bufferSize) { StringPrinter p(getAllocator(CMK_DebugOnly), buffer, bufferSize); - bool success = eeRunFunctorWithSPMIErrorTrap([&]() { eePrintField(&p, fldHnd, includeType); }); + bool success = eeRunFunctorWithSPMIErrorTrap([&]() { + eePrintField(&p, fldHnd, includeType); + }); if (success) { @@ -525,7 +525,9 @@ const char* Compiler::eeGetFieldName(CORINFO_FIELD_HANDLE fldHnd, bool includeTy { p.Append(":"); - success = eeRunFunctorWithSPMIErrorTrap([&]() { eePrintField(&p, fldHnd, false); }); + success = eeRunFunctorWithSPMIErrorTrap([&]() { + eePrintField(&p, fldHnd, false); + }); if (success) { @@ -560,7 +562,9 @@ const char* Compiler::eeGetFieldName(CORINFO_FIELD_HANDLE fldHnd, bool includeTy const char* Compiler::eeGetClassName(CORINFO_CLASS_HANDLE clsHnd, char* buffer, size_t bufferSize) { StringPrinter printer(getAllocator(CMK_DebugOnly), buffer, bufferSize); - if (!eeRunFunctorWithSPMIErrorTrap([&]() { eePrintType(&printer, clsHnd, true); })) + if (!eeRunFunctorWithSPMIErrorTrap([&]() { + eePrintType(&printer, clsHnd, true); + })) { printer.Truncate(0); printer.Append(""); @@ -581,7 +585,9 @@ const char* Compiler::eeGetClassName(CORINFO_CLASS_HANDLE clsHnd, char* buffer, const char* Compiler::eeGetShortClassName(CORINFO_CLASS_HANDLE clsHnd) { StringPrinter printer(getAllocator(CMK_DebugOnly)); - if (!eeRunFunctorWithSPMIErrorTrap([&]() { eePrintType(&printer, clsHnd, false); })) + if (!eeRunFunctorWithSPMIErrorTrap([&]() { + eePrintType(&printer, clsHnd, false); + })) { printer.Truncate(0); printer.Append(""); @@ -597,8 +603,9 @@ void Compiler::eePrintObjectDescription(const char* prefix, CORINFO_OBJECT_HANDL size_t actualLen = 0; // Ignore potential SPMI failures - bool success = eeRunFunctorWithSPMIErrorTrap( - [&]() { actualLen = this->info.compCompHnd->printObjectDescription(handle, str, maxStrSize); }); + bool success = eeRunFunctorWithSPMIErrorTrap([&]() { + actualLen = this->info.compCompHnd->printObjectDescription(handle, str, maxStrSize); + }); if (!success) { diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp index 5e3736786f0d5..cabad877f8383 100644 --- a/src/coreclr/jit/emit.cpp +++ b/src/coreclr/jit/emit.cpp @@ -787,7 +787,7 @@ void emitter::emitGenIG(insGroup* ig) IMPL_LIMITATION("Too many arguments pushed on stack"); } -// printf("Start IG #%02u [stk=%02u]\n", ig->igNum, emitCurStackLvl); + // printf("Start IG #%02u [stk=%02u]\n", ig->igNum, emitCurStackLvl); #endif @@ -1205,7 +1205,7 @@ void emitter::emitBegFN(bool hasFramePtr , bool chkAlign #endif - ) +) { insGroup* ig; @@ -1612,7 +1612,7 @@ void* emitter::emitAllocAnyInstr(size_t sz, emitAttr opsz) #if defined(FEATURE_EH_FUNCLETS) && !emitIGisInFuncletProlog(emitCurIG) && !emitIGisInFuncletEpilog(emitCurIG) #endif // FEATURE_EH_FUNCLETS - ) + ) { emitNxtIG(true); } @@ -1627,7 +1627,7 @@ void* emitter::emitAllocAnyInstr(size_t sz, emitAttr opsz) !emitIGisInProlog(emitCurIG) && // don't do this in prolog or epilog !emitIGisInEpilog(emitCurIG) && emitRandomNops // sometimes we turn off where exact codegen is needed (pinvoke inline) - ) + ) { if (emitNextNop == 0) { @@ -1761,7 +1761,7 @@ void* emitter::emitAllocAnyInstr(size_t sz, emitAttr opsz) #ifndef TARGET_AMD64 && emitComp->opts.compReloc #endif // TARGET_AMD64 - ) + ) { /* Mark 
idInfo()->idDspReloc to remember that the */ /* address mode has a displacement that is relocatable */ @@ -2074,7 +2074,7 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, #if defined(FEATURE_EH_FUNCLETS) || igType == IGPT_FUNCLET_EPILOG #endif // FEATURE_EH_FUNCLETS - ) + ) { #ifdef TARGET_AMD64 emitOutputPreEpilogNOP(); @@ -2202,7 +2202,7 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, #if defined(FEATURE_EH_FUNCLETS) || igType == IGPT_FUNCLET_EPILOG #endif // FEATURE_EH_FUNCLETS - ) + ) { // If this was an epilog, then assume this is the end of any currently in progress // no-GC region. If a block after the epilog needs to be no-GC, it needs to call @@ -2509,11 +2509,11 @@ void emitter::emitEndFnEpilog() // because the only instruction is the last one and thus a slight // underestimation of the epilog size is harmless (since the EIP // can not be between instructions). - assert(emitEpilogCnt == 1 || - (emitExitSeqSize - newSize) <= 5 // delta between size of various forms of jmp (size is either 6 or 5), - // and various forms of ret (size is either 1 or 3). The combination can - // be anything between 1 and 5. - ); + assert(emitEpilogCnt == 1 || (emitExitSeqSize - newSize) <= 5 // delta between size of various forms of jmp + // (size is either 6 or 5), and various forms of + // ret (size is either 1 or 3). The combination + // can be anything between 1 and 5. + ); emitExitSeqSize = newSize; } #endif // JIT32_GCENCODER @@ -2825,11 +2825,11 @@ bool emitter::emitNoGChelper(CorInfoHelpFunc helpFunc) case CORINFO_HELP_LRSH: case CORINFO_HELP_LRSZ: -// case CORINFO_HELP_LMUL: -// case CORINFO_HELP_LDIV: -// case CORINFO_HELP_LMOD: -// case CORINFO_HELP_ULDIV: -// case CORINFO_HELP_ULMOD: + // case CORINFO_HELP_LMUL: + // case CORINFO_HELP_LDIV: + // case CORINFO_HELP_LMOD: + // case CORINFO_HELP_ULDIV: + // case CORINFO_HELP_ULMOD: #ifdef TARGET_X86 case CORINFO_HELP_ASSIGN_REF_EAX: @@ -2890,8 +2890,8 @@ bool emitter::emitNoGChelper(CORINFO_METHOD_HANDLE methHnd) * Mark the current spot as having a label. */ -void* emitter::emitAddLabel(VARSET_VALARG_TP GCvars, - regMaskTP gcrefRegs, +void* emitter::emitAddLabel(VARSET_VALARG_TP GCvars, + regMaskTP gcrefRegs, regMaskTP byrefRegs DEBUG_ARG(BasicBlock* block)) { /* Create a new IG if the current one is non-empty */ @@ -3088,7 +3088,7 @@ void emitter::emitSplit(emitLocation* startLoc, return; } -// Report it! + // Report it! #ifdef DEBUG if (EMITVERBOSE) @@ -3605,7 +3605,7 @@ emitter::instrDesc* emitter::emitNewInstrCallInd(int argCnt, VARSET_VALARG_TP GCvars, regMaskTP gcrefRegs, regMaskTP byrefRegs, - emitAttr retSizeIn + emitAttr retSizeIn MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize)) { emitAttr retSize = (retSizeIn != EA_UNKNOWN) ? retSizeIn : EA_PTRSIZE; @@ -3688,7 +3688,7 @@ emitter::instrDesc* emitter::emitNewInstrCallDir(int argCnt, VARSET_VALARG_TP GCvars, regMaskTP gcrefRegs, regMaskTP byrefRegs, - emitAttr retSizeIn + emitAttr retSizeIn MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize)) { emitAttr retSize = (retSizeIn != EA_UNKNOWN) ? retSizeIn : EA_PTRSIZE; @@ -3912,8 +3912,8 @@ void emitter::emitDispRegPtrListDelta() // Dump any deltas in regPtrDsc's for outgoing args; these aren't captured in the other sets. if (debugPrevRegPtrDsc != codeGen->gcInfo.gcRegPtrLast) { - for (regPtrDsc* dsc = (debugPrevRegPtrDsc == nullptr) ? codeGen->gcInfo.gcRegPtrList - : debugPrevRegPtrDsc->rpdNext; + for (regPtrDsc* dsc = (debugPrevRegPtrDsc == nullptr) ? 
codeGen->gcInfo.gcRegPtrList + : debugPrevRegPtrDsc->rpdNext; dsc != nullptr; dsc = dsc->rpdNext) { // The non-arg regPtrDscs are reflected in the register sets debugPrevGCrefRegs/emitThisGCrefRegs @@ -4397,7 +4397,7 @@ size_t emitter::emitIssue1Instr(insGroup* ig, instrDesc* id, BYTE** dp) ig->igPerfScore += insPerfScore; #endif // defined(DEBUG) || defined(LATE_DISASM) -// printf("[S=%02u]\n", emitCurStackLvl); + // printf("[S=%02u]\n", emitCurStackLvl); #if EMIT_TRACK_STACK_DEPTH @@ -4559,7 +4559,7 @@ void emitter::emitDispCommentForHandle(size_t handle, size_t cookie, GenTreeFlag #ifdef DEBUG emitComp->eePrintObjectDescription(commentPrefix, (CORINFO_OBJECT_HANDLE)handle); #else - str = "frozen object handle"; + str = "frozen object handle"; #endif } else if (flag == GTF_ICON_CLASS_HDL) @@ -4870,9 +4870,9 @@ void emitter::emitJumpDistBind() int jmp_iteration = 1; -/*****************************************************************************/ -/* If we iterate to look for more jumps to shorten, we start again here. */ -/*****************************************************************************/ + /*****************************************************************************/ + /* If we iterate to look for more jumps to shorten, we start again here. */ + /*****************************************************************************/ AGAIN: @@ -4880,10 +4880,10 @@ void emitter::emitJumpDistBind() emitCheckIGList(); #endif -/* - In the following loop we convert all jump targets from "BasicBlock *" - to "insGroup *" values. We also estimate which jumps will be short. - */ + /* + In the following loop we convert all jump targets from "BasicBlock *" + to "insGroup *" values. We also estimate which jumps will be short. + */ #ifdef DEBUG insGroup* lastIG = nullptr; @@ -5023,7 +5023,7 @@ void emitter::emitJumpDistBind() } #endif // TARGET_ARM64 -/* Make sure the jumps are properly ordered */ + /* Make sure the jumps are properly ordered */ #ifdef DEBUG assert(lastLJ == nullptr || lastIG != jmp->idjIG || lastLJ->idjOffs < jmp->idjOffs); @@ -5427,9 +5427,9 @@ void emitter::emitJumpDistBind() continue; - /*****************************************************************************/ - /* Handle conversion to short jump */ - /*****************************************************************************/ + /*****************************************************************************/ + /* Handle conversion to short jump */ + /*****************************************************************************/ SHORT_JMP: @@ -5469,9 +5469,9 @@ void emitter::emitJumpDistBind() #if defined(TARGET_ARM) - /*****************************************************************************/ - /* Handle conversion to medium jump */ - /*****************************************************************************/ + /*****************************************************************************/ + /* Handle conversion to medium jump */ + /*****************************************************************************/ MEDIUM_JMP: @@ -5496,7 +5496,7 @@ void emitter::emitJumpDistBind() #endif // TARGET_ARM - /*****************************************************************************/ + /*****************************************************************************/ NEXT_JMP: @@ -5572,7 +5572,7 @@ void emitter::emitJumpDistBind() #if defined(TARGET_ARM) || (minMediumExtra <= adjIG) #endif // TARGET_ARM - ) + ) { jmp_iteration++; @@ -5827,8 +5827,8 @@ bool emitter::emitEndsWithAlignInstr() // Returns: size of a loop in 
bytes. // unsigned emitter::getLoopSize(insGroup* igLoopHeader, - unsigned maxLoopSize // - DEBUG_ARG(bool isAlignAdjusted) // + unsigned maxLoopSize // + DEBUG_ARG(bool isAlignAdjusted) // DEBUG_ARG(UNATIVE_OFFSET containingIGNum) // DEBUG_ARG(UNATIVE_OFFSET loopHeadPredIGNum)) { @@ -6236,7 +6236,7 @@ void emitter::emitLoopAlignAdjustments() } #endif // TARGET_XARCH & TARGET_ARM64 #endif // DEBUG - // Adjust the padding amount in all align instructions in this IG + // Adjust the padding amount in all align instructions in this IG instrDescAlign *alignInstrToAdj = alignInstr, *prevAlignInstr = nullptr; for (; alignInstrToAdj != nullptr && alignInstrToAdj->idaIG == alignInstr->idaIG; alignInstrToAdj = alignInstrToAdj->idaNext) @@ -6332,7 +6332,7 @@ void emitter::emitLoopAlignAdjustments() // 3b. If the loop already fits in minimum alignmentBoundary blocks, then return 0. // already best aligned // 3c. return paddingNeeded. // -unsigned emitter::emitCalculatePaddingForLoopAlignment(insGroup* loopHeadIG, +unsigned emitter::emitCalculatePaddingForLoopAlignment(insGroup* loopHeadIG, size_t offset DEBUG_ARG(bool isAlignAdjusted) DEBUG_ARG(UNATIVE_OFFSET containingIGNum) DEBUG_ARG(UNATIVE_OFFSET loopHeadPredIGNum)) @@ -6673,18 +6673,18 @@ void emitter::emitComputeCodeSizes() // Returns: // size of the method code, in bytes // -unsigned emitter::emitEndCodeGen(Compiler* comp, - bool contTrkPtrLcls, - bool fullyInt, - bool fullPtrMap, - unsigned xcptnsCount, - unsigned* prologSize, - unsigned* epilogSize, - void** codeAddr, - void** codeAddrRW, - void** coldCodeAddr, - void** coldCodeAddrRW, - void** consAddr, +unsigned emitter::emitEndCodeGen(Compiler* comp, + bool contTrkPtrLcls, + bool fullyInt, + bool fullPtrMap, + unsigned xcptnsCount, + unsigned* prologSize, + unsigned* epilogSize, + void** codeAddr, + void** codeAddrRW, + void** coldCodeAddr, + void** coldCodeAddrRW, + void** consAddr, void** consAddrRW DEBUGARG(unsigned* instrCount)) { #ifdef DEBUG @@ -7116,7 +7116,7 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, assert(indx < emitComp->lvaTrackedCount); -// printf("Variable #%2u/%2u is at stack offset %d\n", num, indx, offs); + // printf("Variable #%2u/%2u is at stack offset %d\n", num, indx, offs); #ifdef JIT32_GCENCODER #ifndef FEATURE_EH_FUNCLETS @@ -8445,7 +8445,7 @@ void emitter::emitDispDataSec(dataSecDsc* section, BYTE* dst) printf("\tdd\t%08Xh", (uint32_t)(size_t)emitOffsetToPtr(ig->igOffs)); } #else // TARGET_64BIT - // We have a 64-BIT target + // We have a 64-BIT target if (emitComp->opts.disDiffable) { printf("\tdq\t%s\n", blockLabel); @@ -9042,7 +9042,7 @@ void emitter::emitGCregDeadSet(GCtype gcType, regMaskTP regMask, BYTE* addr) unsigned char emitter::emitOutputByte(BYTE* dst, ssize_t val) { - BYTE* dstRW = dst + writeableOffset; + BYTE* dstRW = dst + writeableOffset; *castto(dstRW, unsigned char*) = (unsigned char)val; #ifdef DEBUG @@ -9808,13 +9808,13 @@ cnsval_ssize_t emitter::emitGetInsSC(const instrDesc* id) const else #endif // TARGET_ARM if (id->idIsLargeCns()) - { - return ((instrDescCns*)id)->idcCnsVal; - } - else - { - return id->idSmallCns(); - } + { + return ((instrDescCns*)id)->idcCnsVal; + } + else + { + return id->idSmallCns(); + } } #ifdef TARGET_ARM @@ -9925,7 +9925,7 @@ void emitter::emitStackPop(BYTE* addr, bool isCall, unsigned char callInstrSize, #ifndef JIT32_GCENCODER || (emitComp->IsFullPtrRegMapRequired() && !emitComp->GetInterruptible() && isCall) #endif // JIT32_GCENCODER - ) + ) { emitStackPopLargeStk(addr, isCall, callInstrSize, 0); } @@ 
-10202,17 +10202,17 @@ void emitter::emitStackKillArgs(BYTE* addr, unsigned count, unsigned char callIn #ifdef DEBUG -void emitter::emitRecordRelocationHelp(void* location, /* IN */ - void* target, /* IN */ - uint16_t fRelocType, /* IN */ - const char* relocTypeName, /* IN */ +void emitter::emitRecordRelocationHelp(void* location, /* IN */ + void* target, /* IN */ + uint16_t fRelocType, /* IN */ + const char* relocTypeName, /* IN */ int32_t addlDelta /* = 0 */) /* IN */ #else // !DEBUG -void emitter::emitRecordRelocation(void* location, /* IN */ - void* target, /* IN */ - uint16_t fRelocType, /* IN */ +void emitter::emitRecordRelocation(void* location, /* IN */ + void* target, /* IN */ + uint16_t fRelocType, /* IN */ int32_t addlDelta /* = 0 */) /* IN */ #endif // !DEBUG diff --git a/src/coreclr/jit/emit.h b/src/coreclr/jit/emit.h index 48f5edeef728b..094720597ead7 100644 --- a/src/coreclr/jit/emit.h +++ b/src/coreclr/jit/emit.h @@ -118,7 +118,7 @@ inline const char* GCtypeStr(GCtype gcType) #if DEBUG_EMIT #define INTERESTING_JUMP_NUM -1 // set to 0 to see all jump info -//#define INTERESTING_JUMP_NUM 0 +// #define INTERESTING_JUMP_NUM 0 #endif /***************************************************************************** @@ -129,11 +129,15 @@ inline const char* GCtypeStr(GCtype gcType) class emitLocation { public: - emitLocation() : ig(nullptr), codePos(0) + emitLocation() + : ig(nullptr) + , codePos(0) { } - emitLocation(insGroup* _ig) : ig(_ig), codePos(0) + emitLocation(insGroup* _ig) + : ig(_ig) + , codePos(0) { } @@ -147,7 +151,9 @@ class emitLocation CaptureLocation(emit); } - emitLocation(void* emitCookie) : ig((insGroup*)emitCookie), codePos(0) + emitLocation(void* emitCookie) + : ig((insGroup*)emitCookie) + , codePos(0) { } @@ -286,20 +292,23 @@ struct insGroup insGroup* igLoopBackEdge; // "last" back-edge that branches back to an aligned loop head. #endif -#define IGF_GC_VARS 0x0001 // new set of live GC ref variables -#define IGF_BYREF_REGS 0x0002 // new set of live by-ref registers +#define IGF_GC_VARS 0x0001 // new set of live GC ref variables +#define IGF_BYREF_REGS 0x0002 // new set of live by-ref registers #define IGF_FUNCLET_PROLOG 0x0004 // this group belongs to a funclet prolog #define IGF_FUNCLET_EPILOG 0x0008 // this group belongs to a funclet epilog. -#define IGF_EPILOG 0x0010 // this group belongs to a main function epilog -#define IGF_NOGCINTERRUPT 0x0020 // this IG is in a no-interrupt region (prolog, epilog, etc.) -#define IGF_UPD_ISZ 0x0040 // some instruction sizes updated -#define IGF_PLACEHOLDER 0x0080 // this is a placeholder group, to be filled in later -#define IGF_EXTEND 0x0100 // this block is conceptually an extension of the previous block - // and the emitter should continue to track GC info as if there was no new block. -#define IGF_HAS_ALIGN 0x0200 // this group contains an alignment instruction(s) at the end to align either the next - // IG, or, if this IG contains with an unconditional branch, some subsequent IG. -#define IGF_REMOVED_ALIGN 0x0400 // IG was marked as having an alignment instruction(s), but was later unmarked - // without updating the IG's size/offsets. +#define IGF_EPILOG 0x0010 // this group belongs to a main function epilog +#define IGF_NOGCINTERRUPT 0x0020 // this IG is in a no-interrupt region (prolog, epilog, etc.) 
+#define IGF_UPD_ISZ 0x0040 // some instruction sizes updated +#define IGF_PLACEHOLDER 0x0080 // this is a placeholder group, to be filled in later +#define IGF_EXTEND \ + 0x0100 // this block is conceptually an extension of the previous block + // and the emitter should continue to track GC info as if there was no new block. +#define IGF_HAS_ALIGN \ + 0x0200 // this group contains an alignment instruction(s) at the end to align either the next + // IG, or, if this IG contains with an unconditional branch, some subsequent IG. +#define IGF_REMOVED_ALIGN \ + 0x0400 // IG was marked as having an alignment instruction(s), but was later unmarked + // without updating the IG's size/offsets. #define IGF_HAS_REMOVABLE_JMP 0x0800 // this group ends with an unconditional jump which is a candidate for removal #ifdef TARGET_ARM64 #define IGF_HAS_REMOVED_INSTR 0x1000 // this group has an instruction that was removed. @@ -325,7 +334,8 @@ struct insGroup regMaskSmall igGCregs; // set of registers with live GC refs #endif // !(REGMASK_BITS <= 32) - union { + union + { BYTE* igData; // addr of instruction descriptors insPlaceholderGroupData* igPhData; // when igFlags & IGF_PLACEHOLDER }; @@ -428,8 +438,8 @@ struct emitLclVarAddr // protected: unsigned _lvaVarNum : 15; // Usually the lvaVarNum - unsigned _lvaExtra : 15; // Usually the lvaOffset - unsigned _lvaTag : 2; // tag field to support larger varnums + unsigned _lvaExtra : 15; // Usually the lvaOffset + unsigned _lvaTag : 2; // tag field to support larger varnums }; enum idAddrUnionTag @@ -513,7 +523,7 @@ class emitter #ifdef TARGET_AMD64 OPSZP = OPSZ8, #else - OPSZP = OPSZ4, + OPSZP = OPSZ4, #endif }; @@ -522,7 +532,7 @@ class emitter static const emitAttr emitSizeDecode[]; static emitter::opSize emitEncodeSize(emitAttr size); - static emitAttr emitDecodeSize(emitter::opSize ensz); + static emitAttr emitDecodeSize(emitter::opSize ensz); // Currently, we only allow one IG for the prolog bool emitIGisInProlog(const insGroup* ig) @@ -570,10 +580,10 @@ class emitter #ifdef TARGET_XARCH -#define AM_DISP_BITS ((sizeof(unsigned) * 8) - 2 * (REGNUM_BITS + 1) - 2) +#define AM_DISP_BITS ((sizeof(unsigned) * 8) - 2 * (REGNUM_BITS + 1) - 2) #define AM_DISP_BIG_VAL (-(1 << (AM_DISP_BITS - 1))) -#define AM_DISP_MIN (-((1 << (AM_DISP_BITS - 1)) - 1)) -#define AM_DISP_MAX (+((1 << (AM_DISP_BITS - 1)) - 1)) +#define AM_DISP_MIN (-((1 << (AM_DISP_BITS - 1)) - 1)) +#define AM_DISP_MAX (+((1 << (AM_DISP_BITS - 1)) - 1)) struct emitAddrMode { @@ -643,9 +653,9 @@ class emitter static_assert_no_msg(IF_COUNT <= 128); insFormat _idInsFmt : 7; #elif defined(TARGET_LOONGARCH64) - unsigned _idCodeSize : 5; // the instruction(s) size of this instrDesc described. + unsigned _idCodeSize : 5; // the instruction(s) size of this instrDesc described. #elif defined(TARGET_RISCV64) - unsigned _idCodeSize : 6; // the instruction(s) size of this instrDesc described. + unsigned _idCodeSize : 6; // the instruction(s) size of this instrDesc described. 
#elif defined(TARGET_ARM64) static_assert_no_msg(IF_COUNT <= 1024); insFormat _idInsFmt : 10; @@ -685,7 +695,7 @@ class emitter { } #elif defined(TARGET_RISCV64) - insFormat idInsFmt() const + insFormat idInsFmt() const { NYI_RISCV64("idInsFmt-----unimplemented on RISCV64 yet----"); return (insFormat)0; @@ -695,7 +705,7 @@ class emitter NYI_RISCV64("idInsFmt-----unimplemented on RISCV64 yet----"); } #else - insFormat idInsFmt() const + insFormat idInsFmt() const { return _idInsFmt; } @@ -721,7 +731,7 @@ class emitter private: #if defined(TARGET_XARCH) unsigned _idCodeSize : 4; // size of instruction in bytes. Max size of an Intel instruction is 15 bytes. - opSize _idOpSize : 3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16, 5=32 + opSize _idOpSize : 3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16, 5=32 // At this point we have fully consumed first DWORD so that next field // doesn't cross a byte boundary. #elif defined(TARGET_ARM64) @@ -730,7 +740,7 @@ class emitter #elif defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) /* _idOpSize defined below. */ #else - opSize _idOpSize : 2; // operand size: 0=1 , 1=2 , 2=4 , 3=8 + opSize _idOpSize : 2; // operand size: 0=1 , 1=2 , 2=4 , 3=8 #endif // TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64 // On Amd64, this is where the second DWORD begins @@ -763,9 +773,9 @@ class emitter // loongarch64: 28 bits // risc-v: 28 bits - unsigned _idSmallDsc : 1; // is this a "small" descriptor? - unsigned _idLargeCns : 1; // does a large constant follow? - unsigned _idLargeDsp : 1; // does a large displacement follow? + unsigned _idSmallDsc : 1; // is this a "small" descriptor? + unsigned _idLargeCns : 1; // does a large constant follow? + unsigned _idLargeDsp : 1; // does a large displacement follow? unsigned _idLargeCall : 1; // large call descriptor used // We have several pieces of information we need to encode but which are only applicable @@ -776,15 +786,15 @@ class emitter unsigned _idCustom2 : 1; unsigned _idCustom3 : 1; -#define _idBound _idCustom1 /* jump target / frame offset bound */ -#define _idTlsGD _idCustom2 /* Used to store information related to TLS GD access on linux */ -#define _idNoGC _idCustom3 /* Some helpers don't get recorded in GC tables */ +#define _idBound _idCustom1 /* jump target / frame offset bound */ +#define _idTlsGD _idCustom2 /* Used to store information related to TLS GD access on linux */ +#define _idNoGC _idCustom3 /* Some helpers don't get recorded in GC tables */ #define _idEvexAaaContext (_idCustom3 << 2) | (_idCustom2 << 1) | _idCustom1 /* bits used for the EVEX.aaa context */ #if !defined(TARGET_ARMARCH) unsigned _idCustom4 : 1; -#define _idCallRegPtr _idCustom4 /* IL indirect calls : addr in reg */ +#define _idCallRegPtr _idCustom4 /* IL indirect calls : addr in reg */ #define _idEvexZContext _idCustom4 /* bits used for the EVEX.z context */ #endif // !TARGET_ARMARCH @@ -798,12 +808,12 @@ class emitter #ifdef TARGET_ARM64 - unsigned _idLclVar : 1; // access a local on stack - unsigned _idLclVarPair : 1 // carries information for 2 GC lcl vars. + unsigned _idLclVar : 1; // access a local on stack + unsigned _idLclVarPair : 1 // carries information for 2 GC lcl vars. #endif #ifdef TARGET_LOONGARCH64 - // TODO-LoongArch64: maybe delete on future. + // TODO-LoongArch64: maybe delete on future. opSize _idOpSize : 3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16 insOpts _idInsOpt : 6; // loongarch options for special: placeholders. e.g emitIns_R_C, also identifying the // accessing a local on stack. 
@@ -818,11 +828,11 @@ class emitter #endif #ifdef TARGET_ARM - insSize _idInsSize : 2; // size of instruction: 16, 32 or 48 bits - insFlags _idInsFlags : 1; // will this instruction set the flags - unsigned _idLclVar : 1; // access a local on stack + insSize _idInsSize : 2; // size of instruction: 16, 32 or 48 bits + insFlags _idInsFlags : 1; // will this instruction set the flags + unsigned _idLclVar : 1; // access a local on stack unsigned _idLclFPBase : 1; // access a local on stack - SP based offset - insOpts _idInsOpt : 3; // options for Load/Store instructions + insOpts _idInsOpt : 3; // options for Load/Store instructions #endif //////////////////////////////////////////////////////////////////////// @@ -892,7 +902,7 @@ class emitter #define ID_EXTRA_BITS (ID_EXTRA_RELOC_BITS + ID_EXTRA_BITFIELD_BITS + ID_EXTRA_PREV_OFFSET_BITS) -/* Use whatever bits are left over for small constants */ + /* Use whatever bits are left over for small constants */ #define ID_BIT_SMALL_CNS (32 - ID_EXTRA_BITS) C_ASSERT(ID_BIT_SMALL_CNS > 0); @@ -951,9 +961,10 @@ class emitter void checkSizes(); - union idAddrUnion { -// TODO-Cleanup: We should really add a DEBUG-only tag to this union so we can add asserts -// about reading what we think is here, to avoid unexpected corruption issues. + union idAddrUnion + { + // TODO-Cleanup: We should really add a DEBUG-only tag to this union so we can add asserts + // about reading what we think is here, to avoid unexpected corruption issues. #if !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64) emitLclVarAddr iiaLclVar; @@ -1857,137 +1868,137 @@ class emitter #define PERFSCORE_THROUGHPUT_1C 1.0f // Single Issue -#define PERFSCORE_THROUGHPUT_2C 2.0f // slower - 2 cycles -#define PERFSCORE_THROUGHPUT_3C 3.0f // slower - 3 cycles -#define PERFSCORE_THROUGHPUT_4C 4.0f // slower - 4 cycles -#define PERFSCORE_THROUGHPUT_5C 5.0f // slower - 5 cycles -#define PERFSCORE_THROUGHPUT_6C 6.0f // slower - 6 cycles -#define PERFSCORE_THROUGHPUT_7C 7.0f // slower - 7 cycles -#define PERFSCORE_THROUGHPUT_8C 8.0f // slower - 8 cycles -#define PERFSCORE_THROUGHPUT_9C 9.0f // slower - 9 cycles -#define PERFSCORE_THROUGHPUT_10C 10.0f // slower - 10 cycles -#define PERFSCORE_THROUGHPUT_11C 10.0f // slower - 10 cycles -#define PERFSCORE_THROUGHPUT_13C 13.0f // slower - 13 cycles -#define PERFSCORE_THROUGHPUT_14C 14.0f // slower - 13 cycles -#define PERFSCORE_THROUGHPUT_16C 16.0f // slower - 13 cycles -#define PERFSCORE_THROUGHPUT_19C 19.0f // slower - 19 cycles -#define PERFSCORE_THROUGHPUT_25C 25.0f // slower - 25 cycles -#define PERFSCORE_THROUGHPUT_33C 33.0f // slower - 33 cycles -#define PERFSCORE_THROUGHPUT_50C 50.0f // slower - 50 cycles -#define PERFSCORE_THROUGHPUT_52C 52.0f // slower - 52 cycles -#define PERFSCORE_THROUGHPUT_57C 57.0f // slower - 57 cycles +#define PERFSCORE_THROUGHPUT_2C 2.0f // slower - 2 cycles +#define PERFSCORE_THROUGHPUT_3C 3.0f // slower - 3 cycles +#define PERFSCORE_THROUGHPUT_4C 4.0f // slower - 4 cycles +#define PERFSCORE_THROUGHPUT_5C 5.0f // slower - 5 cycles +#define PERFSCORE_THROUGHPUT_6C 6.0f // slower - 6 cycles +#define PERFSCORE_THROUGHPUT_7C 7.0f // slower - 7 cycles +#define PERFSCORE_THROUGHPUT_8C 8.0f // slower - 8 cycles +#define PERFSCORE_THROUGHPUT_9C 9.0f // slower - 9 cycles +#define PERFSCORE_THROUGHPUT_10C 10.0f // slower - 10 cycles +#define PERFSCORE_THROUGHPUT_11C 10.0f // slower - 10 cycles +#define PERFSCORE_THROUGHPUT_13C 13.0f // slower - 13 cycles +#define PERFSCORE_THROUGHPUT_14C 14.0f // slower - 13 cycles +#define 
PERFSCORE_THROUGHPUT_16C 16.0f // slower - 13 cycles +#define PERFSCORE_THROUGHPUT_19C 19.0f // slower - 19 cycles +#define PERFSCORE_THROUGHPUT_25C 25.0f // slower - 25 cycles +#define PERFSCORE_THROUGHPUT_33C 33.0f // slower - 33 cycles +#define PERFSCORE_THROUGHPUT_50C 50.0f // slower - 50 cycles +#define PERFSCORE_THROUGHPUT_52C 52.0f // slower - 52 cycles +#define PERFSCORE_THROUGHPUT_57C 57.0f // slower - 57 cycles #define PERFSCORE_THROUGHPUT_140C 140.0f // slower - 140 cycles #define PERFSCORE_LATENCY_ILLEGAL -1024.0f #define PERFSCORE_LATENCY_ZERO 0.0f -#define PERFSCORE_LATENCY_1C 1.0f -#define PERFSCORE_LATENCY_2C 2.0f -#define PERFSCORE_LATENCY_3C 3.0f -#define PERFSCORE_LATENCY_4C 4.0f -#define PERFSCORE_LATENCY_5C 5.0f -#define PERFSCORE_LATENCY_6C 6.0f -#define PERFSCORE_LATENCY_7C 7.0f -#define PERFSCORE_LATENCY_8C 8.0f -#define PERFSCORE_LATENCY_9C 9.0f -#define PERFSCORE_LATENCY_10C 10.0f -#define PERFSCORE_LATENCY_11C 11.0f -#define PERFSCORE_LATENCY_12C 12.0f -#define PERFSCORE_LATENCY_13C 13.0f -#define PERFSCORE_LATENCY_14C 14.0f -#define PERFSCORE_LATENCY_15C 15.0f -#define PERFSCORE_LATENCY_16C 16.0f -#define PERFSCORE_LATENCY_18C 18.0f -#define PERFSCORE_LATENCY_20C 20.0f -#define PERFSCORE_LATENCY_22C 22.0f -#define PERFSCORE_LATENCY_23C 23.0f -#define PERFSCORE_LATENCY_26C 26.0f -#define PERFSCORE_LATENCY_62C 62.0f -#define PERFSCORE_LATENCY_69C 69.0f +#define PERFSCORE_LATENCY_1C 1.0f +#define PERFSCORE_LATENCY_2C 2.0f +#define PERFSCORE_LATENCY_3C 3.0f +#define PERFSCORE_LATENCY_4C 4.0f +#define PERFSCORE_LATENCY_5C 5.0f +#define PERFSCORE_LATENCY_6C 6.0f +#define PERFSCORE_LATENCY_7C 7.0f +#define PERFSCORE_LATENCY_8C 8.0f +#define PERFSCORE_LATENCY_9C 9.0f +#define PERFSCORE_LATENCY_10C 10.0f +#define PERFSCORE_LATENCY_11C 11.0f +#define PERFSCORE_LATENCY_12C 12.0f +#define PERFSCORE_LATENCY_13C 13.0f +#define PERFSCORE_LATENCY_14C 14.0f +#define PERFSCORE_LATENCY_15C 15.0f +#define PERFSCORE_LATENCY_16C 16.0f +#define PERFSCORE_LATENCY_18C 18.0f +#define PERFSCORE_LATENCY_20C 20.0f +#define PERFSCORE_LATENCY_22C 22.0f +#define PERFSCORE_LATENCY_23C 23.0f +#define PERFSCORE_LATENCY_26C 26.0f +#define PERFSCORE_LATENCY_62C 62.0f +#define PERFSCORE_LATENCY_69C 69.0f #define PERFSCORE_LATENCY_140C 140.0f #define PERFSCORE_LATENCY_400C 400.0f // Intel microcode issue with these instructions -#define PERFSCORE_LATENCY_BRANCH_DIRECT 1.0f // cost of an unconditional branch -#define PERFSCORE_LATENCY_BRANCH_COND 2.0f // includes cost of a possible misprediction +#define PERFSCORE_LATENCY_BRANCH_DIRECT 1.0f // cost of an unconditional branch +#define PERFSCORE_LATENCY_BRANCH_COND 2.0f // includes cost of a possible misprediction #define PERFSCORE_LATENCY_BRANCH_INDIRECT 2.0f // includes cost of a possible misprediction #if defined(TARGET_XARCH) // a read,write or modify from stack location, possible def to use latency from L0 cache -#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_2C -#define PERFSCORE_LATENCY_WR_STACK PERFSCORE_LATENCY_2C +#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_2C +#define PERFSCORE_LATENCY_WR_STACK PERFSCORE_LATENCY_2C #define PERFSCORE_LATENCY_RD_WR_STACK PERFSCORE_LATENCY_5C // a read, write or modify from constant location, possible def to use latency from L0 cache -#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_2C -#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_2C +#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_2C +#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_2C #define 
PERFSCORE_LATENCY_RD_WR_CONST_ADDR PERFSCORE_LATENCY_5C // a read, write or modify from memory location, possible def to use latency from L0 or L1 cache // plus an extra cost (of 1.0) for a increased chance of a cache miss -#define PERFSCORE_LATENCY_RD_GENERAL PERFSCORE_LATENCY_3C -#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_RD_GENERAL PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_3C #define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_6C #elif defined(TARGET_ARM64) || defined(TARGET_ARM) // a read,write or modify from stack location, possible def to use latency from L0 cache -#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_3C -#define PERFSCORE_LATENCY_WR_STACK PERFSCORE_LATENCY_1C -#define PERFSCORE_LATENCY_RD_WR_STACK PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_WR_STACK PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_WR_STACK PERFSCORE_LATENCY_3C // a read, write or modify from constant location, possible def to use latency from L0 cache -#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_3C -#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_1C #define PERFSCORE_LATENCY_RD_WR_CONST_ADDR PERFSCORE_LATENCY_3C // a read, write or modify from memory location, possible def to use latency from L0 or L1 cache // plus an extra cost (of 1.0) for a increased chance of a cache miss -#define PERFSCORE_LATENCY_RD_GENERAL PERFSCORE_LATENCY_4C -#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_1C -#define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_4C +#define PERFSCORE_LATENCY_RD_GENERAL PERFSCORE_LATENCY_4C +#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_4C #elif defined(TARGET_LOONGARCH64) // a read,write or modify from stack location, possible def to use latency from L0 cache -#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_3C -#define PERFSCORE_LATENCY_WR_STACK PERFSCORE_LATENCY_1C -#define PERFSCORE_LATENCY_RD_WR_STACK PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_WR_STACK PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_WR_STACK PERFSCORE_LATENCY_3C // a read, write or modify from constant location, possible def to use latency from L0 cache -#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_3C -#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_1C #define PERFSCORE_LATENCY_RD_WR_CONST_ADDR PERFSCORE_LATENCY_3C // a read, write or modify from memory location, possible def to use latency from L0 or L1 cache // plus an extra cost (of 1.0) for a increased chance of a cache miss -#define PERFSCORE_LATENCY_RD_GENERAL PERFSCORE_LATENCY_4C -#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_1C -#define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_4C +#define PERFSCORE_LATENCY_RD_GENERAL PERFSCORE_LATENCY_4C +#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_4C #elif defined(TARGET_RISCV64) // a read,write or modify from stack location, possible def to use latency from L0 cache -#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_3C -#define PERFSCORE_LATENCY_WR_STACK 
PERFSCORE_LATENCY_1C -#define PERFSCORE_LATENCY_RD_WR_STACK PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_WR_STACK PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_WR_STACK PERFSCORE_LATENCY_3C // a read, write or modify from constant location, possible def to use latency from L0 cache -#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_3C -#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_3C +#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_1C #define PERFSCORE_LATENCY_RD_WR_CONST_ADDR PERFSCORE_LATENCY_3C // a read, write or modify from memory location, possible def to use latency from L0 or L1 cache // plus an extra cost (of 1.0) for a increased chance of a cache miss -#define PERFSCORE_LATENCY_RD_GENERAL PERFSCORE_LATENCY_4C -#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_1C -#define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_4C +#define PERFSCORE_LATENCY_RD_GENERAL PERFSCORE_LATENCY_4C +#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_1C +#define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_4C #endif // TARGET_XXX // Make this an enum: // -#define PERFSCORE_MEMORY_NONE 0 -#define PERFSCORE_MEMORY_READ 1 -#define PERFSCORE_MEMORY_WRITE 2 +#define PERFSCORE_MEMORY_NONE 0 +#define PERFSCORE_MEMORY_READ 1 +#define PERFSCORE_MEMORY_WRITE 2 #define PERFSCORE_MEMORY_READ_WRITE 3 struct insExecutionCharacteristics @@ -2020,7 +2031,8 @@ class emitter instrDescJmp* idjNext; // next jump in the group/method insGroup* idjIG; // containing group - union { + union + { BYTE* idjAddr; // address of jump ins (for patching) } idjTemp; @@ -2043,7 +2055,7 @@ class emitter #else 30; #endif - unsigned idjShort : 1; // is the jump known to be a short one? + unsigned idjShort : 1; // is the jump known to be a short one? unsigned idjKeepLong : 1; // should the jump be kept long? 
(used for hot to cold and cold to hot jumps) }; @@ -2184,7 +2196,9 @@ class emitter alignas(alignof(T)) char idStorage[sizeof(T)]; public: - inlineInstrDesc() : idDebugInfo(nullptr), idStorage() + inlineInstrDesc() + : idDebugInfo(nullptr) + , idStorage() { static_assert_no_msg((offsetof(inlineInstrDesc, idStorage) - sizeof(instrDescDebugInfo*)) == offsetof(inlineInstrDesc, idDebugInfo)); @@ -2210,7 +2224,7 @@ class emitter #endif // TARGET_ARM insUpdateModes emitInsUpdateMode(instruction ins); - insFormat emitInsModeFormat(instruction ins, insFormat base); + insFormat emitInsModeFormat(instruction ins, insFormat base); static const BYTE emitInsModeFmtTab[]; #ifdef DEBUG @@ -2225,7 +2239,7 @@ class emitter ssize_t emitGetInsDsp(instrDesc* id); ssize_t emitGetInsAmd(instrDesc* id); - ssize_t emitGetInsCIdisp(instrDesc* id); + ssize_t emitGetInsCIdisp(instrDesc* id); unsigned emitGetInsCIargs(instrDesc* id); inline emitAttr emitGetMemOpSize(instrDesc* id) const; @@ -2238,7 +2252,7 @@ class emitter #endif // TARGET_XARCH cnsval_ssize_t emitGetInsSC(const instrDesc* id) const; - unsigned emitInsCount; + unsigned emitInsCount; /************************************************************************/ /* A few routines used for debug display purposes */ @@ -2264,11 +2278,11 @@ class emitter regMaskTP debugPrevGCrefRegs; regMaskTP debugPrevByrefRegs; void emitDispInsIndent(); - void emitDispGCDeltaTitle(const char* title); - void emitDispGCRegDelta(const char* title, regMaskTP prevRegs, regMaskTP curRegs); - void emitDispGCVarDelta(); - void emitDispRegPtrListDelta(); - void emitDispGCInfoDelta(); + void emitDispGCDeltaTitle(const char* title); + void emitDispGCRegDelta(const char* title, regMaskTP prevRegs, regMaskTP curRegs); + void emitDispGCVarDelta(); + void emitDispRegPtrListDelta(); + void emitDispGCInfoDelta(); void emitDispIGflags(unsigned flags); void emitDispIG(insGroup* ig, @@ -2325,7 +2339,9 @@ class emitter EpilogList* elNext; emitLocation elLoc; - EpilogList() : elNext(nullptr), elLoc() + EpilogList() + : elNext(nullptr) + , elLoc() { } }; @@ -2362,12 +2378,12 @@ class emitter /* Methods to record a code position and later convert to offset */ /************************************************************************/ - unsigned emitFindInsNum(const insGroup* ig, const instrDesc* id) const; + unsigned emitFindInsNum(const insGroup* ig, const instrDesc* id) const; UNATIVE_OFFSET emitFindOffset(const insGroup* ig, unsigned insNum) const; -/************************************************************************/ -/* Members and methods used to issue (encode) instructions. */ -/************************************************************************/ + /************************************************************************/ + /* Members and methods used to issue (encode) instructions. 
*/ + /************************************************************************/ #ifdef DEBUG // If we have started issuing instructions from the list of instrDesc, this is set @@ -2458,9 +2474,9 @@ class emitter #endif // TARGET_LOONGARCH64 || TARGET_RISCV64 instrDesc* emitFirstInstrDesc(BYTE* idData) const; - void emitAdvanceInstrDesc(instrDesc** id, size_t idSize) const; - size_t emitIssue1Instr(insGroup* ig, instrDesc* id, BYTE** dp); - size_t emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp); + void emitAdvanceInstrDesc(instrDesc** id, size_t idSize) const; + size_t emitIssue1Instr(insGroup* ig, instrDesc* id, BYTE** dp); + size_t emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp); bool emitHasFramePtr; @@ -2511,13 +2527,13 @@ class emitter #endif // FEATURE_SIMD regNumber emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, GenTree* src); regNumber emitInsTernary(instruction ins, emitAttr attr, GenTree* dst, GenTree* src1, GenTree* src2); - void emitInsLoadInd(instruction ins, emitAttr attr, regNumber dstReg, GenTreeIndir* mem); - void emitInsStoreInd(instruction ins, emitAttr attr, GenTreeStoreInd* mem); - void emitInsStoreLcl(instruction ins, emitAttr attr, GenTreeLclVarCommon* varNode); + void emitInsLoadInd(instruction ins, emitAttr attr, regNumber dstReg, GenTreeIndir* mem); + void emitInsStoreInd(instruction ins, emitAttr attr, GenTreeStoreInd* mem); + void emitInsStoreLcl(instruction ins, emitAttr attr, GenTreeLclVarCommon* varNode); insFormat emitMapFmtForIns(insFormat fmt, instruction ins); insFormat emitMapFmtAtoM(insFormat fmt); - void emitHandleMemOp(GenTreeIndir* indir, instrDesc* id, insFormat fmt, instruction ins); - void spillIntArgRegsToShadowSlots(); + void emitHandleMemOp(GenTreeIndir* indir, instrDesc* id, insFormat fmt, instruction ins); + void spillIntArgRegsToShadowSlots(); #ifdef TARGET_XARCH bool emitIsInstrWritingToReg(instrDesc* id, regNumber reg); @@ -2604,22 +2620,22 @@ class emitter // non-adaptive alignment on xarch, this points to the first align instruction of the series of align instructions. 
instrDescAlign* emitAlignLastGroup; - unsigned getLoopSize(insGroup* igLoopHeader, + unsigned getLoopSize(insGroup* igLoopHeader, unsigned maxLoopSize DEBUG_ARG(bool isAlignAdjusted) DEBUG_ARG(UNATIVE_OFFSET containingIGNum) DEBUG_ARG(UNATIVE_OFFSET loopHeadPredIGNum)); // Get the smallest loop size - void emitLoopAlignment(DEBUG_ARG1(bool isPlacedBehindJmp)); - bool emitEndsWithAlignInstr(); // Validate if newLabel is appropriate - bool emitSetLoopBackEdge(const BasicBlock* loopTopBlock); + void emitLoopAlignment(DEBUG_ARG1(bool isPlacedBehindJmp)); + bool emitEndsWithAlignInstr(); // Validate if newLabel is appropriate + bool emitSetLoopBackEdge(const BasicBlock* loopTopBlock); void emitLoopAlignAdjustments(); // Predict if loop alignment is needed and make appropriate adjustments - unsigned emitCalculatePaddingForLoopAlignment(insGroup* ig, + unsigned emitCalculatePaddingForLoopAlignment(insGroup* ig, size_t offset DEBUG_ARG(bool isAlignAdjusted) DEBUG_ARG(UNATIVE_OFFSET containingIGNum) DEBUG_ARG(UNATIVE_OFFSET loopHeadPredIGNum)); - void emitLoopAlign(unsigned paddingBytes, bool isFirstAlign DEBUG_ARG(bool isPlacedBehindJmp)); - void emitLongLoopAlign(unsigned alignmentBoundary DEBUG_ARG(bool isPlacedBehindJmp)); + void emitLoopAlign(unsigned paddingBytes, bool isFirstAlign DEBUG_ARG(bool isPlacedBehindJmp)); + void emitLongLoopAlign(unsigned alignmentBoundary DEBUG_ARG(bool isPlacedBehindJmp)); instrDescAlign* emitAlignInNextIG(instrDescAlign* alignInstr); - void emitConnectAlignInstrWithCurIG(); + void emitConnectAlignInstrWithCurIG(); #endif @@ -2692,7 +2708,7 @@ class emitter void emitSetSecondRetRegGCType(instrDescCGCA* id, emitAttr secondRetSize); #endif // MULTIREG_HAS_SECOND_GC_RET - static void emitEncodeCallGCregs(regMaskTP regs, instrDesc* id); + static void emitEncodeCallGCregs(regMaskTP regs, instrDesc* id); static unsigned emitDecodeCallGCregs(instrDesc* id); unsigned emitNxtIGnum; @@ -2716,8 +2732,8 @@ class emitter insGroup* emitAllocAndLinkIG(); insGroup* emitAllocIG(); - void emitInitIG(insGroup* ig); - void emitInsertIGAfter(insGroup* insertAfterIG, insGroup* ig); + void emitInitIG(insGroup* ig); + void emitInsertIGAfter(insGroup* insertAfterIG, insGroup* ig); void emitNewIG(); @@ -2732,9 +2748,9 @@ class emitter static bool emitJmpInstHasNoCode(instrDesc* id); #endif - void emitGenIG(insGroup* ig); + void emitGenIG(insGroup* ig); insGroup* emitSavIG(bool emitAdd = false); - void emitNxtIG(bool extend = false); + void emitNxtIG(bool extend = false); #ifdef TARGET_ARM64 void emitRemoveLastInstruction(); @@ -2864,8 +2880,8 @@ class emitter // Mark this instruction group as having a label; return the new instruction group. // Sets the emitter's record of the currently live GC variables // and registers. - void* emitAddLabel(VARSET_VALARG_TP GCvars, - regMaskTP gcrefRegs, + void* emitAddLabel(VARSET_VALARG_TP GCvars, + regMaskTP gcrefRegs, regMaskTP byrefRegs DEBUG_ARG(BasicBlock* block = nullptr)); // Same as above, except the label is added and is conceptually "inline" in @@ -2873,7 +2889,7 @@ class emitter // continues to track GC info as if there was no label. 
void* emitAddInlineLabel(); - void emitPrintLabel(const insGroup* ig) const; + void emitPrintLabel(const insGroup* ig) const; const char* emitLabelString(const insGroup* ig) const; #if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) @@ -3096,7 +3112,7 @@ class emitter return (offs >= emitGCrFrameOffsMin) && (offs < emitGCrFrameOffsMax); } - static instruction emitJumpKindToIns(emitJumpKind jumpKind); + static instruction emitJumpKindToIns(emitJumpKind jumpKind); static emitJumpKind emitInsToJumpKind(instruction ins); static emitJumpKind emitReverseJumpKind(emitJumpKind jumpKind); @@ -3161,7 +3177,8 @@ class emitter bool emitSimpleStkUsed; // using the "simple" stack table? - union { + union + { struct // if emitSimpleStkUsed==true { @@ -3209,8 +3226,8 @@ class emitter #ifdef DEBUG const char* emitGetFrameReg(); - void emitDispRegSet(regMaskTP regs); - void emitDispVarSet(); + void emitDispRegSet(regMaskTP regs); + void emitDispVarSet(); #endif void emitGCregLiveUpd(GCtype gcType, regNumber reg, BYTE* addr); @@ -3275,7 +3292,11 @@ class emitter UNATIVE_OFFSET dsdOffs; UNATIVE_OFFSET alignment; // in bytes, defaults to 4 - dataSecDsc() : dsdList(nullptr), dsdLast(nullptr), dsdOffs(0), alignment(4) + dataSecDsc() + : dsdList(nullptr) + , dsdLast(nullptr) + , dsdOffs(0) + , alignment(4) { } }; @@ -3293,9 +3314,9 @@ class emitter COMP_HANDLE emitCmpHandle; -/************************************************************************/ -/* Helpers for interface to EE */ -/************************************************************************/ + /************************************************************************/ + /* Helpers for interface to EE */ + /************************************************************************/ #ifdef DEBUG @@ -3305,25 +3326,25 @@ class emitter #define emitRecordRelocationWithAddlDelta(location, target, fRelocType, addlDelta) \ emitRecordRelocationHelp(location, target, fRelocType, #fRelocType, addlDelta) - void emitRecordRelocationHelp(void* location, /* IN */ - void* target, /* IN */ - uint16_t fRelocType, /* IN */ - const char* relocTypeName, /* IN */ - int32_t addlDelta = 0); /* IN */ + void emitRecordRelocationHelp(void* location, /* IN */ + void* target, /* IN */ + uint16_t fRelocType, /* IN */ + const char* relocTypeName, /* IN */ + int32_t addlDelta = 0); /* IN */ #else // !DEBUG void emitRecordRelocationWithAddlDelta(void* location, /* IN */ void* target, /* IN */ uint16_t fRelocType, /* IN */ - int32_t addlDelta) /* IN */ + int32_t addlDelta) /* IN */ { emitRecordRelocation(location, target, fRelocType, addlDelta); } - void emitRecordRelocation(void* location, /* IN */ - void* target, /* IN */ - uint16_t fRelocType, /* IN */ + void emitRecordRelocation(void* location, /* IN */ + void* target, /* IN */ + uint16_t fRelocType, /* IN */ int32_t addlDelta = 0); /* IN */ #endif // !DEBUG @@ -3343,9 +3364,9 @@ class emitter CORINFO_SIG_INFO* emitScratchSigInfo; #endif // DEBUG -/************************************************************************/ -/* Logic to collect and display statistics */ -/************************************************************************/ + /************************************************************************/ + /* Logic to collect and display statistics */ + /************************************************************************/ #if EMITTER_STATS @@ -3482,10 +3503,10 @@ class emitter } #endif // EMITTER_STATS -/************************************************************************* - 
* - * Define any target-dependent emitter members. - */ + /************************************************************************* + * + * Define any target-dependent emitter members. + */ #include "emitdef.h" diff --git a/src/coreclr/jit/emitarm.cpp b/src/coreclr/jit/emitarm.cpp index 362e304273415..5a20f8a1f940a 100644 --- a/src/coreclr/jit/emitarm.cpp +++ b/src/coreclr/jit/emitarm.cpp @@ -700,8 +700,8 @@ emitter::insFormat emitter::emitInsFormat(instruction ins) } // INST_FP is 1 -#define LD 2 -#define ST 4 +#define LD 2 +#define ST 4 #define CMP 8 // clang-format off @@ -1708,10 +1708,10 @@ void emitter::emitIns_R(instruction ins, emitAttr attr, regNumber reg) * Add an instruction referencing a register and a constant. */ -void emitter::emitIns_R_I(instruction ins, - emitAttr attr, - regNumber reg, - target_ssize_t imm, +void emitter::emitIns_R_I(instruction ins, + emitAttr attr, + regNumber reg, + target_ssize_t imm, insFlags flags /* = INS_FLAGS_DONT_CARE */ DEBUGARG(GenTreeFlags gtFlags)) { @@ -1738,7 +1738,7 @@ void emitter::emitIns_R_I(instruction ins, ins = INS_sub; else // ins == INS_sub ins = INS_add; - imm = -imm; + imm = -imm; } fmt = IF_T1_J0; sf = INS_FLAGS_SET; @@ -2607,7 +2607,7 @@ void emitter::emitIns_R_R_I(instruction ins, ins = INS_sub; else ins = INS_add; - imm = -imm; + imm = -imm; } fmt = IF_T1_G; sf = INS_FLAGS_SET; @@ -2621,7 +2621,7 @@ void emitter::emitIns_R_R_I(instruction ins, ins = INS_sub; else ins = INS_add; - imm = -imm; + imm = -imm; } // Use Thumb-1 encoding emitIns_R_I(ins, attr, reg1, imm, flags); @@ -2982,9 +2982,9 @@ void emitter::emitIns_R_R_I(instruction ins, } } } - // - // If we did not find a thumb-1 encoding above - // + // + // If we did not find a thumb-1 encoding above + // COMMON_THUMB2_LDST: assert(fmt == IF_NONE); @@ -3185,8 +3185,8 @@ void emitter::emitIns_R_R_R(instruction ins, case INS_mul: if (insMustSetFlags(flags)) { - assert(reg1 != - REG_PC); // VM debugging single stepper doesn't support PC register with this instruction. + assert(reg1 != REG_PC); // VM debugging single stepper doesn't support PC register with this + // instruction. 
assert(reg2 != REG_PC); assert(reg3 != REG_PC); @@ -4836,7 +4836,7 @@ void emitter::emitIns_Call(EmitCallType callType, if (m_debugInfoSize > 0) { INDEBUG(id->idDebugOnlyInfo()->idCallSig = sigInfo); - id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token + id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token } #ifdef LATE_DISASM @@ -5236,7 +5236,7 @@ unsigned emitter::emitOutput_Thumb1Instr(BYTE* dst, code_t code) unsigned emitter::emitOutput_Thumb2Instr(BYTE* dst, code_t code) { unsigned short word1 = (code >> 16) & 0xffff; - unsigned short word2 = (code)&0xffff; + unsigned short word2 = (code) & 0xffff; assert((code_t)((word1 << 16) | word2) == code); #ifdef DEBUG @@ -5342,7 +5342,7 @@ BYTE* emitter::emitOutputLJ(insGroup* ig, BYTE* dst, instrDesc* i) if (dstOffs <= srcOffs) { -/* This is a backward jump - distance is known at this point */ + /* This is a backward jump - distance is known at this point */ #if DEBUG_EMIT if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0) @@ -5731,7 +5731,7 @@ BYTE* emitter::emitOutputIT(BYTE* dst, instruction ins, insFormat fmt, code_t co #endif // FEATURE_ITINSTRUCTION /***************************************************************************** -* + * * Append the machine code corresponding to the given instruction descriptor * to the code block at '*dp'; the base of the code block is 'bp', and 'ig' * is the instruction group that contains the instruction. Updates '*dp' to @@ -6561,9 +6561,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) break; - /********************************************************************/ - /* oops */ - /********************************************************************/ + /********************************************************************/ + /* oops */ + /********************************************************************/ default: diff --git a/src/coreclr/jit/emitarm.h b/src/coreclr/jit/emitarm.h index 245196bfa1834..6ae0c57dea6d2 100644 --- a/src/coreclr/jit/emitarm.h +++ b/src/coreclr/jit/emitarm.h @@ -81,7 +81,7 @@ bool emitInsIsStore(instruction ins); bool emitInsIsLoadOrStore(instruction ins); emitter::insFormat emitInsFormat(instruction ins); -emitter::code_t emitInsCode(instruction ins, insFormat fmt); +emitter::code_t emitInsCode(instruction ins, insFormat fmt); // Generate code for a load or store operation and handle the case // of contained GT_LEA op1 with [base + index<idInsOpt())) { - assert((emitGetInsSC(id) > 0) || - (id->idReg2() == REG_ZR)); // REG_ZR encodes SP and we allow a shift of zero + assert((emitGetInsSC(id) > 0) || (id->idReg2() == REG_ZR)); // REG_ZR encodes SP and we allow a shift of + // zero } break; @@ -967,7 +967,7 @@ bool emitter::emitInsMayWriteToGCReg(instrDesc* id) switch (fmt) { - // These are the formats with "destination" registers: + // These are the formats with "destination" registers: case IF_DI_1B: // DI_1B X........hwiiiii iiiiiiiiiiiddddd Rd imm(i16,hw) case IF_DI_1D: // DI_1D X........Nrrrrrr ssssss.....ddddd Rd imm(N,r,s) @@ -1031,7 +1031,7 @@ bool emitter::emitInsMayWriteToGCReg(instrDesc* id) // Tracked GC pointers cannot be placed into the SIMD registers. return false; - // These are the load/store formats with "target" registers: + // These are the load/store formats with "target" registers: case IF_LS_1A: // LS_1A XX...V..iiiiiiii iiiiiiiiiiittttt Rt PC imm(1MB) case IF_LS_2A: // LS_2A .X.......X...... 
......nnnnnttttt Rt Rn @@ -1471,8 +1471,8 @@ emitter::insFormat emitter::emitInsFormat(instruction ins) return insFormats[ins]; } -#define LD 1 -#define ST 2 +#define LD 1 +#define ST 2 #define CMP 4 #define RSH 8 #define WID 16 @@ -1733,8 +1733,8 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt) }; // clang-format on - const static insFormat formatEncode9[9] = {IF_DR_2E, IF_DR_2G, IF_DI_1B, IF_DI_1D, IF_DV_3C, - IF_DV_2B, IF_DV_2C, IF_DV_2E, IF_DV_2F}; + const static insFormat formatEncode9[9] = {IF_DR_2E, IF_DR_2G, IF_DI_1B, IF_DI_1D, IF_DV_3C, + IF_DV_2B, IF_DV_2C, IF_DV_2E, IF_DV_2F}; const static insFormat formatEncode6A[6] = {IF_DR_3A, IF_DR_3B, IF_DR_3C, IF_DI_2A, IF_DV_3A, IF_DV_3E}; const static insFormat formatEncode6B[6] = {IF_LS_2D, IF_LS_3F, IF_LS_2E, IF_LS_2F, IF_LS_3G, IF_LS_2G}; const static insFormat formatEncode5A[5] = {IF_LS_2A, IF_LS_2B, IF_LS_2C, IF_LS_3A, IF_LS_1A}; @@ -3748,13 +3748,13 @@ void emitter::emitIns_R(instruction ins, emitAttr attr, regNumber reg, insOpts o * Add an instruction referencing a register and a constant. */ -void emitter::emitIns_R_I(instruction ins, - emitAttr attr, - regNumber reg, - ssize_t imm, - insOpts opt, /* = INS_OPTS_NONE */ +void emitter::emitIns_R_I(instruction ins, + emitAttr attr, + regNumber reg, + ssize_t imm, + insOpts opt, /* = INS_OPTS_NONE */ insScalableOpts sopt /* = INS_SCALABLE_OPTS_NONE */ - DEBUGARG(size_t targetHandle /* = 0 */) DEBUGARG(GenTreeFlags gtFlags /* = GTF_EMPTY */)) + DEBUGARG(size_t targetHandle /* = 0 */) DEBUGARG(GenTreeFlags gtFlags /* = GTF_EMPTY */)) { emitAttr size = EA_SIZE(attr); emitAttr elemsize = EA_UNKNOWN; @@ -3940,8 +3940,8 @@ void emitter::emitIns_R_I(instruction ins, // First try the standard 'byteShifted immediate' imm(i8,bySh) bsi.immBSVal = 0; canEncode = canEncodeByteShiftedImm(imm, elemsize, - (ins == INS_mvni), // mvni supports the ones shifting variant (aka MSL) - &bsi); + (ins == INS_mvni), // mvni supports the ones shifting variant (aka MSL) + &bsi); if (canEncode) { imm = bsi.immBSVal; @@ -4955,8 +4955,8 @@ void emitter::emitIns_R_I_I(instruction ins, regNumber reg, ssize_t imm1, ssize_t imm2, - insOpts opt /* = INS_OPTS_NONE */ - DEBUGARG(size_t targetHandle /* = 0 */) DEBUGARG(GenTreeFlags gtFlags /* = 0 */)) + insOpts opt /* = INS_OPTS_NONE */ + DEBUGARG(size_t targetHandle /* = 0 */) DEBUGARG(GenTreeFlags gtFlags /* = 0 */)) { emitAttr size = EA_SIZE(attr); insFormat fmt = IF_NONE; @@ -5792,15 +5792,15 @@ void emitter::emitIns_R_R_F( } /***************************************************************************** -* -* Add an instruction referencing two registers and a constant. -* Also checks for a large immediate that needs a second instruction -* and will load it in reg1 -* -* - Supports instructions: add, adds, sub, subs, and, ands, eor and orr -* - Requires that reg1 is a general register and not SP or ZR -* - Requires that reg1 != reg2 -*/ + * + * Add an instruction referencing two registers and a constant. 
+ * Also checks for a large immediate that needs a second instruction + * and will load it in reg1 + * + * - Supports instructions: add, adds, sub, subs, and, ands, eor and orr + * - Requires that reg1 is a general register and not SP or ZR + * - Requires that reg1 != reg2 + */ void emitter::emitIns_R_R_Imm(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, ssize_t imm) { assert(isGeneralRegister(reg1)); @@ -6554,7 +6554,7 @@ void emitter::emitIns_R_R_R_I_LdStPair(instruction ins, int varx1, int varx2, int offs1, - int offs2 DEBUG_ARG(unsigned var1RefsOffs) DEBUG_ARG(unsigned var2RefsOffs)) + int offs2 DEBUG_ARG(unsigned var1RefsOffs) DEBUG_ARG(unsigned var2RefsOffs)) { assert((ins == INS_stp) || (ins == INS_ldp)); emitAttr size = EA_SIZE(attr); @@ -7147,7 +7147,7 @@ void emitter::emitIns_R_R_R_Ext(instruction ins, regNumber reg1, regNumber reg2, regNumber reg3, - insOpts opt, /* = INS_OPTS_NONE */ + insOpts opt, /* = INS_OPTS_NONE */ int shiftAmount) /* = -1 -- unset */ { emitAttr size = EA_SIZE(attr); @@ -8482,9 +8482,9 @@ void emitter::emitIns_R_AR(instruction ins, emitAttr attr, regNumber ireg, regNu } // This generates code to populate the access for TLS on linux -void emitter::emitIns_Adrp_Ldr_Add(emitAttr attr, - regNumber reg1, - regNumber reg2, +void emitter::emitIns_Adrp_Ldr_Add(emitAttr attr, + regNumber reg1, + regNumber reg2, ssize_t addr DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { assert(emitComp->IsTargetAbi(CORINFO_NATIVEAOT_ABI)); @@ -8537,9 +8537,9 @@ void emitter::emitIns_Adrp_Ldr_Add(emitAttr attr, } // This computes address from the immediate which is relocatable. -void emitter::emitIns_R_AI(instruction ins, - emitAttr attr, - regNumber ireg, +void emitter::emitIns_R_AI(instruction ins, + emitAttr attr, + regNumber ireg, ssize_t addr DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { assert(EA_IS_RELOC(attr)); @@ -9108,7 +9108,7 @@ void emitter::emitIns_Call(EmitCallType callType, if (m_debugInfoSize > 0) { INDEBUG(id->idDebugOnlyInfo()->idCallSig = sigInfo); - id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token + id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token } #ifdef LATE_DISASM @@ -10438,9 +10438,9 @@ BYTE* emitter::emitOutputLJ(insGroup* ig, BYTE* dst, instrDesc* i) } /***************************************************************************** -* -* Output a short branch instruction. -*/ + * + * Output a short branch instruction. + */ BYTE* emitter::emitOutputShortBranch(BYTE* dst, instruction ins, insFormat fmt, ssize_t distVal, instrDescJmp* id) { code_t code = emitInsCode(ins, fmt); @@ -10503,9 +10503,9 @@ BYTE* emitter::emitOutputShortBranch(BYTE* dst, instruction ins, insFormat fmt, } /***************************************************************************** -* -* Output a short address instruction. -*/ + * + * Output a short address instruction. + */ BYTE* emitter::emitOutputShortAddress(BYTE* dst, instruction ins, insFormat fmt, ssize_t distVal, regNumber reg) { ssize_t loBits = (distVal & 3); @@ -10533,9 +10533,9 @@ BYTE* emitter::emitOutputShortAddress(BYTE* dst, instruction ins, insFormat fmt, } /***************************************************************************** -* -* Output a short constant instruction. -*/ + * + * Output a short constant instruction. 
+ */ BYTE* emitter::emitOutputShortConstant( BYTE* dst, instruction ins, insFormat fmt, ssize_t imm, regNumber reg, emitAttr opSize) { @@ -10768,7 +10768,7 @@ unsigned emitter::emitOutput_Instr(BYTE* dst, code_t code) } /***************************************************************************** -* + * * Append the machine code corresponding to the given instruction descriptor * to the code block at '*dp'; the base of the code block is 'bp', and 'ig' * is the instruction group that contains the instruction. Updates '*dp' to @@ -11164,8 +11164,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) code = emitInsCode(ins, fmt); code |= insEncodeReg_Rd(id->idReg1()); // ddddd dst += emitOutput_Instr(dst, code); - emitRecordRelocation(odst, id->idAddr()->iiaAddr, id->idIsTlsGD() ? IMAGE_REL_AARCH64_TLSDESC_ADR_PAGE21 - : IMAGE_REL_ARM64_PAGEBASE_REL21); + emitRecordRelocation(odst, id->idAddr()->iiaAddr, + id->idIsTlsGD() ? IMAGE_REL_AARCH64_TLSDESC_ADR_PAGE21 + : IMAGE_REL_ARM64_PAGEBASE_REL21); } else { @@ -11208,8 +11209,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) { assert(sz == sizeof(instrDesc)); assert(id->idAddr()->iiaAddr != nullptr); - emitRecordRelocation(odst, id->idAddr()->iiaAddr, id->idIsTlsGD() ? IMAGE_REL_AARCH64_TLSDESC_ADD_LO12 - : IMAGE_REL_ARM64_PAGEOFFSET_12A); + emitRecordRelocation(odst, id->idAddr()->iiaAddr, + id->idIsTlsGD() ? IMAGE_REL_AARCH64_TLSDESC_ADD_LO12 + : IMAGE_REL_ARM64_PAGEOFFSET_12A); } break; @@ -12356,7 +12358,7 @@ void emitter::emitDispCond(insCond cond) { const static char* armCond[16] = {"eq", "ne", "hs", "lo", "mi", "pl", "vs", "vc", "hi", "ls", "ge", "lt", "gt", "le", "AL", "NV"}; // The last two are invalid - unsigned imm = (unsigned)cond; + unsigned imm = (unsigned)cond; assert((0 <= imm) && (imm < ArrLen(armCond))); printf(armCond[imm]); } @@ -12369,7 +12371,7 @@ void emitter::emitDispFlags(insCflags flags) { const static char* armFlags[16] = {"0", "v", "c", "cv", "z", "zv", "zc", "zcv", "n", "nv", "nc", "ncv", "nz", "nzv", "nzc", "nzcv"}; - unsigned imm = (unsigned)flags; + unsigned imm = (unsigned)flags; assert((0 <= imm) && (imm < ArrLen(armFlags))); printf(armFlags[imm]); } @@ -12382,7 +12384,7 @@ void emitter::emitDispBarrier(insBarrier barrier) { const static char* armBarriers[16] = {"#0", "oshld", "oshst", "osh", "#4", "nshld", "nshst", "nsh", "#8", "ishld", "ishst", "ish", "#12", "ld", "st", "sy"}; - unsigned imm = (unsigned)barrier; + unsigned imm = (unsigned)barrier; assert((0 <= imm) && (imm < ArrLen(armBarriers))); printf(armBarriers[imm]); } @@ -14681,9 +14683,9 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins switch (insFmt) { - // - // Branch Instructions - // + // + // Branch Instructions + // case IF_BI_0A: // b, bl_local case IF_BI_0C: // bl, b_tail @@ -14936,9 +14938,9 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins break; } - // - // Load/Store Instructions - // + // + // Load/Store Instructions + // case IF_LS_1A: // ldr, ldrsw (literal, pc relative immediate) result.insThroughput = PERFSCORE_THROUGHPUT_1C; @@ -16617,7 +16619,7 @@ bool emitter::OptimizeLdrStr(instruction ins, insFormat fmt, bool localVar, int varx, - int offs DEBUG_ARG(bool useRsvdReg)) + int offs DEBUG_ARG(bool useRsvdReg)) { assert(ins == INS_ldr || ins == INS_str); diff --git a/src/coreclr/jit/emitarm64.h b/src/coreclr/jit/emitarm64.h index 62624fe50d68e..cc3254c06810a 100644 --- a/src/coreclr/jit/emitarm64.h +++ 
b/src/coreclr/jit/emitarm64.h @@ -124,21 +124,21 @@ enum RegisterOrder /************************************************************************/ private: -bool emitInsIsCompare(instruction ins); -bool emitInsIsLoad(instruction ins); -bool emitInsIsStore(instruction ins); -bool emitInsIsLoadOrStore(instruction ins); -bool emitInsIsVectorRightShift(instruction ins); -bool emitInsIsVectorLong(instruction ins); -bool emitInsIsVectorNarrow(instruction ins); -bool emitInsIsVectorWide(instruction ins); -bool emitInsDestIsOp2(instruction ins); +bool emitInsIsCompare(instruction ins); +bool emitInsIsLoad(instruction ins); +bool emitInsIsStore(instruction ins); +bool emitInsIsLoadOrStore(instruction ins); +bool emitInsIsVectorRightShift(instruction ins); +bool emitInsIsVectorLong(instruction ins); +bool emitInsIsVectorNarrow(instruction ins); +bool emitInsIsVectorWide(instruction ins); +bool emitInsDestIsOp2(instruction ins); emitAttr emitInsTargetRegSize(instrDesc* id); emitAttr emitInsLoadStoreSize(instrDesc* id); emitter::insFormat emitInsFormat(instruction ins); -emitter::code_t emitInsCode(instruction ins, insFormat fmt); -emitter::code_t emitInsCodeSve(instruction ins, insFormat fmt); +emitter::code_t emitInsCode(instruction ins, insFormat fmt); +emitter::code_t emitInsCodeSve(instruction ins, insFormat fmt); // Generate code for a load or store operation and handle the case of contained GT_LEA op1 with [base + index<(id->idReg1()); // ddddd - code |= insEncodeSimm<9, 5>(imm1); // iiiii - code |= insEncodeSimm<20, 16>(imm2); // iiiii - code |= insEncodeElemsize(optGetSveElemsize(id->idInsOpt())); // xx - dst += emitOutput_Instr(dst, code); - break; - } + { + ssize_t imm1; + ssize_t imm2; + insSveDecodeTwoSimm5(emitGetInsSC(id), &imm1, &imm2); + code = emitInsCodeSve(ins, fmt); + code |= insEncodeReg_V<4, 0>(id->idReg1()); // ddddd + code |= insEncodeSimm<9, 5>(imm1); // iiiii + code |= insEncodeSimm<20, 16>(imm2); // iiiii + code |= insEncodeElemsize(optGetSveElemsize(id->idInsOpt())); // xx + dst += emitOutput_Instr(dst, code); + break; + } case IF_SVE_AY_2A: // ........xx.mmmmm ......iiiiiddddd -- SVE index generation (immediate start, register // increment) @@ -11451,16 +11451,16 @@ BYTE* emitter::emitOutput_InstrSve(BYTE* dst, instrDesc* id) case IF_SVE_HM_2A: // ........xx...... 
...ggg....iddddd -- SVE floating-point arithmetic with immediate // (predicated) - { - imm = emitGetInsSC(id); - code = emitInsCodeSve(ins, fmt); - code |= insEncodeReg_V<4, 0>(id->idReg1()); // ddddd - code |= insEncodeReg_P<12, 10>(id->idReg2()); // ggg - code |= insEncodeSveSmallFloatImm(imm); // i - code |= insEncodeSveElemsize(optGetSveElemsize(id->idInsOpt())); // xx - dst += emitOutput_Instr(dst, code); - } - break; + { + imm = emitGetInsSC(id); + code = emitInsCodeSve(ins, fmt); + code |= insEncodeReg_V<4, 0>(id->idReg1()); // ddddd + code |= insEncodeReg_P<12, 10>(id->idReg2()); // ggg + code |= insEncodeSveSmallFloatImm(imm); // i + code |= insEncodeSveElemsize(optGetSveElemsize(id->idInsOpt())); // xx + dst += emitOutput_Instr(dst, code); + } + break; case IF_SVE_HN_2A: // ........xx...iii ......mmmmmddddd -- SVE floating-point trig multiply-add coefficient imm = emitGetInsSC(id); @@ -13443,17 +13443,17 @@ void emitter::emitInsSveSanityCheck(instrDesc* id) case IF_SVE_AX_1A: // ........xx.iiiii ......iiiiiddddd -- SVE index generation (immediate start, immediate // increment) - { - ssize_t imm1; - ssize_t imm2; - insSveDecodeTwoSimm5(emitGetInsSC(id), &imm1, &imm2); - assert(insOptsScalableStandard(id->idInsOpt())); - assert(isVectorRegister(id->idReg1())); // ddddd - assert(isValidSimm<5>(imm1)); // iiiii - assert(isValidSimm<5>(imm2)); // iiiii - assert(isValidVectorElemsize(optGetSveElemsize(id->idInsOpt()))); // xx - break; - } + { + ssize_t imm1; + ssize_t imm2; + insSveDecodeTwoSimm5(emitGetInsSC(id), &imm1, &imm2); + assert(insOptsScalableStandard(id->idInsOpt())); + assert(isVectorRegister(id->idReg1())); // ddddd + assert(isValidSimm<5>(imm1)); // iiiii + assert(isValidSimm<5>(imm2)); // iiiii + assert(isValidVectorElemsize(optGetSveElemsize(id->idInsOpt()))); // xx + break; + } case IF_SVE_AY_2A: // ........xx.mmmmm ......iiiiiddddd -- SVE index generation (immediate start, register // increment) @@ -14579,37 +14579,37 @@ void emitter::emitDispInsSveHelp(instrDesc* id) // ., #, # case IF_SVE_AX_1A: // ........xx.iiiii ......iiiiiddddd -- SVE index generation (immediate start, immediate // increment) - { - ssize_t imm1; - ssize_t imm2; - insSveDecodeTwoSimm5(emitGetInsSC(id), &imm1, &imm2); - emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd - emitDispImm(imm1, true); // iiiii - emitDispImm(imm2, false); // iiiii - break; - } + { + ssize_t imm1; + ssize_t imm2; + insSveDecodeTwoSimm5(emitGetInsSC(id), &imm1, &imm2); + emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd + emitDispImm(imm1, true); // iiiii + emitDispImm(imm2, false); // iiiii + break; + } // ., #, case IF_SVE_AY_2A: // ........xx.mmmmm ......iiiiiddddd -- SVE index generation (immediate start, register // increment) - { - const emitAttr intRegSize = (id->idInsOpt() == INS_OPTS_SCALABLE_D) ? EA_8BYTE : EA_4BYTE; - emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd - emitDispImm(emitGetInsSC(id), true); // iiiii - emitDispReg(id->idReg2(), intRegSize, false); // mmmmm - break; - } + { + const emitAttr intRegSize = (id->idInsOpt() == INS_OPTS_SCALABLE_D) ? EA_8BYTE : EA_4BYTE; + emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd + emitDispImm(emitGetInsSC(id), true); // iiiii + emitDispReg(id->idReg2(), intRegSize, false); // mmmmm + break; + } // ., , # case IF_SVE_AZ_2A: // ........xx.iiiii ......nnnnnddddd -- SVE index generation (register start, immediate // increment) - { - const emitAttr intRegSize = (id->idInsOpt() == INS_OPTS_SCALABLE_D) ? 
EA_8BYTE : EA_4BYTE; - emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd - emitDispReg(id->idReg2(), intRegSize, true); // mmmmm - emitDispImm(emitGetInsSC(id), false); // iiiii - break; - } + { + const emitAttr intRegSize = (id->idInsOpt() == INS_OPTS_SCALABLE_D) ? EA_8BYTE : EA_4BYTE; + emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd + emitDispReg(id->idReg2(), intRegSize, true); // mmmmm + emitDispImm(emitGetInsSC(id), false); // iiiii + break; + } // .H, .B, .B case IF_SVE_GN_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 FP8 multiply-add long diff --git a/src/coreclr/jit/emitloongarch64.cpp b/src/coreclr/jit/emitloongarch64.cpp index 6aaf00a973c8a..24b408b4b8db3 100644 --- a/src/coreclr/jit/emitloongarch64.cpp +++ b/src/coreclr/jit/emitloongarch64.cpp @@ -51,9 +51,9 @@ const emitJumpKind emitReverseJumpKinds[] = { } /***************************************************************************** -* Look up the jump kind for an instruction. It better be a conditional -* branch instruction with a jump kind! -*/ + * Look up the jump kind for an instruction. It better be a conditional + * branch instruction with a jump kind! + */ /*static*/ emitJumpKind emitter::emitInsToJumpKind(instruction ins) { @@ -2047,9 +2047,9 @@ void emitter::emitIns_R_AR(instruction ins, emitAttr attr, regNumber ireg, regNu } // This computes address from the immediate which is relocatable. -void emitter::emitIns_R_AI(instruction ins, - emitAttr attr, - regNumber reg, +void emitter::emitIns_R_AI(instruction ins, + emitAttr attr, + regNumber reg, ssize_t addr DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { assert(EA_IS_RELOC(attr)); // EA_PTR_DSP_RELOC @@ -2381,8 +2381,8 @@ void emitter::emitIns_I_la(emitAttr size, regNumber reg, ssize_t imm) void emitter::emitIns_Call(EmitCallType callType, CORINFO_METHOD_HANDLE methHnd, INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE - void* addr, - ssize_t argSize, + void* addr, + ssize_t argSize, emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize), VARSET_VALARG_TP ptrVars, regMaskTP gcrefRegs, @@ -2786,9 +2786,9 @@ void emitter::emitJumpDistBind() B_DIST_SMALL_MAX_POS - emitCounts_INS_OPTS_J * (3 << 2); // the max placeholder sizeof(INS_OPTS_JIRL) - sizeof(INS_OPTS_J). -/*****************************************************************************/ -/* If the default small encoding is not enough, we start again here. */ -/*****************************************************************************/ + /*****************************************************************************/ + /* If the default small encoding is not enough, we start again here. */ + /*****************************************************************************/ AGAIN: @@ -2819,7 +2819,7 @@ void emitter::emitJumpDistBind() UNATIVE_OFFSET dstOffs; NATIVE_OFFSET jmpDist; // the relative jump distance, as it will be encoded -/* Make sure the jumps are properly ordered */ + /* Make sure the jumps are properly ordered */ #ifdef DEBUG assert(lastSJ == nullptr || lastIG != jmp->idjIG || lastSJ->idjOffs < (jmp->idjOffs + adjSJ)); @@ -2997,8 +2997,8 @@ void emitter::emitJumpDistBind() instruction ins = jmp->idIns(); assert((INS_bceqz <= ins) && (ins <= INS_bl)); - if (ins < - INS_beqz) // bceqz/bcnez/beq/bne/blt/bltu/bge/bgeu < beqz < bnez // See instrsloongarch64.h. + if (ins < INS_beqz) // bceqz/bcnez/beq/bne/blt/bltu/bge/bgeu < beqz < bnez // See + // instrsloongarch64.h. 
{ if ((jmpDist + emitCounts_INS_OPTS_J * 4) < 0x8000000) { @@ -3085,8 +3085,8 @@ void emitter::emitJumpDistBind() instruction ins = jmp->idIns(); assert((INS_bceqz <= ins) && (ins <= INS_bl)); - if (ins < - INS_beqz) // bceqz/bcnez/beq/bne/blt/bltu/bge/bgeu < beqz < bnez // See instrsloongarch64.h. + if (ins < INS_beqz) // bceqz/bcnez/beq/bne/blt/bltu/bge/bgeu < beqz < bnez // See + // instrsloongarch64.h. { if ((jmpDist + emitCounts_INS_OPTS_J * 4) < 0x8000000) { @@ -3181,7 +3181,7 @@ void emitter::emitJumpDistBind() } /***************************************************************************** -* + * * Append the machine code corresponding to the given instruction descriptor * to the code block at '*dp'; the base of the code block is 'bp', and 'ig' * is the instruction group that contains the instruction. Updates '*dp' to diff --git a/src/coreclr/jit/emitloongarch64.h b/src/coreclr/jit/emitloongarch64.h index 11a2f9ee90710..135f9cf400673 100644 --- a/src/coreclr/jit/emitloongarch64.h +++ b/src/coreclr/jit/emitloongarch64.h @@ -104,10 +104,10 @@ enum insDisasmFmt #endif }; -code_t emitGetInsMask(int ins); +code_t emitGetInsMask(int ins); insDisasmFmt emitGetInsFmt(instruction ins); -void emitDispInst(instruction ins); -void emitDisInsName(code_t code, const BYTE* addr, instrDesc* id); +void emitDispInst(instruction ins); +void emitDisInsName(code_t code, const BYTE* addr, instrDesc* id); #endif // DEBUG void emitIns_J_cond_la(instruction ins, BasicBlock* dst, regNumber reg1 = REG_R0, regNumber reg2 = REG_R0); @@ -316,9 +316,9 @@ void emitIns_J_R(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg) void emitIns_R_AR(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, int offs); -void emitIns_R_AI(instruction ins, - emitAttr attr, - regNumber reg, +void emitIns_R_AI(instruction ins, + emitAttr attr, + regNumber reg, ssize_t disp DEBUGARG(size_t targetHandle = 0) DEBUGARG(GenTreeFlags gtFlags = GTF_EMPTY)); enum EmitCallType @@ -343,8 +343,8 @@ enum EmitCallType void emitIns_Call(EmitCallType callType, CORINFO_METHOD_HANDLE methHnd, INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE - void* addr, - ssize_t argSize, + void* addr, + ssize_t argSize, emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize), VARSET_VALARG_TP ptrVars, regMaskTP gcrefRegs, diff --git a/src/coreclr/jit/emitpub.h b/src/coreclr/jit/emitpub.h index c31d21153fd97..bf15ba33667ca 100644 --- a/src/coreclr/jit/emitpub.h +++ b/src/coreclr/jit/emitpub.h @@ -16,24 +16,24 @@ void emitBegFN(bool hasFramePtr , bool checkAlign #endif - ); +); void emitEndFN(); void emitComputeCodeSizes(); -unsigned emitEndCodeGen(Compiler* comp, - bool contTrkPtrLcls, - bool fullyInt, - bool fullPtrMap, - unsigned xcptnsCount, - unsigned* prologSize, - unsigned* epilogSize, - void** codeAddr, - void** codeAddrRW, - void** coldCodeAddr, - void** coldCodeAddrRW, - void** consAddr, +unsigned emitEndCodeGen(Compiler* comp, + bool contTrkPtrLcls, + bool fullyInt, + bool fullPtrMap, + unsigned xcptnsCount, + unsigned* prologSize, + unsigned* epilogSize, + void** codeAddr, + void** codeAddrRW, + void** coldCodeAddr, + void** coldCodeAddrRW, + void** consAddr, void** consAddrRW DEBUGARG(unsigned* instrCount)); /************************************************************************/ @@ -102,11 +102,11 @@ UNATIVE_OFFSET emitDataSize(); /************************************************************************/ #ifdef TARGET_XARCH -static bool instrIs3opImul(instruction ins); 
-static bool instrIsExtendedReg3opImul(instruction ins); -static bool instrHasImplicitRegPairDest(instruction ins); -static void check3opImulValues(); -static regNumber inst3opImulReg(instruction ins); +static bool instrIs3opImul(instruction ins); +static bool instrIsExtendedReg3opImul(instruction ins); +static bool instrHasImplicitRegPairDest(instruction ins); +static void check3opImulValues(); +static regNumber inst3opImulReg(instruction ins); static instruction inst3opImulForReg(regNumber reg); #endif diff --git a/src/coreclr/jit/emitriscv64.cpp b/src/coreclr/jit/emitriscv64.cpp index 1c5be94198f8a..525d5e5274ba7 100644 --- a/src/coreclr/jit/emitriscv64.cpp +++ b/src/coreclr/jit/emitriscv64.cpp @@ -988,9 +988,9 @@ void emitter::emitIns_R_AR(instruction ins, emitAttr attr, regNumber ireg, regNu } // This computes address from the immediate which is relocatable. -void emitter::emitIns_R_AI(instruction ins, - emitAttr attr, - regNumber reg, +void emitter::emitIns_R_AI(instruction ins, + emitAttr attr, + regNumber reg, ssize_t addr DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { assert(EA_IS_RELOC(attr)); // EA_PTR_DSP_RELOC @@ -1290,8 +1290,8 @@ void emitter::emitLoadImmediate(emitAttr size, regNumber reg, ssize_t imm) void emitter::emitIns_Call(EmitCallType callType, CORINFO_METHOD_HANDLE methHnd, INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE - void* addr, - ssize_t argSize, + void* addr, + ssize_t argSize, emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize), VARSET_VALARG_TP ptrVars, regMaskTP gcrefRegs, @@ -1760,9 +1760,9 @@ void emitter::emitJumpDistBind() emitCounts_INS_OPTS_J * (6 << 2); // the max placeholder sizeof(INS_OPTS_JALR) - sizeof(INS_OPTS_J) NATIVE_OFFSET psd = B_DIST_SMALL_MAX_POS - maxPlaceholderSize; -/*****************************************************************************/ -/* If the default small encoding is not enough, we start again here. */ -/*****************************************************************************/ + /*****************************************************************************/ + /* If the default small encoding is not enough, we start again here. 
*/ + /*****************************************************************************/ AGAIN: @@ -1793,7 +1793,7 @@ void emitter::emitJumpDistBind() UNATIVE_OFFSET dstOffs; NATIVE_OFFSET jmpDist; // the relative jump distance, as it will be encoded -/* Make sure the jumps are properly ordered */ + /* Make sure the jumps are properly ordered */ #ifdef DEBUG assert(lastSJ == nullptr || lastIG != jmp->idjIG || lastSJ->idjOffs < (jmp->idjOffs + adjSJ)); @@ -1948,8 +1948,8 @@ void emitter::emitJumpDistBind() instruction ins = jmp->idIns(); assert((INS_jal <= ins) && (ins <= INS_bgeu)); - if (ins > INS_jalr || - (ins < INS_jalr && ins > INS_j)) // jal < beqz < bnez < jalr < beq/bne/blt/bltu/bge/bgeu + if (ins > INS_jalr || (ins < INS_jalr && ins > INS_j)) // jal < beqz < bnez < jalr < + // beq/bne/blt/bltu/bge/bgeu { if (isValidSimm13(jmpDist + maxPlaceholderSize)) { @@ -2022,8 +2022,8 @@ void emitter::emitJumpDistBind() instruction ins = jmp->idIns(); assert((INS_jal <= ins) && (ins <= INS_bgeu)); - if (ins > INS_jalr || - (ins < INS_jalr && ins > INS_j)) // jal < beqz < bnez < jalr < beq/bne/blt/bltu/bge/bgeu + if (ins > INS_jalr || (ins < INS_jalr && ins > INS_j)) // jal < beqz < bnez < jalr < + // beq/bne/blt/bltu/bge/bgeu { if (isValidSimm13(jmpDist + maxPlaceholderSize)) { @@ -2966,7 +2966,7 @@ BYTE* emitter::emitOutputInstr_OptsRcNoReloc(BYTE* dst, instruction* ins, unsign const regNumber rsvdReg = codeGen->rsGetRsvdReg(); const instruction lastIns = (*ins == INS_jal) ? (*ins = INS_addi) : *ins; - const ssize_t high = immediate >> 11; + const ssize_t high = immediate >> 11; dst += emitOutput_UTypeInstr(dst, INS_lui, rsvdReg, UpperNBitsOfWordSignExtend<20>(high)); dst += emitOutput_ITypeInstr(dst, INS_addi, rsvdReg, rsvdReg, LowerNBitsOfWord<12>(high)); diff --git a/src/coreclr/jit/emitriscv64.h b/src/coreclr/jit/emitriscv64.h index aef61a029cb57..07e603a70afb7 100644 --- a/src/coreclr/jit/emitriscv64.h +++ b/src/coreclr/jit/emitriscv64.h @@ -82,17 +82,17 @@ void emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataReg, GenTr unsigned emitOutput_Instr(BYTE* dst, code_t code) const; ssize_t emitOutputInstrJumpDistance(const BYTE* src, const insGroup* ig, instrDescJmp* jmp); -void emitOutputInstrJumpDistanceHelper(const insGroup* ig, - instrDescJmp* jmp, - UNATIVE_OFFSET& dstOffs, - const BYTE*& dstAddr) const; +void emitOutputInstrJumpDistanceHelper(const insGroup* ig, + instrDescJmp* jmp, + UNATIVE_OFFSET& dstOffs, + const BYTE*& dstAddr) const; // Method to do check if mov is redundant with respect to the last instruction. // If yes, the caller of this method can choose to omit current mov instruction. static bool IsMovInstruction(instruction ins); -bool IsRedundantMov(instruction ins, emitAttr size, regNumber dst, regNumber src, bool canSkip); -bool IsRedundantLdStr( - instruction ins, regNumber reg1, regNumber reg2, ssize_t imm, emitAttr size, insFormat fmt); // New functions end. +bool IsRedundantMov(instruction ins, emitAttr size, regNumber dst, regNumber src, bool canSkip); +bool IsRedundantLdStr( + instruction ins, regNumber reg1, regNumber reg2, ssize_t imm, emitAttr size, insFormat fmt); // New functions end. 
static code_t insEncodeRTypeInstr( unsigned opcode, unsigned rd, unsigned funct3, unsigned rs1, unsigned rs2, unsigned funct7); @@ -293,9 +293,9 @@ void emitIns_J_R(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg) void emitIns_R_AR(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, int offs); -void emitIns_R_AI(instruction ins, - emitAttr attr, - regNumber reg, +void emitIns_R_AI(instruction ins, + emitAttr attr, + regNumber reg, ssize_t disp DEBUGARG(size_t targetHandle = 0) DEBUGARG(GenTreeFlags gtFlags = GTF_EMPTY)); enum EmitCallType @@ -324,8 +324,8 @@ enum EmitCallType void emitIns_Call(EmitCallType callType, CORINFO_METHOD_HANDLE methHnd, INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE - void* addr, - ssize_t argSize, + void* addr, + ssize_t argSize, emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize), VARSET_VALARG_TP ptrVars, regMaskTP gcrefRegs, diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp index 1bafb6796d807..e356ab8b3d113 100644 --- a/src/coreclr/jit/emitxarch.cpp +++ b/src/coreclr/jit/emitxarch.cpp @@ -1287,10 +1287,10 @@ bool emitter::TakesEvexPrefix(const instrDesc* id) const #define DEFAULT_BYTE_EVEX_PREFIX 0x62F07C0800000000ULL #define DEFAULT_BYTE_EVEX_PREFIX_MASK 0xFFFFFFFF00000000ULL -#define BBIT_IN_BYTE_EVEX_PREFIX 0x0000001000000000ULL -#define LBIT_IN_BYTE_EVEX_PREFIX 0x0000002000000000ULL +#define BBIT_IN_BYTE_EVEX_PREFIX 0x0000001000000000ULL +#define LBIT_IN_BYTE_EVEX_PREFIX 0x0000002000000000ULL #define LPRIMEBIT_IN_BYTE_EVEX_PREFIX 0x0000004000000000ULL -#define ZBIT_IN_BYTE_EVEX_PREFIX 0x0000008000000000ULL +#define ZBIT_IN_BYTE_EVEX_PREFIX 0x0000008000000000ULL //------------------------------------------------------------------------ // AddEvexPrefix: Add default EVEX prefix with only LL' bits set. @@ -1460,9 +1460,9 @@ bool emitter::TakesVexPrefix(instruction ins) const // 01 - 66 (66 0F - packed double) // 10 - F3 (F3 0F - scalar float // 11 - F2 (F2 0F - scalar double) -#define DEFAULT_3BYTE_VEX_PREFIX 0xC4E07800000000ULL +#define DEFAULT_3BYTE_VEX_PREFIX 0xC4E07800000000ULL #define DEFAULT_3BYTE_VEX_PREFIX_MASK 0xFFFFFF00000000ULL -#define LBIT_IN_3BYTE_VEX_PREFIX 0x00000400000000ULL +#define LBIT_IN_3BYTE_VEX_PREFIX 0x00000400000000ULL emitter::code_t emitter::AddVexPrefix(instruction ins, code_t code, emitAttr attr) { // The 2-byte VEX encoding is preferred when possible, but actually emitting @@ -3597,7 +3597,7 @@ bool emitter::emitVerifyEncodable(instruction ins, emitAttr size, regNumber reg1 #ifdef FEATURE_HW_INTRINSICS && (ins != INS_crc32) #endif - ) + ) { // reg1 must be a byte-able register if ((genRegMask(reg1) & RBM_BYTE_REGS) == 0) @@ -4108,7 +4108,8 @@ UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, code_t code) assert((attrSize == EA_4BYTE) || (attrSize == EA_PTRSIZE) // Only for x64 || (attrSize == EA_16BYTE) || (attrSize == EA_32BYTE) || (attrSize == EA_64BYTE) // only for x64 - || (ins == INS_movzx) || (ins == INS_movsx) || (ins == INS_cmpxchg) + || (ins == INS_movzx) || (ins == INS_movsx) || + (ins == INS_cmpxchg) // The prefetch instructions are always 3 bytes and have part of their modr/m byte hardcoded || isPrefetch(ins)); @@ -4489,9 +4490,9 @@ emitter::instrDesc* emitter::emitNewInstrAmdCns(emitAttr size, ssize_t dsp, int } /***************************************************************************** -* -* Add a data16 instruction of the 1 byte. -*/ + * + * Add a data16 instruction of the 1 byte. 
+ */ void emitter::emitIns_Data16() { @@ -4539,7 +4540,8 @@ void emitter::emitIns(instruction ins) (ins == INS_cdq || ins == INS_int3 || ins == INS_lock || ins == INS_leave || ins == INS_movsb || ins == INS_movsd || ins == INS_movsp || ins == INS_nop || ins == INS_r_movsb || ins == INS_r_movsd || ins == INS_r_movsp || ins == INS_r_stosb || ins == INS_r_stosd || ins == INS_r_stosp || ins == INS_ret || - ins == INS_sahf || ins == INS_stosb || ins == INS_stosd || ins == INS_stosp + ins == INS_sahf || ins == INS_stosb || ins == INS_stosd || + ins == INS_stosp // These instructions take zero operands || ins == INS_vzeroupper || ins == INS_lfence || ins == INS_mfence || ins == INS_sfence || ins == INS_pause || ins == INS_serialize); @@ -6969,9 +6971,9 @@ void emitter::emitIns_R_R_C(instruction ins, } /***************************************************************************** -* -* Add an instruction with three register operands. -*/ + * + * Add an instruction with three register operands. + */ void emitter::emitIns_R_R_R( instruction ins, emitAttr attr, regNumber targetReg, regNumber reg1, regNumber reg2, insOpts instOptions) @@ -7102,16 +7104,16 @@ void emitter::emitIns_R_R_C_I( } /********************************************************************************** -* emitIns_R_R_R_I: Add an instruction with three register operands and an immediate. -* -* Arguments: -* ins - the instruction to add -* attr - the emitter attribute for instruction -* targetReg - the target (destination) register -* reg1 - the first source register -* reg2 - the second source register -* ival - the immediate value -*/ + * emitIns_R_R_R_I: Add an instruction with three register operands and an immediate. + * + * Arguments: + * ins - the instruction to add + * attr - the emitter attribute for instruction + * targetReg - the target (destination) register + * reg1 - the first source register + * reg2 - the second source register + * ival - the immediate value + */ void emitter::emitIns_R_R_R_I( instruction ins, emitAttr attr, regNumber targetReg, regNumber reg1, regNumber reg2, int ival) @@ -7745,9 +7747,9 @@ void emitter::emitIns_R_AR(instruction ins, emitAttr attr, regNumber reg, regNum emitIns_R_ARX(ins, attr, reg, base, REG_NA, 1, disp); } -void emitter::emitIns_R_AI(instruction ins, - emitAttr attr, - regNumber ireg, +void emitter::emitIns_R_AI(instruction ins, + emitAttr attr, + regNumber ireg, ssize_t disp DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) { assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE) && (ireg != REG_NA)); @@ -9706,7 +9708,7 @@ void emitter::emitIns_Call(EmitCallType callType, if (m_debugInfoSize > 0) { INDEBUG(id->idDebugOnlyInfo()->idCallSig = sigInfo); - id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token + id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token } #ifdef LATE_DISASM @@ -11698,7 +11700,7 @@ void emitter::emitDispIns( #ifdef TARGET_AMD64 || ins == INS_shrx || ins == INS_shlx || ins == INS_sarx #endif - ) + ) { // BMI bextr,bzhi, shrx, shlx and sarx encode the reg2 in VEX.vvvv and reg3 in modRM, // which is different from most of other instructions @@ -12999,9 +13001,9 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) dst += emitOutputWord(dst, code | 0x0500); } #else // TARGET_AMD64 - // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero. 
- // This addr mode should never be used while generating relocatable ngen code nor if - // the addr can be encoded as pc-relative address. + // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero. + // This addr mode should never be used while generating relocatable ngen code nor if + // the addr can be encoded as pc-relative address. noway_assert(!emitComp->opts.compReloc); noway_assert(codeGen->genAddrRelocTypeHint((size_t)dsp) != IMAGE_REL_BASED_REL32); noway_assert((int)dsp == dsp); @@ -13925,7 +13927,7 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) case IF_SRW_CNS: case IF_SRW_RRD: case IF_SRW_RRW: - // += -= of a byref, no change + // += -= of a byref, no change case IF_SRW: break; @@ -16437,9 +16439,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) break; } - /********************************************************************/ - /* Simple constant, local label, method */ - /********************************************************************/ + /********************************************************************/ + /* Simple constant, local label, method */ + /********************************************************************/ case IF_CNS: { @@ -16557,9 +16559,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) #ifdef TARGET_X86 dst += emitOutputWord(dst, code | 0x0500); #else // TARGET_AMD64 - // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero. - // This addr mode should never be used while generating relocatable ngen code nor if - // the addr can be encoded as pc-relative address. + // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero. + // This addr mode should never be used while generating relocatable ngen code nor if + // the addr can be encoded as pc-relative address. 
noway_assert(!emitComp->opts.compReloc); noway_assert(codeGen->genAddrRelocTypeHint((size_t)addr) != IMAGE_REL_BASED_REL32); noway_assert(static_cast(reinterpret_cast(addr)) == (ssize_t)addr); @@ -16712,9 +16714,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) break; } - /********************************************************************/ - /* One register operand */ - /********************************************************************/ + /********************************************************************/ + /* One register operand */ + /********************************************************************/ case IF_RRD: case IF_RWR: @@ -16725,9 +16727,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) break; } - /********************************************************************/ - /* Register and register/constant */ - /********************************************************************/ + /********************************************************************/ + /* Register and register/constant */ + /********************************************************************/ case IF_RRW_SHF: { @@ -16952,9 +16954,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) break; } - /********************************************************************/ - /* Address mode operand */ - /********************************************************************/ + /********************************************************************/ + /* Address mode operand */ + /********************************************************************/ case IF_ARD: case IF_AWR: @@ -17191,9 +17193,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) break; } - /********************************************************************/ - /* Stack-based operand */ - /********************************************************************/ + /********************************************************************/ + /* Stack-based operand */ + /********************************************************************/ case IF_SRD: case IF_SWR: @@ -17455,9 +17457,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) unreached(); } - /********************************************************************/ - /* Direct memory address */ - /********************************************************************/ + /********************************************************************/ + /* Direct memory address */ + /********************************************************************/ case IF_MRD: case IF_MRW: @@ -17757,9 +17759,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) unreached(); } - /********************************************************************/ - /* oops */ - /********************************************************************/ + /********************************************************************/ + /* oops */ + /********************************************************************/ default: @@ -18224,7 +18226,7 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins #ifdef TARGET_AMD64 || ins == INS_movsxd #endif - ) + ) { result.insLatency += PERFSCORE_LATENCY_2C; } diff --git a/src/coreclr/jit/emitxarch.h b/src/coreclr/jit/emitxarch.h index 4554a892201f9..e32cab66254fe 100644 --- a/src/coreclr/jit/emitxarch.h +++ b/src/coreclr/jit/emitxarch.h @@ -93,7 +93,7 @@ code_t emitExtractEvexPrefix(instruction ins, code_t& code) const; unsigned 
insEncodeReg012(const instrDesc* id, regNumber reg, emitAttr size, code_t* code); unsigned insEncodeReg345(const instrDesc* id, regNumber reg, emitAttr size, code_t* code); -code_t insEncodeReg3456(const instrDesc* id, regNumber reg, emitAttr size, code_t code); +code_t insEncodeReg3456(const instrDesc* id, regNumber reg, emitAttr size, code_t code); unsigned insEncodeRegSIB(const instrDesc* id, regNumber reg, code_t* code); code_t insEncodeMRreg(const instrDesc* id, code_t code); @@ -116,11 +116,11 @@ static bool IsKInstruction(instruction ins); static regNumber getBmiRegNumber(instruction ins); static regNumber getSseShiftRegNumber(instruction ins); -bool HasVexEncoding(instruction ins) const; -bool HasEvexEncoding(instruction ins) const; -bool IsVexEncodableInstruction(instruction ins) const; -bool IsEvexEncodableInstruction(instruction ins) const; -bool IsVexOrEvexEncodableInstruction(instruction ins) const; +bool HasVexEncoding(instruction ins) const; +bool HasEvexEncoding(instruction ins) const; +bool IsVexEncodableInstruction(instruction ins) const; +bool IsEvexEncodableInstruction(instruction ins) const; +bool IsVexOrEvexEncodableInstruction(instruction ins) const; code_t insEncodeMIreg(const instrDesc* id, regNumber reg, emitAttr size, code_t code); @@ -130,15 +130,15 @@ code_t AddRexXPrefix(const instrDesc* id, code_t code); code_t AddRexBPrefix(const instrDesc* id, code_t code); code_t AddRexPrefix(instruction ins, code_t code); -bool EncodedBySSE38orSSE3A(instruction ins) const; -bool Is4ByteSSEInstruction(instruction ins) const; +bool EncodedBySSE38orSSE3A(instruction ins) const; +bool Is4ByteSSEInstruction(instruction ins) const; code_t AddEvexVPrimePrefix(code_t code); code_t AddEvexRPrimePrefix(code_t code); static bool IsMovInstruction(instruction ins); -bool HasSideEffect(instruction ins, emitAttr size); -bool IsRedundantMov( - instruction ins, insFormat fmt, emitAttr size, regNumber dst, regNumber src, bool canIgnoreSideEffects); +bool HasSideEffect(instruction ins, emitAttr size); +bool IsRedundantMov( + instruction ins, insFormat fmt, emitAttr size, regNumber dst, regNumber src, bool canIgnoreSideEffects); bool EmitMovsxAsCwde(instruction ins, emitAttr size, regNumber dst, regNumber src); bool IsRedundantStackMov(instruction ins, insFormat fmt, emitAttr size, regNumber ireg, int varx, int offs); @@ -478,15 +478,15 @@ void SetContainsCallNeedingVzeroupper(bool value) containsCallNeedingVzeroupper = value; } -bool IsDstDstSrcAVXInstruction(instruction ins) const; -bool IsDstSrcSrcAVXInstruction(instruction ins) const; -bool IsThreeOperandAVXInstruction(instruction ins) const; +bool IsDstDstSrcAVXInstruction(instruction ins) const; +bool IsDstSrcSrcAVXInstruction(instruction ins) const; +bool IsThreeOperandAVXInstruction(instruction ins) const; static bool HasRegularWideForm(instruction ins); static bool HasRegularWideImmediateForm(instruction ins); static bool DoesWriteZeroFlag(instruction ins); static bool DoesWriteSignFlag(instruction ins); static bool DoesResetOverflowAndCarryFlags(instruction ins); -bool IsFlagsAlwaysModified(instrDesc* id); +bool IsFlagsAlwaysModified(instrDesc* id); static bool IsRexW0Instruction(instruction ins); static bool IsRexW1Instruction(instruction ins); static bool IsRexWXInstruction(instruction ins); @@ -528,7 +528,7 @@ const char* emitZMMregName(unsigned reg) const; /************************************************************************/ private: -void emitSetAmdDisp(instrDescAmd* id, ssize_t dsp); +void emitSetAmdDisp(instrDescAmd* 
id, ssize_t dsp); instrDesc* emitNewInstrAmd(emitAttr attr, ssize_t dsp); instrDesc* emitNewInstrAmdCns(emitAttr attr, ssize_t dsp, int cns); @@ -545,9 +545,9 @@ instrDesc* emitNewInstrCallInd(int argCnt, regMaskTP byrefRegs, emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize)); -void emitGetInsCns(const instrDesc* id, CnsVal* cv) const; +void emitGetInsCns(const instrDesc* id, CnsVal* cv) const; ssize_t emitGetInsAmdCns(const instrDesc* id, CnsVal* cv) const; -void emitGetInsDcmCns(const instrDesc* id, CnsVal* cv) const; +void emitGetInsDcmCns(const instrDesc* id, CnsVal* cv) const; ssize_t emitGetInsAmdAny(const instrDesc* id) const; /************************************************************************/ @@ -580,10 +580,10 @@ size_t emitSizeOfInsDsc_NONE(instrDesc* id) const; size_t emitSizeOfInsDsc_SPEC(instrDesc* id) const; /***************************************************************************** -* -* Convert between an index scale in bytes to a smaller encoding used for -* storage in instruction descriptors. -*/ + * + * Convert between an index scale in bytes to a smaller encoding used for + * storage in instruction descriptors. + */ inline emitter::opSize emitEncodeScale(size_t scale) { @@ -752,9 +752,9 @@ void emitIns_I_AI(instruction ins, emitAttr attr, int val, ssize_t disp); void emitIns_R_AR(instruction ins, emitAttr attr, regNumber reg, regNumber base, int disp); -void emitIns_R_AI(instruction ins, - emitAttr attr, - regNumber ireg, +void emitIns_R_AI(instruction ins, + emitAttr attr, + regNumber ireg, ssize_t disp DEBUGARG(size_t targetHandle = 0) DEBUGARG(GenTreeFlags gtFlags = GTF_EMPTY)); void emitIns_AR_R(instruction ins, emitAttr attr, regNumber reg, regNumber base, cnsval_ssize_t disp); diff --git a/src/coreclr/jit/error.cpp b/src/coreclr/jit/error.cpp index a45ad7c7df0ef..5ae6cea056efe 100644 --- a/src/coreclr/jit/error.cpp +++ b/src/coreclr/jit/error.cpp @@ -250,7 +250,9 @@ void debugError(const char* msg, const char* file, unsigned line) } /*****************************************************************************/ -LogEnv::LogEnv(ICorJitInfo* aCompHnd) : compHnd(aCompHnd), compiler(nullptr) +LogEnv::LogEnv(ICorJitInfo* aCompHnd) + : compHnd(aCompHnd) + , compiler(nullptr) { } diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 5b4fcd33f8e21..a650ae437fccc 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -899,7 +899,10 @@ BasicBlock* Compiler::fgLookupBB(unsigned addr) class FgStack { public: - FgStack() : slot0(SLOT_INVALID), slot1(SLOT_INVALID), depth(0) + FgStack() + : slot0(SLOT_INVALID) + , slot1(SLOT_INVALID) + , depth(0) { // Empty } @@ -3122,7 +3125,7 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F codeAddr += sizeof(__int8); goto DECODE_OPCODE; - /* Check to see if we have a jump/return opcode */ + /* Check to see if we have a jump/return opcode */ case CEE_BRFALSE: case CEE_BRFALSE_S: @@ -3305,7 +3308,7 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F // statement in the block. // Otherwise, we will assert at the following line in fgMorphCall() // noway_assert(fgMorphStmt->GetNextStmt() == NULL); - ) + ) { // Neither .tailcall prefix, no tailcall stress. So move on. break; @@ -4030,11 +4033,11 @@ void Compiler::fgFindBasicBlocks() #endif } -/* Init ebdHandlerNestingLevel of current clause, and bump up value for all - * enclosed clauses (which have to be before it in the table). 
- * Innermost try-finally blocks must precede outermost - * try-finally blocks. - */ + /* Init ebdHandlerNestingLevel of current clause, and bump up value for all + * enclosed clauses (which have to be before it in the table). + * Innermost try-finally blocks must precede outermost + * try-finally blocks. + */ #if !defined(FEATURE_EH_FUNCLETS) HBtab->ebdHandlerNestingLevel = 0; @@ -5917,8 +5920,8 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r } else { - assert(fgFirstFuncletBB != - insertAfterBlk->Next()); // We insert at the end, not at the beginning, of the funclet region. + assert(fgFirstFuncletBB != insertAfterBlk->Next()); // We insert at the end, not at the beginning, of the + // funclet region. } #ifdef DEBUG @@ -6244,8 +6247,8 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, noway_assert(startBlk != nullptr); noway_assert(startBlk != endBlk); noway_assert((regionIndex == 0 && putInTryRegion) || // Search in the main method - (putInTryRegion && regionIndex > 0 && - startBlk->bbTryIndex == regionIndex) || // Search in the specified try region + (putInTryRegion && regionIndex > 0 && startBlk->bbTryIndex == regionIndex) || // Search in the + // specified try region (!putInTryRegion && regionIndex > 0 && startBlk->bbHndIndex == regionIndex)); // Search in the specified handler region diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index f49f863b16dc5..e2af55f45ac82 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -389,7 +389,7 @@ const char* ConvertToUtf8(LPCWSTR wideString, CompAllocator& allocator) return alloc; } -} +} // namespace #endif //------------------------------------------------------------------------ @@ -546,7 +546,7 @@ FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePositi ONE_FILE_PER_METHOD:; -#define FILENAME_PATTERN "%s-%s-%s-%s.%s" +#define FILENAME_PATTERN "%s-%s-%s-%s.%s" #define FILENAME_PATTERN_WITH_NUMBER "%s-%s-%s-%s~%d.%s" const size_t MaxFileNameLength = MAX_PATH_FNAME - 20 /* give us some extra buffer */; @@ -1249,7 +1249,10 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) public: RegionGraph(Compiler* comp, unsigned* blkMap, unsigned blkMapSize) - : m_comp(comp), m_rgnRoot(nullptr), m_blkMap(blkMap), m_blkMapSize(blkMapSize) + : m_comp(comp) + , m_rgnRoot(nullptr) + , m_blkMap(blkMap) + , m_blkMapSize(blkMapSize) { // Create a root region that encompasses the whole function. m_rgnRoot = @@ -2642,7 +2645,8 @@ void Compiler::fgStress64RsltMul() class BBPredsChecker { public: - BBPredsChecker(Compiler* compiler) : comp(compiler) + BBPredsChecker(Compiler* compiler) + : comp(compiler) { } @@ -3240,7 +3244,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef #ifndef JIT32_GCENCODER copiedForGenericsCtxt = ((info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0); #else // JIT32_GCENCODER - copiedForGenericsCtxt = false; + copiedForGenericsCtxt = false; #endif // JIT32_GCENCODER // This if only in support of the noway_asserts it contains. 
@@ -3284,7 +3288,8 @@ void Compiler::fgDebugCheckTypes(GenTree* tree) DoPostOrder = true, }; - NodeTypeValidator(Compiler* comp) : GenTreeVisitor(comp) + NodeTypeValidator(Compiler* comp) + : GenTreeVisitor(comp) { } @@ -3733,7 +3738,9 @@ void Compiler::fgDebugCheckLinkedLocals() UseExecutionOrder = true, }; - DebugLocalSequencer(Compiler* comp) : GenTreeVisitor(comp), m_locals(comp->getAllocator(CMK_DebugOnly)) + DebugLocalSequencer(Compiler* comp) + : GenTreeVisitor(comp) + , m_locals(comp->getAllocator(CMK_DebugOnly)) { } @@ -4014,7 +4021,9 @@ class UniquenessCheckWalker { public: UniquenessCheckWalker(Compiler* comp) - : comp(comp), nodesVecTraits(comp->compGenTreeID, comp), uniqueNodes(BitVecOps::MakeEmpty(&nodesVecTraits)) + : comp(comp) + , nodesVecTraits(comp->compGenTreeID, comp) + , uniqueNodes(BitVecOps::MakeEmpty(&nodesVecTraits)) { } @@ -4132,11 +4141,15 @@ class SsaCheckVisitor : public GenTreeVisitor unsigned m_ssaNum; public: - SsaKey() : m_lclNum(BAD_VAR_NUM), m_ssaNum(SsaConfig::RESERVED_SSA_NUM) + SsaKey() + : m_lclNum(BAD_VAR_NUM) + , m_ssaNum(SsaConfig::RESERVED_SSA_NUM) { } - SsaKey(unsigned lclNum, unsigned ssaNum) : m_lclNum(lclNum), m_ssaNum(ssaNum) + SsaKey(unsigned lclNum, unsigned ssaNum) + : m_lclNum(lclNum) + , m_ssaNum(ssaNum) { } @@ -4773,13 +4786,15 @@ void Compiler::fgDebugCheckFlowGraphAnnotations() return; } - unsigned count = - fgRunDfs([](BasicBlock* block, unsigned preorderNum) { assert(block->bbPreorderNum == preorderNum); }, - [=](BasicBlock* block, unsigned postorderNum) { - assert(block->bbPostorderNum == postorderNum); - assert(m_dfsTree->GetPostOrder(postorderNum) == block); - }, - [](BasicBlock* block, BasicBlock* succ) {}); + unsigned count = fgRunDfs( + [](BasicBlock* block, unsigned preorderNum) { + assert(block->bbPreorderNum == preorderNum); + }, + [=](BasicBlock* block, unsigned postorderNum) { + assert(block->bbPostorderNum == postorderNum); + assert(m_dfsTree->GetPostOrder(postorderNum) == block); + }, + [](BasicBlock* block, BasicBlock* succ) {}); assert(m_dfsTree->GetPostOrderCount() == count); diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index a437a3da128d4..0e1ce24c39ed8 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -1867,11 +1867,15 @@ PhaseStatus Compiler::fgTailMergeThrows() BasicBlock* m_block; GenTreeCall* m_call; - ThrowHelper() : m_block(nullptr), m_call(nullptr) + ThrowHelper() + : m_block(nullptr) + , m_call(nullptr) { } - ThrowHelper(BasicBlock* block, GenTreeCall* call) : m_block(block), m_call(call) + ThrowHelper(BasicBlock* block, GenTreeCall* call) + : m_block(block) + , m_call(call) { } diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index 8ea17f2bff05a..ba5ed96610dd3 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -214,7 +214,8 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitor( [](Param* pParam) { - // Init the local var info of the inlinee - pParam->pThis->impInlineInitVars(pParam->inlineInfo); + // Init the local var info of the inlinee + pParam->pThis->impInlineInitVars(pParam->inlineInfo); - if (pParam->inlineInfo->inlineResult->IsCandidate()) - { - /* Clear the temp table */ - memset(pParam->inlineInfo->lclTmpNum, -1, sizeof(pParam->inlineInfo->lclTmpNum)); + if (pParam->inlineInfo->inlineResult->IsCandidate()) + { + /* Clear the temp table */ + memset(pParam->inlineInfo->lclTmpNum, -1, sizeof(pParam->inlineInfo->lclTmpNum)); - // - // Prepare the call to jitNativeCode - 
// + // + // Prepare the call to jitNativeCode + // - pParam->inlineInfo->InlinerCompiler = pParam->pThis; - if (pParam->pThis->impInlineInfo == nullptr) - { - pParam->inlineInfo->InlineRoot = pParam->pThis; - } - else - { - pParam->inlineInfo->InlineRoot = pParam->pThis->impInlineInfo->InlineRoot; - } + pParam->inlineInfo->InlinerCompiler = pParam->pThis; + if (pParam->pThis->impInlineInfo == nullptr) + { + pParam->inlineInfo->InlineRoot = pParam->pThis; + } + else + { + pParam->inlineInfo->InlineRoot = pParam->pThis->impInlineInfo->InlineRoot; + } - // The inline context is part of debug info and must be created - // before we start creating statements; we lazily create it as - // late as possible, which is here. - pParam->inlineInfo->inlineContext = - pParam->inlineInfo->InlineRoot->m_inlineStrategy - ->NewContext(pParam->inlineInfo->inlineCandidateInfo->inlinersContext, - pParam->inlineInfo->iciStmt, pParam->inlineInfo->iciCall); - pParam->inlineInfo->argCnt = pParam->inlineCandidateInfo->methInfo.args.totalILArgs(); - pParam->inlineInfo->tokenLookupContextHandle = pParam->inlineCandidateInfo->exactContextHnd; - - JITLOG_THIS(pParam->pThis, - (LL_INFO100000, "INLINER: inlineInfo.tokenLookupContextHandle for %s set to 0x%p:\n", - pParam->pThis->eeGetMethodFullName(pParam->fncHandle), - pParam->pThis->dspPtr(pParam->inlineInfo->tokenLookupContextHandle))); - - JitFlags compileFlagsForInlinee = *pParam->pThis->opts.jitFlags; - - // The following flags are lost when inlining. - // (This is checked in Compiler::compInitOptions().) - compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_BBINSTR); - compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_BBINSTR_IF_LOOPS); - compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_PROF_ENTERLEAVE); - compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_DEBUG_EnC); - compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_REVERSE_PINVOKE); - compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_TRACK_TRANSITIONS); + // The inline context is part of debug info and must be created + // before we start creating statements; we lazily create it as + // late as possible, which is here. + pParam->inlineInfo->inlineContext = + pParam->inlineInfo->InlineRoot->m_inlineStrategy + ->NewContext(pParam->inlineInfo->inlineCandidateInfo->inlinersContext, pParam->inlineInfo->iciStmt, + pParam->inlineInfo->iciCall); + pParam->inlineInfo->argCnt = pParam->inlineCandidateInfo->methInfo.args.totalILArgs(); + pParam->inlineInfo->tokenLookupContextHandle = pParam->inlineCandidateInfo->exactContextHnd; + + JITLOG_THIS(pParam->pThis, + (LL_INFO100000, "INLINER: inlineInfo.tokenLookupContextHandle for %s set to 0x%p:\n", + pParam->pThis->eeGetMethodFullName(pParam->fncHandle), + pParam->pThis->dspPtr(pParam->inlineInfo->tokenLookupContextHandle))); + + JitFlags compileFlagsForInlinee = *pParam->pThis->opts.jitFlags; + + // The following flags are lost when inlining. + // (This is checked in Compiler::compInitOptions().) 
+ compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_BBINSTR); + compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_BBINSTR_IF_LOOPS); + compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_PROF_ENTERLEAVE); + compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_DEBUG_EnC); + compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_REVERSE_PINVOKE); + compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_TRACK_TRANSITIONS); #ifdef DEBUG - if (pParam->pThis->verbose) - { - printf("\nInvoking compiler for the inlinee method %s :\n", - pParam->pThis->eeGetMethodFullName(pParam->fncHandle)); - } + if (pParam->pThis->verbose) + { + printf("\nInvoking compiler for the inlinee method %s :\n", + pParam->pThis->eeGetMethodFullName(pParam->fncHandle)); + } #endif // DEBUG - int result = - jitNativeCode(pParam->fncHandle, pParam->inlineCandidateInfo->methInfo.scope, - pParam->pThis->info.compCompHnd, &pParam->inlineCandidateInfo->methInfo, - (void**)pParam->inlineInfo, nullptr, &compileFlagsForInlinee, pParam->inlineInfo); + int result = + jitNativeCode(pParam->fncHandle, pParam->inlineCandidateInfo->methInfo.scope, + pParam->pThis->info.compCompHnd, &pParam->inlineCandidateInfo->methInfo, + (void**)pParam->inlineInfo, nullptr, &compileFlagsForInlinee, pParam->inlineInfo); - if (result != CORJIT_OK) - { - // If we haven't yet determined why this inline fails, use - // a catch-all something bad happened observation. - InlineResult* innerInlineResult = pParam->inlineInfo->inlineResult; + if (result != CORJIT_OK) + { + // If we haven't yet determined why this inline fails, use + // a catch-all something bad happened observation. + InlineResult* innerInlineResult = pParam->inlineInfo->inlineResult; - if (!innerInlineResult->IsFailure()) - { - innerInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_FAILURE); - } + if (!innerInlineResult->IsFailure()) + { + innerInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_FAILURE); } } - }, + } + }, ¶m); if (!success) { diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 2df8d2feeda45..9a98ea7f619cd 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -731,7 +731,6 @@ PhaseStatus Compiler::fgPostImportationCleanup() // auto addConditionalFlow = [this, entryStateVar, &entryJumpTarget, &addedBlocks](BasicBlock* fromBlock, BasicBlock* toBlock) { - // We may have previously though this try entry was unreachable, but now we're going to // step through it on the way to the OSR entry. So ensure it has plausible profile weight. 
// @@ -2600,7 +2599,7 @@ void Compiler::fgRemoveConditionalJump(BasicBlock* block) assert(block->TargetIs(target)); /* Update bbRefs and bbNum - Conditional predecessors to the same - * block are counted twice so we have to remove one of them */ + * block are counted twice so we have to remove one of them */ noway_assert(target->countOfInEdges() > 1); fgRemoveRefPred(block->GetTargetEdge()); @@ -3969,8 +3968,8 @@ bool Compiler::fgReorderBlocks(bool useProfile) bNext = bEnd->Next(); bool connected_bDest = false; - if ((backwardBranch && !isRare) || - block->HasFlag(BBF_DONT_REMOVE)) // Don't choose option #1 when block is the start of a try region + if ((backwardBranch && !isRare) || block->HasFlag(BBF_DONT_REMOVE)) // Don't choose option #1 when block is the + // start of a try region { bStart = nullptr; bEnd = nullptr; @@ -4779,11 +4778,11 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPh continue; } - /* We jump to the REPEAT label if we performed a change involving the current block - * This is in case there are other optimizations that can show up - * (e.g. - compact 3 blocks in a row) - * If nothing happens, we then finish the iteration and move to the next block - */ + /* We jump to the REPEAT label if we performed a change involving the current block + * This is in case there are other optimizations that can show up + * (e.g. - compact 3 blocks in a row) + * If nothing happens, we then finish the iteration and move to the next block + */ REPEAT:; @@ -5364,12 +5363,13 @@ unsigned Compiler::fgMeasureIR() { for (Statement* const stmt : block->Statements()) { - fgWalkTreePre(stmt->GetRootNodePointer(), - [](GenTree** slot, fgWalkData* data) -> Compiler::fgWalkResult { - (*reinterpret_cast(data->pCallbackData))++; - return Compiler::WALK_CONTINUE; - }, - &nodeCount); + fgWalkTreePre( + stmt->GetRootNodePointer(), + [](GenTree** slot, fgWalkData* data) -> Compiler::fgWalkResult { + (*reinterpret_cast(data->pCallbackData))++; + return Compiler::WALK_CONTINUE; + }, + &nodeCount); } } else @@ -5444,7 +5444,9 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) struct PredInfo { - PredInfo(BasicBlock* block, Statement* stmt) : m_block(block), m_stmt(stmt) + PredInfo(BasicBlock* block, Statement* stmt) + : m_block(block) + , m_stmt(stmt) { } BasicBlock* m_block; @@ -5750,7 +5752,6 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) }; auto iterateTailMerge = [&](BasicBlock* block) -> void { - int numOpts = 0; while (tailMerge(block)) @@ -5968,7 +5969,8 @@ bool Compiler::gtTreeContainsTailCall(GenTree* tree) DoPreOrder = true }; - HasTailCallCandidateVisitor(Compiler* comp) : GenTreeVisitor(comp) + HasTailCallCandidateVisitor(Compiler* comp) + : GenTreeVisitor(comp) { } diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index d53abf356150c..9fa4e7273862a 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -309,7 +309,11 @@ class Instrumentor bool m_modifiedFlow; protected: - Instrumentor(Compiler* comp) : m_comp(comp), m_schemaCount(0), m_instrCount(0), m_modifiedFlow(false) + Instrumentor(Compiler* comp) + : m_comp(comp) + , m_schemaCount(0) + , m_instrCount(0) + , m_modifiedFlow(false) { } @@ -360,7 +364,8 @@ class Instrumentor class NonInstrumentor : public Instrumentor { public: - NonInstrumentor(Compiler* comp) : Instrumentor(comp) + NonInstrumentor(Compiler* comp) + : Instrumentor(comp) { } }; @@ -376,7 +381,9 @@ class BlockCountInstrumentor : public Instrumentor BasicBlock* m_entryBlock; public: - 
BlockCountInstrumentor(Compiler* comp) : Instrumentor(comp), m_entryBlock(nullptr) + BlockCountInstrumentor(Compiler* comp) + : Instrumentor(comp) + , m_entryBlock(nullptr) { } bool ShouldProcess(BasicBlock* block) override @@ -566,8 +573,8 @@ void BlockCountInstrumentor::BuildSchemaElements(BasicBlock* block, Schema& sche schemaElem.InstrumentationKind = m_comp->opts.compCollect64BitCounts ? ICorJitInfo::PgoInstrumentationKind::BasicBlockLongCount : ICorJitInfo::PgoInstrumentationKind::BasicBlockIntCount; - schemaElem.ILOffset = offset; - schemaElem.Offset = 0; + schemaElem.ILOffset = offset; + schemaElem.Offset = 0; schema.push_back(schemaElem); @@ -841,9 +848,9 @@ class SpanningTreeVisitor Duplicate }; - virtual void Badcode() = 0; - virtual void VisitBlock(BasicBlock* block) = 0; - virtual void VisitTreeEdge(BasicBlock* source, BasicBlock* target) = 0; + virtual void Badcode() = 0; + virtual void VisitBlock(BasicBlock* block) = 0; + virtual void VisitTreeEdge(BasicBlock* source, BasicBlock* target) = 0; virtual void VisitNonTreeEdge(BasicBlock* source, BasicBlock* target, EdgeKind kind) = 0; }; @@ -1239,7 +1246,9 @@ static int32_t EfficientEdgeCountBlockToKey(BasicBlock* block) // Based on "Optimally Profiling and Tracing Programs," // Ball and Larus PLDI '92. // -class EfficientEdgeCountInstrumentor : public Instrumentor, public SpanningTreeVisitor +class EfficientEdgeCountInstrumentor + : public Instrumentor + , public SpanningTreeVisitor { private: // A particular edge probe. These are linked @@ -1753,8 +1762,8 @@ void EfficientEdgeCountInstrumentor::BuildSchemaElements(BasicBlock* block, Sche schemaElem.InstrumentationKind = m_comp->opts.compCollect64BitCounts ? ICorJitInfo::PgoInstrumentationKind::EdgeLongCount : ICorJitInfo::PgoInstrumentationKind::EdgeIntCount; - schemaElem.ILOffset = sourceKey; - schemaElem.Offset = 0; + schemaElem.ILOffset = sourceKey; + schemaElem.Offset = 0; schema.push_back(schemaElem); @@ -1903,7 +1912,9 @@ class HandleHistogramProbeVisitor final : public GenTreeVisitor(compiler), m_functor(functor), m_compiler(compiler) + : GenTreeVisitor(compiler) + , m_functor(functor) + , m_compiler(compiler) { } Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) @@ -1935,7 +1946,9 @@ class ValueHistogramProbeVisitor final : public GenTreeVisitor(compiler), m_functor(functor), m_compiler(compiler) + : GenTreeVisitor(compiler) + , m_functor(functor) + , m_compiler(compiler) { } @@ -1965,7 +1978,8 @@ class BuildHandleHistogramProbeSchemaGen public: BuildHandleHistogramProbeSchemaGen(Schema& schema, unsigned& schemaCount) - : m_schema(schema), m_schemaCount(schemaCount) + : m_schema(schema) + , m_schemaCount(schemaCount) { } @@ -2003,8 +2017,8 @@ class BuildHandleHistogramProbeSchemaGen schemaElem.InstrumentationKind = compiler->opts.compCollect64BitCounts ? ICorJitInfo::PgoInstrumentationKind::HandleHistogramLongCount : ICorJitInfo::PgoInstrumentationKind::HandleHistogramIntCount; - schemaElem.ILOffset = (int32_t)call->gtHandleHistogramProfileCandidateInfo->ilOffset; - schemaElem.Offset = 0; + schemaElem.ILOffset = (int32_t)call->gtHandleHistogramProfileCandidateInfo->ilOffset; + schemaElem.Offset = 0; m_schema.push_back(schemaElem); @@ -2013,7 +2027,7 @@ class BuildHandleHistogramProbeSchemaGen // Re-using ILOffset and Other fields from schema item for TypeHandleHistogramCount schemaElem.InstrumentationKind = isTypeHistogram ? 
ICorJitInfo::PgoInstrumentationKind::HandleHistogramTypes : ICorJitInfo::PgoInstrumentationKind::HandleHistogramMethods; - schemaElem.Count = ICorJitInfo::HandleHistogram32::SIZE; + schemaElem.Count = ICorJitInfo::HandleHistogram32::SIZE; m_schema.push_back(schemaElem); m_schemaCount++; @@ -2027,7 +2041,8 @@ class BuildValueHistogramProbeSchemaGen public: BuildValueHistogramProbeSchemaGen(Schema& schema, unsigned& schemaCount) - : m_schema(schema), m_schemaCount(schemaCount) + : m_schema(schema) + , m_schemaCount(schemaCount) { } @@ -2036,8 +2051,8 @@ class BuildValueHistogramProbeSchemaGen ICorJitInfo::PgoInstrumentationSchema schemaElem = {}; schemaElem.Count = 1; schemaElem.InstrumentationKind = compiler->opts.compCollect64BitCounts - ? ICorJitInfo::PgoInstrumentationKind::ValueHistogramLongCount - : ICorJitInfo::PgoInstrumentationKind::ValueHistogramIntCount; + ? ICorJitInfo::PgoInstrumentationKind::ValueHistogramLongCount + : ICorJitInfo::PgoInstrumentationKind::ValueHistogramIntCount; schemaElem.ILOffset = (int32_t)call->AsCall()->gtHandleHistogramProfileCandidateInfo->ilOffset; m_schema.push_back(schemaElem); m_schemaCount++; @@ -2332,7 +2347,8 @@ class ValueHistogramProbeInserter class HandleHistogramProbeInstrumentor : public Instrumentor { public: - HandleHistogramProbeInstrumentor(Compiler* comp) : Instrumentor(comp) + HandleHistogramProbeInstrumentor(Compiler* comp) + : Instrumentor(comp) { } bool ShouldProcess(BasicBlock* block) override @@ -2350,7 +2366,8 @@ class HandleHistogramProbeInstrumentor : public Instrumentor class ValueInstrumentor : public Instrumentor { public: - ValueInstrumentor(Compiler* comp) : Instrumentor(comp) + ValueInstrumentor(Compiler* comp) + : Instrumentor(comp) { } bool ShouldProcess(BasicBlock* block) override @@ -2727,7 +2744,7 @@ PhaseStatus Compiler::fgInstrumentMethod() // uint8_t* profileMemory; HRESULT res = info.compCompHnd->allocPgoInstrumentationBySchema(info.compMethodHnd, schema.data(), - (UINT32)schema.size(), &profileMemory); + (UINT32)schema.size(), &profileMemory); // Deal with allocation failures. // @@ -3102,7 +3119,7 @@ class EfficientEdgeCountReconstructor : public SpanningTreeVisitor // Map correlating block keys to blocks. // typedef JitHashTable, BasicBlock*> KeyToBlockMap; - KeyToBlockMap m_keyToBlockMap; + KeyToBlockMap m_keyToBlockMap; // Key for finding an edge based on schema info. // @@ -3111,7 +3128,9 @@ class EfficientEdgeCountReconstructor : public SpanningTreeVisitor int32_t const m_sourceKey; int32_t const m_targetKey; - EdgeKey(int32_t sourceKey, int32_t targetKey) : m_sourceKey(sourceKey), m_targetKey(targetKey) + EdgeKey(int32_t sourceKey, int32_t targetKey) + : m_sourceKey(sourceKey) + , m_targetKey(targetKey) { } @@ -3159,7 +3178,7 @@ class EfficientEdgeCountReconstructor : public SpanningTreeVisitor // Map for correlating EdgeIntCount schema entries with edges // typedef JitHashTable EdgeKeyToEdgeMap; - EdgeKeyToEdgeMap m_edgeKeyToEdgeMap; + EdgeKeyToEdgeMap m_edgeKeyToEdgeMap; // Per block data // @@ -3519,8 +3538,9 @@ void EfficientEdgeCountReconstructor::Solve() // if (m_badcode || m_mismatch || m_allWeightsZero) { - JITDUMP("... not solving because of the %s\n", - m_badcode ? "badcode" : m_allWeightsZero ? "zero counts" : "mismatch"); + JITDUMP("... not solving because of the %s\n", m_badcode ? "badcode" + : m_allWeightsZero ? 
"zero counts" + : "mismatch"); return; } diff --git a/src/coreclr/jit/fgprofilesynthesis.h b/src/coreclr/jit/fgprofilesynthesis.h index 216bd58297286..e2e7c58cbac4f 100644 --- a/src/coreclr/jit/fgprofilesynthesis.h +++ b/src/coreclr/jit/fgprofilesynthesis.h @@ -40,7 +40,8 @@ class ProfileSynthesis static constexpr weight_t epsilon = 0.001; private: - ProfileSynthesis(Compiler* compiler) : m_comp(compiler) + ProfileSynthesis(Compiler* compiler) + : m_comp(compiler) { } diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 691fa5ef349f2..96e0a3e785f1f 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -1063,7 +1063,7 @@ GenTree* Compiler::fgOptimizeDelegateConstructor(GenTreeCall* call, &genericLookup); GenTree* ctxTree = getRuntimeContextTree(pLookup.lookupKind.runtimeLookupKind); call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_DELEGATE_CTOR, TYP_VOID, thisPointer, - targetObjPointers, ctxTree); + targetObjPointers, ctxTree); call->setEntryPoint(genericLookup); } } @@ -1647,8 +1647,8 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) // try/finally, which must be the last EH region. EHblkDsc* ehDsc = ehGetDsc(tryIndex); - assert(ehDsc->ebdEnclosingTryIndex == - EHblkDsc::NO_ENCLOSING_INDEX); // There are no enclosing regions of the BBJ_RETURN block + assert(ehDsc->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX); // There are no enclosing regions of the + // BBJ_RETURN block assert(ehDsc->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX); // Convert the BBJ_RETURN to BBJ_ALWAYS, jumping to genReturnBB. @@ -1823,7 +1823,8 @@ class MergedReturns bool mergingReturns = false; public: - MergedReturns(Compiler* comp) : comp(comp) + MergedReturns(Compiler* comp) + : comp(comp) { comp->fgReturnCount = 0; } @@ -2266,7 +2267,7 @@ class MergedReturns return nullptr; } }; -} +} // namespace //------------------------------------------------------------------------ // fgAddInternal: add blocks and trees to express special method semantics @@ -2325,7 +2326,7 @@ PhaseStatus Compiler::fgAddInternal() #ifndef JIT32_GCENCODER lva0CopiedForGenericsCtxt = ((info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0); #else // JIT32_GCENCODER - lva0CopiedForGenericsCtxt = false; + lva0CopiedForGenericsCtxt = false; #endif // JIT32_GCENCODER noway_assert(lva0CopiedForGenericsCtxt || !lvaTable[info.compThisArg].IsAddressExposed()); noway_assert(!lvaTable[info.compThisArg].lvHasILStoreOp); @@ -3589,7 +3590,9 @@ GenTree* Compiler::fgSetTreeSeq(GenTree* tree, bool isLIR) }; SetTreeSeqVisitor(Compiler* compiler, GenTree* tree, bool isLIR) - : GenTreeVisitor(compiler), m_prevNode(tree), m_isLIR(isLIR) + : GenTreeVisitor(compiler) + , m_prevNode(tree) + , m_isLIR(isLIR) { INDEBUG(tree->gtSeqNum = 0); } @@ -3677,7 +3680,8 @@ PhaseStatus Compiler::fgSetBlockOrder() class GCSafePointSuccessorEnumerator { BasicBlock* m_block; - union { + union + { BasicBlock* m_successors[2]; BasicBlock** m_pSuccessors; }; @@ -3688,7 +3692,8 @@ class GCSafePointSuccessorEnumerator public: // Constructs an enumerator of successors to be used for checking for GC // safe point cycles. - GCSafePointSuccessorEnumerator(Compiler* comp, BasicBlock* block) : m_block(block) + GCSafePointSuccessorEnumerator(Compiler* comp, BasicBlock* block) + : m_block(block) { m_numSuccs = 0; block->VisitRegularSuccs(comp, [this](BasicBlock* succ) { @@ -4203,7 +4208,9 @@ unsigned FlowGraphNaturalLoop::NumLoopBlocks() // dfs - A DFS tree. 
// FlowGraphNaturalLoops::FlowGraphNaturalLoops(const FlowGraphDfsTree* dfsTree) - : m_dfsTree(dfsTree), m_loops(m_dfsTree->GetCompiler()->getAllocator(CMK_Loops)), m_improperLoopHeaders(0) + : m_dfsTree(dfsTree) + , m_loops(m_dfsTree->GetCompiler()->getAllocator(CMK_Loops)) + , m_improperLoopHeaders(0) { } @@ -4838,7 +4845,9 @@ bool FlowGraphNaturalLoop::VisitDefs(TFunc func) DoPreOrder = true, }; - VisitDefsVisitor(Compiler* comp, TFunc& func) : GenTreeVisitor(comp), m_func(func) + VisitDefsVisitor(Compiler* comp, TFunc& func) + : GenTreeVisitor(comp) + , m_func(func) { } @@ -6089,7 +6098,9 @@ FlowGraphDominatorTree* FlowGraphDominatorTree::Build(const FlowGraphDfsTree* df public: NumberDomTreeVisitor(Compiler* comp, unsigned* preorderNums, unsigned* postorderNums) - : DomTreeVisitor(comp), m_preorderNums(preorderNums), m_postorderNums(postorderNums) + : DomTreeVisitor(comp) + , m_preorderNums(preorderNums) + , m_postorderNums(postorderNums) { } diff --git a/src/coreclr/jit/forwardsub.cpp b/src/coreclr/jit/forwardsub.cpp index 8e450d7cbb35e..de4ac5fe8a475 100644 --- a/src/coreclr/jit/forwardsub.cpp +++ b/src/coreclr/jit/forwardsub.cpp @@ -191,7 +191,9 @@ class ForwardSubVisitor final : public GenTreeVisitor UseExecutionOrder = true }; - ForwardSubVisitor(Compiler* compiler, unsigned lclNum) : GenTreeVisitor(compiler), m_lclNum(lclNum) + ForwardSubVisitor(Compiler* compiler, unsigned lclNum) + : GenTreeVisitor(compiler) + , m_lclNum(lclNum) { LclVarDsc* dsc = compiler->lvaGetDesc(m_lclNum); if (dsc->lvIsStructField) @@ -399,7 +401,9 @@ class EffectsVisitor final : public GenTreeVisitor UseExecutionOrder = true }; - EffectsVisitor(Compiler* compiler) : GenTreeVisitor(compiler), m_flags(GTF_EMPTY) + EffectsVisitor(Compiler* compiler) + : GenTreeVisitor(compiler) + , m_flags(GTF_EMPTY) { } diff --git a/src/coreclr/jit/gcencode.cpp b/src/coreclr/jit/gcencode.cpp index d039cb3169379..9d521ebef799c 100644 --- a/src/coreclr/jit/gcencode.cpp +++ b/src/coreclr/jit/gcencode.cpp @@ -433,12 +433,13 @@ static void regenLog(unsigned encoding, InfoHdr* header, InfoHdr* state) EnterCriticalSection(&logFileLock); - fprintf(logFile, "InfoHdr( %2d, %2d, %1d, %1d, %1d," - " %1d, %1d, %1d, %1d, %1d," - " %1d, %1d, %1d, %1d, %1d, %1d," - " %1d, %1d, %1d," - " %1d, %2d, %2d," - " %2d, %2d, %2d, %2d, %2d, %2d), \n", + fprintf(logFile, + "InfoHdr( %2d, %2d, %1d, %1d, %1d," + " %1d, %1d, %1d, %1d, %1d," + " %1d, %1d, %1d, %1d, %1d, %1d," + " %1d, %1d, %1d," + " %1d, %2d, %2d," + " %2d, %2d, %2d, %2d, %2d, %2d), \n", state->prologSize, state->epilogSize, state->epilogCount, state->epilogAtEnd, state->ediSaved, state->esiSaved, state->ebxSaved, state->ebpSaved, state->ebpFrame, state->interruptible, state->doubleAlign, state->security, state->handlers, state->localloc, state->editNcontinue, state->varargs, @@ -1817,7 +1818,7 @@ static int (*zeroFunc)() = zeroFN; */ typedef unsigned pasMaskType; -#define BITS_IN_pasMask (BITS_PER_BYTE * sizeof(pasMaskType)) +#define BITS_IN_pasMask (BITS_PER_BYTE * sizeof(pasMaskType)) #define HIGHEST_pasMask_BIT (((pasMaskType)0x1) << (BITS_IN_pasMask - 1)) //----------------------------------------------------------------------------- @@ -1850,8 +1851,8 @@ class PendingArgsStack // Use these in the case where there actually are more ptrs than pasArgMask unsigned pasEnumGCoffsCount(); #define pasENUM_START ((unsigned)-1) -#define pasENUM_LAST ((unsigned)-2) -#define pasENUM_END ((unsigned)-3) +#define pasENUM_LAST ((unsigned)-2) +#define pasENUM_END ((unsigned)-3) unsigned 
pasEnumGCoffs(unsigned iter, unsigned* offs); protected: @@ -2331,7 +2332,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un unsigned varOffs = compiler->lvaTable[compiler->info.compThisArg].GetStackOffset(); /* For negative stack offsets we must reset the low bits, - * take abs and then set them back */ + * take abs and then set them back */ varOffs = abs(static_cast(varOffs)); varOffs |= this_OFFSET_FLAG; @@ -3285,7 +3286,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un assert(regMask || argMask || callArgCnt || pasStk.pasCurDepth()); -// Emit IPtrMask if needed + // Emit IPtrMask if needed #define CHK_NON_INTRPT_ESP_IPtrMask \ \ @@ -3571,7 +3572,7 @@ size_t GCInfo::gcInfoBlockHdrDump(const BYTE* table, InfoHdr* header, unsigned* #ifdef DEBUG gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM) #else - gcDump.gcPrintf = printf; + gcDump.gcPrintf = printf; #endif printf("Method info block:\n"); @@ -3590,7 +3591,7 @@ size_t GCInfo::gcDumpPtrTable(const BYTE* table, const InfoHdr& header, unsigned #ifdef DEBUG gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM) #else - gcDump.gcPrintf = printf; + gcDump.gcPrintf = printf; #endif return gcDump.DumpGCTable(table, header, methodSize, verifyGCTables); @@ -3608,7 +3609,7 @@ void GCInfo::gcFindPtrsInFrame(const void* infoBlock, const void* codeBlock, uns #ifdef DEBUG gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM) #else - gcDump.gcPrintf = printf; + gcDump.gcPrintf = printf; #endif gcDump.DumpPtrsInFrame((PTR_CBYTE)infoBlock, (const BYTE*)codeBlock, offs, verifyGCTables); @@ -3646,7 +3647,8 @@ class GcInfoEncoderWithLogging public: GcInfoEncoderWithLogging(GcInfoEncoder* gcInfoEncoder, bool verbose) - : m_gcInfoEncoder(gcInfoEncoder), m_doLogging(verbose INDEBUG(|| JitConfig.JitGCInfoLogging() != 0)) + : m_gcInfoEncoder(gcInfoEncoder) + , m_doLogging(verbose INDEBUG(|| JitConfig.JitGCInfoLogging() != 0)) { } @@ -4024,7 +4026,8 @@ struct InterruptibleRangeReporter Encoder* gcInfoEncoderWithLog; InterruptibleRangeReporter(unsigned _prevStart, Encoder* _gcInfo) - : prevStart(_prevStart), gcInfoEncoderWithLog(_gcInfo) + : prevStart(_prevStart) + , gcInfoEncoderWithLog(_gcInfo) { } @@ -4793,7 +4796,7 @@ void GCInfo::gcInfoRecordGCStackArgLive(GcInfoEncoder* gcInfoEncoder, MakeRegPtr StackSlotIdKey sskey(genStackPtr->rpdPtrArg, false, GcSlotFlags(genStackPtr->rpdGCtypeGet() == GCT_BYREF ? GC_SLOT_INTERIOR : GC_SLOT_BASE)); - GcSlotId varSlotId; + GcSlotId varSlotId; if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS) { if (!m_stackSlotMap->Lookup(sskey, &varSlotId)) @@ -4841,8 +4844,8 @@ void GCInfo::gcInfoRecordGCStackArgsDead(GcInfoEncoder* gcInfoEncoder, StackSlotIdKey sskey(genRegPtrTemp->rpdPtrArg, false, genRegPtrTemp->rpdGCtypeGet() == GCT_BYREF ? GC_SLOT_INTERIOR : GC_SLOT_BASE); - GcSlotId varSlotId; - bool b = m_stackSlotMap->Lookup(sskey, &varSlotId); + GcSlotId varSlotId; + bool b = m_stackSlotMap->Lookup(sskey, &varSlotId); assert(b); // Should have been added in the first pass. // Live until the call. 
gcInfoEncoderWithLog->SetSlotState(instrOffset, varSlotId, GC_SLOT_DEAD); diff --git a/src/coreclr/jit/gcinfo.cpp b/src/coreclr/jit/gcinfo.cpp index cb72d3e82ddeb..8045cd873260e 100644 --- a/src/coreclr/jit/gcinfo.cpp +++ b/src/coreclr/jit/gcinfo.cpp @@ -46,7 +46,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ -GCInfo::GCInfo(Compiler* theCompiler) : compiler(theCompiler) +GCInfo::GCInfo(Compiler* theCompiler) + : compiler(theCompiler) { regSet = nullptr; gcVarPtrList = nullptr; diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 1063d84b75925..5125c3ad42fcf 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -21,13 +21,13 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX /*****************************************************************************/ const unsigned char GenTree::gtOperKindTable[] = { -#define GTNODE(en, st, cm, ivn, ok) ((ok)>K_MASK) + GTK_COMMUTE *cm, +#define GTNODE(en, st, cm, ivn, ok) ((ok) & GTK_MASK) + GTK_COMMUTE *cm, #include "gtlist.h" }; #ifdef DEBUG const GenTreeDebugOperKind GenTree::gtDebugOperKindTable[] = { -#define GTNODE(en, st, cm, ivn, ok) static_cast((ok)&DBK_MASK), +#define GTNODE(en, st, cm, ivn, ok) static_cast((ok) & DBK_MASK), #include "gtlist.h" }; #endif // DEBUG @@ -78,7 +78,8 @@ struct IndentStack const char** indents; // Constructor for IndentStack. Uses 'compiler' to determine the mode of printing. - IndentStack(Compiler* compiler) : stack(compiler->getAllocator(CMK_DebugOnly)) + IndentStack(Compiler* compiler) + : stack(compiler->getAllocator(CMK_DebugOnly)) { if (compiler->asciiTrees) { @@ -3233,7 +3234,8 @@ bool Compiler::gtHasLocalsWithAddrOp(GenTree* tree) DoLclVarsOnly = true, }; - LocalsWithAddrOpVisitor(Compiler* comp) : GenTreeVisitor(comp) + LocalsWithAddrOpVisitor(Compiler* comp) + : GenTreeVisitor(comp) { } @@ -3274,7 +3276,8 @@ bool Compiler::gtHasAddressExposedLocals(GenTree* tree) DoLclVarsOnly = true, }; - Visitor(Compiler* comp) : GenTreeVisitor(comp) + Visitor(Compiler* comp) + : GenTreeVisitor(comp) { } @@ -3363,7 +3366,7 @@ unsigned Compiler::gtHashValue(GenTree* tree) #ifdef HOST_64BIT add = bits; #else // 32-bit host - add = genTreeHashAdd(uhi32(bits), ulo32(bits)); + add = genTreeHashAdd(uhi32(bits), ulo32(bits)); #endif break; case GT_CNS_DBL: @@ -3373,7 +3376,7 @@ unsigned Compiler::gtHashValue(GenTree* tree) #ifdef HOST_64BIT add = bits; #else // 32-bit host - add = genTreeHashAdd(uhi32(bits), ulo32(bits)); + add = genTreeHashAdd(uhi32(bits), ulo32(bits)); #endif break; } @@ -6436,7 +6439,9 @@ bool Compiler::gtMayHaveStoreInterference(GenTree* treeWithStores, GenTree* tree DoPreOrder = true, }; - Visitor(Compiler* compiler, GenTree* readTree) : GenTreeVisitor(compiler), m_readTree(readTree) + Visitor(Compiler* compiler, GenTree* readTree) + : GenTreeVisitor(compiler) + , m_readTree(readTree) { } @@ -6497,7 +6502,9 @@ bool Compiler::gtTreeHasLocalRead(GenTree* tree, unsigned lclNum) unsigned m_lclNum; LclVarDsc* m_lclDsc; - Visitor(Compiler* compiler, unsigned lclNum) : GenTreeVisitor(compiler), m_lclNum(lclNum) + Visitor(Compiler* compiler, unsigned lclNum) + : GenTreeVisitor(compiler) + , m_lclNum(lclNum) { m_lclDsc = compiler->lvaGetDesc(lclNum); } @@ -7480,7 +7487,7 @@ GenTree::VtablePtr GenTree::GetVtableForOper(genTreeOps oper) switch (oper) { -// clang-format off + // clang-format off #define GTSTRUCT_0(nm, 
tag) /*handle explicitly*/ #define GTSTRUCT_1(nm, tag) \ @@ -7542,8 +7549,8 @@ GenTree::VtablePtr GenTree::GetVtableForOper(genTreeOps oper) } break; - // We don't need to handle GTSTRUCT_N for LclVarCommon, since all those allowed opers are specified - // in their proper subtype. Similarly for GenTreeIndir. + // We don't need to handle GTSTRUCT_N for LclVarCommon, since all those allowed opers are specified + // in their proper subtype. Similarly for GenTreeIndir. default: { @@ -9026,7 +9033,7 @@ GenTree* Compiler::gtNewPutArgReg(var_types type, GenTree* arg, regNumber argReg node->AsMultiRegOp()->gtOtherReg = REG_NEXT(argReg); } #else - node = gtNewOperNode(GT_PUTARG_REG, type, arg); + node = gtNewOperNode(GT_PUTARG_REG, type, arg); #endif node->SetRegNum(argReg); @@ -9057,7 +9064,7 @@ GenTree* Compiler::gtNewBitCastNode(var_types type, GenTree* arg) // A BITCAST could be a MultiRegOp on arm since we could move a double register to two int registers. node = new (this, GT_BITCAST) GenTreeMultiRegOp(GT_BITCAST, type, arg, nullptr); #else - node = gtNewOperNode(GT_BITCAST, type, arg); + node = gtNewOperNode(GT_BITCAST, type, arg); #endif return node; @@ -9462,7 +9469,7 @@ GenTree* Compiler::gtCloneExpr(GenTree* tree) tree->AsLclFld()->Data(), tree->AsLclFld()->GetLayout()); break; - /* These nodes sometimes get bashed to "fat" ones */ + /* These nodes sometimes get bashed to "fat" ones */ case GT_MUL: case GT_DIV: @@ -9844,7 +9851,9 @@ GenTreeCall* Compiler::gtCloneExprCallHelper(GenTreeCall* tree) copy->gtCallMoreFlags = tree->gtCallMoreFlags; INDEBUG(copy->gtCallDebugFlags = tree->gtCallDebugFlags); - copy->gtArgs.InternalCopyFrom(this, &tree->gtArgs, [=](GenTree* node) { return gtCloneExpr(node); }); + copy->gtArgs.InternalCopyFrom(this, &tree->gtArgs, [=](GenTree* node) { + return gtCloneExpr(node); + }); // The call sig comes from the EE and doesn't change throughout the compilation process, meaning // we only really need one physical copy of it. Therefore a shallow pointer copy will suffice. @@ -9994,7 +10003,8 @@ void Compiler::gtUpdateStmtSideEffects(Statement* stmt) DoPostOrder = true, }; - UpdateSideEffectsWalker(Compiler* comp) : GenTreeVisitor(comp) + UpdateSideEffectsWalker(Compiler* comp) + : GenTreeVisitor(comp) { } @@ -10191,12 +10201,20 @@ bool GenTree::gtRequestSetFlags() } GenTreeUseEdgeIterator::GenTreeUseEdgeIterator() - : m_advance(nullptr), m_node(nullptr), m_edge(nullptr), m_statePtr(nullptr), m_state(-1) + : m_advance(nullptr) + , m_node(nullptr) + , m_edge(nullptr) + , m_statePtr(nullptr) + , m_state(-1) { } GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node) - : m_advance(nullptr), m_node(node), m_edge(nullptr), m_statePtr(nullptr), m_state(0) + : m_advance(nullptr) + , m_node(node) + , m_edge(nullptr) + , m_statePtr(nullptr) + , m_state(0) { assert(m_node != nullptr); @@ -10480,7 +10498,7 @@ void GenTreeUseEdgeIterator::AdvanceConditional() // `GTF_REVERSE_OPS` flag. // template -void GenTreeUseEdgeIterator::AdvanceBinOp() +void GenTreeUseEdgeIterator::AdvanceBinOp() { assert(ReverseOperands == ((m_node->gtFlags & GTF_REVERSE_OPS) != 0)); @@ -10603,7 +10621,7 @@ void GenTreeUseEdgeIterator::SetEntryStateForMultiOp() // component operands. // template -void GenTreeUseEdgeIterator::AdvanceCall() +void GenTreeUseEdgeIterator::AdvanceCall() { GenTreeCall* const call = m_node->AsCall(); @@ -10816,10 +10834,12 @@ bool GenTree::HandleKindDataIsInvariant(GenTreeFlags flags) printf("%c", (flags & GTF_EXCEPT) ? 'X' : '-'); printf("%c", (flags & GTF_GLOB_REF) ? 
'G' : '-'); printf("%c", (debugFlags & GTF_DEBUG_NODE_MORPHED) ? '+' : // First print '+' if GTF_DEBUG_NODE_MORPHED is set - (flags & GTF_ORDER_SIDEEFF) ? 'O' : '-'); // otherwise print 'O' or '-' + (flags & GTF_ORDER_SIDEEFF) ? 'O' + : '-'); // otherwise print 'O' or '-' printf("%c", (flags & GTF_COLON_COND) ? '?' : '-'); - printf("%c", (flags & GTF_DONT_CSE) ? 'N' : // N is for No cse - (flags & GTF_MAKE_CSE) ? 'H' : '-'); // H is for Hoist this expr + printf("%c", (flags & GTF_DONT_CSE) ? 'N' : // N is for No cse + (flags & GTF_MAKE_CSE) ? 'H' + : '-'); // H is for Hoist this expr printf("%c", (flags & GTF_REVERSE_OPS) ? 'R' : '-'); printf("%c", (flags & GTF_UNSIGNED) ? 'U' : (flags & GTF_BOOLEAN) ? 'B' : '-'); #if FEATURE_SET_FLAGS @@ -11668,8 +11688,8 @@ void Compiler::gtDispRegVal(GenTree* tree) { switch (tree->GetRegTag()) { - // Don't display anything for the GT_REGTAG_NONE case; - // the absence of printed register values will imply this state. + // Don't display anything for the GT_REGTAG_NONE case; + // the absence of printed register values will imply this state. case GenTree::GT_REGTAG_REG: printf(" REG %s", compRegVarName(tree->GetRegNum())); @@ -11697,9 +11717,9 @@ void Compiler::gtDispRegVal(GenTree* tree) } // We usually/commonly don't expect to print anything longer than this string, -#define LONGEST_COMMON_LCL_VAR_DISPLAY "V99 PInvokeFrame" +#define LONGEST_COMMON_LCL_VAR_DISPLAY "V99 PInvokeFrame" #define LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH (sizeof(LONGEST_COMMON_LCL_VAR_DISPLAY)) -#define BUF_SIZE (LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH * 2) +#define BUF_SIZE (LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH * 2) void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut) { @@ -11934,7 +11954,7 @@ static const char* InsCflagsToString(insCflags flags) { const static char* s_table[16] = {"0", "v", "c", "cv", "z", "zv", "zc", "zcv", "n", "nv", "nc", "ncv", "nz", "nzv", "nzc", "nzcv"}; - unsigned index = (unsigned)flags; + unsigned index = (unsigned)flags; assert((0 <= index) && (index < ArrLen(s_table))); return s_table[index]; } @@ -12345,7 +12365,7 @@ void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack) break; #endif // !FEATURE_EH_FUNCLETS - // Vanilla leaves. No qualifying information available. So do nothing + // Vanilla leaves. No qualifying information available. 
So do nothing case GT_NOP: case GT_NO_OP: @@ -12497,8 +12517,8 @@ void Compiler::gtDispLocal(GenTreeLclVarCommon* tree, IndentStack* indentStack) void Compiler::gtDispChild(GenTree* child, IndentStack* indentStack, IndentInfo arcType, - _In_opt_ const char* msg, /* = nullptr */ - bool topOnly) /* = false */ + _In_opt_ const char* msg, /* = nullptr */ + bool topOnly) /* = false */ { indentStack->Push(arcType); gtDispTree(child, indentStack, msg, topOnly); @@ -12507,11 +12527,11 @@ void Compiler::gtDispChild(GenTree* child, /*****************************************************************************/ -void Compiler::gtDispTree(GenTree* tree, - IndentStack* indentStack, /* = nullptr */ - _In_ _In_opt_z_ const char* msg, /* = nullptr */ - bool topOnly, /* = false */ - bool isLIR) /* = false */ +void Compiler::gtDispTree(GenTree* tree, + IndentStack* indentStack, /* = nullptr */ + _In_ _In_opt_z_ const char* msg, /* = nullptr */ + bool topOnly, /* = false */ + bool isLIR) /* = false */ { if (tree == nullptr) { @@ -13008,9 +13028,10 @@ void Compiler::gtDispTree(GenTree* tree, case GT_HWINTRINSIC: if (tree->OperIs(GT_HWINTRINSIC)) { - printf(" %s %s", tree->AsHWIntrinsic()->GetSimdBaseType() == TYP_UNKNOWN - ? "" - : varTypeName(tree->AsHWIntrinsic()->GetSimdBaseType()), + printf(" %s %s", + tree->AsHWIntrinsic()->GetSimdBaseType() == TYP_UNKNOWN + ? "" + : varTypeName(tree->AsHWIntrinsic()->GetSimdBaseType()), HWIntrinsicInfo::lookupName(tree->AsHWIntrinsic()->GetHWIntrinsicId())); } @@ -15564,8 +15585,8 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree) switch (switchType) { - // Fold constant REF of BYREF binary operator. - // These can only be comparisons or null pointers. + // Fold constant REF of BYREF binary operator. + // These can only be comparisons or null pointers. case TYP_REF: @@ -15634,7 +15655,7 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree) return tree; - // Fold constant INT binary operator. + // Fold constant INT binary operator. case TYP_INT: @@ -15761,8 +15782,8 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree) i1 = (i1 << ((32 - i2) & 0x1f)) | (UINT32(i1) >> (i2 & 0x1f)); break; - // DIV and MOD can throw an exception - if the division is by 0 - // or there is overflow - when dividing MIN by -1. + // DIV and MOD can throw an exception - if the division is by 0 + // or there is overflow - when dividing MIN by -1. case GT_DIV: case GT_MOD: @@ -15831,7 +15852,7 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree) goto DONE; - // Fold constant LONG binary operator. + // Fold constant LONG binary operator. 
case TYP_LONG: @@ -16054,7 +16075,7 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree) goto DONE; - // Fold constant FLOAT or DOUBLE binary operator + // Fold constant FLOAT or DOUBLE binary operator case TYP_FLOAT: case TYP_DOUBLE: @@ -17092,7 +17113,9 @@ void Compiler::gtExtractSideEffList(GenTree* expr, return m_result; } - SideEffectExtractor(Compiler* compiler, GenTreeFlags flags) : GenTreeVisitor(compiler), m_flags(flags) + SideEffectExtractor(Compiler* compiler, GenTreeFlags flags) + : GenTreeVisitor(compiler) + , m_flags(flags) { } @@ -17373,7 +17396,9 @@ Compiler::FindLinkData Compiler::gtFindLink(Statement* stmt, GenTree* node) DoPreOrder = true, }; - FindLinkWalker(Compiler* comp, GenTree* node) : GenTreeVisitor(comp), m_node(node) + FindLinkWalker(Compiler* comp, GenTree* node) + : GenTreeVisitor(comp) + , m_node(node) { } @@ -17559,7 +17584,9 @@ bool Compiler::gtTreeContainsOper(GenTree* tree, genTreeOps oper) genTreeOps m_oper; public: - Visitor(Compiler* comp, genTreeOps oper) : GenTreeVisitor(comp), m_oper(oper) + Visitor(Compiler* comp, genTreeOps oper) + : GenTreeVisitor(comp) + , m_oper(oper) { } @@ -17600,7 +17627,8 @@ ExceptionSetFlags Compiler::gtCollectExceptions(GenTree* tree) ExceptionSetFlags m_preciseExceptions = ExceptionSetFlags::None; public: - ExceptionsWalker(Compiler* comp) : GenTreeVisitor(comp) + ExceptionsWalker(Compiler* comp) + : GenTreeVisitor(comp) { } @@ -17658,7 +17686,9 @@ bool Compiler::gtComplexityExceeds(GenTree* tree, unsigned limit) DoPreOrder = true, }; - ComplexityVisitor(Compiler* comp, unsigned limit) : GenTreeVisitor(comp), m_limit(limit) + ComplexityVisitor(Compiler* comp, unsigned limit) + : GenTreeVisitor(comp) + , m_limit(limit) { } @@ -19415,7 +19445,8 @@ FieldSeq* FieldSeqStore::Append(FieldSeq* a, FieldSeq* b) return nullptr; } -FieldSeq::FieldSeq(CORINFO_FIELD_HANDLE fieldHnd, ssize_t offset, FieldKind fieldKind) : m_offset(offset) +FieldSeq::FieldSeq(CORINFO_FIELD_HANDLE fieldHnd, ssize_t offset, FieldKind fieldKind) + : m_offset(offset) { assert(fieldHnd != NO_FIELD_HANDLE); @@ -20631,8 +20662,8 @@ GenTree* Compiler::gtNewSimdBinOpNode( assert(!compIsaSupportedDebugOnly(InstructionSet_AVX512F_VL)); // Vector256 maskedProduct = Avx2.And(widenedProduct, vecCon1).AsInt16() - GenTree* maskedProduct = gtNewSimdBinOpNode(GT_AND, widenedType, widenedProduct, vecCon1, - widenedSimdBaseJitType, widenedSimdSize); + GenTree* maskedProduct = gtNewSimdBinOpNode(GT_AND, widenedType, widenedProduct, vecCon1, + widenedSimdBaseJitType, widenedSimdSize); GenTree* maskedProductDup = fgMakeMultiUse(&maskedProduct); // Vector256 packedProduct = Avx2.PackUnsignedSaturate(maskedProduct, @@ -21629,10 +21660,10 @@ GenTree* Compiler::gtNewSimdCmpOpNode( op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle, CORINFO_TYPE_INT, simdSize); - u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle, - CORINFO_TYPE_INT, simdSize); - v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle, - CORINFO_TYPE_INT, simdSize); + u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle, + CORINFO_TYPE_INT, simdSize); + v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle, + CORINFO_TYPE_INT, simdSize); // Validate we can't use AVX512F_VL_TernaryLogic here assert(!compIsaSupportedDebugOnly(InstructionSet_AVX512F_VL)); @@ -21884,10 +21915,10 @@ GenTree* Compiler::gtNewSimdCmpOpNode( op1 
= gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle, CORINFO_TYPE_INT, simdSize); - u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle, - CORINFO_TYPE_INT, simdSize); - v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle, - CORINFO_TYPE_INT, simdSize); + u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle, + CORINFO_TYPE_INT, simdSize); + v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle, + CORINFO_TYPE_INT, simdSize); // Validate we can't use AVX512F_VL_TernaryLogic here assert(!compIsaSupportedDebugOnly(InstructionSet_AVX512F_VL)); @@ -25033,8 +25064,8 @@ GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, CorInfoType si tmp = fgMakeMultiUse(&op1); opShifted = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, gtNewIconNode(shiftVal, TYP_INT), NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize); - op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, opShifted, tmp, simdBaseJitType, simdSize); - shiftVal = shiftVal / 2; + op1 = gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, opShifted, tmp, simdBaseJitType, simdSize); + shiftVal = shiftVal / 2; } return gtNewSimdToScalarNode(type, op1, simdBaseJitType, simdSize); @@ -26787,7 +26818,7 @@ genTreeOps GenTreeHWIntrinsic::HWOperGet() const return GT_AND_NOT; } #endif - // TODO: Handle other cases + // TODO: Handle other cases default: { @@ -26935,7 +26966,7 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, #else uint32_t floatFieldFlags = comp->info.compCompHnd->getRISCV64PassStructInRegisterFlags(retClsHnd); #endif - BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE}; + BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE}; comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]); if (floatFieldFlags & STRUCT_FLOAT_FIELD_ONLY_TWO) diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index c1fc5b33a7175..7fbcf5471103e 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -156,10 +156,10 @@ inline ExceptionSetFlags& operator&=(ExceptionSetFlags& a, ExceptionSetFlags b) #ifdef DEBUG /***************************************************************************** -* -* TargetHandleTypes are used to determine the type of handle present inside GenTreeIntCon node. -* The values are such that they don't overlap with helper's or user function's handle. -*/ + * + * TargetHandleTypes are used to determine the type of handle present inside GenTreeIntCon node. + * The values are such that they don't overlap with helper's or user function's handle. 
+ */ enum TargetHandleType : BYTE { THT_Unknown = 2, @@ -200,17 +200,20 @@ class AssertionInfo unsigned short m_assertionIndex : 15; AssertionInfo(bool assertionHoldsOnFalseEdge, AssertionIndex assertionIndex) - : m_assertionHoldsOnFalseEdge(assertionHoldsOnFalseEdge), m_assertionIndex(assertionIndex) + : m_assertionHoldsOnFalseEdge(assertionHoldsOnFalseEdge) + , m_assertionIndex(assertionIndex) { assert(m_assertionIndex == assertionIndex); } public: - AssertionInfo() : AssertionInfo(false, 0) + AssertionInfo() + : AssertionInfo(false, 0) { } - AssertionInfo(AssertionIndex assertionIndex) : AssertionInfo(false, assertionIndex) + AssertionInfo(AssertionIndex assertionIndex) + : AssertionInfo(false, assertionIndex) { } @@ -314,7 +317,8 @@ class FieldSeqStore JitHashTable, FieldSeq> m_map; public: - FieldSeqStore(CompAllocator alloc) : m_map(alloc) + FieldSeqStore(CompAllocator alloc) + : m_map(alloc) { } @@ -331,13 +335,13 @@ struct Statement; /*****************************************************************************/ // Forward declarations of the subtypes -#define GTSTRUCT_0(fn, en) struct GenTree##fn; -#define GTSTRUCT_1(fn, en) struct GenTree##fn; -#define GTSTRUCT_2(fn, en, en2) struct GenTree##fn; -#define GTSTRUCT_3(fn, en, en2, en3) struct GenTree##fn; -#define GTSTRUCT_4(fn, en, en2, en3, en4) struct GenTree##fn; -#define GTSTRUCT_N(fn, ...) struct GenTree##fn; -#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2) +#define GTSTRUCT_0(fn, en) struct GenTree##fn; +#define GTSTRUCT_1(fn, en) struct GenTree##fn; +#define GTSTRUCT_2(fn, en, en2) struct GenTree##fn; +#define GTSTRUCT_3(fn, en, en2, en3) struct GenTree##fn; +#define GTSTRUCT_4(fn, en, en2, en3, en4) struct GenTree##fn; +#define GTSTRUCT_N(fn, ...) struct GenTree##fn; +#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2) #define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3) #include "gtstructs.h" @@ -690,11 +694,11 @@ struct GenTree return *As##fn(); \ } -#define GTSTRUCT_1(fn, en) GTSTRUCT_N(fn, en) -#define GTSTRUCT_2(fn, en, en2) GTSTRUCT_N(fn, en, en2) -#define GTSTRUCT_3(fn, en, en2, en3) GTSTRUCT_N(fn, en, en2, en3) -#define GTSTRUCT_4(fn, en, en2, en3, en4) GTSTRUCT_N(fn, en, en2, en3, en4) -#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2) +#define GTSTRUCT_1(fn, en) GTSTRUCT_N(fn, en) +#define GTSTRUCT_2(fn, en, en2) GTSTRUCT_N(fn, en, en2) +#define GTSTRUCT_3(fn, en, en2, en3) GTSTRUCT_N(fn, en, en2, en3) +#define GTSTRUCT_4(fn, en, en2, en3, en4) GTSTRUCT_N(fn, en, en2, en3, en4) +#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2) #define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3) #include "gtstructs.h" @@ -719,11 +723,11 @@ struct GenTree #define NO_CSE (0) -#define IS_CSE_INDEX(x) ((x) != 0) -#define IS_CSE_USE(x) ((x) > 0) -#define IS_CSE_DEF(x) ((x) < 0) +#define IS_CSE_INDEX(x) ((x) != 0) +#define IS_CSE_USE(x) ((x) > 0) +#define IS_CSE_DEF(x) ((x) < 0) #define GET_CSE_INDEX(x) (((x) > 0) ? 
x : -(x)) -#define TO_CSE_DEF(x) (-(x)) +#define TO_CSE_DEF(x) (-(x)) signed char gtCSEnum; // 0 or the CSE index (negated if def) // valid only for CSE expressions @@ -766,7 +770,7 @@ struct GenTree bool gtCostsInitialized; #endif // DEBUG -#define MAX_COST UCHAR_MAX +#define MAX_COST UCHAR_MAX #define IND_COST_EX 3 // execution cost for an indirection unsigned char GetCostEx() const @@ -957,7 +961,7 @@ struct GenTree regMaskSmall gtRsvdRegs; // set of fixed trashed registers - unsigned AvailableTempRegCount(regMaskTP mask = (regMaskTP)-1) const; + unsigned AvailableTempRegCount(regMaskTP mask = (regMaskTP)-1) const; regNumber GetSingleTempReg(regMaskTP mask = (regMaskTP)-1); regNumber ExtractTempReg(regMaskTP mask = (regMaskTP)-1); @@ -1509,7 +1513,7 @@ struct GenTree #if !defined(TARGET_64BIT) || (gtOper == GT_ADD_HI) || (gtOper == GT_SUB_HI) #endif - ); + ); } bool OperMayOverflow() const @@ -1754,9 +1758,9 @@ struct GenTree return (DebugOperKind() & DBK_NOTLIR) == 0; } - bool OperSupportsReverseOpEvalOrder(Compiler* comp) const; + bool OperSupportsReverseOpEvalOrder(Compiler* comp) const; static bool RequiresNonNullOp2(genTreeOps oper); - bool IsValidCallArgument(); + bool IsValidCallArgument(); #endif // DEBUG inline bool IsIntegralConst(ssize_t constVal) const; @@ -1857,7 +1861,7 @@ struct GenTree bool OperRequiresCallFlag(Compiler* comp) const; ExceptionSetFlags OperExceptions(Compiler* comp); - bool OperMayThrow(Compiler* comp); + bool OperMayThrow(Compiler* comp); bool OperRequiresGlobRefFlag(Compiler* comp) const; @@ -1894,7 +1898,7 @@ struct GenTree static bool Compare(GenTree* op1, GenTree* op2, bool swapOK = false); -//--------------------------------------------------------------------- + //--------------------------------------------------------------------- #if defined(DEBUG) || CALL_ARG_STATS || COUNT_BASIC_BLOCKS || COUNT_LOOPS || EMITTER_STATS || MEASURE_MEM_ALLOC || \ NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS @@ -1938,8 +1942,8 @@ struct GenTree } template - void BashToConst(T value, var_types type = TYP_UNDEF); - void BashToZeroConst(var_types type); + void BashToConst(T value, var_types type = TYP_UNDEF); + void BashToZeroConst(var_types type); GenTreeLclVar* BashToLclVar(Compiler* comp, unsigned lclNum); #if NODEBASH_STATS @@ -1977,7 +1981,7 @@ struct GenTree unsigned* pSize = nullptr); GenTreeLclVarCommon* IsImplicitByrefParameterValuePreMorph(Compiler* compiler); - GenTreeLclVar* IsImplicitByrefParameterValuePostMorph(Compiler* compiler, GenTree** addr); + GenTreeLclVar* IsImplicitByrefParameterValuePostMorph(Compiler* compiler, GenTree** addr); // Determine whether this is an assignment tree of the form X = X (op) Y, // where Y is an arbitrary tree, and X is a lclVar. 
@@ -2256,7 +2260,7 @@ struct GenTree bool gtRequestSetFlags(); #ifdef DEBUG - static int gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags); + static int gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags); static const char* gtGetHandleKindString(GenTreeFlags flags); #endif @@ -2375,7 +2379,7 @@ struct GenTree typedef void* VtablePtr; VtablePtr GetVtableForOper(genTreeOps oper); - void SetVtableForOper(genTreeOps oper); + void SetVtableForOper(genTreeOps oper); static VtablePtr s_vtablesForOpers[GT_COUNT]; static VtablePtr s_vtableForOp; @@ -2409,7 +2413,9 @@ struct GenTreePhi final : public GenTree Use* m_next; public: - Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next) + Use(GenTree* node, Use* next = nullptr) + : m_node(node) + , m_next(next) { assert(node->OperIs(GT_PHI_ARG)); } @@ -2447,7 +2453,8 @@ struct GenTreePhi final : public GenTree Use* m_use; public: - UseIterator(Use* use) : m_use(use) + UseIterator(Use* use) + : m_use(use) { } @@ -2483,7 +2490,8 @@ struct GenTreePhi final : public GenTree Use* m_uses; public: - UseList(Use* uses) : m_uses(uses) + UseList(Use* uses) + : m_uses(uses) { } @@ -2500,7 +2508,9 @@ struct GenTreePhi final : public GenTree Use* gtUses; - GenTreePhi(var_types type) : GenTree(GT_PHI, type), gtUses(nullptr) + GenTreePhi(var_types type) + : GenTree(GT_PHI, type) + , gtUses(nullptr) { } @@ -2549,7 +2559,8 @@ struct GenTreePhi final : public GenTree } #if DEBUGGABLE_GENTREE - GenTreePhi() : GenTree() + GenTreePhi() + : GenTree() { } #endif @@ -2568,7 +2579,10 @@ struct GenTreeFieldList : public GenTree public: Use(GenTree* node, unsigned offset, var_types type) - : m_node(node), m_next(nullptr), m_offset(static_cast(offset)), m_type(type) + : m_node(node) + , m_next(nullptr) + , m_offset(static_cast(offset)) + , m_type(type) { // We can save space on 32 bit hosts by storing the offset as uint16_t. 
Struct promotion // only accepts structs which are much smaller than that - 128 bytes = max 4 fields * max @@ -2628,7 +2642,8 @@ struct GenTreeFieldList : public GenTree Use* use; public: - UseIterator(Use* use) : use(use) + UseIterator(Use* use) + : use(use) { } @@ -2664,7 +2679,9 @@ struct GenTreeFieldList : public GenTree Use* m_tail; public: - UseList() : m_head(nullptr), m_tail(nullptr) + UseList() + : m_head(nullptr) + , m_tail(nullptr) { } @@ -2744,7 +2761,8 @@ struct GenTreeFieldList : public GenTree UseList m_uses; public: - GenTreeFieldList() : GenTree(GT_FIELD_LIST, TYP_STRUCT) + GenTreeFieldList() + : GenTree(GT_FIELD_LIST, TYP_STRUCT) { SetContained(); } @@ -2848,12 +2866,12 @@ class GenTreeUseEdgeIterator final void AdvanceConditional(); template - void AdvanceBinOp(); - void SetEntryStateForBinOp(); + void AdvanceBinOp(); + void SetEntryStateForBinOp(); // The advance function for call nodes template - void AdvanceCall(); + void AdvanceCall(); #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) void AdvanceMultiOp(); @@ -2912,12 +2930,14 @@ class GenTreeOperandIterator final GenTreeUseEdgeIterator m_useEdges; - GenTreeOperandIterator(GenTree* node) : m_useEdges(node) + GenTreeOperandIterator(GenTree* node) + : m_useEdges(node) { } public: - GenTreeOperandIterator() : m_useEdges() + GenTreeOperandIterator() + : m_useEdges() { } @@ -2960,12 +2980,14 @@ struct GenTreeUnOp : public GenTree protected: GenTreeUnOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false)) - : GenTree(oper, type DEBUGARG(largeNode)), gtOp1(nullptr) + : GenTree(oper, type DEBUGARG(largeNode)) + , gtOp1(nullptr) { } GenTreeUnOp(genTreeOps oper, var_types type, GenTree* op1 DEBUGARG(bool largeNode = false)) - : GenTree(oper, type DEBUGARG(largeNode)), gtOp1(op1) + : GenTree(oper, type DEBUGARG(largeNode)) + , gtOp1(op1) { assert(op1 != nullptr || NullOp1Legal()); if (op1 != nullptr) @@ -2975,7 +2997,9 @@ struct GenTreeUnOp : public GenTree } #if DEBUGGABLE_GENTREE - GenTreeUnOp() : GenTree(), gtOp1(nullptr) + GenTreeUnOp() + : GenTree() + , gtOp1(nullptr) { } #endif @@ -2986,7 +3010,8 @@ struct GenTreeOp : public GenTreeUnOp GenTree* gtOp2; GenTreeOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2 DEBUGARG(bool largeNode = false)) - : GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode)), gtOp2(op2) + : GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode)) + , gtOp2(op2) { // comparisons are always integral types assert(!GenTree::OperIsCompare(oper) || varTypeIsIntegral(type)); @@ -3005,7 +3030,8 @@ struct GenTreeOp : public GenTreeUnOp // A small set of types are unary operators with optional arguments. We use // this constructor to build those. 
GenTreeOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false)) - : GenTreeUnOp(oper, type DEBUGARG(largeNode)), gtOp2(nullptr) + : GenTreeUnOp(oper, type DEBUGARG(largeNode)) + , gtOp2(nullptr) { // Unary operators with optional arguments: assert(oper == GT_RETURN || oper == GT_RETFILT || OperIsBlk(oper)); @@ -3027,7 +3053,9 @@ struct GenTreeOp : public GenTreeUnOp #endif #if DEBUGGABLE_GENTREE - GenTreeOp() : GenTreeUnOp(), gtOp2(nullptr) + GenTreeOp() + : GenTreeUnOp() + , gtOp2(nullptr) { } #endif @@ -3037,11 +3065,14 @@ struct GenTreeVal : public GenTree { size_t gtVal1; - GenTreeVal(genTreeOps oper, var_types type, ssize_t val) : GenTree(oper, type), gtVal1(val) + GenTreeVal(genTreeOps oper, var_types type, ssize_t val) + : GenTree(oper, type) + , gtVal1(val) { } #if DEBUGGABLE_GENTREE - GenTreeVal() : GenTree() + GenTreeVal() + : GenTree() { } #endif @@ -3049,12 +3080,12 @@ struct GenTreeVal : public GenTree struct GenTreeIntConCommon : public GenTree { - inline INT64 LngValue() const; - inline void SetLngValue(INT64 val); + inline INT64 LngValue() const; + inline void SetLngValue(INT64 val); inline ssize_t IconValue() const; - inline void SetIconValue(ssize_t val); - inline INT64 IntegralValue() const; - inline void SetIntegralValue(int64_t value); + inline void SetIconValue(ssize_t val); + inline INT64 IntegralValue() const; + inline void SetIntegralValue(int64_t value); template inline void SetValueTruncating(T value); @@ -3097,7 +3128,8 @@ struct GenTreeIntConCommon : public GenTree #endif #if DEBUGGABLE_GENTREE - GenTreeIntConCommon() : GenTree() + GenTreeIntConCommon() + : GenTree() { } #endif @@ -3110,11 +3142,14 @@ struct GenTreePhysReg : public GenTree // GetRegNum() indicates the destination (and can be changed) // whereas reg indicates the source regNumber gtSrcReg; - GenTreePhysReg(regNumber r, var_types type = TYP_I_IMPL) : GenTree(GT_PHYSREG, type), gtSrcReg(r) + GenTreePhysReg(regNumber r, var_types type = TYP_I_IMPL) + : GenTree(GT_PHYSREG, type) + , gtSrcReg(r) { } #if DEBUGGABLE_GENTREE - GenTreePhysReg() : GenTree() + GenTreePhysReg() + : GenTree() { } #endif @@ -3173,7 +3208,8 @@ struct GenTreeIntCon : public GenTreeIntConCommon void FixupInitBlkValue(var_types type); #if DEBUGGABLE_GENTREE - GenTreeIntCon() : GenTreeIntConCommon() + GenTreeIntCon() + : GenTreeIntConCommon() { } #endif @@ -3194,12 +3230,14 @@ struct GenTreeLngCon : public GenTreeIntConCommon return (INT32)(gtLconVal >> 32); } - GenTreeLngCon(INT64 val) : GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG) + GenTreeLngCon(INT64 val) + : GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG) { SetLngValue(val); } #if DEBUGGABLE_GENTREE - GenTreeLngCon() : GenTreeIntConCommon() + GenTreeLngCon() + : GenTreeIntConCommon() { } #endif @@ -3330,13 +3368,15 @@ struct GenTreeDblCon : public GenTree return (bits == otherBits); } - GenTreeDblCon(double val, var_types type = TYP_DOUBLE) : GenTree(GT_CNS_DBL, type) + GenTreeDblCon(double val, var_types type = TYP_DOUBLE) + : GenTree(GT_CNS_DBL, type) { assert(varTypeIsFloating(type)); SetDconValue(val); } #if DEBUGGABLE_GENTREE - GenTreeDblCon() : GenTree() + GenTreeDblCon() + : GenTree() { } #endif @@ -3360,11 +3400,14 @@ struct GenTreeStrCon : public GenTree // Because this node can come from an inlined method we need to // have the scope handle, since it will become a helper call. 
GenTreeStrCon(unsigned sconCPX, CORINFO_MODULE_HANDLE mod DEBUGARG(bool largeNode = false)) - : GenTree(GT_CNS_STR, TYP_REF DEBUGARG(largeNode)), gtSconCPX(sconCPX), gtScpHnd(mod) + : GenTree(GT_CNS_STR, TYP_REF DEBUGARG(largeNode)) + , gtSconCPX(sconCPX) + , gtScpHnd(mod) { } #if DEBUGGABLE_GENTREE - GenTreeStrCon() : GenTree() + GenTreeStrCon() + : GenTree() { } #endif @@ -3407,12 +3450,14 @@ class SsaNumInfo final int m_value; - SsaNumInfo(int value) : m_value(value) + SsaNumInfo(int value) + : m_value(value) { } public: - SsaNumInfo() : m_value(SsaConfig::RESERVED_SSA_NUM) + SsaNumInfo() + : m_value(SsaConfig::RESERVED_SSA_NUM) { } @@ -3545,7 +3590,8 @@ struct GenTreeLclVarCommon : public GenTreeUnOp } #if DEBUGGABLE_GENTREE - GenTreeLclVarCommon() : GenTreeUnOp() + GenTreeLclVarCommon() + : GenTreeUnOp() { } #endif @@ -3684,7 +3730,7 @@ struct GenTreeLclVar : public GenTreeLclVarCommon } unsigned int GetFieldCount(Compiler* compiler) const; - var_types GetFieldTypeByIndex(Compiler* compiler, unsigned idx); + var_types GetFieldTypeByIndex(Compiler* compiler, unsigned idx); bool IsNeverNegative(Compiler* comp) const; @@ -3723,8 +3769,8 @@ struct GenTreeLclVar : public GenTreeLclVarCommon } #endif - GenTreeLclVar(genTreeOps oper, - var_types type, + GenTreeLclVar(genTreeOps oper, + var_types type, unsigned lclNum DEBUGARG(IL_OFFSET ilOffs = BAD_IL_OFFSET) DEBUGARG(bool largeNode = false)) : GenTreeLclVarCommon(oper, type, lclNum DEBUGARG(largeNode)) DEBUGARG(gtLclILoffs(ilOffs)) { @@ -3737,7 +3783,8 @@ struct GenTreeLclVar : public GenTreeLclVarCommon } #if DEBUGGABLE_GENTREE - GenTreeLclVar() : GenTreeLclVarCommon() + GenTreeLclVar() + : GenTreeLclVarCommon() { } #endif @@ -3753,14 +3800,16 @@ struct GenTreeLclFld : public GenTreeLclVarCommon public: GenTreeLclFld(genTreeOps oper, var_types type, unsigned lclNum, unsigned lclOffs, ClassLayout* layout = nullptr) - : GenTreeLclVarCommon(oper, type, lclNum), m_lclOffs(static_cast(lclOffs)) + : GenTreeLclVarCommon(oper, type, lclNum) + , m_lclOffs(static_cast(lclOffs)) { assert(lclOffs <= UINT16_MAX); SetLayout(layout); } GenTreeLclFld(var_types type, unsigned lclNum, unsigned lclOffs, GenTree* data, ClassLayout* layout) - : GenTreeLclVarCommon(GT_STORE_LCL_FLD, type, lclNum, data), m_lclOffs(static_cast(lclOffs)) + : GenTreeLclVarCommon(GT_STORE_LCL_FLD, type, lclNum, data) + , m_lclOffs(static_cast(lclOffs)) { assert(lclOffs <= UINT16_MAX); SetLayout(layout); @@ -3795,7 +3844,8 @@ struct GenTreeLclFld : public GenTreeLclVarCommon #endif // TARGET_ARM #if DEBUGGABLE_GENTREE - GenTreeLclFld() : GenTreeLclVarCommon() + GenTreeLclFld() + : GenTreeLclVarCommon() { } #endif @@ -3837,7 +3887,8 @@ struct GenTreeCast : public GenTreeOp var_types gtCastType; GenTreeCast(var_types type, GenTree* op, bool fromUnsigned, var_types castType DEBUGARG(bool largeNode = false)) - : GenTreeOp(GT_CAST, type, op, nullptr DEBUGARG(largeNode)), gtCastType(castType) + : GenTreeOp(GT_CAST, type, op, nullptr DEBUGARG(largeNode)) + , gtCastType(castType) { // We do not allow casts from floating point types to be treated as from // unsigned to avoid bugs related to wrong GTF_UNSIGNED in case the @@ -3847,7 +3898,8 @@ struct GenTreeCast : public GenTreeOp gtFlags |= fromUnsigned ? 
GTF_UNSIGNED : GTF_EMPTY; } #if DEBUGGABLE_GENTREE - GenTreeCast() : GenTreeOp() + GenTreeCast() + : GenTreeOp() { } #endif @@ -3896,7 +3948,8 @@ struct GenTreeBox : public GenTreeUnOp { } #if DEBUGGABLE_GENTREE - GenTreeBox() : GenTreeUnOp() + GenTreeBox() + : GenTreeUnOp() { } #endif @@ -3940,7 +3993,8 @@ struct GenTreeFieldAddr : public GenTreeUnOp } #if DEBUGGABLE_GENTREE - GenTreeFieldAddr() : GenTreeUnOp() + GenTreeFieldAddr() + : GenTreeUnOp() { } #endif @@ -4010,12 +4064,14 @@ struct GenTreeColon : public GenTreeOp } #if DEBUGGABLE_GENTREE - GenTreeColon() : GenTreeOp() + GenTreeColon() + : GenTreeOp() { } #endif - GenTreeColon(var_types typ, GenTree* thenNode, GenTree* elseNode) : GenTreeOp(GT_COLON, typ, elseNode, thenNode) + GenTreeColon(var_types typ, GenTree* thenNode, GenTree* elseNode) + : GenTreeOp(GT_COLON, typ, elseNode, thenNode) { } }; @@ -4028,13 +4084,15 @@ struct GenTreeConditional : public GenTreeOp GenTreeConditional( genTreeOps oper, var_types type, GenTree* cond, GenTree* op1, GenTree* op2 DEBUGARG(bool largeNode = false)) - : GenTreeOp(oper, type, op1, op2 DEBUGARG(largeNode)), gtCond(cond) + : GenTreeOp(oper, type, op1, op2 DEBUGARG(largeNode)) + , gtCond(cond) { assert(cond != nullptr); } #if DEBUGGABLE_GENTREE - GenTreeConditional() : GenTreeOp() + GenTreeConditional() + : GenTreeOp() { } #endif @@ -4315,7 +4373,7 @@ struct ReturnTypeDesc class TailCallSiteInfo { bool m_isCallvirt : 1; - bool m_isCalli : 1; + bool m_isCalli : 1; CORINFO_SIG_INFO m_sig; CORINFO_RESOLVED_TOKEN m_token; @@ -4501,7 +4559,7 @@ struct CallArgABIInformation bool IsHfaArg() const; bool IsHfaRegArg() const; var_types GetHfaType() const; - void SetHfaType(var_types type, unsigned hfaSlots); + void SetHfaType(var_types type, unsigned hfaSlots); regNumber GetRegNum() const { @@ -4676,7 +4734,8 @@ class CallArg public: CallArgABIInformation AbiInfo; - CallArg(const NewCallArg& arg) : CallArg() + CallArg(const NewCallArg& arg) + : CallArg() { m_earlyNode = arg.Node; m_wellKnownArg = arg.WellKnownArg; @@ -4684,7 +4743,7 @@ class CallArg m_signatureClsHnd = arg.SignatureClsHnd; } - CallArg(const CallArg&) = delete; + CallArg(const CallArg&) = delete; CallArg& operator=(CallArg&) = delete; // clang-format off @@ -4746,9 +4805,9 @@ class CallArgs // made for this call. unsigned m_padStkAlign; #endif - bool m_hasThisPointer : 1; - bool m_hasRetBuffer : 1; - bool m_isVarArgs : 1; + bool m_hasThisPointer : 1; + bool m_hasRetBuffer : 1; + bool m_isVarArgs : 1; bool m_abiInformationDetermined : 1; // True if we have one or more register arguments. 
bool m_hasRegArgs : 1; @@ -4762,15 +4821,15 @@ class CallArgs bool m_alignmentDone : 1; #endif - void AddedWellKnownArg(WellKnownArg arg); - void RemovedWellKnownArg(WellKnownArg arg); + void AddedWellKnownArg(WellKnownArg arg); + void RemovedWellKnownArg(WellKnownArg arg); regNumber GetCustomRegister(Compiler* comp, CorInfoCallConvExtension cc, WellKnownArg arg); - void SplitArg(CallArg* arg, unsigned numRegs, unsigned numSlots); - void SortArgs(Compiler* comp, GenTreeCall* call, CallArg** sortedArgs); + void SplitArg(CallArg* arg, unsigned numRegs, unsigned numSlots); + void SortArgs(Compiler* comp, GenTreeCall* call, CallArg** sortedArgs); public: CallArgs(); - CallArgs(const CallArgs&) = delete; + CallArgs(const CallArgs&) = delete; CallArgs& operator=(CallArgs&) = delete; CallArg* FindByNode(GenTree* node); @@ -4795,8 +4854,8 @@ class CallArgs CallArg* InsertAfterUnchecked(Compiler* comp, CallArg* after, const NewCallArg& arg); CallArg* InsertInstParam(Compiler* comp, GenTree* node); CallArg* InsertAfterThisOrFirst(Compiler* comp, const NewCallArg& arg); - void PushLateBack(CallArg* arg); - void Remove(CallArg* arg); + void PushLateBack(CallArg* arg); + void Remove(CallArg* arg); template void InternalCopyFrom(Compiler* comp, CallArgs* other, CopyNodeFunc copyFunc); @@ -4817,7 +4876,7 @@ class CallArgs bool IsNonStandard(Compiler* comp, GenTreeCall* call, CallArg* arg); GenTree* MakeTmpArgNode(Compiler* comp, CallArg* arg); - void SetTemp(CallArg* arg, unsigned tmpNum); + void SetTemp(CallArg* arg, unsigned tmpNum); // clang-format off bool HasThisPointer() const { return m_hasThisPointer; } @@ -4855,7 +4914,8 @@ class CallArgs CallArg* m_arg; public: - explicit CallArgIterator(CallArg* arg) : m_arg(arg) + explicit CallArgIterator(CallArg* arg) + : m_arg(arg) { } @@ -4899,7 +4959,8 @@ class CallArgs } public: - explicit EarlyArgIterator(CallArg* arg) : m_arg(arg) + explicit EarlyArgIterator(CallArg* arg) + : m_arg(arg) { } @@ -4955,7 +5016,8 @@ struct GenTreeCall final : public GenTree CORINFO_SIG_INFO* callSig; #endif - union { + union + { TailCallSiteInfo* tailCallInfo; // Only used for unmanaged calls, which cannot be tail-called CorInfoCallConvExtension unmgdCallConv; @@ -5216,7 +5278,7 @@ struct GenTreeCall final : public GenTree } bool HasNonStandardAddedArgs(Compiler* compiler) const; - int GetNonStandardAddedArgCount(Compiler* compiler) const; + int GetNonStandardAddedArgCount(Compiler* compiler) const; // Returns true if the ABI dictates that this call should get a ret buf // arg. 
This may be out of sync with gtArgs.HasRetBuffer during import @@ -5583,19 +5645,21 @@ struct GenTreeCall final : public GenTree } GenTreeCallFlags gtCallMoreFlags; // in addition to gtFlags - gtCallTypes gtCallType : 3; // value from the gtCallTypes enumeration + gtCallTypes gtCallType : 3; // value from the gtCallTypes enumeration var_types gtReturnType : 5; // exact return type uint8_t gtInlineInfoCount; // number of inline candidates for the given call CORINFO_CLASS_HANDLE gtRetClsHnd; // The return type handle of the call if it is a struct; always available - union { + union + { void* gtStubCallStubAddr; // GTF_CALL_VIRT_STUB - these are never inlined CORINFO_CLASS_HANDLE gtInitClsHnd; // Used by static init helpers, represents a class they init IL_OFFSET gtCastHelperILOffset; // Used by cast helpers to save corresponding IL offset }; - union { + union + { // only used for CALLI unmanaged calls (CT_INDIRECT) GenTree* gtCallCookie; @@ -5613,7 +5677,8 @@ struct GenTreeCall final : public GenTree // expression evaluated after args are placed which determines the control target GenTree* gtControlExpr; - union { + union + { CORINFO_METHOD_HANDLE gtCallMethHnd; // CT_USER_FUNC or CT_HELPER GenTree* gtCallAddr; // CT_INDIRECT }; @@ -5666,11 +5731,13 @@ struct GenTreeCall final : public GenTree static bool Equals(GenTreeCall* c1, GenTreeCall* c2); - GenTreeCall(var_types type) : GenTree(GT_CALL, type) + GenTreeCall(var_types type) + : GenTree(GT_CALL, type) { } #if DEBUGGABLE_GENTREE - GenTreeCall() : GenTree() + GenTreeCall() + : GenTree() { } #endif @@ -5689,7 +5756,8 @@ struct GenTreeMultiRegOp : public GenTreeOp MultiRegSpillFlags gtSpillFlags; GenTreeMultiRegOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2) - : GenTreeOp(oper, type, op1, op2), gtOtherReg(REG_NA) + : GenTreeOp(oper, type, op1, op2) + , gtOtherReg(REG_NA) { ClearOtherRegFlags(); } @@ -5772,7 +5840,8 @@ struct GenTreeMultiRegOp : public GenTreeOp } #if DEBUGGABLE_GENTREE - GenTreeMultiRegOp() : GenTreeOp() + GenTreeMultiRegOp() + : GenTreeOp() { } #endif @@ -5790,7 +5859,9 @@ struct GenTreeFptrVal : public GenTree #endif GenTreeFptrVal(var_types type, CORINFO_METHOD_HANDLE meth) - : GenTree(GT_FTN_ADDR, type), gtFptrMethod(meth), gtFptrDelegateTarget(false) + : GenTree(GT_FTN_ADDR, type) + , gtFptrMethod(meth) + , gtFptrDelegateTarget(false) { #ifdef FEATURE_READYTORUN gtEntryPoint.addr = nullptr; @@ -5798,7 +5869,8 @@ struct GenTreeFptrVal : public GenTree #endif } #if DEBUGGABLE_GENTREE - GenTreeFptrVal() : GenTree() + GenTreeFptrVal() + : GenTree() { } #endif @@ -5810,7 +5882,8 @@ struct GenTreeQmark : public GenTreeOp unsigned gtThenLikelihood; GenTreeQmark(var_types type, GenTree* cond, GenTreeColon* colon, unsigned thenLikelihood = 50) - : GenTreeOp(GT_QMARK, type, cond, colon), gtThenLikelihood(thenLikelihood) + : GenTreeOp(GT_QMARK, type, cond, colon) + , gtThenLikelihood(thenLikelihood) { // These must follow a specific form. 
assert((cond != nullptr) && cond->TypeIs(TYP_INT)); @@ -5846,7 +5919,8 @@ struct GenTreeQmark : public GenTreeOp } #if DEBUGGABLE_GENTREE - GenTreeQmark() : GenTreeOp() + GenTreeQmark() + : GenTreeOp() { } #endif @@ -5865,20 +5939,25 @@ struct GenTreeIntrinsic : public GenTreeOp #endif GenTreeIntrinsic(var_types type, GenTree* op1, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle) - : GenTreeOp(GT_INTRINSIC, type, op1, nullptr), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle) + : GenTreeOp(GT_INTRINSIC, type, op1, nullptr) + , gtIntrinsicName(intrinsicName) + , gtMethodHandle(methodHandle) { assert(intrinsicName != NI_Illegal); } GenTreeIntrinsic( var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle) - : GenTreeOp(GT_INTRINSIC, type, op1, op2), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle) + : GenTreeOp(GT_INTRINSIC, type, op1, op2) + , gtIntrinsicName(intrinsicName) + , gtMethodHandle(methodHandle) { assert(intrinsicName != NI_Illegal); } #if DEBUGGABLE_GENTREE - GenTreeIntrinsic() : GenTreeOp() + GenTreeIntrinsic() + : GenTreeOp() { } #endif @@ -5898,7 +5977,8 @@ struct GenTreeMultiOp : public GenTree protected: GenTree** m_use; - Iterator(GenTree** use) : m_use(use) + Iterator(GenTree** use) + : m_use(use) { } @@ -5923,7 +6003,8 @@ struct GenTreeMultiOp : public GenTree class OperandsIterator final : public Iterator { public: - OperandsIterator(GenTree** use) : Iterator(use) + OperandsIterator(GenTree** use) + : Iterator(use) { } @@ -5936,7 +6017,8 @@ struct GenTreeMultiOp : public GenTree class UseEdgesIterator final : public Iterator { public: - UseEdgesIterator(GenTree** use) : Iterator(use) + UseEdgesIterator(GenTree** use) + : Iterator(use) { } @@ -5983,7 +6065,8 @@ struct GenTreeMultiOp : public GenTree public: #if DEBUGGABLE_GENTREE - GenTreeMultiOp() : GenTree() + GenTreeMultiOp() + : GenTree() { } #endif @@ -6056,7 +6139,8 @@ class IntrinsicNodeBuilder final GenTree* m_inlineOperands[2]; public: - IntrinsicNodeBuilder(CompAllocator allocator, size_t operandCount) : m_operandCount(operandCount) + IntrinsicNodeBuilder(CompAllocator allocator, size_t operandCount) + : m_operandCount(operandCount) { m_operands = (operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands : allocator.allocate(operandCount); @@ -6068,7 +6152,8 @@ class IntrinsicNodeBuilder final #endif // DEBUG } - IntrinsicNodeBuilder(CompAllocator allocator, GenTreeMultiOp* source) : m_operandCount(source->GetOperandCount()) + IntrinsicNodeBuilder(CompAllocator allocator, GenTreeMultiOp* source) + : m_operandCount(source->GetOperandCount()) { m_operands = (m_operandCount <= ArrLen(m_inlineOperands)) ? 
m_inlineOperands : allocator.allocate(m_operandCount); @@ -6280,7 +6365,8 @@ struct GenTreeJitIntrinsic : public GenTreeMultiOp } #if DEBUGGABLE_GENTREE - GenTreeJitIntrinsic() : GenTreeMultiOp() + GenTreeJitIntrinsic() + : GenTreeMultiOp() { } #endif @@ -6341,7 +6427,8 @@ struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic } #if DEBUGGABLE_GENTREE - GenTreeHWIntrinsic() : GenTreeJitIntrinsic() + GenTreeHWIntrinsic() + : GenTreeJitIntrinsic() { } #endif @@ -6383,7 +6470,7 @@ struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic bool OperRequiresGlobRefFlag() const; unsigned GetResultOpNumForRmwIntrinsic(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3); - uint8_t GetTernaryControlByte(GenTreeHWIntrinsic* second) const; + uint8_t GetTernaryControlByte(GenTreeHWIntrinsic* second) const; ClassLayout* GetLayout(Compiler* compiler) const; @@ -6486,7 +6573,8 @@ struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic // struct GenTreeVecCon : public GenTree { - union { + union + { simd8_t gtSimd8Val; simd12_t gtSimd12Val; simd16_t gtSimd16Val; @@ -6538,7 +6626,7 @@ struct GenTreeVecCon : public GenTree // These intrinsics are meant to set the same value to every element. if ((argCnt == 1) && HandleArgForHWIntrinsicCreate(node->Op(1), 0, simdVal, simdBaseType)) { -// CreateScalar leaves the upper bits as zero + // CreateScalar leaves the upper bits as zero #if defined(TARGET_XARCH) if ((intrinsic != NI_Vector128_CreateScalar) && (intrinsic != NI_Vector256_CreateScalar) && @@ -6858,7 +6946,8 @@ struct GenTreeVecCon : public GenTree } } - GenTreeVecCon(var_types type) : GenTree(GT_CNS_VEC, type) + GenTreeVecCon(var_types type) + : GenTree(GT_CNS_VEC, type) { assert(varTypeIsSIMD(type)); @@ -6874,7 +6963,8 @@ struct GenTreeVecCon : public GenTree } #if DEBUGGABLE_GENTREE - GenTreeVecCon() : GenTree() + GenTreeVecCon() + : GenTree() { } #endif @@ -6931,7 +7021,8 @@ struct GenTreeIndexAddr : public GenTreeOp } #if DEBUGGABLE_GENTREE - GenTreeIndexAddr() : GenTreeOp() + GenTreeIndexAddr() + : GenTreeOp() { } #endif @@ -6971,7 +7062,8 @@ struct GenTreeArrAddr : GenTreeUnOp } #if DEBUGGABLE_GENTREE - GenTreeArrAddr() : GenTreeUnOp() + GenTreeArrAddr() + : GenTreeUnOp() { } #endif @@ -7018,12 +7110,14 @@ struct GenTreeArrCommon : public GenTreeUnOp return gtOp1; } - GenTreeArrCommon(genTreeOps oper, var_types type, GenTree* arrRef) : GenTreeUnOp(oper, type, arrRef) + GenTreeArrCommon(genTreeOps oper, var_types type, GenTree* arrRef) + : GenTreeUnOp(oper, type, arrRef) { } #if DEBUGGABLE_GENTREE - GenTreeArrCommon() : GenTreeUnOp() + GenTreeArrCommon() + : GenTreeUnOp() { } #endif @@ -7048,12 +7142,14 @@ struct GenTreeArrLen : public GenTreeArrCommon } GenTreeArrLen(var_types type, GenTree* arrRef, int lenOffset) - : GenTreeArrCommon(GT_ARR_LENGTH, type, arrRef), gtArrLenOffset(lenOffset) + : GenTreeArrCommon(GT_ARR_LENGTH, type, arrRef) + , gtArrLenOffset(lenOffset) { } #if DEBUGGABLE_GENTREE - GenTreeArrLen() : GenTreeArrCommon() + GenTreeArrLen() + : GenTreeArrCommon() { } #endif @@ -7080,13 +7176,16 @@ struct GenTreeMDArr : public GenTreeArrCommon } GenTreeMDArr(genTreeOps oper, GenTree* arrRef, unsigned dim, unsigned rank) - : GenTreeArrCommon(oper, TYP_INT, arrRef), gtDim(dim), gtRank(rank) + : GenTreeArrCommon(oper, TYP_INT, arrRef) + , gtDim(dim) + , gtRank(rank) { assert(OperIs(GT_MDARR_LENGTH, GT_MDARR_LOWER_BOUND)); } #if DEBUGGABLE_GENTREE - GenTreeMDArr() : GenTreeArrCommon() + GenTreeMDArr() + : GenTreeArrCommon() { } #endif @@ -7118,7 +7217,8 @@ struct GenTreeBoundsChk : public 
GenTreeOp gtFlags |= GTF_EXCEPT; } #if DEBUGGABLE_GENTREE - GenTreeBoundsChk() : GenTreeOp() + GenTreeBoundsChk() + : GenTreeOp() { } #endif @@ -7168,7 +7268,10 @@ struct GenTreeArrElem : public GenTree // Requires that "inds" is a pointer to an array of "rank" nodes for the indices. GenTreeArrElem(var_types type, GenTree* arr, unsigned char rank, unsigned char elemSize, GenTree** inds) - : GenTree(GT_ARR_ELEM, type), gtArrObj(arr), gtArrRank(rank), gtArrElemSize(elemSize) + : GenTree(GT_ARR_ELEM, type) + , gtArrObj(arr) + , gtArrRank(rank) + , gtArrElemSize(elemSize) { assert(rank <= ArrLen(gtArrInds)); gtFlags |= (arr->gtFlags & GTF_ALL_EFFECT); @@ -7180,7 +7283,8 @@ struct GenTreeArrElem : public GenTree gtFlags |= GTF_EXCEPT; } #if DEBUGGABLE_GENTREE - GenTreeArrElem() : GenTree() + GenTreeArrElem() + : GenTree() { } #endif @@ -7274,7 +7378,8 @@ struct GenTreeAddrMode : public GenTreeOp protected: friend GenTree; // Used only for GenTree::GetVtableForOper() - GenTreeAddrMode() : GenTreeOp() + GenTreeAddrMode() + : GenTreeOp() { } #endif @@ -7310,7 +7415,8 @@ struct GenTreeIndir : public GenTreeOp unsigned Size() const; - GenTreeIndir(genTreeOps oper, var_types type, GenTree* addr, GenTree* data) : GenTreeOp(oper, type, addr, data) + GenTreeIndir(genTreeOps oper, var_types type, GenTree* addr, GenTree* data) + : GenTreeOp(oper, type, addr, data) { } @@ -7336,12 +7442,14 @@ struct GenTreeIndir : public GenTreeOp #if DEBUGGABLE_GENTREE // Used only for GenTree::GetVtableForOper() - GenTreeIndir() : GenTreeOp() + GenTreeIndir() + : GenTreeOp() { } #else // Used by XARCH codegen to construct temporary trees to pass to the emitter. - GenTreeIndir() : GenTreeOp(GT_NOP, TYP_UNDEF) + GenTreeIndir() + : GenTreeOp(GT_NOP, TYP_UNDEF) { } #endif @@ -7449,7 +7557,8 @@ struct GenTreeBlk : public GenTreeIndir #if DEBUGGABLE_GENTREE protected: friend GenTree; - GenTreeBlk() : GenTreeIndir() + GenTreeBlk() + : GenTreeIndir() { } #endif // DEBUGGABLE_GENTREE @@ -7555,7 +7664,8 @@ struct GenTreeStoreInd : public GenTreeIndir return gtOp2; } - GenTreeStoreInd(var_types type, GenTree* destPtr, GenTree* data) : GenTreeIndir(GT_STOREIND, type, destPtr, data) + GenTreeStoreInd(var_types type, GenTree* destPtr, GenTree* data) + : GenTreeIndir(GT_STOREIND, type, destPtr, data) { SetRMWStatusDefault(); } @@ -7564,7 +7674,8 @@ struct GenTreeStoreInd : public GenTreeIndir protected: friend GenTree; // Used only for GenTree::GetVtableForOper() - GenTreeStoreInd() : GenTreeIndir() + GenTreeStoreInd() + : GenTreeIndir() { SetRMWStatusDefault(); } @@ -7578,13 +7689,15 @@ struct GenTreeCmpXchg : public GenTreeIndir public: GenTreeCmpXchg(var_types type, GenTree* loc, GenTree* val, GenTree* comparand) - : GenTreeIndir(GT_CMPXCHG, type, loc, val), m_comparand(comparand) + : GenTreeIndir(GT_CMPXCHG, type, loc, val) + , m_comparand(comparand) { gtFlags |= comparand->gtFlags & GTF_ALL_EFFECT; } #if DEBUGGABLE_GENTREE - GenTreeCmpXchg() : GenTreeIndir() + GenTreeCmpXchg() + : GenTreeIndir() { } #endif @@ -7612,11 +7725,13 @@ struct GenTreeRetExpr : public GenTree // nullptr for cases where gtSubstExpr is not a tree from the inlinee. 
BasicBlock* gtSubstBB; - GenTreeRetExpr(var_types type) : GenTree(GT_RET_EXPR, type) + GenTreeRetExpr(var_types type) + : GenTree(GT_RET_EXPR, type) { } #if DEBUGGABLE_GENTREE - GenTreeRetExpr() : GenTree() + GenTreeRetExpr() + : GenTree() { } #endif @@ -7640,7 +7755,8 @@ struct GenTreeILOffset : public GenTree } #if DEBUGGABLE_GENTREE - GenTreeILOffset() : GenTree(GT_IL_OFFSET, TYP_VOID) + GenTreeILOffset() + : GenTree(GT_IL_OFFSET, TYP_VOID) { } #endif @@ -7662,7 +7778,8 @@ class GenTreeList GenTree* m_tree; public: - explicit iterator(GenTree* tree) : m_tree(tree) + explicit iterator(GenTree* tree) + : m_tree(tree) { } @@ -7683,7 +7800,8 @@ class GenTreeList } }; - explicit GenTreeList(GenTree* trees) : m_trees(trees) + explicit GenTreeList(GenTree* trees) + : m_trees(trees) { } @@ -7708,7 +7826,8 @@ class LocalsGenTreeList GenTreeLclVarCommon* m_tree; public: - explicit iterator(GenTreeLclVarCommon* tree) : m_tree(tree) + explicit iterator(GenTreeLclVarCommon* tree) + : m_tree(tree) { } @@ -7737,7 +7856,8 @@ class LocalsGenTreeList } }; - explicit LocalsGenTreeList(Statement* stmt) : m_stmt(stmt) + explicit LocalsGenTreeList(Statement* stmt) + : m_stmt(stmt) { } @@ -7937,7 +8057,8 @@ class StatementList Statement* m_stmt; public: - iterator(Statement* stmt) : m_stmt(stmt) + iterator(Statement* stmt) + : m_stmt(stmt) { } @@ -7959,7 +8080,8 @@ class StatementList }; public: - StatementList(Statement* stmts) : m_stmts(stmts) + StatementList(Statement* stmts) + : m_stmts(stmts) { } @@ -7984,13 +8106,15 @@ struct GenTreePhiArg : public GenTreeLclVarCommon BasicBlock* gtPredBB; GenTreePhiArg(var_types type, unsigned lclNum, unsigned ssaNum, BasicBlock* block) - : GenTreeLclVarCommon(GT_PHI_ARG, type, lclNum), gtPredBB(block) + : GenTreeLclVarCommon(GT_PHI_ARG, type, lclNum) + , gtPredBB(block) { SetSsaNum(ssaNum); } #if DEBUGGABLE_GENTREE - GenTreePhiArg() : GenTreeLclVarCommon() + GenTreePhiArg() + : GenTreeLclVarCommon() { } #endif @@ -8028,8 +8152,13 @@ struct GenTreePutArgStk : public GenTreeUnOp // TODO-Throughput: The following information should be obtained from the child // block node. 
- enum class Kind : int8_t{ - Invalid, RepInstr, PartialRepInstr, Unroll, Push, + enum class Kind : int8_t + { + Invalid, + RepInstr, + PartialRepInstr, + Unroll, + Push, }; Kind gtPutArgStkKind; @@ -8161,7 +8290,8 @@ struct GenTreePutArgStk : public GenTreeUnOp #endif // !FEATURE_PUT_STRUCT_ARG_STK #if DEBUGGABLE_GENTREE - GenTreePutArgStk() : GenTreeUnOp() + GenTreePutArgStk() + : GenTreeUnOp() { } #endif @@ -8311,7 +8441,8 @@ struct GenTreePutArgSplit : public GenTreePutArgStk } #if DEBUGGABLE_GENTREE - GenTreePutArgSplit() : GenTreePutArgStk() + GenTreePutArgSplit() + : GenTreePutArgStk() { } #endif @@ -8438,7 +8569,8 @@ struct GenTreeCopyOrReload : public GenTreeUnOp return 1; } - GenTreeCopyOrReload(genTreeOps oper, var_types type, GenTree* op1) : GenTreeUnOp(oper, type, op1) + GenTreeCopyOrReload(genTreeOps oper, var_types type, GenTree* op1) + : GenTreeUnOp(oper, type, op1) { assert(type != TYP_STRUCT || op1->IsMultiRegNode()); SetRegNum(REG_NA); @@ -8446,7 +8578,8 @@ struct GenTreeCopyOrReload : public GenTreeUnOp } #if DEBUGGABLE_GENTREE - GenTreeCopyOrReload() : GenTreeUnOp() + GenTreeCopyOrReload() + : GenTreeUnOp() { } #endif @@ -8476,7 +8609,8 @@ struct GenTreeAllocObj final : public GenTreeUnOp #endif } #if DEBUGGABLE_GENTREE - GenTreeAllocObj() : GenTreeUnOp() + GenTreeAllocObj() + : GenTreeUnOp() { } #endif @@ -8490,12 +8624,15 @@ struct GenTreeRuntimeLookup final : public GenTreeUnOp CorInfoGenericHandleType gtHndType; GenTreeRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* tree) - : GenTreeUnOp(GT_RUNTIMELOOKUP, tree->gtType, tree DEBUGARG(/*largeNode*/ FALSE)), gtHnd(hnd), gtHndType(hndTyp) + : GenTreeUnOp(GT_RUNTIMELOOKUP, tree->gtType, tree DEBUGARG(/*largeNode*/ FALSE)) + , gtHnd(hnd) + , gtHndType(hndTyp) { assert(hnd != nullptr); } #if DEBUGGABLE_GENTREE - GenTreeRuntimeLookup() : GenTreeUnOp() + GenTreeRuntimeLookup() + : GenTreeUnOp() { } #endif @@ -8659,11 +8796,13 @@ struct GenCondition return names[m_code]; } - GenCondition() : m_code() + GenCondition() + : m_code() { } - GenCondition(Code cond) : m_code(cond) + GenCondition(Code cond) + : m_code(cond) { } @@ -8787,13 +8926,15 @@ struct GenTreeCC final : public GenTree GenCondition gtCondition; GenTreeCC(genTreeOps oper, var_types type, GenCondition condition) - : GenTree(oper, type DEBUGARG(/*largeNode*/ FALSE)), gtCondition(condition) + : GenTree(oper, type DEBUGARG(/*largeNode*/ FALSE)) + , gtCondition(condition) { assert(OperIs(GT_JCC, GT_SETCC)); } #if DEBUGGABLE_GENTREE - GenTreeCC() : GenTree() + GenTreeCC() + : GenTree() { } #endif // DEBUGGABLE_GENTREE @@ -8805,7 +8946,8 @@ struct GenTreeOpCC : public GenTreeOp GenCondition gtCondition; GenTreeOpCC(genTreeOps oper, var_types type, GenCondition condition, GenTree* op1 = nullptr, GenTree* op2 = nullptr) - : GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ FALSE)), gtCondition(condition) + : GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ FALSE)) + , gtCondition(condition) { #ifdef TARGET_ARM64 assert(OperIs(GT_SELECTCC, GT_SELECT_INCCC, GT_SELECT_INVCC, GT_SELECT_NEGCC)); @@ -8815,7 +8957,8 @@ struct GenTreeOpCC : public GenTreeOp } #if DEBUGGABLE_GENTREE - GenTreeOpCC() : GenTreeOp() + GenTreeOpCC() + : GenTreeOp() { } #endif // DEBUGGABLE_GENTREE @@ -8850,12 +8993,14 @@ struct GenTreeCCMP final : public GenTreeOpCC insCflags gtFlagsVal; GenTreeCCMP(var_types type, GenCondition condition, GenTree* op1, GenTree* op2, insCflags flagsVal) - : GenTreeOpCC(GT_CCMP, type, condition, op1, op2), 
gtFlagsVal(flagsVal) + : GenTreeOpCC(GT_CCMP, type, condition, op1, op2) + , gtFlagsVal(flagsVal) { } #if DEBUGGABLE_GENTREE - GenTreeCCMP() : GenTreeOpCC() + GenTreeCCMP() + : GenTreeOpCC() { } #endif // DEBUGGABLE_GENTREE diff --git a/src/coreclr/jit/gschecks.cpp b/src/coreclr/jit/gschecks.cpp index 12c610ceaefa4..7b448b8ca3d24 100644 --- a/src/coreclr/jit/gschecks.cpp +++ b/src/coreclr/jit/gschecks.cpp @@ -455,7 +455,8 @@ void Compiler::gsParamsToShadows() DoPostOrder = true }; - ReplaceShadowParamsVisitor(Compiler* compiler) : GenTreeVisitor(compiler) + ReplaceShadowParamsVisitor(Compiler* compiler) + : GenTreeVisitor(compiler) { } diff --git a/src/coreclr/jit/hashbv.cpp b/src/coreclr/jit/hashbv.cpp index 3a648d4dfe7fa..87acddf099bcb 100644 --- a/src/coreclr/jit/hashbv.cpp +++ b/src/coreclr/jit/hashbv.cpp @@ -1948,7 +1948,7 @@ indexType hashBvIterator::nextBit() current_element++; // printf("current element is %d\n", current_element); // reached the end of this node - if (current_element == (indexType) this->currNode->numElements()) + if (current_element == (indexType)this->currNode->numElements()) { // printf("going to next node\n"); this->nextNode(); @@ -1956,7 +1956,7 @@ indexType hashBvIterator::nextBit() } else { - assert(current_element < (indexType) this->currNode->numElements()); + assert(current_element < (indexType)this->currNode->numElements()); // printf("getting more data\n"); current_data = this->currNode->elements[current_element]; current_base = this->currNode->baseIndex + current_element * BITS_PER_ELEMENT; diff --git a/src/coreclr/jit/hashbv.h b/src/coreclr/jit/hashbv.h index 7ad95998add8e..561a1c5641e49 100644 --- a/src/coreclr/jit/hashbv.h +++ b/src/coreclr/jit/hashbv.h @@ -13,15 +13,15 @@ #include #include -//#define TESTING 1 +// #define TESTING 1 -#define LOG2_BITS_PER_ELEMENT 5 +#define LOG2_BITS_PER_ELEMENT 5 #define LOG2_ELEMENTS_PER_NODE 2 -#define LOG2_BITS_PER_NODE (LOG2_BITS_PER_ELEMENT + LOG2_ELEMENTS_PER_NODE) +#define LOG2_BITS_PER_NODE (LOG2_BITS_PER_ELEMENT + LOG2_ELEMENTS_PER_NODE) -#define BITS_PER_ELEMENT (1 << LOG2_BITS_PER_ELEMENT) +#define BITS_PER_ELEMENT (1 << LOG2_BITS_PER_ELEMENT) #define ELEMENTS_PER_NODE (1 << LOG2_ELEMENTS_PER_NODE) -#define BITS_PER_NODE (1 << LOG2_BITS_PER_NODE) +#define BITS_PER_NODE (1 << LOG2_BITS_PER_NODE) #ifdef TARGET_AMD64 typedef unsigned __int64 elemType; @@ -128,8 +128,8 @@ class hashBvNode { } static hashBvNode* Create(indexType base, Compiler* comp); - void Reconstruct(indexType base); - int numElements() + void Reconstruct(indexType base); + int numElements() { return ELEMENTS_PER_NODE; } @@ -172,7 +172,8 @@ class hashBv hashBvNode** nodeArr; hashBvNode* initialVector[1]; - union { + union + { Compiler* compiler; // for freelist hashBv* next; @@ -186,9 +187,9 @@ class hashBv public: hashBv(Compiler* comp); static hashBv* Create(Compiler* comp); - static void Init(Compiler* comp); + static void Init(Compiler* comp); static hashBv* CreateFrom(hashBv* other, Compiler* comp); - void hbvFree(); + void hbvFree(); #ifdef DEBUG void dump(); void dumpFancy(); @@ -201,18 +202,18 @@ class hashBv hashBvGlobalData* globalData(); static hashBvNode*& nodeFreeList(hashBvGlobalData* globalData); - static hashBv*& hbvFreeList(hashBvGlobalData* data); + static hashBv*& hbvFreeList(hashBvGlobalData* data); hashBvNode** getInsertionPointForIndex(indexType index); private: hashBvNode* getNodeForIndexHelper(indexType index, bool canAdd); - int getHashForIndex(indexType index, int table_size); - int getRehashForIndex(indexType 
thisIndex, int thisTableSize, int newTableSize); + int getHashForIndex(indexType index, int table_size); + int getRehashForIndex(indexType thisIndex, int thisTableSize, int newTableSize); // maintain free lists for vectors hashBvNode** getNewVector(int vectorLength); - int getNodeCount(); + int getNodeCount(); public: inline hashBvNode* getOrAddNodeForIndex(indexType index) @@ -221,7 +222,7 @@ class hashBv return temp; } hashBvNode* getNodeForIndex(indexType index); - void removeNodeAtBase(indexType index); + void removeNodeAtBase(indexType index); public: void setBit(indexType index); diff --git a/src/coreclr/jit/helperexpansion.cpp b/src/coreclr/jit/helperexpansion.cpp index 3bf37b93798e9..6c8251eee257f 100644 --- a/src/coreclr/jit/helperexpansion.cpp +++ b/src/coreclr/jit/helperexpansion.cpp @@ -1945,13 +1945,13 @@ static int PickCandidatesForTypeCheck(Compiler* comp, isCastClass = false; break; - // These are never expanded: - // CORINFO_HELP_ISINSTANCEOF_EXCEPTION - // CORINFO_HELP_CHKCASTCLASS_SPECIAL - // CORINFO_HELP_READYTORUN_ISINSTANCEOF, - // CORINFO_HELP_READYTORUN_CHKCAST, + // These are never expanded: + // CORINFO_HELP_ISINSTANCEOF_EXCEPTION + // CORINFO_HELP_CHKCASTCLASS_SPECIAL + // CORINFO_HELP_READYTORUN_ISINSTANCEOF, + // CORINFO_HELP_READYTORUN_CHKCAST, - // Other helper calls are not cast helpers + // Other helper calls are not cast helpers default: return 0; diff --git a/src/coreclr/jit/host.h b/src/coreclr/jit/host.h index 6667fbb3994a7..d10eb93ca9a12 100644 --- a/src/coreclr/jit/host.h +++ b/src/coreclr/jit/host.h @@ -28,10 +28,10 @@ class LogEnv }; bool vlogf(unsigned level, const char* fmt, va_list args); -int vflogf(FILE* file, const char* fmt, va_list args); +int vflogf(FILE* file, const char* fmt, va_list args); -int logf(const char* fmt, ...); -int flogf(FILE* file, const char* fmt, ...); +int logf(const char* fmt, ...); +int flogf(FILE* file, const char* fmt, ...); void gcDump_logf(const char* fmt, ...); void logf(unsigned level, const char* fmt, ...); diff --git a/src/coreclr/jit/hostallocator.h b/src/coreclr/jit/hostallocator.h index a91f7f1fb4ab9..0e8f192063fb0 100644 --- a/src/coreclr/jit/hostallocator.h +++ b/src/coreclr/jit/hostallocator.h @@ -37,7 +37,7 @@ class HostAllocator final private: void* allocateHostMemory(size_t size); - void freeHostMemory(void* p); + void freeHostMemory(void* p); }; // Global operator new overloads that work with HostAllocator diff --git a/src/coreclr/jit/hwintrinsic.cpp b/src/coreclr/jit/hwintrinsic.cpp index e8b60b07909d9..53970ef4a7460 100644 --- a/src/coreclr/jit/hwintrinsic.cpp +++ b/src/coreclr/jit/hwintrinsic.cpp @@ -832,7 +832,7 @@ GenTree* Compiler::addRangeCheckIfNeeded( #ifdef TARGET_XARCH && !HWIntrinsicInfo::isAVX2GatherIntrinsic(intrinsic) && !HWIntrinsicInfo::HasFullRangeImm(intrinsic) #endif - ) + ) { assert(!immOp->IsCnsIntOrI()); assert(varTypeIsUnsigned(immOp)); diff --git a/src/coreclr/jit/hwintrinsic.h b/src/coreclr/jit/hwintrinsic.h index cac041eb83ea6..5ca302e126f32 100644 --- a/src/coreclr/jit/hwintrinsic.h +++ b/src/coreclr/jit/hwintrinsic.h @@ -451,13 +451,13 @@ struct TernaryLogicInfo // We have 256 entries, so we compress as much as possible // This gives us 3-bytes per entry (21-bits) - TernaryLogicOperKind oper1 : 4; + TernaryLogicOperKind oper1 : 4; TernaryLogicUseFlags oper1Use : 3; - TernaryLogicOperKind oper2 : 4; + TernaryLogicOperKind oper2 : 4; TernaryLogicUseFlags oper2Use : 3; - TernaryLogicOperKind oper3 : 4; + TernaryLogicOperKind oper3 : 4; TernaryLogicUseFlags oper3Use : 3; 
static const TernaryLogicInfo& lookup(uint8_t control); @@ -491,11 +491,11 @@ struct HWIntrinsicInfo static const HWIntrinsicInfo& lookup(NamedIntrinsic id); - static NamedIntrinsic lookupId(Compiler* comp, - CORINFO_SIG_INFO* sig, - const char* className, - const char* methodName, - const char* enclosingClassName); + static NamedIntrinsic lookupId(Compiler* comp, + CORINFO_SIG_INFO* sig, + const char* className, + const char* methodName, + const char* enclosingClassName); static CORINFO_InstructionSet lookupIsa(const char* className, const char* enclosingClassName); static unsigned lookupSimdSize(Compiler* comp, NamedIntrinsic id, CORINFO_SIG_INFO* sig); @@ -514,7 +514,7 @@ struct HWIntrinsicInfo static bool isScalarIsa(CORINFO_InstructionSet isa); #ifdef TARGET_XARCH - static bool isAVX2GatherIntrinsic(NamedIntrinsic id); + static bool isAVX2GatherIntrinsic(NamedIntrinsic id); static FloatComparisonMode lookupFloatComparisonModeForSwappedArgs(FloatComparisonMode comparison); #endif @@ -927,7 +927,12 @@ struct HWIntrinsicInfo struct HWIntrinsic final { HWIntrinsic(const GenTreeHWIntrinsic* node) - : op1(nullptr), op2(nullptr), op3(nullptr), op4(nullptr), numOperands(0), baseType(TYP_UNDEF) + : op1(nullptr) + , op2(nullptr) + , op3(nullptr) + , op4(nullptr) + , numOperands(0) + , baseType(TYP_UNDEF) { assert(node != nullptr); diff --git a/src/coreclr/jit/hwintrinsiccodegenarm64.cpp b/src/coreclr/jit/hwintrinsiccodegenarm64.cpp index 6418b72a8f307..9a3a98e087a27 100644 --- a/src/coreclr/jit/hwintrinsiccodegenarm64.cpp +++ b/src/coreclr/jit/hwintrinsiccodegenarm64.cpp @@ -36,7 +36,10 @@ // of a for-loop. // CodeGen::HWIntrinsicImmOpHelper::HWIntrinsicImmOpHelper(CodeGen* codeGen, GenTree* immOp, GenTreeHWIntrinsic* intrin) - : codeGen(codeGen), endLabel(nullptr), nonZeroLabel(nullptr), branchTargetReg(REG_NA) + : codeGen(codeGen) + , endLabel(nullptr) + , nonZeroLabel(nullptr) + , branchTargetReg(REG_NA) { assert(codeGen != nullptr); assert(varTypeIsIntegral(immOp)); diff --git a/src/coreclr/jit/hwintrinsiccodegenxarch.cpp b/src/coreclr/jit/hwintrinsiccodegenxarch.cpp index 5e44772e7115a..79e6b497c368a 100644 --- a/src/coreclr/jit/hwintrinsiccodegenxarch.cpp +++ b/src/coreclr/jit/hwintrinsiccodegenxarch.cpp @@ -317,9 +317,9 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node) { case 1: { - regNumber targetReg = node->GetRegNum(); - GenTree* rmOp = node->Op(1); - auto emitSwCase = [&](int8_t i) { + regNumber targetReg = node->GetRegNum(); + GenTree* rmOp = node->Op(1); + auto emitSwCase = [&](int8_t i) { insOpts newInstOptions = AddEmbRoundingMode(instOptions, i); genHWIntrinsic_R_RM(node, ins, simdSize, targetReg, rmOp, newInstOptions); }; @@ -559,7 +559,9 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node) if (HWIntrinsicInfo::isImmOp(intrinsicId, op3)) { - auto emitSwCase = [&](int8_t i) { genHWIntrinsic_R_R_RM_I(node, ins, simdSize, i); }; + auto emitSwCase = [&](int8_t i) { + genHWIntrinsic_R_R_RM_I(node, ins, simdSize, i); + }; if (op3->IsCnsIntOrI()) { @@ -653,7 +655,9 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node) if (HWIntrinsicInfo::isImmOp(intrinsicId, op4)) { - auto emitSwCase = [&](int8_t i) { genHWIntrinsic_R_R_R_RM_I(node, ins, simdSize, i); }; + auto emitSwCase = [&](int8_t i) { + genHWIntrinsic_R_R_R_RM_I(node, ins, simdSize, i); + }; if (op4->IsCnsIntOrI()) { @@ -1208,10 +1212,10 @@ void CodeGen::genHWIntrinsic_R_R_R_RM_I(GenTreeHWIntrinsic* node, instruction in if (op2->isContained()) { -// op2 is never selected by the table so -// we can contain and 
ignore any register -// allocated to it resulting in better -// non-RMW based codegen. + // op2 is never selected by the table so + // we can contain and ignore any register + // allocated to it resulting in better + // non-RMW based codegen. #if defined(DEBUG) NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); @@ -1364,8 +1368,8 @@ void CodeGen::genNonTableDrivenHWIntrinsicsJumpTableFallback(GenTreeHWIntrinsic* { // This intrinsic has several overloads, only the ones with floating number inputs should reach this part. assert(varTypeIsFloating(baseType)); - GenTree* rmOp = node->Op(1); - auto emitSwCase = [&](int8_t i) { + GenTree* rmOp = node->Op(1); + auto emitSwCase = [&](int8_t i) { insOpts newInstOptions = AddEmbRoundingMode(instOptions, i); genHWIntrinsic_R_RM(node, ins, attr, targetReg, rmOp, newInstOptions); }; @@ -2055,7 +2059,9 @@ void CodeGen::genSSE41Intrinsic(GenTreeHWIntrinsic* node) instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType); emitAttr attr = emitActualTypeSize(node->TypeGet()); - auto emitSwCase = [&](int8_t i) { inst_RV_TT_IV(ins, attr, targetReg, op1, i); }; + auto emitSwCase = [&](int8_t i) { + inst_RV_TT_IV(ins, attr, targetReg, op1, i); + }; if (op2->IsCnsIntOrI()) { diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp index f88cf6ec99ec3..87332c07f0113 100644 --- a/src/coreclr/jit/hwintrinsicxarch.cpp +++ b/src/coreclr/jit/hwintrinsicxarch.cpp @@ -371,7 +371,7 @@ FloatComparisonMode HWIntrinsicInfo::lookupFloatComparisonModeForSwappedArgs(Flo { switch (comparison) { - // These comparison modes are the same even if the operands are swapped + // These comparison modes are the same even if the operands are swapped case FloatComparisonMode::OrderedEqualNonSignaling: return FloatComparisonMode::OrderedEqualNonSignaling; @@ -406,7 +406,7 @@ FloatComparisonMode HWIntrinsicInfo::lookupFloatComparisonModeForSwappedArgs(Flo case FloatComparisonMode::UnorderedTrueSignaling: return FloatComparisonMode::UnorderedTrueSignaling; - // These comparison modes need a different mode if the operands are swapped + // These comparison modes need a different mode if the operands are swapped case FloatComparisonMode::OrderedLessThanSignaling: return FloatComparisonMode::OrderedGreaterThanSignaling; @@ -2498,7 +2498,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, // TODO-XARCH-CQ: We should support long/ulong multiplication break; } -// else if simdSize == 64 then above assert would check if baseline isa supported + // else if simdSize == 64 then above assert would check if baseline isa supported #if defined(TARGET_X86) // TODO-XARCH-CQ: We need to support 64-bit CreateBroadcast @@ -3274,13 +3274,13 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, int ival = HWIntrinsicInfo::lookupIval(this, intrinsic, simdBaseType); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, gtNewIconNode(ival), NI_AVX_CompareScalar, - simdBaseJitType, simdSize); + simdBaseJitType, simdSize); } else { GenTree* clonedOp1 = nullptr; op1 = impCloneExpr(op1, &clonedOp1, CHECK_SPILL_ALL, - nullptr DEBUGARG("Clone op1 for Sse.CompareScalarGreaterThan")); + nullptr DEBUGARG("Clone op1 for Sse.CompareScalarGreaterThan")); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, op1, intrinsic, simdBaseJitType, simdSize); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, clonedOp1, retNode, NI_SSE_MoveScalar, simdBaseJitType, @@ -3333,13 +3333,13 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, int ival = 
HWIntrinsicInfo::lookupIval(this, intrinsic, simdBaseType); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, gtNewIconNode(ival), NI_AVX_CompareScalar, - simdBaseJitType, simdSize); + simdBaseJitType, simdSize); } else { GenTree* clonedOp1 = nullptr; op1 = impCloneExpr(op1, &clonedOp1, CHECK_SPILL_ALL, - nullptr DEBUGARG("Clone op1 for Sse2.CompareScalarGreaterThan")); + nullptr DEBUGARG("Clone op1 for Sse2.CompareScalarGreaterThan")); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, op1, intrinsic, simdBaseJitType, simdSize); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, clonedOp1, retNode, NI_SSE2_MoveScalar, simdBaseJitType, diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 5ed3064fa8750..bb5231b11ce63 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -70,16 +70,16 @@ bool Compiler::impILConsumesAddr(const BYTE* codeAddr) switch (opcode) { - // case CEE_LDFLDA: We're taking this one out as if you have a sequence - // like - // - // ldloca.0 - // ldflda whatever - // - // of a primitivelike struct, you end up after morphing with addr of a local - // that's not marked as addrtaken, which is wrong. Also ldflda is usually used - // for structs that contain other structs, which isnt a case we handle very - // well now for other reasons. + // case CEE_LDFLDA: We're taking this one out as if you have a sequence + // like + // + // ldloca.0 + // ldflda whatever + // + // of a primitivelike struct, you end up after morphing with addr of a local + // that's not marked as addrtaken, which is wrong. Also ldflda is usually used + // for structs that contain other structs, which isnt a case we handle very + // well now for other reasons. case CEE_LDFLD: { @@ -670,7 +670,7 @@ void Compiler::impStoreTemp(unsigned lclNum, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ - ) +) { GenTree* store = gtNewTempStore(lclNum, val, curLevel, pAfterStmt, di, block); @@ -815,7 +815,7 @@ GenTree* Compiler::impStoreStruct(GenTree* store, Statement** pAfterStmt, /* = nullptr */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = nullptr */ - ) +) { assert(varTypeIsStruct(store) && store->OperIsStore()); @@ -1718,7 +1718,7 @@ bool Compiler::impSpillStackEntry(unsigned level, bool bAssertOnRecursion, const char* reason #endif - ) +) { #ifdef DEBUG @@ -2067,9 +2067,9 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H * If the tree has side-effects, it will be spilled to a temp. */ -GenTree* Compiler::impCloneExpr(GenTree* tree, - GenTree** pClone, - unsigned curLevel, +GenTree* Compiler::impCloneExpr(GenTree* tree, + GenTree** pClone, + unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)) { if (!(tree->gtFlags & GTF_GLOB_EFFECT)) @@ -4260,12 +4260,13 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op) // In contrast, we can only use multi-reg calls directly if they have the exact same ABI. // Calling convention equality is a conservative approximation for that check. - if (op->IsCall() && (op->AsCall()->GetUnmanagedCallConv() == info.compCallConv) + if (op->IsCall() && + (op->AsCall()->GetUnmanagedCallConv() == info.compCallConv) #if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) // TODO-Review: this seems unnecessary. Return ABI doesn't change under varargs. 
&& !op->AsCall()->IsVarargs() #endif // defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) - ) + ) { return op; } @@ -6172,7 +6173,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) bool ovfl, unordered, callNode; CORINFO_CLASS_HANDLE tokenType; - union { + union + { int intVal; float fltVal; __int64 lngVal; @@ -6919,7 +6921,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Create the store node and append it. ClassLayout* layout = (lclTyp == TYP_STRUCT) ? typGetObjLayout(stelemClsHnd) : nullptr; op1 = (lclTyp == TYP_STRUCT) ? gtNewStoreBlkNode(layout, op1, op2)->AsIndir() - : gtNewStoreIndNode(lclTyp, op1, op2); + : gtNewStoreIndNode(lclTyp, op1, op2); if (varTypeIsStruct(op1)) { op1 = impStoreStruct(op1, CHECK_SPILL_ALL); @@ -6977,7 +6979,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) oper = GT_MUL; goto MATH_MAYBE_CALL_OVF; - // Other binary math operations + // Other binary math operations case CEE_DIV: oper = GT_DIV; @@ -7266,7 +7268,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) op1 = gtNewOperNode(oper, TYP_INT, op1, op2); } - // fall through + // fall through COND_JUMP: @@ -7595,7 +7597,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) goto SPILL_APPEND; - /************************** Casting OPCODES ***************************/ + /************************** Casting OPCODES ***************************/ case CEE_CONV_OVF_I1: lclTyp = TYP_BYTE; @@ -7737,12 +7739,13 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (varTypeIsFloating(lclTyp)) { - callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl + callNode = varTypeIsLong(impStackTop().val) || + uns // uint->dbl gets turned into uint->long->dbl #ifdef TARGET_64BIT - // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK? - // TYP_BYREF could be used as TYP_I_IMPL which is long. - // TODO-CQ: remove this when we lower casts long/ulong --> float/double - // and generate SSE2 code instead of going through helper calls. + // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK? + // TYP_BYREF could be used as TYP_I_IMPL which is long. + // TODO-CQ: remove this when we lower casts long/ulong --> float/double + // and generate SSE2 code instead of going through helper calls. || (impStackTop().val->TypeGet() == TYP_BYREF) #endif ; @@ -8922,7 +8925,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) #if BIGENDIAN op1 = gtNewIconNode(0, lclTyp); #else - op1 = gtNewIconNode(1, lclTyp); + op1 = gtNewIconNode(1, lclTyp); #endif goto FIELD_DONE; } @@ -8937,7 +8940,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) ClassLayout* layout; lclTyp = TypeHandleToVarType(fieldInfo.fieldType, clsHnd, &layout); op1 = (lclTyp == TYP_STRUCT) ? gtNewBlkIndir(layout, op1, indirFlags) - : gtNewIndir(lclTyp, op1, indirFlags); + : gtNewIndir(lclTyp, op1, indirFlags); if ((indirFlags & GTF_IND_INVARIANT) != 0) { // TODO-ASG: delete this zero-diff quirk. @@ -9799,10 +9802,10 @@ void Compiler::impImportBlockCode(BasicBlock* block) } } - assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref. - (helper == CORINFO_HELP_UNBOX_NULLABLE && - varTypeIsStruct(op1)) // UnboxNullable helper returns a struct. - ); + assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref. + (helper == CORINFO_HELP_UNBOX_NULLABLE && varTypeIsStruct(op1)) // UnboxNullable helper returns a + // struct. 
+ ); /* ---------------------------------------------------------------------- @@ -10066,7 +10069,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Pop the exception object and create the 'throw' helper call op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, impPopStack().val); - // Fall through to clear out the eval stack. + // Fall through to clear out the eval stack. EVAL_APPEND: if (verCurrentState.esStackDepth > 0) @@ -10353,7 +10356,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } break; - /******************************** NYI *******************************/ + /******************************** NYI *******************************/ case 0xCC: OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n"); @@ -10506,7 +10509,7 @@ void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset) // Returns: // Tree with reference to struct local to use as call return value. -GenTree* Compiler::impStoreMultiRegValueToVar(GenTree* op, +GenTree* Compiler::impStoreMultiRegValueToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv)) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return")); @@ -11794,7 +11797,7 @@ unsigned Compiler::impGetSpillTmpBase(BasicBlock* block) // Otherwise, choose one, and propagate to all members of the spill clique. // Grab enough temps for the whole stack. - unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries")); + unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries")); SetSpillTempsBase callback(baseTmp); // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor @@ -12097,7 +12100,7 @@ void Compiler::impFixPredLists() unsigned XTnum = 0; bool added = false; - for (EHblkDsc *HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) + for (EHblkDsc* HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { if (HBtab->HasFinallyHandler()) { @@ -12246,7 +12249,7 @@ void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, I { assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining. (pInlineInfo == nullptr && !compIsForInlining()) // Calculate the static inlining hint for ngen. - ); + ); // If we're really inlining, we should just have one result in play. assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult)); @@ -13291,7 +13294,7 @@ GenTree* Compiler::impInlineFetchArg(InlArgInfo& argInfo, const InlLclVarInfo& l assert(!argInfo.argIsUsed); /* Reserve a temp for the expression. 
- * Use a large size node as we may change it later */ + * Use a large size node as we may change it later */ const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg")); diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index 53f33c45a98c7..52fdf5ab3cd47 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -2203,10 +2203,10 @@ void Compiler::impPopArgsForSwiftCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, } else { - unsigned relOffset = 0; - auto addSegment = [=, &loweredNode, &relOffset](var_types type) { + unsigned relOffset = 0; + auto addSegment = [=, &loweredNode, &relOffset](var_types type) { GenTree* val = gtNewLclFldNode(structVal->GetLclNum(), type, - structVal->GetLclOffs() + offset + relOffset); + structVal->GetLclOffs() + offset + relOffset); if (loweredType == TYP_LONG) { @@ -2216,7 +2216,7 @@ void Compiler::impPopArgsForSwiftCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, if (relOffset > 0) { val = gtNewOperNode(GT_LSH, genActualType(loweredType), val, - gtNewIconNode(relOffset * 8)); + gtNewIconNode(relOffset * 8)); } if (loweredNode == nullptr) @@ -3286,7 +3286,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, GenTree* op1 = impPopStack().val; GenTree* addr = gtNewIndexAddr(op1, op2, TYP_USHORT, NO_CLASS_HANDLE, OFFSETOF__CORINFO_String__chars, OFFSETOF__CORINFO_String__stringLen); - retNode = gtNewIndexIndir(addr->AsIndexAddr()); + retNode = gtNewIndexIndir(addr->AsIndexAddr()); break; } @@ -3633,8 +3633,8 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL; } assert(op1->AsCall()->gtArgs.CountArgs() == 1); - op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, - op1->AsCall()->gtArgs.GetArgByIndex(0)->GetEarlyNode()); + op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, + op1->AsCall()->gtArgs.GetArgByIndex(0)->GetEarlyNode()); op1->gtType = TYP_REF; retNode = op1; } @@ -6115,7 +6115,8 @@ void Compiler::impCheckForPInvokeCall( class SpillRetExprHelper { public: - SpillRetExprHelper(Compiler* comp) : comp(comp) + SpillRetExprHelper(Compiler* comp) + : comp(comp) { } @@ -6783,7 +6784,9 @@ void Compiler::considerGuardedDevirtualization(GenTreeCall* call, #ifdef DEBUG char buffer[256]; JITDUMP("%s call would invoke method %s\n", - isInterface ? "interface" : call->IsDelegateInvoke() ? "delegate" : "virtual", + isInterface ? "interface" + : call->IsDelegateInvoke() ? "delegate" + : "virtual", eeGetMethodFullName(likelyMethod, true, true, buffer, sizeof(buffer))); #endif @@ -7278,8 +7281,8 @@ bool Compiler::IsTargetIntrinsic(NamedIntrinsic intrinsicName) #if defined(TARGET_XARCH) switch (intrinsicName) { - // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1 - // instructions to directly compute round/ceiling/floor/truncate. + // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1 + // instructions to directly compute round/ceiling/floor/truncate. 
case NI_System_Math_Abs: case NI_System_Math_Sqrt: @@ -7482,8 +7485,8 @@ void Compiler::impDevirtualizeCall(GenTreeCall* call, // Optionally, print info on devirtualization Compiler* const rootCompiler = impInlineRoot(); const bool doPrint = JitConfig.JitPrintDevirtualizedMethods().contains(rootCompiler->info.compMethodHnd, - rootCompiler->info.compClassHnd, - &rootCompiler->info.compMethodInfo->args); + rootCompiler->info.compClassHnd, + &rootCompiler->info.compMethodInfo->args); #endif // DEBUG // Fetch information about the virtual method we're calling. @@ -8432,161 +8435,160 @@ void Compiler::impCheckCanInline(GenTreeCall* call, bool success = eeRunWithErrorTrap( [](Param* pParam) { - - // Cache some frequently accessed state. - // - Compiler* const compiler = pParam->pThis; - COMP_HANDLE compCompHnd = compiler->info.compCompHnd; - CORINFO_METHOD_HANDLE ftn = pParam->fncHandle; - InlineResult* const inlineResult = pParam->result; + // Cache some frequently accessed state. + // + Compiler* const compiler = pParam->pThis; + COMP_HANDLE compCompHnd = compiler->info.compCompHnd; + CORINFO_METHOD_HANDLE ftn = pParam->fncHandle; + InlineResult* const inlineResult = pParam->result; #ifdef DEBUG - if (JitConfig.JitNoInline()) - { - inlineResult->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE); - return; - } + if (JitConfig.JitNoInline()) + { + inlineResult->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE); + return; + } #endif - JITDUMP("\nCheckCanInline: fetching method info for inline candidate %s -- context %p\n", - compiler->eeGetMethodName(ftn), compiler->dspPtr(pParam->exactContextHnd)); + JITDUMP("\nCheckCanInline: fetching method info for inline candidate %s -- context %p\n", + compiler->eeGetMethodName(ftn), compiler->dspPtr(pParam->exactContextHnd)); - if (pParam->exactContextHnd == METHOD_BEING_COMPILED_CONTEXT()) - { - JITDUMP("Current method context\n"); - } - else if ((((size_t)pParam->exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)) - { - JITDUMP("Method context: %s\n", - compiler->eeGetMethodFullName((CORINFO_METHOD_HANDLE)pParam->exactContextHnd)); - } - else - { - JITDUMP("Class context: %s\n", compiler->eeGetClassName((CORINFO_CLASS_HANDLE)( - (size_t)pParam->exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK))); - } - - // Fetch method info. This may fail, if the method doesn't have IL. - // - CORINFO_METHOD_INFO methInfo; - if (!compCompHnd->getMethodInfo(ftn, &methInfo, pParam->exactContextHnd)) - { - inlineResult->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO); - return; - } + if (pParam->exactContextHnd == METHOD_BEING_COMPILED_CONTEXT()) + { + JITDUMP("Current method context\n"); + } + else if ((((size_t)pParam->exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)) + { + JITDUMP("Method context: %s\n", + compiler->eeGetMethodFullName((CORINFO_METHOD_HANDLE)pParam->exactContextHnd)); + } + else + { + JITDUMP("Class context: %s\n", + compiler->eeGetClassName( + (CORINFO_CLASS_HANDLE)((size_t)pParam->exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK))); + } - // Profile data allows us to avoid early "too many IL bytes" outs. - // - inlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE_WEIGHTS, - compiler->fgHaveSufficientProfileWeights()); - inlineResult->NoteBool(InlineObservation::CALLSITE_INSIDE_THROW_BLOCK, - compiler->compCurBB->KindIs(BBJ_THROW)); + // Fetch method info. This may fail, if the method doesn't have IL. 
+ // + CORINFO_METHOD_INFO methInfo; + if (!compCompHnd->getMethodInfo(ftn, &methInfo, pParam->exactContextHnd)) + { + inlineResult->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO); + return; + } - bool const forceInline = (pParam->methAttr & CORINFO_FLG_FORCEINLINE) != 0; + // Profile data allows us to avoid early "too many IL bytes" outs. + // + inlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE_WEIGHTS, + compiler->fgHaveSufficientProfileWeights()); + inlineResult->NoteBool(InlineObservation::CALLSITE_INSIDE_THROW_BLOCK, compiler->compCurBB->KindIs(BBJ_THROW)); - compiler->impCanInlineIL(ftn, &methInfo, forceInline, inlineResult); + bool const forceInline = (pParam->methAttr & CORINFO_FLG_FORCEINLINE) != 0; - if (inlineResult->IsFailure()) - { - assert(inlineResult->IsNever()); - return; - } + compiler->impCanInlineIL(ftn, &methInfo, forceInline, inlineResult); - // Speculatively check if initClass() can be done. - // If it can be done, we will try to inline the method. - CorInfoInitClassResult const initClassResult = - compCompHnd->initClass(nullptr /* field */, ftn /* method */, pParam->exactContextHnd /* context */); + if (inlineResult->IsFailure()) + { + assert(inlineResult->IsNever()); + return; + } - if (initClassResult & CORINFO_INITCLASS_DONT_INLINE) - { - inlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_CLASS_INIT); - return; - } + // Speculatively check if initClass() can be done. + // If it can be done, we will try to inline the method. + CorInfoInitClassResult const initClassResult = + compCompHnd->initClass(nullptr /* field */, ftn /* method */, pParam->exactContextHnd /* context */); - // Given the VM the final say in whether to inline or not. - // This should be last since for verifiable code, this can be expensive - // - CorInfoInline const vmResult = compCompHnd->canInline(compiler->info.compMethodHnd, ftn); + if (initClassResult & CORINFO_INITCLASS_DONT_INLINE) + { + inlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_CLASS_INIT); + return; + } - if (vmResult == INLINE_FAIL) - { - inlineResult->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE); - } - else if (vmResult == INLINE_NEVER) - { - inlineResult->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE); - } + // Given the VM the final say in whether to inline or not. + // This should be last since for verifiable code, this can be expensive + // + CorInfoInline const vmResult = compCompHnd->canInline(compiler->info.compMethodHnd, ftn); - if (inlineResult->IsFailure()) - { - // The VM already self-reported this failure, so mark it specially - // so the JIT doesn't also try reporting it. - // - inlineResult->SetVMFailure(); - return; - } + if (vmResult == INLINE_FAIL) + { + inlineResult->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE); + } + else if (vmResult == INLINE_NEVER) + { + inlineResult->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE); + } - // Get the method's class properties + if (inlineResult->IsFailure()) + { + // The VM already self-reported this failure, so mark it specially + // so the JIT doesn't also try reporting it. 
// - CORINFO_CLASS_HANDLE clsHandle = compCompHnd->getMethodClass(ftn); - unsigned const clsAttr = compCompHnd->getClassAttribs(clsHandle); + inlineResult->SetVMFailure(); + return; + } - // Return type - // - var_types const fncRetType = pParam->call->TypeGet(); + // Get the method's class properties + // + CORINFO_CLASS_HANDLE clsHandle = compCompHnd->getMethodClass(ftn); + unsigned const clsAttr = compCompHnd->getClassAttribs(clsHandle); + + // Return type + // + var_types const fncRetType = pParam->call->TypeGet(); #ifdef DEBUG - var_types fncRealRetType = JITtype2varType(methInfo.args.retType); + var_types fncRealRetType = JITtype2varType(methInfo.args.retType); - assert((genActualType(fncRealRetType) == genActualType(fncRetType)) || - // VSW 288602 - // In case of IJW, we allow to assign a native pointer to a BYREF. - (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) || - (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT))); + assert((genActualType(fncRealRetType) == genActualType(fncRetType)) || + // VSW 288602 + // In case of IJW, we allow to assign a native pointer to a BYREF. + (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) || + (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT))); #endif - // Allocate an InlineCandidateInfo structure, - // - // Or, reuse the existing GuardedDevirtualizationCandidateInfo, - // which was pre-allocated to have extra room. - // - InlineCandidateInfo* pInfo; + // Allocate an InlineCandidateInfo structure, + // + // Or, reuse the existing GuardedDevirtualizationCandidateInfo, + // which was pre-allocated to have extra room. + // + InlineCandidateInfo* pInfo; - if (pParam->call->IsGuardedDevirtualizationCandidate()) - { - pInfo = pParam->call->GetGDVCandidateInfo(pParam->candidateIndex); - } - else - { - pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo; + if (pParam->call->IsGuardedDevirtualizationCandidate()) + { + pInfo = pParam->call->GetGDVCandidateInfo(pParam->candidateIndex); + } + else + { + pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo; - // Null out bits we don't use when we're just inlining - // - pInfo->guardedClassHandle = nullptr; - pInfo->guardedMethodHandle = nullptr; - pInfo->guardedMethodUnboxedEntryHandle = nullptr; - pInfo->likelihood = 0; - pInfo->requiresInstMethodTableArg = false; - } - - pInfo->methInfo = methInfo; - pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd; - pInfo->clsHandle = clsHandle; - pInfo->exactContextHnd = pParam->exactContextHnd; - pInfo->retExpr = nullptr; - pInfo->preexistingSpillTemp = BAD_VAR_NUM; - pInfo->clsAttr = clsAttr; - pInfo->methAttr = pParam->methAttr; - pInfo->initClassResult = initClassResult; - pInfo->fncRetType = fncRetType; - pInfo->exactContextNeedsRuntimeLookup = false; - pInfo->inlinersContext = pParam->pThis->compInlineContext; - - // Note exactContextNeedsRuntimeLookup is reset later on, - // over in impMarkInlineCandidate. 
+ // Null out bits we don't use when we're just inlining // - *(pParam->ppInlineCandidateInfo) = pInfo; - }, + pInfo->guardedClassHandle = nullptr; + pInfo->guardedMethodHandle = nullptr; + pInfo->guardedMethodUnboxedEntryHandle = nullptr; + pInfo->likelihood = 0; + pInfo->requiresInstMethodTableArg = false; + } + + pInfo->methInfo = methInfo; + pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd; + pInfo->clsHandle = clsHandle; + pInfo->exactContextHnd = pParam->exactContextHnd; + pInfo->retExpr = nullptr; + pInfo->preexistingSpillTemp = BAD_VAR_NUM; + pInfo->clsAttr = clsAttr; + pInfo->methAttr = pParam->methAttr; + pInfo->initClassResult = initClassResult; + pInfo->fncRetType = fncRetType; + pInfo->exactContextNeedsRuntimeLookup = false; + pInfo->inlinersContext = pParam->pThis->compInlineContext; + + // Note exactContextNeedsRuntimeLookup is reset later on, + // over in impMarkInlineCandidate. + // + *(pParam->ppInlineCandidateInfo) = pInfo; + }, ¶m); if (!success) @@ -9560,372 +9562,374 @@ NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method) else #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) if (strcmp(namespaceName, "Collections.Generic") == 0) - { - if (strcmp(className, "Comparer`1") == 0) { - if (strcmp(methodName, "get_Default") == 0) + if (strcmp(className, "Comparer`1") == 0) + { + if (strcmp(methodName, "get_Default") == 0) + { + result = NI_System_Collections_Generic_Comparer_get_Default; + } + } + else if (strcmp(className, "EqualityComparer`1") == 0) { - result = NI_System_Collections_Generic_Comparer_get_Default; + if (strcmp(methodName, "get_Default") == 0) + { + result = NI_System_Collections_Generic_EqualityComparer_get_Default; + } } } - else if (strcmp(className, "EqualityComparer`1") == 0) + else if (strcmp(namespaceName, "Numerics") == 0) { - if (strcmp(methodName, "get_Default") == 0) + if (strcmp(className, "BitOperations") == 0) { - result = NI_System_Collections_Generic_EqualityComparer_get_Default; + result = lookupPrimitiveIntNamedIntrinsic(method, methodName); } - } - } - else if (strcmp(namespaceName, "Numerics") == 0) - { - if (strcmp(className, "BitOperations") == 0) - { - result = lookupPrimitiveIntNamedIntrinsic(method, methodName); - } - else - { + else + { #ifdef FEATURE_HW_INTRINSICS - CORINFO_SIG_INFO sig; - info.compCompHnd->getMethodSig(method, &sig); + CORINFO_SIG_INFO sig; + info.compCompHnd->getMethodSig(method, &sig); - result = SimdAsHWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); + result = SimdAsHWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); #endif // FEATURE_HW_INTRINSICS - if (result == NI_Illegal) - { - // This allows the relevant code paths to be dropped as dead code even - // on platforms where FEATURE_HW_INTRINSICS is not supported. - - if (strcmp(methodName, "get_IsSupported") == 0) - { - assert(strcmp(className, "Vector`1") == 0); - result = NI_IsSupported_Type; - } - else if (strcmp(methodName, "get_IsHardwareAccelerated") == 0) - { - result = NI_IsSupported_False; - } - else if (strcmp(methodName, "get_Count") == 0) - { - assert(strcmp(className, "Vector`1") == 0); - result = NI_Vector_GetCount; - } - else if (gtIsRecursiveCall(method)) + if (result == NI_Illegal) { - // For the framework itself, any recursive intrinsics will either be - // only supported on a single platform or will be guarded by a relevant - // IsSupported check so the throw PNSE will be valid or dropped. 
+ // This allows the relevant code paths to be dropped as dead code even + // on platforms where FEATURE_HW_INTRINSICS is not supported. - result = NI_Throw_PlatformNotSupportedException; + if (strcmp(methodName, "get_IsSupported") == 0) + { + assert(strcmp(className, "Vector`1") == 0); + result = NI_IsSupported_Type; + } + else if (strcmp(methodName, "get_IsHardwareAccelerated") == 0) + { + result = NI_IsSupported_False; + } + else if (strcmp(methodName, "get_Count") == 0) + { + assert(strcmp(className, "Vector`1") == 0); + result = NI_Vector_GetCount; + } + else if (gtIsRecursiveCall(method)) + { + // For the framework itself, any recursive intrinsics will either be + // only supported on a single platform or will be guarded by a relevant + // IsSupported check so the throw PNSE will be valid or dropped. + + result = NI_Throw_PlatformNotSupportedException; + } } } } - } - else if (strncmp(namespaceName, "Runtime.", 8) == 0) - { - namespaceName += 8; - - if (strcmp(namespaceName, "CompilerServices") == 0) + else if (strncmp(namespaceName, "Runtime.", 8) == 0) { - if (strcmp(className, "RuntimeHelpers") == 0) + namespaceName += 8; + + if (strcmp(namespaceName, "CompilerServices") == 0) { - if (strcmp(methodName, "CreateSpan") == 0) + if (strcmp(className, "RuntimeHelpers") == 0) { - result = NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan; - } - else if (strcmp(methodName, "InitializeArray") == 0) - { - result = NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray; + if (strcmp(methodName, "CreateSpan") == 0) + { + result = NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan; + } + else if (strcmp(methodName, "InitializeArray") == 0) + { + result = NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray; + } + else if (strcmp(methodName, "IsKnownConstant") == 0) + { + result = NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant; + } } - else if (strcmp(methodName, "IsKnownConstant") == 0) + else if (strcmp(className, "Unsafe") == 0) { - result = NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant; + if (strcmp(methodName, "Add") == 0) + { + result = NI_SRCS_UNSAFE_Add; + } + else if (strcmp(methodName, "AddByteOffset") == 0) + { + result = NI_SRCS_UNSAFE_AddByteOffset; + } + else if (strcmp(methodName, "AreSame") == 0) + { + result = NI_SRCS_UNSAFE_AreSame; + } + else if (strcmp(methodName, "As") == 0) + { + result = NI_SRCS_UNSAFE_As; + } + else if (strcmp(methodName, "AsPointer") == 0) + { + result = NI_SRCS_UNSAFE_AsPointer; + } + else if (strcmp(methodName, "AsRef") == 0) + { + result = NI_SRCS_UNSAFE_AsRef; + } + else if (strcmp(methodName, "BitCast") == 0) + { + result = NI_SRCS_UNSAFE_BitCast; + } + else if (strcmp(methodName, "ByteOffset") == 0) + { + result = NI_SRCS_UNSAFE_ByteOffset; + } + else if (strcmp(methodName, "Copy") == 0) + { + result = NI_SRCS_UNSAFE_Copy; + } + else if (strcmp(methodName, "CopyBlock") == 0) + { + result = NI_SRCS_UNSAFE_CopyBlock; + } + else if (strcmp(methodName, "CopyBlockUnaligned") == 0) + { + result = NI_SRCS_UNSAFE_CopyBlockUnaligned; + } + else if (strcmp(methodName, "InitBlock") == 0) + { + result = NI_SRCS_UNSAFE_InitBlock; + } + else if (strcmp(methodName, "InitBlockUnaligned") == 0) + { + result = NI_SRCS_UNSAFE_InitBlockUnaligned; + } + else if (strcmp(methodName, "IsAddressGreaterThan") == 0) + { + result = NI_SRCS_UNSAFE_IsAddressGreaterThan; + } + else if (strcmp(methodName, "IsAddressLessThan") == 0) + { + result = NI_SRCS_UNSAFE_IsAddressLessThan; + } + else if 
(strcmp(methodName, "IsNullRef") == 0) + { + result = NI_SRCS_UNSAFE_IsNullRef; + } + else if (strcmp(methodName, "NullRef") == 0) + { + result = NI_SRCS_UNSAFE_NullRef; + } + else if (strcmp(methodName, "Read") == 0) + { + result = NI_SRCS_UNSAFE_Read; + } + else if (strcmp(methodName, "ReadUnaligned") == 0) + { + result = NI_SRCS_UNSAFE_ReadUnaligned; + } + else if (strcmp(methodName, "SizeOf") == 0) + { + result = NI_SRCS_UNSAFE_SizeOf; + } + else if (strcmp(methodName, "SkipInit") == 0) + { + result = NI_SRCS_UNSAFE_SkipInit; + } + else if (strcmp(methodName, "Subtract") == 0) + { + result = NI_SRCS_UNSAFE_Subtract; + } + else if (strcmp(methodName, "SubtractByteOffset") == 0) + { + result = NI_SRCS_UNSAFE_SubtractByteOffset; + } + else if (strcmp(methodName, "Unbox") == 0) + { + result = NI_SRCS_UNSAFE_Unbox; + } + else if (strcmp(methodName, "Write") == 0) + { + result = NI_SRCS_UNSAFE_Write; + } + else if (strcmp(methodName, "WriteUnaligned") == 0) + { + result = NI_SRCS_UNSAFE_WriteUnaligned; + } } } - else if (strcmp(className, "Unsafe") == 0) + else if (strcmp(namespaceName, "InteropServices") == 0) { - if (strcmp(methodName, "Add") == 0) - { - result = NI_SRCS_UNSAFE_Add; - } - else if (strcmp(methodName, "AddByteOffset") == 0) - { - result = NI_SRCS_UNSAFE_AddByteOffset; - } - else if (strcmp(methodName, "AreSame") == 0) - { - result = NI_SRCS_UNSAFE_AreSame; - } - else if (strcmp(methodName, "As") == 0) - { - result = NI_SRCS_UNSAFE_As; - } - else if (strcmp(methodName, "AsPointer") == 0) - { - result = NI_SRCS_UNSAFE_AsPointer; - } - else if (strcmp(methodName, "AsRef") == 0) - { - result = NI_SRCS_UNSAFE_AsRef; - } - else if (strcmp(methodName, "BitCast") == 0) - { - result = NI_SRCS_UNSAFE_BitCast; - } - else if (strcmp(methodName, "ByteOffset") == 0) - { - result = NI_SRCS_UNSAFE_ByteOffset; - } - else if (strcmp(methodName, "Copy") == 0) - { - result = NI_SRCS_UNSAFE_Copy; - } - else if (strcmp(methodName, "CopyBlock") == 0) - { - result = NI_SRCS_UNSAFE_CopyBlock; - } - else if (strcmp(methodName, "CopyBlockUnaligned") == 0) - { - result = NI_SRCS_UNSAFE_CopyBlockUnaligned; - } - else if (strcmp(methodName, "InitBlock") == 0) - { - result = NI_SRCS_UNSAFE_InitBlock; - } - else if (strcmp(methodName, "InitBlockUnaligned") == 0) - { - result = NI_SRCS_UNSAFE_InitBlockUnaligned; - } - else if (strcmp(methodName, "IsAddressGreaterThan") == 0) - { - result = NI_SRCS_UNSAFE_IsAddressGreaterThan; - } - else if (strcmp(methodName, "IsAddressLessThan") == 0) - { - result = NI_SRCS_UNSAFE_IsAddressLessThan; - } - else if (strcmp(methodName, "IsNullRef") == 0) - { - result = NI_SRCS_UNSAFE_IsNullRef; - } - else if (strcmp(methodName, "NullRef") == 0) - { - result = NI_SRCS_UNSAFE_NullRef; - } - else if (strcmp(methodName, "Read") == 0) - { - result = NI_SRCS_UNSAFE_Read; - } - else if (strcmp(methodName, "ReadUnaligned") == 0) - { - result = NI_SRCS_UNSAFE_ReadUnaligned; - } - else if (strcmp(methodName, "SizeOf") == 0) - { - result = NI_SRCS_UNSAFE_SizeOf; - } - else if (strcmp(methodName, "SkipInit") == 0) - { - result = NI_SRCS_UNSAFE_SkipInit; - } - else if (strcmp(methodName, "Subtract") == 0) - { - result = NI_SRCS_UNSAFE_Subtract; - } - else if (strcmp(methodName, "SubtractByteOffset") == 0) - { - result = NI_SRCS_UNSAFE_SubtractByteOffset; - } - else if (strcmp(methodName, "Unbox") == 0) - { - result = NI_SRCS_UNSAFE_Unbox; - } - else if (strcmp(methodName, "Write") == 0) - { - result = NI_SRCS_UNSAFE_Write; - } - else if (strcmp(methodName, "WriteUnaligned") == 0) + if 
(strcmp(className, "MemoryMarshal") == 0) { - result = NI_SRCS_UNSAFE_WriteUnaligned; + if (strcmp(methodName, "GetArrayDataReference") == 0) + { + result = NI_System_Runtime_InteropService_MemoryMarshal_GetArrayDataReference; + } } } - } - else if (strcmp(namespaceName, "InteropServices") == 0) - { - if (strcmp(className, "MemoryMarshal") == 0) + else if (strncmp(namespaceName, "Intrinsics", 10) == 0) { - if (strcmp(methodName, "GetArrayDataReference") == 0) - { - result = NI_System_Runtime_InteropService_MemoryMarshal_GetArrayDataReference; - } - } - } - else if (strncmp(namespaceName, "Intrinsics", 10) == 0) - { - // We go down this path even when FEATURE_HW_INTRINSICS isn't enabled - // so we can specially handle IsSupported and recursive calls. - - // This is required to appropriately handle the intrinsics on platforms - // which don't support them. On such a platform methods like Vector64.Create - // will be seen as `Intrinsic` and `mustExpand` due to having a code path - // which is recursive. When such a path is hit we expect it to be handled by - // the importer and we fire an assert if it wasn't and in previous versions - // of the JIT would fail fast. This was changed to throw a PNSE instead but - // we still assert as most intrinsics should have been recognized/handled. - - // In order to avoid the assert, we specially handle the IsSupported checks - // (to better allow dead-code optimizations) and we explicitly throw a PNSE - // as we know that is the desired behavior for the HWIntrinsics when not - // supported. For cases like Vector64.Create, this is fine because it will - // be behind a relevant IsSupported check and will never be hit and the - // software fallback will be executed instead. - - CLANG_FORMAT_COMMENT_ANCHOR; + // We go down this path even when FEATURE_HW_INTRINSICS isn't enabled + // so we can specially handle IsSupported and recursive calls. + + // This is required to appropriately handle the intrinsics on platforms + // which don't support them. On such a platform methods like Vector64.Create + // will be seen as `Intrinsic` and `mustExpand` due to having a code path + // which is recursive. When such a path is hit we expect it to be handled by + // the importer and we fire an assert if it wasn't and in previous versions + // of the JIT would fail fast. This was changed to throw a PNSE instead but + // we still assert as most intrinsics should have been recognized/handled. + + // In order to avoid the assert, we specially handle the IsSupported checks + // (to better allow dead-code optimizations) and we explicitly throw a PNSE + // as we know that is the desired behavior for the HWIntrinsics when not + // supported. For cases like Vector64.Create, this is fine because it will + // be behind a relevant IsSupported check and will never be hit and the + // software fallback will be executed instead. 
+ + CLANG_FORMAT_COMMENT_ANCHOR; #ifdef FEATURE_HW_INTRINSICS - namespaceName += 10; - const char* platformNamespaceName; + namespaceName += 10; + const char* platformNamespaceName; #if defined(TARGET_XARCH) - platformNamespaceName = ".X86"; + platformNamespaceName = ".X86"; #elif defined(TARGET_ARM64) - platformNamespaceName = ".Arm"; + platformNamespaceName = ".Arm"; #else #error Unsupported platform #endif - if ((namespaceName[0] == '\0') || (strcmp(namespaceName, platformNamespaceName) == 0)) - { - CORINFO_SIG_INFO sig; - info.compCompHnd->getMethodSig(method, &sig); + if ((namespaceName[0] == '\0') || (strcmp(namespaceName, platformNamespaceName) == 0)) + { + CORINFO_SIG_INFO sig; + info.compCompHnd->getMethodSig(method, &sig); - result = HWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); - } + result = HWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); + } #endif // FEATURE_HW_INTRINSICS - if (result == NI_Illegal) - { - // This allows the relevant code paths to be dropped as dead code even - // on platforms where FEATURE_HW_INTRINSICS is not supported. - - if (strcmp(methodName, "get_IsSupported") == 0) + if (result == NI_Illegal) { - if (strncmp(className, "Vector", 6) == 0) + // This allows the relevant code paths to be dropped as dead code even + // on platforms where FEATURE_HW_INTRINSICS is not supported. + + if (strcmp(methodName, "get_IsSupported") == 0) { - assert( - (strcmp(className, "Vector64`1") == 0) || (strcmp(className, "Vector128`1") == 0) || - (strcmp(className, "Vector256`1") == 0) || (strcmp(className, "Vector512`1") == 0)); + if (strncmp(className, "Vector", 6) == 0) + { + assert((strcmp(className, "Vector64`1") == 0) || + (strcmp(className, "Vector128`1") == 0) || + (strcmp(className, "Vector256`1") == 0) || + (strcmp(className, "Vector512`1") == 0)); - result = NI_IsSupported_Type; + result = NI_IsSupported_Type; + } + else + { + result = NI_IsSupported_False; + } } - else + else if (strcmp(methodName, "get_IsHardwareAccelerated") == 0) { result = NI_IsSupported_False; } - } - else if (strcmp(methodName, "get_IsHardwareAccelerated") == 0) - { - result = NI_IsSupported_False; - } - else if (strcmp(methodName, "get_Count") == 0) - { - assert((strcmp(className, "Vector64`1") == 0) || (strcmp(className, "Vector128`1") == 0) || - (strcmp(className, "Vector256`1") == 0) || (strcmp(className, "Vector512`1") == 0)); + else if (strcmp(methodName, "get_Count") == 0) + { + assert( + (strcmp(className, "Vector64`1") == 0) || (strcmp(className, "Vector128`1") == 0) || + (strcmp(className, "Vector256`1") == 0) || (strcmp(className, "Vector512`1") == 0)); - result = NI_Vector_GetCount; - } - else if (gtIsRecursiveCall(method)) - { - // For the framework itself, any recursive intrinsics will either be - // only supported on a single platform or will be guarded by a relevant - // IsSupported check so the throw PNSE will be valid or dropped. + result = NI_Vector_GetCount; + } + else if (gtIsRecursiveCall(method)) + { + // For the framework itself, any recursive intrinsics will either be + // only supported on a single platform or will be guarded by a relevant + // IsSupported check so the throw PNSE will be valid or dropped. 
- result = NI_Throw_PlatformNotSupportedException; + result = NI_Throw_PlatformNotSupportedException; + } } } } - } - else if (strcmp(namespaceName, "StubHelpers") == 0) - { - if (strcmp(className, "StubHelpers") == 0) + else if (strcmp(namespaceName, "StubHelpers") == 0) { - if (strcmp(methodName, "GetStubContext") == 0) + if (strcmp(className, "StubHelpers") == 0) { - result = NI_System_StubHelpers_GetStubContext; - } - else if (strcmp(methodName, "NextCallReturnAddress") == 0) - { - result = NI_System_StubHelpers_NextCallReturnAddress; - } - } - } - else if (strcmp(namespaceName, "Text") == 0) - { - if (strcmp(className, "UTF8EncodingSealed") == 0) - { - if (strcmp(methodName, "ReadUtf8") == 0) - { - assert(strcmp(enclosingClassName, "UTF8Encoding") == 0); - result = NI_System_Text_UTF8Encoding_UTF8EncodingSealed_ReadUtf8; + if (strcmp(methodName, "GetStubContext") == 0) + { + result = NI_System_StubHelpers_GetStubContext; + } + else if (strcmp(methodName, "NextCallReturnAddress") == 0) + { + result = NI_System_StubHelpers_NextCallReturnAddress; + } } } - } - else if (strcmp(namespaceName, "Threading") == 0) - { - if (strcmp(className, "Interlocked") == 0) + else if (strcmp(namespaceName, "Text") == 0) { - if (strcmp(methodName, "And") == 0) + if (strcmp(className, "UTF8EncodingSealed") == 0) { - result = NI_System_Threading_Interlocked_And; - } - else if (strcmp(methodName, "Or") == 0) - { - result = NI_System_Threading_Interlocked_Or; - } - else if (strcmp(methodName, "CompareExchange") == 0) - { - result = NI_System_Threading_Interlocked_CompareExchange; - } - else if (strcmp(methodName, "Exchange") == 0) - { - result = NI_System_Threading_Interlocked_Exchange; - } - else if (strcmp(methodName, "ExchangeAdd") == 0) - { - result = NI_System_Threading_Interlocked_ExchangeAdd; - } - else if (strcmp(methodName, "MemoryBarrier") == 0) - { - result = NI_System_Threading_Interlocked_MemoryBarrier; - } - else if (strcmp(methodName, "ReadMemoryBarrier") == 0) - { - result = NI_System_Threading_Interlocked_ReadMemoryBarrier; + if (strcmp(methodName, "ReadUtf8") == 0) + { + assert(strcmp(enclosingClassName, "UTF8Encoding") == 0); + result = NI_System_Text_UTF8Encoding_UTF8EncodingSealed_ReadUtf8; + } } } - else if (strcmp(className, "Thread") == 0) + else if (strcmp(namespaceName, "Threading") == 0) { - if (strcmp(methodName, "get_CurrentThread") == 0) + if (strcmp(className, "Interlocked") == 0) { - result = NI_System_Threading_Thread_get_CurrentThread; - } - else if (strcmp(methodName, "get_ManagedThreadId") == 0) - { - result = NI_System_Threading_Thread_get_ManagedThreadId; + if (strcmp(methodName, "And") == 0) + { + result = NI_System_Threading_Interlocked_And; + } + else if (strcmp(methodName, "Or") == 0) + { + result = NI_System_Threading_Interlocked_Or; + } + else if (strcmp(methodName, "CompareExchange") == 0) + { + result = NI_System_Threading_Interlocked_CompareExchange; + } + else if (strcmp(methodName, "Exchange") == 0) + { + result = NI_System_Threading_Interlocked_Exchange; + } + else if (strcmp(methodName, "ExchangeAdd") == 0) + { + result = NI_System_Threading_Interlocked_ExchangeAdd; + } + else if (strcmp(methodName, "MemoryBarrier") == 0) + { + result = NI_System_Threading_Interlocked_MemoryBarrier; + } + else if (strcmp(methodName, "ReadMemoryBarrier") == 0) + { + result = NI_System_Threading_Interlocked_ReadMemoryBarrier; + } } - } - else if (strcmp(className, "Volatile") == 0) - { - if (strcmp(methodName, "Read") == 0) + else if (strcmp(className, "Thread") == 0) { - 
result = NI_System_Threading_Volatile_Read; + if (strcmp(methodName, "get_CurrentThread") == 0) + { + result = NI_System_Threading_Thread_get_CurrentThread; + } + else if (strcmp(methodName, "get_ManagedThreadId") == 0) + { + result = NI_System_Threading_Thread_get_ManagedThreadId; + } } - else if (strcmp(methodName, "Write") == 0) + else if (strcmp(className, "Volatile") == 0) { - result = NI_System_Threading_Volatile_Write; + if (strcmp(methodName, "Read") == 0) + { + result = NI_System_Threading_Volatile_Read; + } + else if (strcmp(methodName, "Write") == 0) + { + result = NI_System_Threading_Volatile_Write; + } } } - } } } else if (strcmp(namespaceName, "Internal.Runtime") == 0) diff --git a/src/coreclr/jit/importervectorization.cpp b/src/coreclr/jit/importervectorization.cpp index af7a2f2791d16..26ae9225cbd7e 100644 --- a/src/coreclr/jit/importervectorization.cpp +++ b/src/coreclr/jit/importervectorization.cpp @@ -182,7 +182,7 @@ GenTree* Compiler::impExpandHalfConstEqualsSIMD( xor1 = gtNewSimdBinOpNode(GT_XOR, simdType, vec1, cnsVec1, baseType, simdSize); } -// ((v1 ^ cns1) | (v2 ^ cns2)) == zero + // ((v1 ^ cns1) | (v2 ^ cns2)) == zero #if defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_AVX512F_VL)) @@ -317,7 +317,7 @@ GenTree* Compiler::impExpandHalfConstEqualsSWAR( assert(len >= 1 && len <= 8); // Compose Int32 or Int64 values from ushort components -#define MAKEINT32(c1, c2) ((UINT64)c2 << 16) | ((UINT64)c1 << 0) +#define MAKEINT32(c1, c2) ((UINT64)c2 << 16) | ((UINT64)c1 << 0) #define MAKEINT64(c1, c2, c3, c4) ((UINT64)c4 << 48) | ((UINT64)c3 << 32) | ((UINT64)c2 << 16) | ((UINT64)c1 << 0) if (len == 1) @@ -516,10 +516,10 @@ GenTree* Compiler::impExpandHalfConstEquals(GenTreeLclVarCommon* data, GenTree* castedLen = gtNewCastNode(TYP_I_IMPL, gtCloneExpr(lengthFld), false, TYP_I_IMPL); GenTree* byteLen = gtNewOperNode(GT_MUL, TYP_I_IMPL, castedLen, gtNewIconNode(2, TYP_I_IMPL)); GenTreeOp* cmpStart = gtNewOperNode(GT_ADD, TYP_BYREF, gtClone(data), - gtNewOperNode(GT_SUB, TYP_I_IMPL, byteLen, - gtNewIconNode((ssize_t)(len * 2), TYP_I_IMPL))); - GenTree* storeTmp = gtNewTempStore(dataAddr->GetLclNum(), cmpStart); - indirCmp = gtNewOperNode(GT_COMMA, indirCmp->TypeGet(), storeTmp, indirCmp); + gtNewOperNode(GT_SUB, TYP_I_IMPL, byteLen, + gtNewIconNode((ssize_t)(len * 2), TYP_I_IMPL))); + GenTree* storeTmp = gtNewTempStore(dataAddr->GetLclNum(), cmpStart); + indirCmp = gtNewOperNode(GT_COMMA, indirCmp->TypeGet(), storeTmp, indirCmp); } GenTreeColon* lenCheckColon = gtNewColonNode(TYP_INT, indirCmp, gtNewFalse()); diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index 0839f9fc2a045..a85ba05596b51 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -67,7 +67,8 @@ class IndirectCallTransformer { public: - IndirectCallTransformer(Compiler* compiler) : compiler(compiler) + IndirectCallTransformer(Compiler* compiler) + : compiler(compiler) { } @@ -157,7 +158,9 @@ class IndirectCallTransformer { public: Transformer(Compiler* compiler, BasicBlock* block, Statement* stmt) - : compiler(compiler), currBlock(block), stmt(stmt) + : compiler(compiler) + , currBlock(block) + , stmt(stmt) { remainderBlock = nullptr; checkBlock = nullptr; @@ -197,7 +200,7 @@ class IndirectCallTransformer virtual const char* Name() = 0; virtual void ClearFlag() = 0; virtual GenTreeCall* GetCall(Statement* callStmt) = 0; - virtual void FixupRetExpr() = 0; + virtual void FixupRetExpr() = 
0; //------------------------------------------------------------------------ // CreateRemainder: split current block at the call stmt and @@ -473,7 +476,8 @@ class IndirectCallTransformer { public: GuardedDevirtualizationTransformer(Compiler* compiler, BasicBlock* block, Statement* stmt) - : Transformer(compiler, block, stmt), returnTemp(BAD_VAR_NUM) + : Transformer(compiler, block, stmt) + , returnTemp(BAD_VAR_NUM) { } @@ -1259,7 +1263,9 @@ class IndirectCallTransformer unsigned m_nodeCount; ClonabilityVisitor(Compiler* compiler) - : GenTreeVisitor(compiler), m_unclonableNode(nullptr), m_nodeCount(0) + : GenTreeVisitor(compiler) + , m_unclonableNode(nullptr) + , m_nodeCount(0) { } diff --git a/src/coreclr/jit/inductionvariableopts.cpp b/src/coreclr/jit/inductionvariableopts.cpp index 59e5b6a0d497d..19755c312de35 100644 --- a/src/coreclr/jit/inductionvariableopts.cpp +++ b/src/coreclr/jit/inductionvariableopts.cpp @@ -66,7 +66,6 @@ bool Compiler::optCanSinkWidenedIV(unsigned lclNum, FlowGraphNaturalLoop* loop) LclVarDsc* dsc = lvaGetDesc(lclNum); BasicBlockVisit result = loop->VisitRegularExitBlocks([=](BasicBlock* exit) { - if (!VarSetOps::IsMember(this, exit->bbLiveIn, dsc->lvVarIndex)) { JITDUMP(" Exit " FMT_BB " does not need a sink; V%02u is not live-in\n", exit->bbNum, lclNum); @@ -94,7 +93,6 @@ bool Compiler::optCanSinkWidenedIV(unsigned lclNum, FlowGraphNaturalLoop* loop) // unprofitable. If this ever changes we need some more expansive handling // here. loop->VisitLoopBlocks([=](BasicBlock* block) { - block->VisitAllSuccs(this, [=](BasicBlock* succ) { if (!loop->ContainsBlock(succ) && bbIsHandlerBeg(succ)) { @@ -334,7 +332,10 @@ void Compiler::optReplaceWidenedIV(unsigned lclNum, unsigned ssaNum, unsigned ne }; ReplaceVisitor(Compiler* comp, unsigned lclNum, unsigned ssaNum, unsigned newLclNum) - : GenTreeVisitor(comp), m_lclNum(lclNum), m_ssaNum(ssaNum), m_newLclNum(newLclNum) + : GenTreeVisitor(comp) + , m_lclNum(lclNum) + , m_ssaNum(ssaNum) + , m_newLclNum(newLclNum) { } diff --git a/src/coreclr/jit/inline.cpp b/src/coreclr/jit/inline.cpp index 06ca71126f855..c8831a75b39bc 100644 --- a/src/coreclr/jit/inline.cpp +++ b/src/coreclr/jit/inline.cpp @@ -383,7 +383,7 @@ void InlineContext::Dump(bool verbose, unsigned indent) #if defined(DEBUG) calleeName = compiler->eeGetMethodFullName(m_Callee); #else - calleeName = "callee"; + calleeName = "callee"; #endif // defined(DEBUG) } diff --git a/src/coreclr/jit/inline.h b/src/coreclr/jit/inline.h index 56f44947b3664..8c1cb56124ad2 100644 --- a/src/coreclr/jit/inline.h +++ b/src/coreclr/jit/inline.h @@ -222,9 +222,9 @@ class InlinePolicy } // Policy observations - virtual void NoteSuccess() = 0; - virtual void NoteBool(InlineObservation obs, bool value) = 0; - virtual void NoteFatal(InlineObservation obs) = 0; + virtual void NoteSuccess() = 0; + virtual void NoteBool(InlineObservation obs, bool value) = 0; + virtual void NoteFatal(InlineObservation obs) = 0; virtual void NoteInt(InlineObservation obs, int value) = 0; virtual void NoteDouble(InlineObservation obs, double value) = 0; @@ -321,7 +321,7 @@ class InlinePolicy private: // No copying or assignment supported - InlinePolicy(const InlinePolicy&) = delete; + InlinePolicy(const InlinePolicy&) = delete; InlinePolicy& operator=(const InlinePolicy&) = delete; protected: @@ -558,7 +558,7 @@ class InlineResult private: // No copying or assignment allowed. 
- InlineResult(const InlineResult&) = delete; + InlineResult(const InlineResult&) = delete; InlineResult& operator=(const InlineResult&) = delete; // Report/log/dump decision as appropriate @@ -637,16 +637,16 @@ struct InlArgInfo CallArg* arg; // the caller argument GenTree* argBashTmpNode; // tmp node created, if it may be replaced with actual arg unsigned argTmpNum; // the argument tmp number - unsigned argIsUsed : 1; // is this arg used at all? - unsigned argIsInvariant : 1; // the argument is a constant or a local variable address - unsigned argIsLclVar : 1; // the argument is a local variable - unsigned argIsThis : 1; // the argument is the 'this' pointer - unsigned argHasSideEff : 1; // the argument has side effects - unsigned argHasGlobRef : 1; // the argument has a global ref - unsigned argHasCallerLocalRef : 1; // the argument value depends on an aliased caller local - unsigned argHasTmp : 1; // the argument will be evaluated to a temp - unsigned argHasLdargaOp : 1; // Is there LDARGA(s) operation on this argument? - unsigned argHasStargOp : 1; // Is there STARG(s) operation on this argument? + unsigned argIsUsed : 1; // is this arg used at all? + unsigned argIsInvariant : 1; // the argument is a constant or a local variable address + unsigned argIsLclVar : 1; // the argument is a local variable + unsigned argIsThis : 1; // the argument is the 'this' pointer + unsigned argHasSideEff : 1; // the argument has side effects + unsigned argHasGlobRef : 1; // the argument has a global ref + unsigned argHasCallerLocalRef : 1; // the argument value depends on an aliased caller local + unsigned argHasTmp : 1; // the argument will be evaluated to a temp + unsigned argHasLdargaOp : 1; // Is there LDARGA(s) operation on this argument? + unsigned argHasStargOp : 1; // Is there STARG(s) operation on this argument? unsigned argIsByRefToStructLocal : 1; // Is this arg an address of a struct local or a normed struct local or a // field in them? unsigned argIsExact : 1; // Is this arg of an exact class? @@ -658,10 +658,10 @@ struct InlLclVarInfo { CORINFO_CLASS_HANDLE lclTypeHandle; // Type handle from the signature. Available for structs and REFs. var_types lclTypeInfo; // Type from the signature. - unsigned char lclHasLdlocaOp : 1; // Is there LDLOCA(s) operation on this local? - unsigned char lclHasStlocOp : 1; // Is there a STLOC on this local? + unsigned char lclHasLdlocaOp : 1; // Is there LDLOCA(s) operation on this local? + unsigned char lclHasStlocOp : 1; // Is there a STLOC on this local? unsigned char lclHasMultipleStlocOp : 1; // Is there more than one STLOC on this local - unsigned char lclIsPinned : 1; + unsigned char lclIsPinned : 1; }; // InlineInfo provides detailed information about a particular inline candidate. 
@@ -887,8 +887,8 @@ class InlineContext InlinePolicy* m_Policy; // policy that evaluated this inline unsigned m_TreeID; // ID of the GenTreeCall in the parent bool m_Devirtualized : 1; // true if this was a devirtualized call - bool m_Guarded : 1; // true if this was a guarded call - bool m_Unboxed : 1; // true if this call now invokes the unboxed entry + bool m_Guarded : 1; // true if this was a guarded call + bool m_Unboxed : 1; // true if this call now invokes the unboxed entry #endif // defined(DEBUG) @@ -1026,7 +1026,7 @@ class InlineStrategy void DumpDataContents(FILE* file); // Dump xml-formatted description of inlines - void DumpXml(FILE* file = stderr, unsigned indent = 0); + void DumpXml(FILE* file = stderr, unsigned indent = 0); static void FinalizeXml(FILE* file = stderr); // Cache for file position of this method in the inline xml diff --git a/src/coreclr/jit/inlinepolicy.cpp b/src/coreclr/jit/inlinepolicy.cpp index d057ccd09ed0d..3b771f291607b 100644 --- a/src/coreclr/jit/inlinepolicy.cpp +++ b/src/coreclr/jit/inlinepolicy.cpp @@ -945,8 +945,9 @@ void DefaultPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) { // Inline appears to be unprofitable JITLOG_THIS(m_RootCompiler, - (LL_INFO100000, "Native estimate for function size exceeds threshold" - " for inlining %g > %g (multiplier = %g)\n", + (LL_INFO100000, + "Native estimate for function size exceeds threshold" + " for inlining %g > %g (multiplier = %g)\n", (double)m_CalleeNativeSizeEstimate / SIZE_SCALE, (double)threshold / SIZE_SCALE, m_Multiplier)); // Fail the inline @@ -963,8 +964,9 @@ void DefaultPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) { // Inline appears to be profitable JITLOG_THIS(m_RootCompiler, - (LL_INFO100000, "Native estimate for function size is within threshold" - " for inlining %g <= %g (multiplier = %g)\n", + (LL_INFO100000, + "Native estimate for function size is within threshold" + " for inlining %g <= %g (multiplier = %g)\n", (double)m_CalleeNativeSizeEstimate / SIZE_SCALE, (double)threshold / SIZE_SCALE, m_Multiplier)); // Update candidacy @@ -1072,7 +1074,8 @@ bool DefaultPolicy::PropagateNeverToRuntime() const // compiler -- compiler instance doing the inlining (root compiler) // isPrejitRoot -- true if this compiler is prejitting the root method -RandomPolicy::RandomPolicy(Compiler* compiler, bool isPrejitRoot) : DiscretionaryPolicy(compiler, isPrejitRoot) +RandomPolicy::RandomPolicy(Compiler* compiler, bool isPrejitRoot) + : DiscretionaryPolicy(compiler, isPrejitRoot) { m_Random = compiler->m_inlineStrategy->GetRandom(); } @@ -2768,7 +2771,8 @@ void DiscretionaryPolicy::DumpData(FILE* file) const // compiler -- compiler instance doing the inlining (root compiler) // isPrejitRoot -- true if this compiler is prejitting the root method -ModelPolicy::ModelPolicy(Compiler* compiler, bool isPrejitRoot) : DiscretionaryPolicy(compiler, isPrejitRoot) +ModelPolicy::ModelPolicy(Compiler* compiler, bool isPrejitRoot) + : DiscretionaryPolicy(compiler, isPrejitRoot) { // Empty } @@ -2969,7 +2973,8 @@ void ModelPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) // compiler -- compiler instance doing the inlining (root compiler) // isPrejitRoot -- true if this compiler is prejitting the root method -ProfilePolicy::ProfilePolicy(Compiler* compiler, bool isPrejitRoot) : DiscretionaryPolicy(compiler, isPrejitRoot) +ProfilePolicy::ProfilePolicy(Compiler* compiler, bool isPrejitRoot) + : DiscretionaryPolicy(compiler, isPrejitRoot) { // Empty } @@ -3169,7 +3174,8 @@ void 
ProfilePolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) // compiler -- compiler instance doing the inlining (root compiler) // isPrejitRoot -- true if this compiler is prejitting the root method -FullPolicy::FullPolicy(Compiler* compiler, bool isPrejitRoot) : DiscretionaryPolicy(compiler, isPrejitRoot) +FullPolicy::FullPolicy(Compiler* compiler, bool isPrejitRoot) + : DiscretionaryPolicy(compiler, isPrejitRoot) { // Empty } @@ -3236,7 +3242,8 @@ void FullPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) // compiler -- compiler instance doing the inlining (root compiler) // isPrejitRoot -- true if this compiler is prejitting the root method -SizePolicy::SizePolicy(Compiler* compiler, bool isPrejitRoot) : DiscretionaryPolicy(compiler, isPrejitRoot) +SizePolicy::SizePolicy(Compiler* compiler, bool isPrejitRoot) + : DiscretionaryPolicy(compiler, isPrejitRoot) { // Empty } diff --git a/src/coreclr/jit/inlinepolicy.h b/src/coreclr/jit/inlinepolicy.h index 52333d5aacac3..a8d8e67f1db3c 100644 --- a/src/coreclr/jit/inlinepolicy.h +++ b/src/coreclr/jit/inlinepolicy.h @@ -48,7 +48,8 @@ class LegalPolicy : public InlinePolicy public: // Constructor - LegalPolicy(bool isPrejitRoot) : InlinePolicy(isPrejitRoot) + LegalPolicy(bool isPrejitRoot) + : InlinePolicy(isPrejitRoot) { // empty } @@ -157,7 +158,7 @@ class DefaultPolicy : public LegalPolicy // Helper methods virtual double DetermineMultiplier(); int DetermineNativeSizeEstimate(); - int DetermineCallsiteNativeSizeEstimate(CORINFO_METHOD_INFO* methodInfo); + int DetermineCallsiteNativeSizeEstimate(CORINFO_METHOD_INFO* methodInfo); // Data members Compiler* m_RootCompiler; // root compiler instance @@ -174,20 +175,20 @@ class DefaultPolicy : public LegalPolicy unsigned m_ConstantArgFeedsConstantTest; int m_CalleeNativeSizeEstimate; int m_CallsiteNativeSizeEstimate; - bool m_IsForceInline : 1; - bool m_IsForceInlineKnown : 1; - bool m_IsInstanceCtor : 1; + bool m_IsForceInline : 1; + bool m_IsForceInlineKnown : 1; + bool m_IsInstanceCtor : 1; bool m_IsFromPromotableValueClass : 1; - bool m_HasSimd : 1; - bool m_LooksLikeWrapperMethod : 1; - bool m_MethodIsMostlyLoadStore : 1; - bool m_CallsiteIsInTryRegion : 1; - bool m_CallsiteIsInLoop : 1; - bool m_IsNoReturn : 1; - bool m_IsNoReturnKnown : 1; - bool m_ConstArgFeedsIsKnownConst : 1; - bool m_ArgFeedsIsKnownConst : 1; - bool m_InsideThrowBlock : 1; + bool m_HasSimd : 1; + bool m_LooksLikeWrapperMethod : 1; + bool m_MethodIsMostlyLoadStore : 1; + bool m_CallsiteIsInTryRegion : 1; + bool m_CallsiteIsInLoop : 1; + bool m_IsNoReturn : 1; + bool m_IsNoReturnKnown : 1; + bool m_ConstArgFeedsIsKnownConst : 1; + bool m_ArgFeedsIsKnownConst : 1; + bool m_InsideThrowBlock : 1; }; // ExtendedDefaultPolicy is a slightly more aggressive variant of @@ -271,11 +272,11 @@ class ExtendedDefaultPolicy : public DefaultPolicy unsigned m_UnrollableMemop; unsigned m_Switch; unsigned m_DivByCns; - bool m_ReturnsStructByValue : 1; - bool m_IsFromValueClass : 1; - bool m_NonGenericCallsGeneric : 1; + bool m_ReturnsStructByValue : 1; + bool m_IsFromValueClass : 1; + bool m_NonGenericCallsGeneric : 1; bool m_IsCallsiteInNoReturnRegion : 1; - bool m_HasProfileWeights : 1; + bool m_HasProfileWeights : 1; }; // DiscretionaryPolicy is a variant of the default policy. 
It diff --git a/src/coreclr/jit/instr.cpp b/src/coreclr/jit/instr.cpp index dd82e7c08f92b..7866c8a5e7b0f 100644 --- a/src/coreclr/jit/instr.cpp +++ b/src/coreclr/jit/instr.cpp @@ -876,7 +876,7 @@ CodeGen::OperandDesc CodeGen::genOperandDesc(GenTree* op) // broadcast -> LCL_VAR(TYP_(U)INT) ssize_t scalarValue = hwintrinsicChild->AsIntCon()->IconValue(); UNATIVE_OFFSET cnum = emit->emitDataConst(&scalarValue, genTypeSize(simdBaseType), - genTypeSize(simdBaseType), simdBaseType); + genTypeSize(simdBaseType), simdBaseType); return OperandDesc(compiler->eeFindJitDataOffs(cnum)); } else @@ -1124,9 +1124,9 @@ void CodeGen::inst_RV_TT(instruction ins, emitAttr size, regNumber op1Reg, GenTr } /***************************************************************************** -* -* Generate an instruction of the form "op reg1, reg2, icon". -*/ + * + * Generate an instruction of the form "op reg1, reg2, icon". + */ void CodeGen::inst_RV_RV_IV(instruction ins, emitAttr size, regNumber reg1, regNumber reg2, unsigned ival) { @@ -1256,8 +1256,8 @@ void CodeGen::inst_RV_RV_TT(instruction ins, emitter* emit = GetEmitter(); noway_assert(emit->emitVerifyEncodable(ins, EA_SIZE(size), targetReg)); -// TODO-XArch-CQ: Commutative operations can have op1 be contained -// TODO-XArch-CQ: Non-VEX encoded instructions can have both ops contained + // TODO-XArch-CQ: Commutative operations can have op1 be contained + // TODO-XArch-CQ: Non-VEX encoded instructions can have both ops contained #if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS) if (CodeGenInterface::IsEmbeddedBroadcastEnabled(ins, op2)) diff --git a/src/coreclr/jit/instrsarm.h b/src/coreclr/jit/instrsarm.h index 9356150d4b2e8..3a1c871d316f6 100644 --- a/src/coreclr/jit/instrsarm.h +++ b/src/coreclr/jit/instrsarm.h @@ -19,7 +19,7 @@ * e8 -- encoding 8 * e9 -- encoding 9 * -******************************************************************************/ + ******************************************************************************/ #if !defined(TARGET_ARM) #error Unexpected target type diff --git a/src/coreclr/jit/instrsarm64.h b/src/coreclr/jit/instrsarm64.h index c07976f1eca0b..c6ac7404c569d 100644 --- a/src/coreclr/jit/instrsarm64.h +++ b/src/coreclr/jit/instrsarm64.h @@ -18,7 +18,7 @@ * e8 -- encoding 8 * e9 -- encoding 9 * -******************************************************************************/ + ******************************************************************************/ #if !defined(TARGET_ARM64) #error Unexpected target type diff --git a/src/coreclr/jit/instrsloongarch64.h b/src/coreclr/jit/instrsloongarch64.h index 4f94516c5fb91..3794d91e02e38 100644 --- a/src/coreclr/jit/instrsloongarch64.h +++ b/src/coreclr/jit/instrsloongarch64.h @@ -11,7 +11,7 @@ * mask -- instruction's mask * fmt -- disasmbly format * -******************************************************************************/ + ******************************************************************************/ #if !defined(TARGET_LOONGARCH64) #error Unexpected target type diff --git a/src/coreclr/jit/instrsxarch.h b/src/coreclr/jit/instrsxarch.h index 17443cb978492..440cc0033c82f 100644 --- a/src/coreclr/jit/instrsxarch.h +++ b/src/coreclr/jit/instrsxarch.h @@ -18,7 +18,7 @@ * tt -- the tupletype for the instruction * flags -- flags, see INS_FLAGS_* enum * -******************************************************************************/ + ******************************************************************************/ // clang-format off #if !defined(TARGET_XARCH) diff --git 
a/src/coreclr/jit/jit.h b/src/coreclr/jit/jit.h index 2c09afc6e1d8c..1094740a8e25d 100644 --- a/src/coreclr/jit/jit.h +++ b/src/coreclr/jit/jit.h @@ -26,8 +26,9 @@ #define ZERO 0 #ifdef _MSC_VER -#define CHECK_STRUCT_PADDING 0 // Set this to '1' to enable warning C4820 "'bytes' bytes padding added after - // construct 'member_name'" on interesting structs/classes +#define CHECK_STRUCT_PADDING \ + 0 // Set this to '1' to enable warning C4820 "'bytes' bytes padding added after + // construct 'member_name'" on interesting structs/classes #else #define CHECK_STRUCT_PADDING 0 // Never enable it for non-MSFT compilers #endif @@ -295,9 +296,9 @@ typedef ptrdiff_t ssize_t; #include "corjit.h" #include "jitee.h" -#define __OPERATOR_NEW_INLINE 1 // indicate that I will define these -#define __PLACEMENT_NEW_INLINE // don't bring in the global placement new, it is easy to make a mistake - // with our new(compiler*) pattern. +#define __OPERATOR_NEW_INLINE 1 // indicate that I will define these +#define __PLACEMENT_NEW_INLINE // don't bring in the global placement new, it is easy to make a mistake + // with our new(compiler*) pattern. #include "utilcode.h" // this defines assert as _ASSERTE #include "host.h" // this redefines assert for the JIT to use assertAbort @@ -319,7 +320,7 @@ typedef ptrdiff_t ssize_t; #endif #ifdef DEBUG -#define INDEBUG(x) x +#define INDEBUG(x) x #define DEBUGARG(x) , x #else #define INDEBUG(x) @@ -334,7 +335,7 @@ typedef ptrdiff_t ssize_t; #if defined(UNIX_AMD64_ABI) #define UNIX_AMD64_ABI_ONLY_ARG(x) , x -#define UNIX_AMD64_ABI_ONLY(x) x +#define UNIX_AMD64_ABI_ONLY(x) x #else // !defined(UNIX_AMD64_ABI) #define UNIX_AMD64_ABI_ONLY_ARG(x) #define UNIX_AMD64_ABI_ONLY(x) @@ -342,7 +343,7 @@ typedef ptrdiff_t ssize_t; #if defined(TARGET_LOONGARCH64) #define UNIX_LOONGARCH64_ONLY_ARG(x) , x -#define UNIX_LOONGARCH64_ONLY(x) x +#define UNIX_LOONGARCH64_ONLY(x) x #else // !TARGET_LOONGARCH64 #define UNIX_LOONGARCH64_ONLY_ARG(x) #define UNIX_LOONGARCH64_ONLY(x) @@ -355,16 +356,16 @@ typedef ptrdiff_t ssize_t; #if defined(UNIX_AMD64_ABI) #define UNIX_AMD64_ABI_ONLY_ARG(x) , x -#define UNIX_AMD64_ABI_ONLY(x) x +#define UNIX_AMD64_ABI_ONLY(x) x #else // !defined(UNIX_AMD64_ABI) #define UNIX_AMD64_ABI_ONLY_ARG(x) #define UNIX_AMD64_ABI_ONLY(x) #endif // defined(UNIX_AMD64_ABI) #if defined(UNIX_AMD64_ABI) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) -#define MULTIREG_HAS_SECOND_GC_RET 1 +#define MULTIREG_HAS_SECOND_GC_RET 1 #define MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(x) , x -#define MULTIREG_HAS_SECOND_GC_RET_ONLY(x) x +#define MULTIREG_HAS_SECOND_GC_RET_ONLY(x) x #else // !defined(UNIX_AMD64_ABI) #define MULTIREG_HAS_SECOND_GC_RET 0 #define MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(x) @@ -385,7 +386,7 @@ typedef ptrdiff_t ssize_t; #define DUMMY_INIT(x) (x) #define REGEN_SHORTCUTS 0 -#define REGEN_CALLPAT 0 +#define REGEN_CALLPAT 0 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX @@ -473,9 +474,9 @@ class GlobalJitOptions /*****************************************************************************/ -#define CSE_INTO_HANDLERS 0 -#define DUMP_FLOWGRAPHS DEBUG // Support for creating Xml Flowgraph reports in *.fgx files -#define HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION 0 // if 1 we must have all handler entry points in the Hot code section +#define CSE_INTO_HANDLERS 0 +#define DUMP_FLOWGRAPHS DEBUG // Support for creating Xml Flowgraph reports in *.fgx 
files +#define HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION 0 // if 1 we must have all handler entry points in the Hot code section /*****************************************************************************/ @@ -483,40 +484,43 @@ class GlobalJitOptions /*****************************************************************************/ -#define DUMP_GC_TABLES DEBUG +#define DUMP_GC_TABLES DEBUG #define VERIFY_GC_TABLES 0 -#define REARRANGE_ADDS 1 +#define REARRANGE_ADDS 1 -#define FUNC_INFO_LOGGING 1 // Support dumping function info to a file. In retail, only NYIs, with no function name, - // are dumped. +#define FUNC_INFO_LOGGING \ + 1 // Support dumping function info to a file. In retail, only NYIs, with no function name, + // are dumped. /*****************************************************************************/ /*****************************************************************************/ /* Set these to 1 to collect and output various statistics about the JIT */ -#define CALL_ARG_STATS 0 // Collect stats about calls and call arguments. -#define COUNT_BASIC_BLOCKS 0 // Create a histogram of basic block sizes, and a histogram of IL sizes in the simple - // case of single block methods. -#define COUNT_LOOPS 0 // Collect stats about loops, such as the total number of natural loops, a histogram of +#define CALL_ARG_STATS 0 // Collect stats about calls and call arguments. +#define COUNT_BASIC_BLOCKS \ + 0 // Create a histogram of basic block sizes, and a histogram of IL sizes in the simple + // case of single block methods. +#define COUNT_LOOPS \ + 0 // Collect stats about loops, such as the total number of natural loops, a histogram of // the number of loop exits, etc. -#define DISPLAY_SIZES 0 // Display generated code, data, and GC information sizes. -#define MEASURE_BLOCK_SIZE 0 // Collect stats about basic block and FlowEdge node sizes and memory allocations. -#define MEASURE_FATAL 0 // Count the number of calls to fatal(), including NYIs and noway_asserts. -#define MEASURE_NODE_SIZE 0 // Collect stats about GenTree node allocations. +#define DISPLAY_SIZES 0 // Display generated code, data, and GC information sizes. +#define MEASURE_BLOCK_SIZE 0 // Collect stats about basic block and FlowEdge node sizes and memory allocations. +#define MEASURE_FATAL 0 // Count the number of calls to fatal(), including NYIs and noway_asserts. +#define MEASURE_NODE_SIZE 0 // Collect stats about GenTree node allocations. #define MEASURE_PTRTAB_SIZE 0 // Collect stats about GC pointer table allocations. -#define EMITTER_STATS 0 // Collect stats on the emitter. -#define NODEBASH_STATS 0 // Collect stats on changed gtOper values in GenTree's. -#define COUNT_AST_OPERS 0 // Display use counts for GenTree operators. +#define EMITTER_STATS 0 // Collect stats on the emitter. +#define NODEBASH_STATS 0 // Collect stats on changed gtOper values in GenTree's. +#define COUNT_AST_OPERS 0 // Display use counts for GenTree operators. #ifdef DEBUG #define MEASURE_MEM_ALLOC 1 // Collect memory allocation stats. -#define LOOP_HOIST_STATS 1 // Collect loop hoisting stats. -#define TRACK_LSRA_STATS 1 // Collect LSRA stats +#define LOOP_HOIST_STATS 1 // Collect loop hoisting stats. 
+#define TRACK_LSRA_STATS 1 // Collect LSRA stats #define TRACK_ENREG_STATS 1 // Collect enregistration stats #else #define MEASURE_MEM_ALLOC 0 // You can set this to 1 to get memory stats in retail, as well -#define LOOP_HOIST_STATS 0 // You can set this to 1 to get loop hoist stats in retail, as well -#define TRACK_LSRA_STATS 0 // You can set this to 1 to get LSRA stats in retail, as well +#define LOOP_HOIST_STATS 0 // You can set this to 1 to get loop hoist stats in retail, as well +#define TRACK_LSRA_STATS 0 // You can set this to 1 to get LSRA stats in retail, as well #define TRACK_ENREG_STATS 0 #endif @@ -602,7 +606,7 @@ const bool dspGCtbls = true; JitTls::GetCompiler()->fgTableDispBasicBlock(b); #define VERBOSE JitTls::GetCompiler()->verbose // Development-time only macros, simplify guards for specified IL methods one wants to debug/add log messages for -#define ISMETHOD(name) (strcmp(JitTls::GetCompiler()->impInlineRoot()->info.compMethodName, name) == 0) +#define ISMETHOD(name) (strcmp(JitTls::GetCompiler()->impInlineRoot()->info.compMethodName, name) == 0) #define ISMETHODHASH(hash) (JitTls::GetCompiler()->impInlineRoot()->info.compMethodHash() == hash) #else // !DEBUG #define JITDUMP(...) @@ -628,8 +632,9 @@ const bool dspGCtbls = true; */ #ifdef TARGET_X86 -#define DOUBLE_ALIGN 1 // permit the double alignment of ESP in prolog, - // and permit the double alignment of local offsets +#define DOUBLE_ALIGN \ + 1 // permit the double alignment of ESP in prolog, + // and permit the double alignment of local offsets #else #define DOUBLE_ALIGN 0 // no special handling for double alignment #endif @@ -673,7 +678,7 @@ inline bool IsUninitialized(T data); #define MISALIGNED_RD_U2(src) (*castto(src, unsigned short*)) #define MISALIGNED_WR_I2(dst, val) *castto(dst, short*) = val; -#define MISALIGNED_WR_I4(dst, val) *castto(dst, int*) = val; +#define MISALIGNED_WR_I4(dst, val) *castto(dst, int*) = val; #define MISALIGNED_WR_ST(dst, val) *castto(dst, ssize_t*) = val; @@ -740,16 +745,16 @@ inline size_t unsigned_abs(__int64 x) #define FEATURE_TAILCALL_OPT_SHARED_RETURN 0 #endif // !FEATURE_TAILCALL_OPT -#define CLFLG_CODESIZE 0x00001 -#define CLFLG_CODESPEED 0x00002 -#define CLFLG_CSE 0x00004 -#define CLFLG_REGVAR 0x00008 -#define CLFLG_RNGCHKOPT 0x00010 -#define CLFLG_DEADSTORE 0x00020 +#define CLFLG_CODESIZE 0x00001 +#define CLFLG_CODESPEED 0x00002 +#define CLFLG_CSE 0x00004 +#define CLFLG_REGVAR 0x00008 +#define CLFLG_RNGCHKOPT 0x00010 +#define CLFLG_DEADSTORE 0x00020 #define CLFLG_CODEMOTION 0x00040 -#define CLFLG_QMARK 0x00080 -#define CLFLG_TREETRANS 0x00100 -#define CLFLG_INLINING 0x00200 +#define CLFLG_QMARK 0x00080 +#define CLFLG_TREETRANS 0x00100 +#define CLFLG_INLINING 0x00200 #if FEATURE_STRUCTPROMOTE #define CLFLG_STRUCTPROMOTE 0x00400 @@ -813,7 +818,7 @@ class JitTls #endif static Compiler* GetCompiler(); - static void SetCompiler(Compiler* compiler); + static void SetCompiler(Compiler* compiler); }; #if defined(DEBUG) diff --git a/src/coreclr/jit/jitconfig.cpp b/src/coreclr/jit/jitconfig.cpp index 3c85031cee6cd..19730be75c2cd 100644 --- a/src/coreclr/jit/jitconfig.cpp +++ b/src/coreclr/jit/jitconfig.cpp @@ -193,7 +193,7 @@ void JitConfigValues::initialize(ICorJitHost* host) assert(!m_isInitialized); #define CONFIG_INTEGER(name, key, defaultValue) m_##name = host->getIntConfigValue(key, defaultValue); -#define CONFIG_STRING(name, key) m_##name = host->getStringConfigValue(key); +#define CONFIG_STRING(name, key) m_##name = host->getStringConfigValue(key); #define 
CONFIG_METHODSET(name, key) \ const WCHAR* name##value = host->getStringConfigValue(key); \ m_##name.initialize(name##value, host); \ @@ -212,7 +212,7 @@ void JitConfigValues::destroy(ICorJitHost* host) } #define CONFIG_INTEGER(name, key, defaultValue) -#define CONFIG_STRING(name, key) host->freeStringConfigValue(m_##name); +#define CONFIG_STRING(name, key) host->freeStringConfigValue(m_##name); #define CONFIG_METHODSET(name, key) m_##name.destroy(host); #include "jitconfigvalues.h" diff --git a/src/coreclr/jit/jitconfig.h b/src/coreclr/jit/jitconfig.h index e19021cd52f22..bd1c552f59438 100644 --- a/src/coreclr/jit/jitconfig.h +++ b/src/coreclr/jit/jitconfig.h @@ -31,7 +31,7 @@ class JitConfigValues char* m_list; MethodName* m_names; - MethodSet(const MethodSet& other) = delete; + MethodSet(const MethodSet& other) = delete; MethodSet& operator=(const MethodSet& other) = delete; public: @@ -56,8 +56,8 @@ class JitConfigValues private: #define CONFIG_INTEGER(name, key, defaultValue) int m_##name; -#define CONFIG_STRING(name, key) const WCHAR* m_##name; -#define CONFIG_METHODSET(name, key) MethodSet m_##name; +#define CONFIG_STRING(name, key) const WCHAR* m_##name; +#define CONFIG_METHODSET(name, key) MethodSet m_##name; #include "jitconfigvalues.h" public: @@ -81,7 +81,7 @@ class JitConfigValues private: bool m_isInitialized; - JitConfigValues(const JitConfigValues& other) = delete; + JitConfigValues(const JitConfigValues& other) = delete; JitConfigValues& operator=(const JitConfigValues& other) = delete; public: diff --git a/src/coreclr/jit/jitconfigvalues.h b/src/coreclr/jit/jitconfigvalues.h index e0a2d7cb16fcb..28d75fc93c310 100644 --- a/src/coreclr/jit/jitconfigvalues.h +++ b/src/coreclr/jit/jitconfigvalues.h @@ -377,10 +377,10 @@ CONFIG_INTEGER(JitDisableSimdVN, W("JitDisableSimdVN"), 0) // Default 0, ValueNu // CONFIG_INTEGER(JitConstCSE, W("JitConstCSE"), 0) -#define CONST_CSE_ENABLE_ARM 0 -#define CONST_CSE_DISABLE_ALL 1 +#define CONST_CSE_ENABLE_ARM 0 +#define CONST_CSE_DISABLE_ALL 1 #define CONST_CSE_ENABLE_ARM_NO_SHARING 2 -#define CONST_CSE_ENABLE_ALL 3 +#define CONST_CSE_ENABLE_ALL 3 #define CONST_CSE_ENABLE_ALL_NO_SHARING 4 // If nonzero, use the greedy RL policy. diff --git a/src/coreclr/jit/jitee.h b/src/coreclr/jit/jitee.h index 27963ac356efb..71f53b4e10d7d 100644 --- a/src/coreclr/jit/jitee.h +++ b/src/coreclr/jit/jitee.h @@ -54,13 +54,15 @@ class JitFlags }; // clang-format on - JitFlags() : m_jitFlags(0) + JitFlags() + : m_jitFlags(0) { // empty } // Convenience constructor to set exactly one flags. - JitFlags(JitFlag flag) : m_jitFlags(0) + JitFlags(JitFlag flag) + : m_jitFlags(0) { Set(flag); } diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index e3ad6aa1023c7..573191fecb38c 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -1718,7 +1718,7 @@ void Compiler::fgSortEHTable() (hndBegOff >= xtab1->ebdHndBegOffset && hndEndOff <= xtab1->ebdHndEndOffset) || (xtab1->HasFilter() && (hndBegOff >= xtab1->ebdFilterBegOffset && hndEndOff <= xtab1->ebdHndBegOffset)) // Note that end of filter is beginning of handler - ) + ) { #ifdef DEBUG if (verbose) @@ -2082,7 +2082,7 @@ bool Compiler::fgNormalizeEHCase2() if (ehOuter->ebdIsSameTry(mutualTryBeg, mutualTryLast)) { -// clang-format off + // clang-format off // Don't touch mutually-protect regions: their 'try' regions must remain identical! 
// We want to continue the looping outwards, in case we have something like this: // @@ -2131,7 +2131,7 @@ bool Compiler::fgNormalizeEHCase2() // // In this case, all the 'try' start at the same block! Note that there are two sets of mutually-protect regions, // separated by some nesting. -// clang-format on + // clang-format on #ifdef DEBUG if (verbose) @@ -2361,7 +2361,7 @@ bool Compiler::fgCreateFiltersForGenericExceptions() { GenTree* ctxTree = getRuntimeContextTree(embedInfo.lookup.lookupKind.runtimeLookupKind); runtimeLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, - TYP_I_IMPL, &embedInfo.lookup.lookupKind, ctxTree); + TYP_I_IMPL, &embedInfo.lookup.lookupKind, ctxTree); } else { @@ -3026,8 +3026,8 @@ void Compiler::fgVerifyHandlerTab() assert(blockNumMap[block->bbNum] == 0); // If this fails, we have two blocks with the same block number. blockNumMap[block->bbNum] = newBBnum++; } -// Note that there may be some blockNumMap[x] == 0, for a block number 'x' that has been deleted, if the blocks -// haven't been renumbered since the deletion. + // Note that there may be some blockNumMap[x] == 0, for a block number 'x' that has been deleted, if the blocks + // haven't been renumbered since the deletion. #if 0 // Useful for debugging, but don't want to put this in the dump all the time if (verbose) @@ -3274,9 +3274,9 @@ void Compiler::fgVerifyHandlerTab() assert(bbNumOuterHndLast != 0); assert(bbNumOuterHndBeg <= bbNumOuterHndLast); -// The outer handler must completely contain all the blocks in the EH region nested within it. However, if -// funclets have been created, it's harder to make any relationship asserts about the order of nested -// handlers, which also have been made into funclets. + // The outer handler must completely contain all the blocks in the EH region nested within it. However, if + // funclets have been created, it's harder to make any relationship asserts about the order of nested + // handlers, which also have been made into funclets. #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) @@ -4339,7 +4339,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) bFilterLast->bbNum, bPrev->bbNum); } #endif // DEBUG - // Change the target for bFilterLast from the old first 'block' to the new first 'bPrev' + // Change the target for bFilterLast from the old first 'block' to the new first 'bPrev' fgRedirectTargetEdge(bFilterLast, bPrev); } } diff --git a/src/coreclr/jit/jiteh.h b/src/coreclr/jit/jiteh.h index 95ae62527897b..55b56ac9833c4 100644 --- a/src/coreclr/jit/jiteh.h +++ b/src/coreclr/jit/jiteh.h @@ -83,7 +83,8 @@ struct EHblkDsc BasicBlock* ebdTryLast; // Last block of the try BasicBlock* ebdHndBeg; // First block of the handler BasicBlock* ebdHndLast; // Last block of the handler - union { + union + { BasicBlock* ebdFilter; // First block of filter, if HasFilter() unsigned ebdTyp; // Exception type (a class token), otherwise }; @@ -165,8 +166,8 @@ struct EHblkDsc unsigned ebdGetEnclosingRegionIndex(bool* inTryRegion); static bool ebdIsSameTry(EHblkDsc* h1, EHblkDsc* h2); // Same 'try' region? Compare begin/last blocks. 
- bool ebdIsSameTry(Compiler* comp, unsigned t2); - bool ebdIsSameTry(BasicBlock* ebdTryBeg, BasicBlock* ebdTryLast); + bool ebdIsSameTry(Compiler* comp, unsigned t2); + bool ebdIsSameTry(BasicBlock* ebdTryBeg, BasicBlock* ebdTryLast); #ifdef DEBUG void DispEntry(unsigned num); // Display this table entry diff --git a/src/coreclr/jit/jitexpandarray.h b/src/coreclr/jit/jitexpandarray.h index 646f9e6747a3b..8eaf52705986a 100644 --- a/src/coreclr/jit/jitexpandarray.h +++ b/src/coreclr/jit/jitexpandarray.h @@ -54,7 +54,10 @@ class JitExpandArray // of size max(`minSize`, `idx`) is allocated. // JitExpandArray(CompAllocator alloc, unsigned minSize = 1) - : m_alloc(alloc), m_members(nullptr), m_size(0), m_minSize(minSize) + : m_alloc(alloc) + , m_members(nullptr) + , m_size(0) + , m_minSize(minSize) { assert(minSize > 0); } @@ -219,7 +222,9 @@ class JitExpandArrayStack : public JitExpandArray // Notes: // See JitExpandArray constructor notes. // - JitExpandArrayStack(CompAllocator alloc, unsigned minSize = 1) : JitExpandArray(alloc, minSize), m_used(0) + JitExpandArrayStack(CompAllocator alloc, unsigned minSize = 1) + : JitExpandArray(alloc, minSize) + , m_used(0) { } diff --git a/src/coreclr/jit/jitgcinfo.h b/src/coreclr/jit/jitgcinfo.h index b73e8fbc68773..288042d4c6b1e 100644 --- a/src/coreclr/jit/jitgcinfo.h +++ b/src/coreclr/jit/jitgcinfo.h @@ -27,7 +27,9 @@ struct RegSlotIdKey { } - RegSlotIdKey(unsigned short regNum, unsigned flags) : m_regNum(regNum), m_flags((unsigned short)flags) + RegSlotIdKey(unsigned short regNum, unsigned flags) + : m_regNum(regNum) + , m_flags((unsigned short)flags) { assert(m_flags == flags); } @@ -54,7 +56,9 @@ struct StackSlotIdKey } StackSlotIdKey(int offset, bool fpRel, unsigned flags) - : m_offset(offset), m_fpRel(fpRel), m_flags((unsigned short)flags) + : m_offset(offset) + , m_fpRel(fpRel) + , m_flags((unsigned short)flags) { assert(flags == m_flags); } @@ -165,7 +169,7 @@ class GCInfo unsigned char rpdCallInstrSize; // Length of the call instruction. #endif - unsigned short rpdArg : 1; // is this an argument descriptor? + unsigned short rpdArg : 1; // is this an argument descriptor? unsigned short rpdArgType : 2; // is this an argument push,pop, or kill? rpdArgType_t rpdArgTypeGet() { @@ -179,8 +183,8 @@ class GCInfo } unsigned short rpdIsThis : 1; // is it the 'this' pointer - unsigned short rpdCall : 1; // is this a true call site? - unsigned short : 1; // Padding bit, so next two start on a byte boundary + unsigned short rpdCall : 1; // is this a true call site? + unsigned short : 1; // Padding bit, so next two start on a byte boundary unsigned short rpdCallGCrefRegs : CNT_CALLEE_SAVED; // Callee-saved registers containing GC pointers. unsigned short rpdCallByrefRegs : CNT_CALLEE_SAVED; // Callee-saved registers containing byrefs. 
@@ -261,7 +265,8 @@ class GCInfo unsigned short cdArgCnt; - union { + union + { struct // used if cdArgCnt == 0 { unsigned cdArgMask; // ptr arg bitfield @@ -278,7 +283,7 @@ class GCInfo CallDsc* gcCallDescList; CallDsc* gcCallDescLast; -//------------------------------------------------------------------------- + //------------------------------------------------------------------------- #ifdef JIT32_GCENCODER void gcCountForHeader(UNALIGNED unsigned int* pUntrackedCount, UNALIGNED unsigned int* pVarPtrTableSize); @@ -303,7 +308,7 @@ class GCInfo #ifdef JIT32_GCENCODER size_t gcPtrTableSize(const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset); - BYTE* gcPtrTableSave(BYTE* destPtr, const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset); + BYTE* gcPtrTableSave(BYTE* destPtr, const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset); #endif void gcRegPtrSetInit(); /*****************************************************************************/ @@ -382,7 +387,7 @@ class GCInfo #ifdef JIT32_GCENCODER size_t gcInfoBlockHdrDump(const BYTE* table, - InfoHdr* header, /* OUT */ + InfoHdr* header, /* OUT */ unsigned* methodSize); /* OUT */ size_t gcDumpPtrTable(const BYTE* table, const InfoHdr& header, unsigned methodSize); diff --git a/src/coreclr/jit/jithashtable.h b/src/coreclr/jit/jithashtable.h index 9ad73dbf2f7d5..f699c3eee19d2 100644 --- a/src/coreclr/jit/jithashtable.h +++ b/src/coreclr/jit/jithashtable.h @@ -57,10 +57,16 @@ class JitHashTableBehavior class JitPrimeInfo { public: - constexpr JitPrimeInfo() : prime(0), magic(0), shift(0) + constexpr JitPrimeInfo() + : prime(0) + , magic(0) + , shift(0) { } - constexpr JitPrimeInfo(unsigned p, unsigned m, unsigned s) : prime(p), magic(m), shift(s) + constexpr JitPrimeInfo(unsigned p, unsigned m, unsigned s) + : prime(p) + , magic(m) + , shift(s) { } unsigned prime; @@ -130,7 +136,10 @@ class JitHashTable Value m_val; template - Node(Node* next, Key k, Args&&... args) : m_next(next), m_key(k), m_val(std::forward(args)...) + Node(Node* next, Key k, Args&&... args) + : m_next(next) + , m_key(k) + , m_val(std::forward(args)...) { } @@ -166,7 +175,12 @@ class JitHashTable // JitHashTable always starts out empty, with no allocation overhead. // Call Reallocate to prime with an initial size if desired. 
// - JitHashTable(Allocator alloc) : m_alloc(alloc), m_table(nullptr), m_tableSizeInfo(), m_tableCount(0), m_tableMax(0) + JitHashTable(Allocator alloc) + : m_alloc(alloc) + , m_table(nullptr) + , m_tableSizeInfo() + , m_tableCount(0) + , m_tableMax(0) { #ifndef __GNUC__ // these crash GCC static_assert_no_msg(Behavior::s_growth_factor_numerator > Behavior::s_growth_factor_denominator); @@ -492,7 +506,8 @@ class JitHashTable class KeyIterator : public NodeIterator { public: - KeyIterator(const JitHashTable* hash, bool begin) : NodeIterator(hash, begin) + KeyIterator(const JitHashTable* hash, bool begin) + : NodeIterator(hash, begin) { } @@ -506,7 +521,8 @@ class JitHashTable class ValueIterator : public NodeIterator { public: - ValueIterator(const JitHashTable* hash, bool begin) : NodeIterator(hash, begin) + ValueIterator(const JitHashTable* hash, bool begin) + : NodeIterator(hash, begin) { } @@ -521,7 +537,8 @@ class JitHashTable class KeyValueIterator : public NodeIterator { public: - KeyValueIterator(const JitHashTable* hash, bool begin) : NodeIterator(hash, begin) + KeyValueIterator(const JitHashTable* hash, bool begin) + : NodeIterator(hash, begin) { } @@ -538,7 +555,8 @@ class JitHashTable const JitHashTable* const m_hash; public: - KeyIteration(const JitHashTable* hash) : m_hash(hash) + KeyIteration(const JitHashTable* hash) + : m_hash(hash) { } @@ -559,7 +577,8 @@ class JitHashTable const JitHashTable* const m_hash; public: - ValueIteration(const JitHashTable* hash) : m_hash(hash) + ValueIteration(const JitHashTable* hash) + : m_hash(hash) { } @@ -580,7 +599,8 @@ class JitHashTable const JitHashTable* const m_hash; public: - KeyValueIteration(const JitHashTable* hash) : m_hash(hash) + KeyValueIteration(const JitHashTable* hash) + : m_hash(hash) { } diff --git a/src/coreclr/jit/jitpch.h b/src/coreclr/jit/jitpch.h index ebb18e5177dfe..6e9a0a6f80023 100644 --- a/src/coreclr/jit/jitpch.h +++ b/src/coreclr/jit/jitpch.h @@ -18,8 +18,8 @@ #endif #include -using std::min; using std::max; +using std::min; // Don't allow using the windows.h #defines for the BitScan* APIs. Using the #defines means our // `BitOperations::BitScan*` functions have their name mapped, which is confusing and messes up diff --git a/src/coreclr/jit/layout.cpp b/src/coreclr/jit/layout.cpp index 918fd4ab6521d..ad4c0077c22bd 100644 --- a/src/coreclr/jit/layout.cpp +++ b/src/coreclr/jit/layout.cpp @@ -21,7 +21,8 @@ class ClassLayoutTable typedef JitHashTable, unsigned> BlkLayoutIndexMap; typedef JitHashTable, unsigned> ObjLayoutIndexMap; - union { + union + { // Up to 3 layouts can be stored "inline" and finding a layout by handle/size can be done using linear search. // Most methods need no more than 2 layouts. ClassLayout* m_layoutArray[3]; @@ -43,7 +44,10 @@ class ClassLayoutTable ClassLayout m_zeroSizedBlockLayout; public: - ClassLayoutTable() : m_layoutCount(0), m_layoutLargeCapacity(0), m_zeroSizedBlockLayout(0) + ClassLayoutTable() + : m_layoutCount(0) + , m_layoutLargeCapacity(0) + , m_zeroSizedBlockLayout(0) { } diff --git a/src/coreclr/jit/layout.h b/src/coreclr/jit/layout.h index 59ecaa9405485..3c6487e516b91 100644 --- a/src/coreclr/jit/layout.h +++ b/src/coreclr/jit/layout.h @@ -30,7 +30,8 @@ class ClassLayout // Array of CorInfoGCType (as BYTE) that describes the GC layout of the class. // For small classes the array is stored inline, avoiding an extra allocation // and the pointer size overhead. 
- union { + union + { BYTE* m_gcPtrs; BYTE m_gcPtrsArray[sizeof(BYTE*)]; }; @@ -69,7 +70,7 @@ class ClassLayout ClassLayout(CORINFO_CLASS_HANDLE classHandle, bool isValueClass, unsigned size, - var_types type DEBUGARG(const char* className) DEBUGARG(const char* shortClassName)) + var_types type DEBUGARG(const char* className) DEBUGARG(const char* shortClassName)) : m_classHandle(classHandle) , m_size(size) , m_isValueClass(isValueClass) diff --git a/src/coreclr/jit/lclmorph.cpp b/src/coreclr/jit/lclmorph.cpp index 44b0afe1caf92..6b4c6cc693f9b 100644 --- a/src/coreclr/jit/lclmorph.cpp +++ b/src/coreclr/jit/lclmorph.cpp @@ -14,7 +14,9 @@ class LocalSequencer final : public GenTreeVisitor UseExecutionOrder = true, }; - LocalSequencer(Compiler* comp) : GenTreeVisitor(comp), m_prevNode(nullptr) + LocalSequencer(Compiler* comp) + : GenTreeVisitor(comp) + , m_prevNode(nullptr) { } @@ -918,9 +920,9 @@ class LocalAddressVisitor final : public GenTreeVisitor break; #ifdef FEATURE_HW_INTRINSICS - // We have two cases we want to handle: - // 1. Vector2/3/4 and Quaternion where we have 4x float fields - // 2. Plane where we have 1x Vector3 and 1x float field + // We have two cases we want to handle: + // 1. Vector2/3/4 and Quaternion where we have 4x float fields + // 2. Plane where we have 1x Vector3 and 1x float field case IndirTransform::GetElement: { @@ -934,7 +936,7 @@ class LocalAddressVisitor final : public GenTreeVisitor { GenTree* indexNode = m_compiler->gtNewIconNode(offset / genTypeSize(elementType)); hwiNode = m_compiler->gtNewSimdGetElementNode(elementType, lclNode, indexNode, - CORINFO_TYPE_FLOAT, genTypeSize(varDsc)); + CORINFO_TYPE_FLOAT, genTypeSize(varDsc)); break; } case TYP_SIMD12: diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index f500ea13202d9..0de4f52eee756 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -332,9 +332,9 @@ void Compiler::lvaInitTypeRef() } if ( // If there already exist unsafe buffers, don't mark more structs as unsafe - // as that will cause them to be placed along with the real unsafe buffers, - // unnecessarily exposing them to overruns. This can affect GS tests which - // intentionally do buffer-overruns. + // as that will cause them to be placed along with the real unsafe buffers, + // unnecessarily exposing them to overruns. This can affect GS tests which + // intentionally do buffer-overruns. 
!getNeedsGSSecurityCookie() && // GS checks require the stack to be re-ordered, which can't be done with EnC !opts.compDbgEnC && compStressCompile(STRESS_UNSAFE_BUFFER_CHECKS, 25)) @@ -440,7 +440,7 @@ void Compiler::lvaInitArgs(InitVarDscInfo* varDscInfo) lvaInitRetBuffArg(varDscInfo, true); } -//====================================================================== + //====================================================================== #if USER_ARGS_COME_LAST //@GENERICS: final instantiation-info argument for shared generic methods @@ -602,9 +602,9 @@ void Compiler::lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBuf // void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs) { -//------------------------------------------------------------------------- -// Walk the function signature for the explicit arguments -//------------------------------------------------------------------------- + //------------------------------------------------------------------------- + // Walk the function signature for the explicit arguments + //------------------------------------------------------------------------- #if defined(TARGET_X86) // Only (some of) the implicit args are enregistered for varargs @@ -1319,8 +1319,8 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, un #if defined(TARGET_X86) varDsc->SetStackOffset(compArgSize); #else // !TARGET_X86 - // TODO-CQ: We shouldn't have to go as far as to declare these - // address-exposed -- DoNotEnregister should suffice. + // TODO-CQ: We shouldn't have to go as far as to declare these + // address-exposed -- DoNotEnregister should suffice. lvaSetVarAddrExposed(varDscInfo->varNum DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE)); #endif // !TARGET_X86 @@ -1926,7 +1926,9 @@ void Compiler::lvSetMinOptsDoNotEnreg() // Arguments: // compiler - pointer to a compiler to get access to an allocator, compHandle etc. // -Compiler::StructPromotionHelper::StructPromotionHelper(Compiler* compiler) : compiler(compiler), structPromotionInfo() +Compiler::StructPromotionHelper::StructPromotionHelper(Compiler* compiler) + : compiler(compiler) + , structPromotionInfo() { } @@ -2505,12 +2507,12 @@ bool Compiler::StructPromotionHelper::ShouldPromoteStructVar(unsigned lclNum) // with something else occupying the same 4-byte slot, it will // overwrite other fields. if (structPromotionInfo.fieldCnt != 1) - { - JITDUMP("Not promoting promotable struct local V%02u, because lvIsParam is true and #fields = " - "%d.\n", - lclNum, structPromotionInfo.fieldCnt); - shouldPromote = false; - } + { + JITDUMP("Not promoting promotable struct local V%02u, because lvIsParam is true and #fields = " + "%d.\n", + lclNum, structPromotionInfo.fieldCnt); + shouldPromote = false; + } } else if ((lclNum == compiler->genReturnLocal) && (structPromotionInfo.fieldCnt > 1)) { @@ -2549,8 +2551,8 @@ void Compiler::StructPromotionHelper::SortStructFields() { jitstd::sort(structPromotionInfo.fields, structPromotionInfo.fields + structPromotionInfo.fieldCnt, [](const lvaStructFieldInfo& lhs, const lvaStructFieldInfo& rhs) { - return lhs.fldOffset < rhs.fldOffset; - }); + return lhs.fldOffset < rhs.fldOffset; + }); structPromotionInfo.fieldsSorted = true; } } @@ -2605,7 +2607,7 @@ void Compiler::StructPromotionHelper::PromoteStructVar(unsigned lclNum) compiler->compFloatingPointUsed = true; } -// Now grab the temp for the field local. + // Now grab the temp for the field local. 
#ifdef DEBUG char fieldNameBuffer[128]; @@ -3809,8 +3811,8 @@ void Compiler::lvaSortByRefCount() if (varDsc->IsAddressExposed()) { varDsc->lvTracked = 0; - assert(varDsc->lvType != TYP_STRUCT || - varDsc->lvDoNotEnregister); // For structs, should have set this when we set m_addrExposed. + assert(varDsc->lvType != TYP_STRUCT || varDsc->lvDoNotEnregister); // For structs, should have set this when + // we set m_addrExposed. } if (varTypeIsStruct(varDsc)) { @@ -4042,8 +4044,8 @@ unsigned LclVarDsc::lvSize() const // Size needed for storage representation. On } /********************************************************************************** -* Get stack size of the varDsc. -*/ + * Get stack size of the varDsc. + */ size_t LclVarDsc::lvArgStackSize() const { // Make sure this will have a stack size @@ -4463,7 +4465,10 @@ void Compiler::lvaMarkLocalVars(BasicBlock* block, bool isRecompute) }; MarkLocalVarsVisitor(Compiler* compiler, BasicBlock* block, Statement* stmt, bool isRecompute) - : GenTreeVisitor(compiler), m_block(block), m_stmt(stmt), m_isRecompute(isRecompute) + : GenTreeVisitor(compiler) + , m_block(block) + , m_stmt(stmt) + , m_isRecompute(isRecompute) { } @@ -4888,11 +4893,11 @@ inline void Compiler::lvaIncrementFrameSize(unsigned size) } /**************************************************************************** -* -* Return true if absolute offsets of temps are larger than vars, or in other -* words, did we allocate temps before of after vars. The /GS buffer overrun -* checks want temps to be at low stack addresses than buffers -*/ + * + * Return true if absolute offsets of temps are larger than vars, or in other + * words, did we allocate temps before of after vars. The /GS buffer overrun + * checks want temps to be at low stack addresses than buffers + */ bool Compiler::lvaTempsHaveLargerOffsetThanVars() { #ifdef TARGET_ARM @@ -4911,10 +4916,10 @@ bool Compiler::lvaTempsHaveLargerOffsetThanVars() } /**************************************************************************** -* -* Return an upper bound estimate for the size of the compiler spill temps -* -*/ + * + * Return an upper bound estimate for the size of the compiler spill temps + * + */ unsigned Compiler::lvaGetMaxSpillTempSize() { unsigned result = 0; @@ -5531,7 +5536,7 @@ void Compiler::lvaFixVirtualFrameOffsets() #endif ) #endif // !defined(TARGET_AMD64) - ) + ) { doAssignStkOffs = false; // Not on frame or an incoming stack arg } @@ -5552,8 +5557,8 @@ void Compiler::lvaFixVirtualFrameOffsets() // We need to re-adjust the offsets of the parameters so they are EBP // relative rather than stack/frame pointer relative - varDsc->SetStackOffset(varDsc->GetStackOffset() + - (2 * TARGET_POINTER_SIZE)); // return address and pushed EBP + varDsc->SetStackOffset(varDsc->GetStackOffset() + (2 * TARGET_POINTER_SIZE)); // return address and + // pushed EBP noway_assert(varDsc->GetStackOffset() >= FIRST_ARG_STACK_OFFS); } @@ -5731,7 +5736,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs); } #elif !defined(UNIX_AMD64_ABI) - argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs); + argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs); #endif // TARGET_X86 lclNum++; userArgsToSkip++; @@ -5892,8 +5897,8 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() // ret address slot, stack frame padding, alloca instructions, etc. // Note: This is the implementation for UNIX_AMD64 System V platforms. 
// -int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, - unsigned argSize, +int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, + unsigned argSize, int argOffs UNIX_AMD64_ABI_ONLY_ARG(int* callerArgOffset)) { noway_assert(lclNum < info.compArgsCount); @@ -5984,8 +5989,8 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, // The final offset is calculated in lvaFixVirtualFrameOffsets method. It accounts for FP existence, // ret address slot, stack frame padding, alloca instructions, etc. // Note: This implementation for all the platforms but UNIX_AMD64 OSs (System V 64 bit.) -int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, - unsigned argSize, +int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, + unsigned argSize, int argOffs UNIX_AMD64_ABI_ONLY_ARG(int* callerArgOffset)) { noway_assert(lclNum < info.compArgsCount); @@ -6213,8 +6218,8 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, (codeGen->regSet.rsMaskPreSpillAlign & genRegMask(REG_ARG_LAST)); noway_assert(cond); - noway_assert(sizeofPreSpillRegArgs <= - argOffs + TARGET_POINTER_SIZE); // at most one register of alignment + noway_assert(sizeofPreSpillRegArgs <= argOffs + TARGET_POINTER_SIZE); // at most one register of + // alignment } argOffs = sizeofPreSpillRegArgs; } @@ -6385,8 +6390,8 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() stkOffs -= initialStkOffs; } - if (codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() || - !isFramePointerUsed()) // Note that currently we always have a frame pointer + if (codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() || !isFramePointerUsed()) // Note that currently we always have + // a frame pointer { stkOffs -= compCalleeRegsPushed * REGSIZE_BYTES; } @@ -7126,8 +7131,8 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() #endif // FEATURE_EH_FUNCLETS && defined(TARGET_AMD64) #ifdef TARGET_ARM64 - if (!codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() && - isFramePointerUsed()) // Note that currently we always have a frame pointer + if (!codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() && isFramePointerUsed()) // Note that currently we always have + // a frame pointer { // Create space for saving FP and LR. stkOffs -= 2 * REGSIZE_BYTES; @@ -7412,9 +7417,9 @@ void Compiler::lvaAlignFrame() } // Align the stack with STACK_ALIGN value. 
- int adjustFrameSize = compLclFrameSize; + int adjustFrameSize = compLclFrameSize; #if defined(UNIX_X86_ABI) - bool isEbpPushed = codeGen->isFramePointerUsed(); + bool isEbpPushed = codeGen->isFramePointerUsed(); #if DOUBLE_ALIGN isEbpPushed |= genDoubleAlign(); #endif @@ -7892,9 +7897,9 @@ void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t r } /***************************************************************************** -* -* dump the lvaTable -*/ + * + * dump the lvaTable + */ void Compiler::lvaTableDump(FrameLayoutState curState) { diff --git a/src/coreclr/jit/likelyclass.cpp b/src/coreclr/jit/likelyclass.cpp index fa0839725c9fb..e181a2e9a135a 100644 --- a/src/coreclr/jit/likelyclass.cpp +++ b/src/coreclr/jit/likelyclass.cpp @@ -255,8 +255,8 @@ static unsigned getLikelyClassesOrMethods(LikelyClassMethodRecord* jitstd::sort(sortedEntries, sortedEntries + knownHandles, [](const LikelyClassMethodHistogramEntry& h1, const LikelyClassMethodHistogramEntry& h2) -> bool { - return h1.m_count > h2.m_count; - }); + return h1.m_count > h2.m_count; + }); const UINT32 numberOfClasses = min(knownHandles, maxLikelyClasses); @@ -410,7 +410,9 @@ extern "C" DLLEXPORT UINT32 WINAPI getLikelyValues(LikelyValueRecord* // sort by m_count (descending) jitstd::sort(sortedEntries, sortedEntries + h.countHistogramElements, [](const LikelyClassMethodHistogramEntry& h1, - const LikelyClassMethodHistogramEntry& h2) -> bool { return h1.m_count > h2.m_count; }); + const LikelyClassMethodHistogramEntry& h2) -> bool { + return h1.m_count > h2.m_count; + }); const UINT32 numberOfLikelyConst = min(h.countHistogramElements, maxLikelyValues); diff --git a/src/coreclr/jit/lir.cpp b/src/coreclr/jit/lir.cpp index b10bd98ff6221..d172cea22d369 100644 --- a/src/coreclr/jit/lir.cpp +++ b/src/coreclr/jit/lir.cpp @@ -9,7 +9,10 @@ #pragma hdrstop #endif -LIR::Use::Use() : m_range(nullptr), m_edge(nullptr), m_user(nullptr) +LIR::Use::Use() + : m_range(nullptr) + , m_edge(nullptr) + , m_user(nullptr) { } @@ -30,7 +33,10 @@ LIR::Use::Use(const Use& other) // // Return Value: // -LIR::Use::Use(Range& range, GenTree** edge, GenTree* user) : m_range(&range), m_edge(edge), m_user(user) +LIR::Use::Use(Range& range, GenTree** edge, GenTree* user) + : m_range(&range) + , m_edge(edge) + , m_user(user) { AssertIsValid(); } @@ -280,11 +286,15 @@ unsigned LIR::Use::ReplaceWithLclVar(Compiler* compiler, unsigned lclNum, GenTre return lclNum; } -LIR::ReadOnlyRange::ReadOnlyRange() : m_firstNode(nullptr), m_lastNode(nullptr) +LIR::ReadOnlyRange::ReadOnlyRange() + : m_firstNode(nullptr) + , m_lastNode(nullptr) { } -LIR::ReadOnlyRange::ReadOnlyRange(ReadOnlyRange&& other) : m_firstNode(other.m_firstNode), m_lastNode(other.m_lastNode) +LIR::ReadOnlyRange::ReadOnlyRange(ReadOnlyRange&& other) + : m_firstNode(other.m_firstNode) + , m_lastNode(other.m_lastNode) { #ifdef DEBUG other.m_firstNode = nullptr; @@ -301,7 +311,9 @@ LIR::ReadOnlyRange::ReadOnlyRange(ReadOnlyRange&& other) : m_firstNode(other.m_f // firstNode - The first node in the range. // lastNode - The last node in the range. 
// -LIR::ReadOnlyRange::ReadOnlyRange(GenTree* firstNode, GenTree* lastNode) : m_firstNode(firstNode), m_lastNode(lastNode) +LIR::ReadOnlyRange::ReadOnlyRange(GenTree* firstNode, GenTree* lastNode) + : m_firstNode(firstNode) + , m_lastNode(lastNode) { assert((m_firstNode == nullptr) == (m_lastNode == nullptr)); assert((m_firstNode == m_lastNode) || (Contains(m_lastNode))); @@ -426,11 +438,13 @@ bool LIR::ReadOnlyRange::Contains(GenTree* node) const #endif -LIR::Range::Range() : ReadOnlyRange() +LIR::Range::Range() + : ReadOnlyRange() { } -LIR::Range::Range(Range&& other) : ReadOnlyRange(std::move(other)) +LIR::Range::Range(Range&& other) + : ReadOnlyRange(std::move(other)) { } @@ -442,7 +456,8 @@ LIR::Range::Range(Range&& other) : ReadOnlyRange(std::move(other)) // firstNode - The first node in the range. // lastNode - The last node in the range. // -LIR::Range::Range(GenTree* firstNode, GenTree* lastNode) : ReadOnlyRange(firstNode, lastNode) +LIR::Range::Range(GenTree* firstNode, GenTree* lastNode) + : ReadOnlyRange(firstNode, lastNode) { } @@ -1186,7 +1201,7 @@ bool LIR::Range::TryGetUse(GenTree* node, Use* use) // Returns: // The computed subrange. // -template +template LIR::ReadOnlyRange LIR::Range::GetMarkedRange(unsigned markCount, GenTree* start, bool* isClosed, @@ -1406,8 +1421,8 @@ class CheckLclVarSemanticsHelper // range - a range to do the check. // unusedDefs - map of defs that do no have users. // - CheckLclVarSemanticsHelper(Compiler* compiler, - const LIR::Range* range, + CheckLclVarSemanticsHelper(Compiler* compiler, + const LIR::Range* range, SmallHashTable& unusedDefs) : compiler(compiler) , range(range) @@ -1554,7 +1569,7 @@ class CheckLclVarSemanticsHelper void PopLclVarRead(const AliasSet::NodeInfo& defInfo) { SmallHashTable* reads; - const bool foundReads = unusedLclVarReads.TryGetValue(defInfo.LclNum(), &reads); + const bool foundReads = unusedLclVarReads.TryGetValue(defInfo.LclNum(), &reads); assert(foundReads); bool found = reads->TryRemove(defInfo.Node()); @@ -1569,11 +1584,11 @@ class CheckLclVarSemanticsHelper } private: - Compiler* compiler; - const LIR::Range* range; - SmallHashTable& unusedDefs; + Compiler* compiler; + const LIR::Range* range; + SmallHashTable& unusedDefs; SmallHashTable*, 16U> unusedLclVarReads; - ArrayStack*> lclVarReadsMapsCache; + ArrayStack*> lclVarReadsMapsCache; }; //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/lir.h b/src/coreclr/jit/lir.h index 9b4f940bc0ae3..8a3a9a507a38b 100644 --- a/src/coreclr/jit/lir.h +++ b/src/coreclr/jit/lir.h @@ -73,7 +73,7 @@ class LIR final void AssertIsValid() const; bool IsDummyUse() const; - void ReplaceWith(GenTree* replacement); + void ReplaceWith(GenTree* replacement); unsigned ReplaceWithLclVar(Compiler* compiler, unsigned lclNum = BAD_VAR_NUM, GenTree** pStore = nullptr); }; @@ -113,7 +113,7 @@ class LIR final GenTree* m_firstNode; GenTree* m_lastNode; - ReadOnlyRange(const ReadOnlyRange& other) = delete; + ReadOnlyRange(const ReadOnlyRange& other) = delete; ReadOnlyRange& operator=(const ReadOnlyRange& other) = delete; public: @@ -125,12 +125,14 @@ class LIR final GenTree* m_node; - Iterator(GenTree* begin) : m_node(begin) + Iterator(GenTree* begin) + : m_node(begin) { } public: - Iterator() : m_node(nullptr) + Iterator() + : m_node(nullptr) { } @@ -167,12 +169,14 @@ class LIR final GenTree* m_node; - ReverseIterator(GenTree* begin) : m_node(begin) + ReverseIterator(GenTree* begin) + : m_node(begin) { } public: - ReverseIterator() : 
m_node(nullptr) + ReverseIterator() + : m_node(nullptr) { } @@ -245,7 +249,7 @@ class LIR final private: Range(GenTree* firstNode, GenTree* lastNode); - Range(const Range& other) = delete; + Range(const Range& other) = delete; Range& operator=(const Range& other) = delete; template @@ -280,7 +284,7 @@ class LIR final void InsertAtBeginning(Range&& range); void InsertAtEnd(Range&& range); - void Remove(GenTree* node, bool markOperandsUnused = false); + void Remove(GenTree* node, bool markOperandsUnused = false); Range Remove(GenTree* firstNode, GenTree* lastNode); Range Remove(ReadOnlyRange&& range); @@ -303,7 +307,7 @@ class LIR final }; public: - static Range& AsRange(BasicBlock* block); + static Range& AsRange(BasicBlock* block); static const Range& AsRange(const BasicBlock* block); static Range EmptyRange(); diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index 78fb96fe3d77d..7f413b75d6649 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -811,10 +811,10 @@ void Compiler::fgExtendDbgLifetimes() fgExtendDbgScopes(); -/*------------------------------------------------------------------------- - * Partly update liveness info so that we handle any funky BBF_INTERNAL - * blocks inserted out of sequence. - */ + /*------------------------------------------------------------------------- + * Partly update liveness info so that we handle any funky BBF_INTERNAL + * blocks inserted out of sequence. + */ #ifdef DEBUG if (verbose && 0) @@ -1005,7 +1005,7 @@ void Compiler::fgExtendDbgLifetimes() // So just ensure that they don't have a 0 ref cnt unsigned lclNum = 0; - for (LclVarDsc *varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) + for (LclVarDsc* varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { if (lclNum >= info.compArgsCount) { @@ -1676,10 +1676,10 @@ GenTree* Compiler::fgTryRemoveDeadStoreEarly(Statement* stmt, GenTreeLclVarCommo * or subtree of a statement moving backward from startNode to endNode */ -void Compiler::fgComputeLife(VARSET_TP& life, - GenTree* startNode, - GenTree* endNode, - VARSET_VALARG_TP volatileVars, +void Compiler::fgComputeLife(VARSET_TP& life, + GenTree* startNode, + GenTree* endNode, + VARSET_VALARG_TP volatileVars, bool* pStmtInfoDirty DEBUGARG(bool* treeModf)) { // Don't kill vars in scope @@ -2116,11 +2116,11 @@ bool Compiler::fgTryRemoveDeadStoreLIR(GenTree* store, GenTreeLclVarCommon* lclN // Return Value: // true if we should skip the rest of the statement, false if we should continue // -bool Compiler::fgRemoveDeadStore(GenTree** pTree, - LclVarDsc* varDsc, - VARSET_VALARG_TP life, - bool* doAgain, - bool* pStmtInfoDirty, +bool Compiler::fgRemoveDeadStore(GenTree** pTree, + LclVarDsc* varDsc, + VARSET_VALARG_TP life, + bool* doAgain, + bool* pStmtInfoDirty, bool* pStoreRemoved DEBUGARG(bool* treeModf)) { assert(!compRationalIRForm); @@ -2186,7 +2186,7 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, #ifdef DEBUG *treeModf = true; #endif // DEBUG - // Update ordering, costs, FP levels, etc. + // Update ordering, costs, FP levels, etc. 
gtSetStmtInfo(compCurStmt); // Re-link the nodes for this statement @@ -2278,7 +2278,7 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, printf("\n"); } #endif // DEBUG - // No side effects - Change the store to a GT_NOP node + // No side effects - Change the store to a GT_NOP node store->gtBashToNOP(); #ifdef DEBUG diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index 26789cabc3656..2001f396c6f22 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -1371,7 +1371,7 @@ bool Compiler::optDeriveLoopCloningConditions(FlowGraphNaturalLoop* loop, LoopCl LcMdArrayOptInfo* mdArrInfo = optInfo->AsLcMdArrayOptInfo(); LC_Array arrLen(LC_Array(LC_Array::MdArray, mdArrInfo->GetArrIndexForDim(getAllocator(CMK_LoopClone)), mdArrInfo->dim, LC_Array::None)); - LC_Ident arrLenIdent = LC_Ident::CreateArrAccess(arrLen); + LC_Ident arrLenIdent = LC_Ident::CreateArrAccess(arrLen); LC_Condition cond(opLimitCondition, LC_Expr(ident), LC_Expr(arrLenIdent)); context->EnsureConditions(loop->GetIndex())->Push(cond); @@ -1666,7 +1666,7 @@ void Compiler::optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore // performs the optimizations assuming that the path in which the candidates // were collected is the fast path in which the optimizations will be performed. // -void Compiler::optPerformStaticOptimizations(FlowGraphNaturalLoop* loop, +void Compiler::optPerformStaticOptimizations(FlowGraphNaturalLoop* loop, LoopCloneContext* context DEBUGARG(bool dynamicPath)) { JitExpandArrayStack* optInfos = context->GetLoopOptInfo(loop->GetIndex()); diff --git a/src/coreclr/jit/loopcloning.h b/src/coreclr/jit/loopcloning.h index 64e810be6ff42..20f041eab40a5 100644 --- a/src/coreclr/jit/loopcloning.h +++ b/src/coreclr/jit/loopcloning.h @@ -196,7 +196,12 @@ struct ArrIndex unsigned rank; // Rank of the array BasicBlock* useBlock; // Block where the [] occurs - ArrIndex(CompAllocator alloc) : arrLcl(BAD_VAR_NUM), indLcls(alloc), bndsChks(alloc), rank(0), useBlock(nullptr) + ArrIndex(CompAllocator alloc) + : arrLcl(BAD_VAR_NUM) + , indLcls(alloc) + , bndsChks(alloc) + , rank(0) + , useBlock(nullptr) { } @@ -236,7 +241,8 @@ struct LcOptInfo }; OptType optType; - LcOptInfo(OptType optType) : optType(optType) + LcOptInfo(OptType optType) + : optType(optType) { } @@ -267,7 +273,10 @@ struct LcMdArrayOptInfo : public LcOptInfo ArrIndex* index; // "index" cached computation in the form of an ArrIndex representation. LcMdArrayOptInfo(GenTreeArrElem* arrElem, unsigned dim) - : LcOptInfo(LcMdArray), arrElem(arrElem), dim(dim), index(nullptr) + : LcOptInfo(LcMdArray) + , arrElem(arrElem) + , dim(dim) + , index(nullptr) { } @@ -300,7 +309,10 @@ struct LcJaggedArrayOptInfo : public LcOptInfo Statement* stmt; // "stmt" where the optimization opportunity occurs. 
LcJaggedArrayOptInfo(ArrIndex& arrIndex, unsigned dim, Statement* stmt) - : LcOptInfo(LcJaggedArray), dim(dim), arrIndex(arrIndex), stmt(stmt) + : LcOptInfo(LcJaggedArray) + , dim(dim) + , arrIndex(arrIndex) + , stmt(stmt) { } }; @@ -319,7 +331,11 @@ struct LcTypeTestOptInfo : public LcOptInfo CORINFO_CLASS_HANDLE clsHnd; LcTypeTestOptInfo(Statement* stmt, GenTreeIndir* methodTableIndir, unsigned lclNum, CORINFO_CLASS_HANDLE clsHnd) - : LcOptInfo(LcTypeTest), stmt(stmt), methodTableIndir(methodTableIndir), lclNum(lclNum), clsHnd(clsHnd) + : LcOptInfo(LcTypeTest) + , stmt(stmt) + , methodTableIndir(methodTableIndir) + , lclNum(lclNum) + , clsHnd(clsHnd) { } }; @@ -343,7 +359,7 @@ struct LcMethodAddrTestOptInfo : public LcOptInfo GenTreeIndir* delegateAddressIndir, unsigned delegateLclNum, void* methAddr, - bool isSlot DEBUG_ARG(CORINFO_METHOD_HANDLE targetMethHnd)) + bool isSlot DEBUG_ARG(CORINFO_METHOD_HANDLE targetMethHnd)) : LcOptInfo(LcMethodAddrTest) , stmt(stmt) , delegateAddressIndir(delegateAddressIndir) @@ -393,15 +409,24 @@ struct LC_Array int dim; // "dim" = which index to invoke arrLen on, if -1 invoke on the whole array // Example 1: a[0][1][2] and dim = 2 implies a[0][1].length // Example 2: a[0][1][2] and dim = -1 implies a[0][1][2].length - LC_Array() : type(Invalid), dim(-1) + LC_Array() + : type(Invalid) + , dim(-1) { } LC_Array(ArrType type, ArrIndex* arrIndex, int dim, OperType oper) - : type(type), arrIndex(arrIndex), oper(oper), dim(dim) + : type(type) + , arrIndex(arrIndex) + , oper(oper) + , dim(dim) { } - LC_Array(ArrType type, ArrIndex* arrIndex, OperType oper) : type(type), arrIndex(arrIndex), oper(oper), dim(-1) + LC_Array(ArrType type, ArrIndex* arrIndex, OperType oper) + : type(type) + , arrIndex(arrIndex) + , oper(oper) + , dim(-1) { } @@ -464,7 +489,8 @@ struct LC_Ident }; private: - union { + union + { unsigned constant; struct { @@ -482,7 +508,8 @@ struct LC_Ident }; }; - LC_Ident(IdentType type) : type(type) + LC_Ident(IdentType type) + : type(type) { } @@ -490,7 +517,8 @@ struct LC_Ident // The type of this object IdentType type; - LC_Ident() : type(Invalid) + LC_Ident() + : type(Invalid) { } @@ -680,10 +708,13 @@ struct LC_Expr } #endif - LC_Expr() : type(Invalid) + LC_Expr() + : type(Invalid) { } - explicit LC_Expr(const LC_Ident& ident) : ident(ident), type(Ident) + explicit LC_Expr(const LC_Ident& ident) + : ident(ident) + , type(Ident) { } @@ -724,7 +755,10 @@ struct LC_Condition { } LC_Condition(genTreeOps oper, const LC_Expr& op1, const LC_Expr& op2, bool asUnsigned = false) - : op1(op1), op2(op2), oper(oper), compareUnsigned(asUnsigned) + : op1(op1) + , op2(op2) + , oper(oper) + , compareUnsigned(asUnsigned) { } @@ -756,7 +790,10 @@ struct LC_ArrayDeref unsigned level; - LC_ArrayDeref(const LC_Array& array, unsigned level) : array(array), children(nullptr), level(level) + LC_ArrayDeref(const LC_Array& array, unsigned level) + : array(array) + , children(nullptr) + , level(level) { } @@ -764,8 +801,8 @@ struct LC_ArrayDeref unsigned Lcl(); - bool HasChildren(); - void EnsureChildren(CompAllocator alloc); + bool HasChildren(); + void EnsureChildren(CompAllocator alloc); static LC_ArrayDeref* Find(JitExpandArrayStack* children, unsigned lcl); void DeriveLevelConditions(JitExpandArrayStack*>* len); @@ -859,7 +896,7 @@ struct LoopCloneContext } NaturalLoopIterInfo* GetLoopIterInfo(unsigned loopNum); - void SetLoopIterInfo(unsigned loopNum, NaturalLoopIterInfo* info); + void SetLoopIterInfo(unsigned loopNum, NaturalLoopIterInfo* info); // Evaluate 
conditions into a JTRUE stmt and put it in a new block after `insertAfter`. BasicBlock* CondToStmtInBlock(Compiler* comp, diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index c377bd7588bf8..5d3a504175ee9 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -1163,8 +1163,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // |____ (ICon) (The actual case constant) GenTree* gtCaseCond = comp->gtNewOperNode(GT_EQ, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType), comp->gtNewIconNode(i, genActualType(tempLclType))); - GenTree* gtCaseBranch = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtCaseCond); - LIR::Range caseRange = LIR::SeqTree(comp, gtCaseBranch); + GenTree* gtCaseBranch = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtCaseCond); + LIR::Range caseRange = LIR::SeqTree(comp, gtCaseBranch); currentBBRange->InsertAtEnd(std::move(caseRange)); } } @@ -3887,7 +3887,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) #ifdef TARGET_XARCH || IsContainableMemoryOp(castOp) #endif - ); + ); if (removeCast) { @@ -4771,10 +4771,10 @@ void Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore) } convertToStoreObj = false; #else // TARGET_ARM64 - // This optimization on arm64 allows more SIMD16 vars to be enregistered but it could cause - // regressions when there are many calls and before/after each one we have to store/save the upper - // half of these registers. So enable this for arm64 only when LSRA is taught not to allocate registers when - // it would have to spilled too many times. + // This optimization on arm64 allows more SIMD16 vars to be enregistered but it could cause + // regressions when there are many calls and before/after each one we have to store/save the upper + // half of these registers. So enable this for arm64 only when LSRA is taught not to allocate registers when + // it would have to spilled too many times. convertToStoreObj = true; #endif // TARGET_ARM64 } @@ -5091,8 +5091,8 @@ void Lowering::LowerCallStruct(GenTreeCall* call) break; } #endif // FEATURE_SIMD - // importer has a separate mechanism to retype calls to helpers, - // keep it for now. + // importer has a separate mechanism to retype calls to helpers, + // keep it for now. assert(user->TypeIs(TYP_REF) || (user->TypeIs(TYP_I_IMPL) && comp->IsTargetAbi(CORINFO_NATIVEAOT_ABI))); assert(call->IsHelperCall()); assert(returnType == user->TypeGet()); @@ -8086,7 +8086,7 @@ void Lowering::ContainCheckNode(GenTree* node) #if FEATURE_ARG_SPLIT case GT_PUTARG_SPLIT: #endif // FEATURE_ARG_SPLIT - // The regNum must have been set by the lowering of the call. + // The regNum must have been set by the lowering of the call. 
assert(node->GetRegNum() != REG_NA); break; #ifdef TARGET_XARCH @@ -8799,7 +8799,7 @@ void Lowering::LowerStoreIndirCommon(GenTreeStoreInd* ind) // const bool isContainable = IsInvariantInRange(ind->Addr(), ind); #else - const bool isContainable = true; + const bool isContainable = true; #endif TryCreateAddrMode(ind->Addr(), isContainable, ind); @@ -8863,7 +8863,7 @@ GenTree* Lowering::LowerIndir(GenTreeIndir* ind) // const bool isContainable = IsInvariantInRange(ind->Addr(), ind); #else - const bool isContainable = true; + const bool isContainable = true; #endif TryCreateAddrMode(ind->Addr(), isContainable, ind); @@ -9294,7 +9294,7 @@ void Lowering::TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, Bas #if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) bool useNullCheck = true; #elif defined(TARGET_ARM) - bool useNullCheck = false; + bool useNullCheck = false; #else // TARGET_XARCH bool useNullCheck = !ind->Addr()->isContained(); ind->ClearDontExtend(); @@ -9533,7 +9533,7 @@ void Lowering::TryRetypingFloatingPointStoreToIntegerStore(GenTree* store) #if defined(TARGET_XARCH) || defined(TARGET_ARM) bool shouldSwitchToInteger = true; #else // TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64 - bool shouldSwitchToInteger = FloatingPointUtils::isPositiveZero(dblCns); + bool shouldSwitchToInteger = FloatingPointUtils::isPositiveZero(dblCns); #endif if (shouldSwitchToInteger) diff --git a/src/coreclr/jit/lower.h b/src/coreclr/jit/lower.h index 76124820944f3..318a148ee9c1c 100644 --- a/src/coreclr/jit/lower.h +++ b/src/coreclr/jit/lower.h @@ -88,14 +88,14 @@ class Lowering final : public Phase void ContainCheckLclHeap(GenTreeOp* node); void ContainCheckRet(GenTreeUnOp* ret); #ifdef TARGET_ARM64 - bool TryLowerAndOrToCCMP(GenTreeOp* tree, GenTree** next); + bool TryLowerAndOrToCCMP(GenTreeOp* tree, GenTree** next); insCflags TruthifyingFlags(GenCondition cond); - void ContainCheckConditionalCompare(GenTreeCCMP* ccmp); - void ContainCheckNeg(GenTreeOp* neg); - void TryLowerCnsIntCselToCinc(GenTreeOp* select, GenTree* cond); - void TryLowerCselToCSOp(GenTreeOp* select, GenTree* cond); - bool TryLowerAddSubToMulLongOp(GenTreeOp* op, GenTree** next); - bool TryLowerNegToMulLongOp(GenTreeOp* op, GenTree** next); + void ContainCheckConditionalCompare(GenTreeCCMP* ccmp); + void ContainCheckNeg(GenTreeOp* neg); + void TryLowerCnsIntCselToCinc(GenTreeOp* select, GenTree* cond); + void TryLowerCselToCSOp(GenTreeOp* select, GenTree* cond); + bool TryLowerAddSubToMulLongOp(GenTreeOp* op, GenTree** next); + bool TryLowerNegToMulLongOp(GenTreeOp* op, GenTree** next); #endif void ContainCheckSelect(GenTreeOp* select); void ContainCheckBitCast(GenTree* node); @@ -129,7 +129,7 @@ class Lowering final : public Phase static bool CheckBlock(Compiler* compiler, BasicBlock* block); #endif // DEBUG - void LowerBlock(BasicBlock* block); + void LowerBlock(BasicBlock* block); GenTree* LowerNode(GenTree* node); bool IsCFGCallArgInvariantInRange(GenTree* node, GenTree* endExclusive); @@ -138,28 +138,28 @@ class Lowering final : public Phase // Call Lowering // ------------------------------ GenTree* LowerCall(GenTree* call); - bool LowerCallMemmove(GenTreeCall* call, GenTree** next); - bool LowerCallMemcmp(GenTreeCall* call, GenTree** next); - bool LowerCallMemset(GenTreeCall* call, GenTree** next); - void LowerCFGCall(GenTreeCall* call); - void MoveCFGCallArgs(GenTreeCall* call); - void MoveCFGCallArg(GenTreeCall* call, GenTree* node); + bool LowerCallMemmove(GenTreeCall* 
call, GenTree** next); + bool LowerCallMemcmp(GenTreeCall* call, GenTree** next); + bool LowerCallMemset(GenTreeCall* call, GenTree** next); + void LowerCFGCall(GenTreeCall* call); + void MoveCFGCallArgs(GenTreeCall* call); + void MoveCFGCallArg(GenTreeCall* call, GenTree* node); #ifndef TARGET_64BIT GenTree* DecomposeLongCompare(GenTree* cmp); #endif - GenTree* OptimizeConstCompare(GenTree* cmp); - GenTree* LowerCompare(GenTree* cmp); - GenTree* LowerJTrue(GenTreeOp* jtrue); - GenTree* LowerSelect(GenTreeConditional* cond); - bool TryLowerConditionToFlagsNode(GenTree* parent, GenTree* condition, GenCondition* code); + GenTree* OptimizeConstCompare(GenTree* cmp); + GenTree* LowerCompare(GenTree* cmp); + GenTree* LowerJTrue(GenTreeOp* jtrue); + GenTree* LowerSelect(GenTreeConditional* cond); + bool TryLowerConditionToFlagsNode(GenTree* parent, GenTree* condition, GenCondition* code); GenTreeCC* LowerNodeCC(GenTree* node, GenCondition condition); - void LowerJmpMethod(GenTree* jmp); - void LowerRet(GenTreeUnOp* ret); - void LowerStoreLocCommon(GenTreeLclVarCommon* lclVar); - void LowerRetStruct(GenTreeUnOp* ret); - void LowerRetSingleRegStructLclVar(GenTreeUnOp* ret); - void LowerCallStruct(GenTreeCall* call); - void LowerStoreSingleRegCallStruct(GenTreeBlk* store); + void LowerJmpMethod(GenTree* jmp); + void LowerRet(GenTreeUnOp* ret); + void LowerStoreLocCommon(GenTreeLclVarCommon* lclVar); + void LowerRetStruct(GenTreeUnOp* ret); + void LowerRetSingleRegStructLclVar(GenTreeUnOp* ret); + void LowerCallStruct(GenTreeCall* call); + void LowerStoreSingleRegCallStruct(GenTreeBlk* store); #if !defined(WINDOWS_AMD64_ABI) GenTreeLclVar* SpillStructCallResult(GenTreeCall* call) const; #endif // WINDOWS_AMD64_ABI @@ -168,29 +168,29 @@ class Lowering final : public Phase GenTree* LowerDirectCall(GenTreeCall* call); GenTree* LowerNonvirtPinvokeCall(GenTreeCall* call); GenTree* LowerTailCallViaJitHelper(GenTreeCall* callNode, GenTree* callTarget); - void LowerFastTailCall(GenTreeCall* callNode); - void RehomeArgForFastTailCall(unsigned int lclNum, - GenTree* insertTempBefore, - GenTree* lookForUsesStart, - GenTreeCall* callNode); - void InsertProfTailCallHook(GenTreeCall* callNode, GenTree* insertionPoint); + void LowerFastTailCall(GenTreeCall* callNode); + void RehomeArgForFastTailCall(unsigned int lclNum, + GenTree* insertTempBefore, + GenTree* lookForUsesStart, + GenTreeCall* callNode); + void InsertProfTailCallHook(GenTreeCall* callNode, GenTree* insertionPoint); GenTree* FindEarliestPutArg(GenTreeCall* call); - size_t MarkPutArgNodes(GenTree* node); + size_t MarkPutArgNodes(GenTree* node); GenTree* LowerVirtualVtableCall(GenTreeCall* call); GenTree* LowerVirtualStubCall(GenTreeCall* call); - void LowerArgsForCall(GenTreeCall* call); - void ReplaceArgWithPutArgOrBitcast(GenTree** ppChild, GenTree* newNode); + void LowerArgsForCall(GenTreeCall* call); + void ReplaceArgWithPutArgOrBitcast(GenTree** ppChild, GenTree* newNode); GenTree* NewPutArg(GenTreeCall* call, GenTree* arg, CallArg* callArg, var_types type); - void LowerArg(GenTreeCall* call, CallArg* callArg, bool late); + void LowerArg(GenTreeCall* call, CallArg* callArg, bool late); #if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) GenTree* LowerFloatArg(GenTree** pArg, CallArg* callArg); GenTree* LowerFloatArgReg(GenTree* arg, regNumber regNum); #endif - void InsertPInvokeCallProlog(GenTreeCall* call); - void InsertPInvokeCallEpilog(GenTreeCall* call); - void InsertPInvokeMethodProlog(); - void 
InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr)); + void InsertPInvokeCallProlog(GenTreeCall* call); + void InsertPInvokeCallEpilog(GenTreeCall* call); + void InsertPInvokeMethodProlog(); + void InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr)); GenTree* SetGCState(int cns); GenTree* CreateReturnTrapSeq(); enum FrameLinkAction @@ -316,31 +316,31 @@ class Lowering final : public Phase #endif // defined(TARGET_XARCH) // Per tree node member functions - void LowerStoreIndirCommon(GenTreeStoreInd* ind); + void LowerStoreIndirCommon(GenTreeStoreInd* ind); GenTree* LowerIndir(GenTreeIndir* ind); - bool OptimizeForLdp(GenTreeIndir* ind); - bool TryMakeIndirsAdjacent(GenTreeIndir* prevIndir, GenTreeIndir* indir); - void MarkTree(GenTree* root); - void UnmarkTree(GenTree* root); - void LowerStoreIndir(GenTreeStoreInd* node); - void LowerStoreIndirCoalescing(GenTreeStoreInd* node); + bool OptimizeForLdp(GenTreeIndir* ind); + bool TryMakeIndirsAdjacent(GenTreeIndir* prevIndir, GenTreeIndir* indir); + void MarkTree(GenTree* root); + void UnmarkTree(GenTree* root); + void LowerStoreIndir(GenTreeStoreInd* node); + void LowerStoreIndirCoalescing(GenTreeStoreInd* node); GenTree* LowerAdd(GenTreeOp* node); GenTree* LowerMul(GenTreeOp* mul); - bool TryLowerAndNegativeOne(GenTreeOp* node, GenTree** nextNode); + bool TryLowerAndNegativeOne(GenTreeOp* node, GenTree** nextNode); GenTree* LowerBinaryArithmetic(GenTreeOp* binOp); - bool LowerUnsignedDivOrMod(GenTreeOp* divMod); - bool TryLowerConstIntDivOrMod(GenTree* node, GenTree** nextNode); + bool LowerUnsignedDivOrMod(GenTreeOp* divMod); + bool TryLowerConstIntDivOrMod(GenTree* node, GenTree** nextNode); GenTree* LowerSignedDivOrMod(GenTree* node); - void LowerBlockStore(GenTreeBlk* blkNode); - void LowerBlockStoreCommon(GenTreeBlk* blkNode); - void LowerBlockStoreAsHelperCall(GenTreeBlk* blkNode); - void LowerLclHeap(GenTree* node); - void ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr, GenTree* addrParent); - void LowerPutArgStkOrSplit(GenTreePutArgStk* putArgNode); + void LowerBlockStore(GenTreeBlk* blkNode); + void LowerBlockStoreCommon(GenTreeBlk* blkNode); + void LowerBlockStoreAsHelperCall(GenTreeBlk* blkNode); + void LowerLclHeap(GenTree* node); + void ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr, GenTree* addrParent); + void LowerPutArgStkOrSplit(GenTreePutArgStk* putArgNode); GenTree* LowerArrLength(GenTreeArrCommon* node); #ifdef TARGET_XARCH - void LowerPutArgStk(GenTreePutArgStk* putArgStk); + void LowerPutArgStk(GenTreePutArgStk* putArgStk); GenTree* TryLowerMulWithConstant(GenTreeOp* node); #endif // TARGET_XARCH @@ -351,12 +351,12 @@ class Lowering final : public Phase void TryRetypingFloatingPointStoreToIntegerStore(GenTree* store); GenTree* LowerSwitch(GenTree* node); - bool TryLowerSwitchToBitTest(FlowEdge* jumpTable[], - unsigned jumpCount, - unsigned targetCount, - BasicBlock* bbSwitch, - GenTree* switchValue, - weight_t defaultLikelihood); + bool TryLowerSwitchToBitTest(FlowEdge* jumpTable[], + unsigned jumpCount, + unsigned targetCount, + BasicBlock* bbSwitch, + GenTree* switchValue, + weight_t defaultLikelihood); void LowerCast(GenTree* node); @@ -374,12 +374,12 @@ class Lowering final : public Phase void LowerShift(GenTreeOp* shift); #ifdef FEATURE_HW_INTRINSICS GenTree* LowerHWIntrinsic(GenTreeHWIntrinsic* node); - void LowerHWIntrinsicCC(GenTreeHWIntrinsic* node, NamedIntrinsic newIntrinsicId, GenCondition condition); + 
void LowerHWIntrinsicCC(GenTreeHWIntrinsic* node, NamedIntrinsic newIntrinsicId, GenCondition condition); GenTree* LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp); GenTree* LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node); GenTree* LowerHWIntrinsicDot(GenTreeHWIntrinsic* node); #if defined(TARGET_XARCH) - void LowerFusedMultiplyAdd(GenTreeHWIntrinsic* node); + void LowerFusedMultiplyAdd(GenTreeHWIntrinsic* node); GenTree* LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node); GenTree* LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node); GenTree* LowerHWIntrinsicCndSel(GenTreeHWIntrinsic* node); @@ -389,7 +389,7 @@ class Lowering final : public Phase GenTree* TryLowerAndOpToExtractLowestSetBit(GenTreeOp* andNode); GenTree* TryLowerAndOpToAndNot(GenTreeOp* andNode); GenTree* TryLowerXorOpToGetMaskUpToLowestSetBit(GenTreeOp* xorNode); - void LowerBswapOp(GenTreeOp* node); + void LowerBswapOp(GenTreeOp* node); #elif defined(TARGET_ARM64) bool IsValidConstForMovImm(GenTreeHWIntrinsic* node); void LowerHWIntrinsicFusedMultiplyAddScalar(GenTreeHWIntrinsic* node); @@ -589,7 +589,9 @@ class Lowering final : public Phase target_ssize_t Offset; SavedIndir(GenTreeIndir* indir, GenTreeLclVar* addrBase, target_ssize_t offset) - : Indir(indir), AddrBase(addrBase), Offset(offset) + : Indir(indir) + , AddrBase(addrBase) + , Offset(offset) { } }; diff --git a/src/coreclr/jit/lowerarmarch.cpp b/src/coreclr/jit/lowerarmarch.cpp index 9d28135c92a1a..498093ae6fc52 100644 --- a/src/coreclr/jit/lowerarmarch.cpp +++ b/src/coreclr/jit/lowerarmarch.cpp @@ -726,7 +726,7 @@ void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenT { return; } -#else // !TARGET_ARM +#else // !TARGET_ARM if ((ClrSafeInt(offset) + ClrSafeInt(size)).IsOverflow()) { return; diff --git a/src/coreclr/jit/lowerxarch.cpp b/src/coreclr/jit/lowerxarch.cpp index 5c4f05a04ad57..999a3fc6d338c 100644 --- a/src/coreclr/jit/lowerxarch.cpp +++ b/src/coreclr/jit/lowerxarch.cpp @@ -690,13 +690,13 @@ void Lowering::LowerPutArgStk(GenTreePutArgStk* putArgStk) else #endif // TARGET_X86 if (loadSize <= comp->getUnrollThreshold(Compiler::UnrollKind::Memcpy)) - { - putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Unroll; - } - else - { - putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::RepInstr; - } + { + putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Unroll; + } + else + { + putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::RepInstr; + } } else // There are GC pointers. { @@ -767,7 +767,7 @@ void Lowering::LowerPutArgStk(GenTreePutArgStk* putArgStk) #if defined(TARGET_AMD64) && !src->IsIntegralConst(0) #endif // TARGET_AMD64 - ) + ) { MakeSrcContained(putArgStk, src); } @@ -1767,8 +1767,8 @@ GenTree* Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node) // currently ANDNOT logic cannot be optimized by the ternary node. break; } - GenTree* op3 = second->AsHWIntrinsic()->Op(1) == node ? second->AsHWIntrinsic()->Op(2) - : second->AsHWIntrinsic()->Op(1); + GenTree* op3 = second->AsHWIntrinsic()->Op(1) == node ? 
second->AsHWIntrinsic()->Op(2) + : second->AsHWIntrinsic()->Op(1); GenTree* control = comp->gtNewIconNode(node->GetTernaryControlByte(second->AsHWIntrinsic())); CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); unsigned simdSize = node->GetSimdSize(); @@ -6650,12 +6650,12 @@ void Lowering::ContainCheckCallOperands(GenTreeCall* call) else #endif // TARGET_X86 if (ctrlExpr->isIndir()) - { - // We may have cases where we have set a register target on the ctrlExpr, but if it - // contained we must clear it. - ctrlExpr->SetRegNum(REG_NA); - MakeSrcContained(call, ctrlExpr); - } + { + // We may have cases where we have set a register target on the ctrlExpr, but if it + // contained we must clear it. + ctrlExpr->SetRegNum(REG_NA); + MakeSrcContained(call, ctrlExpr); + } } } @@ -10027,8 +10027,8 @@ void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node) if (op1->IsVectorZero()) { -// When op1 is zero, we can contain it and we expect that -// ival is already in the correct state to account for it + // When op1 is zero, we can contain it and we expect that + // ival is already in the correct state to account for it #if DEBUG ssize_t ival = lastOp->AsIntConCommon()->IconValue(); @@ -10048,8 +10048,8 @@ void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node) } else if (op2->IsVectorZero()) { -// When op2 is zero, we can contain it and we expect that -// zmask is already in the correct state to account for it + // When op2 is zero, we can contain it and we expect that + // zmask is already in the correct state to account for it #if DEBUG ssize_t ival = lastOp->AsIntConCommon()->IconValue(); diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index b2d37b9becad9..50652ca075254 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -384,9 +384,9 @@ void LinearScan::updateSpillCost(regNumber reg, Interval* interval) // interval - Interval of Refposition. // assignedReg - Assigned register for this refposition. // -void LinearScan::updateRegsFreeBusyState(RefPosition& refPosition, - regMaskTP regsBusy, - regMaskTP* regsToFree, +void LinearScan::updateRegsFreeBusyState(RefPosition& refPosition, + regMaskTP regsBusy, + regMaskTP* regsToFree, regMaskTP* delayRegsToFree DEBUG_ARG(Interval* interval) DEBUG_ARG(regNumber assignedReg)) { @@ -1437,7 +1437,7 @@ PhaseStatus LinearScan::doLinearScan() #ifdef DEBUG || VERBOSE #endif - ) + ) { dumpLsraStats(jitstdout()); } @@ -1771,7 +1771,7 @@ template void LinearScan::identifyCandidates(); // TODO-Cleanup: This was cloned from Compiler::lvaSortByRefCount() in lclvars.cpp in order // to avoid perturbation, but should be merged. template -void LinearScan::identifyCandidates() +void LinearScan::identifyCandidates() { if (localVarsEnregistered) { @@ -2022,24 +2022,24 @@ void LinearScan::identifyCandidates() else #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (regType(type) == FloatRegisterType) - { - floatVarCount++; - weight_t refCntWtd = varDsc->lvRefCntWtd(); - if (varDsc->lvIsRegArg) - { - // Don't count the initial reference for register params. In those cases, - // using a callee-save causes an extra copy. 
- refCntWtd -= BB_UNITY_WEIGHT; - } - if (refCntWtd >= thresholdFPRefCntWtd) { - VarSetOps::AddElemD(compiler, fpCalleeSaveCandidateVars, varDsc->lvVarIndex); - } - else if (refCntWtd >= maybeFPRefCntWtd) - { - VarSetOps::AddElemD(compiler, fpMaybeCandidateVars, varDsc->lvVarIndex); + floatVarCount++; + weight_t refCntWtd = varDsc->lvRefCntWtd(); + if (varDsc->lvIsRegArg) + { + // Don't count the initial reference for register params. In those cases, + // using a callee-save causes an extra copy. + refCntWtd -= BB_UNITY_WEIGHT; + } + if (refCntWtd >= thresholdFPRefCntWtd) + { + VarSetOps::AddElemD(compiler, fpCalleeSaveCandidateVars, varDsc->lvVarIndex); + } + else if (refCntWtd >= maybeFPRefCntWtd) + { + VarSetOps::AddElemD(compiler, fpMaybeCandidateVars, varDsc->lvVarIndex); + } } - } JITDUMP(" "); DBEXEC(VERBOSE, newInt->dump(compiler)); } @@ -2498,7 +2498,7 @@ void LinearScan::checkLastUses(BasicBlock* block) // the register locations will be "rotated" to stress the resolution and allocation // code. // -BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, +BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, BasicBlock* prevBlock DEBUGARG(bool* pPredBlockIsAllocated)) { BasicBlock* predBlock = nullptr; @@ -2695,33 +2695,33 @@ void LinearScan::setFrameType() else #endif // DOUBLE_ALIGN if (compiler->codeGen->isFramePointerRequired()) - { - frameType = FT_EBP_FRAME; - } - else - { - if (compiler->rpMustCreateEBPCalled == false) - { -#ifdef DEBUG - const char* reason; -#endif // DEBUG - compiler->rpMustCreateEBPCalled = true; - if (compiler->rpMustCreateEBPFrame(INDEBUG(&reason))) - { - JITDUMP("; Decided to create an EBP based frame for ETW stackwalking (%s)\n", reason); - compiler->codeGen->setFrameRequired(true); - } - } - - if (compiler->codeGen->isFrameRequired()) { frameType = FT_EBP_FRAME; } else { - frameType = FT_ESP_FRAME; + if (compiler->rpMustCreateEBPCalled == false) + { +#ifdef DEBUG + const char* reason; +#endif // DEBUG + compiler->rpMustCreateEBPCalled = true; + if (compiler->rpMustCreateEBPFrame(INDEBUG(&reason))) + { + JITDUMP("; Decided to create an EBP based frame for ETW stackwalking (%s)\n", reason); + compiler->codeGen->setFrameRequired(true); + } + } + + if (compiler->codeGen->isFrameRequired()) + { + frameType = FT_EBP_FRAME; + } + else + { + frameType = FT_ESP_FRAME; + } } - } switch (frameType) { @@ -2941,7 +2941,7 @@ bool LinearScan::isMatchingConstant(RegRecord* physRegRecord, RefPosition* refPo // for enregistration. It simply finds the register to be assigned, if it was assigned to something // else, then will unassign it and then assign to the currentInterval // -regNumber LinearScan::allocateRegMinimal(Interval* currentInterval, +regNumber LinearScan::allocateRegMinimal(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)) { assert(!enregisterLocalVars); @@ -3004,7 +3004,7 @@ regNumber LinearScan::allocateRegMinimal(Interval* currentInterval, // no such ref position, no register will be allocated. // template -regNumber LinearScan::allocateReg(Interval* currentInterval, +regNumber LinearScan::allocateReg(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)) { regMaskTP foundRegBit = @@ -7829,7 +7829,7 @@ void LinearScan::updateMaxSpill(RefPosition* refPosition) // the tree, and performs resolution across joins and back edges. 
// template -void LinearScan::resolveRegisters() +void LinearScan::resolveRegisters() { // Iterate over the tree and the RefPositions in lockstep // - annotate the tree with register assignments by setting GetRegNum() or gtRegPair (for longs) @@ -8302,8 +8302,8 @@ void LinearScan::resolveRegisters() { regMaskTP initialRegMask = interval->firstRefPosition->registerAssignment; regNumber initialReg = (initialRegMask == RBM_NONE || interval->firstRefPosition->spillAfter) - ? REG_STK - : genRegNumFromMask(initialRegMask); + ? REG_STK + : genRegNumFromMask(initialRegMask); #ifdef TARGET_ARM if (varTypeIsMultiReg(varDsc)) @@ -8750,12 +8750,12 @@ regNumber LinearScan::getTempRegForResolution(BasicBlock* fromBlock, // Notes: // It inserts at least one move and updates incoming parameter 'location'. // -void LinearScan::addResolutionForDouble(BasicBlock* block, - GenTree* insertionPoint, - Interval** sourceIntervals, - regNumberSmall* location, - regNumber toReg, - regNumber fromReg, +void LinearScan::addResolutionForDouble(BasicBlock* block, + GenTree* insertionPoint, + Interval** sourceIntervals, + regNumberSmall* location, + regNumber toReg, + regNumber fromReg, ResolveType resolveType DEBUG_ARG(BasicBlock* fromBlock) DEBUG_ARG(BasicBlock* toBlock)) { @@ -8825,10 +8825,10 @@ void LinearScan::addResolutionForDouble(BasicBlock* block, // The next time, we want to move from the stack to the destination (toReg), // in which case fromReg will be REG_STK, and we insert at the top. // -void LinearScan::addResolution(BasicBlock* block, - GenTree* insertionPoint, - Interval* interval, - regNumber toReg, +void LinearScan::addResolution(BasicBlock* block, + GenTree* insertionPoint, + Interval* interval, + regNumber toReg, regNumber fromReg DEBUG_ARG(BasicBlock* fromBlock) DEBUG_ARG(BasicBlock* toBlock) DEBUG_ARG(const char* reason)) { @@ -9952,7 +9952,7 @@ const char* LinearScan::getStatName(unsigned stat) #define LSRA_STAT_DEF(stat, name) name, #include "lsra_stats.h" #undef LSRA_STAT_DEF -#define REG_SEL_DEF(stat, value, shortname, orderSeqId) #stat, +#define REG_SEL_DEF(stat, value, shortname, orderSeqId) #stat, #define BUSY_REG_SEL_DEF(stat, value, shortname, orderSeqId) REG_SEL_DEF(stat, value, shortname, orderSeqId) #include "lsra_score.h" }; @@ -11272,9 +11272,8 @@ void LinearScan::dumpRegRecordHeader() // l is either '*' (if a last use) or ' ' (otherwise) // d is either 'D' (if a delayed use) or ' ' (otherwise) - maxNodeLocation = (maxNodeLocation == 0) - ? 1 - : maxNodeLocation; // corner case of a method with an infinite loop without any GenTree nodes + maxNodeLocation = (maxNodeLocation == 0) ? 1 : maxNodeLocation; // corner case of a method with an infinite loop + // without any GenTree nodes assert(maxNodeLocation >= 1); assert(refPositions.size() >= 1); int treeIdWidth = 9; /* '[XXXXX] '*/ @@ -12404,7 +12403,7 @@ LinearScan::RegisterSelection::RegisterSelection(LinearScan* linearScan) #ifdef TARGET_ARM64 && !linearScan->compiler->info.compNeedsConsecutiveRegisters #endif - ) + ) { ordering = W("MQQQQQQQQQQQQQQQQ"); } @@ -13121,7 +13120,7 @@ void LinearScan::RegisterSelection::try_PREV_REG_OPT() && !refPosition->needsConsecutive #endif - ) + ) { assert(!"Spill candidate has no assignedInterval recentRefPosition"); } @@ -13253,7 +13252,7 @@ void LinearScan::RegisterSelection::calculateCoversSets() // Register bit selected (a single register) and REG_NA if no register was selected. 
// template -regMaskTP LinearScan::RegisterSelection::select(Interval* currentInterval, +regMaskTP LinearScan::RegisterSelection::select(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)) { #ifdef DEBUG @@ -13718,7 +13717,7 @@ regMaskTP LinearScan::RegisterSelection::select(Interval* currentInterval, // select the REG_ORDER heuristics (if there are any free candidates) or REG_NUM (if all registers // are busy). // -regMaskTP LinearScan::RegisterSelection::selectMinimal(Interval* currentInterval, +regMaskTP LinearScan::RegisterSelection::selectMinimal(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)) { assert(!linearScan->enregisterLocalVars); diff --git a/src/coreclr/jit/lsra.h b/src/coreclr/jit/lsra.h index 9620abbc5a782..e038b4e8243a5 100644 --- a/src/coreclr/jit/lsra.h +++ b/src/coreclr/jit/lsra.h @@ -30,13 +30,13 @@ const unsigned int MaxInternalRegisters = 8; const unsigned int RegisterTypeCount = 2; /***************************************************************************** -* Register types -*****************************************************************************/ + * Register types + *****************************************************************************/ typedef var_types RegisterType; -#define IntRegisterType TYP_INT +#define IntRegisterType TYP_INT #define FloatRegisterType TYP_FLOAT -#define MaskRegisterType TYP_MASK +#define MaskRegisterType TYP_MASK //------------------------------------------------------------------------ // regType: Return the RegisterType to use for a given type @@ -83,7 +83,9 @@ struct RefInfo RefPosition* ref; GenTree* treeNode; - RefInfo(RefPosition* r, GenTree* t) : ref(r), treeNode(t) + RefInfo(RefPosition* r, GenTree* t) + : ref(r) + , treeNode(t) { } @@ -107,7 +109,8 @@ class RefInfoListNode final : public RefInfo RefInfoListNode* m_next; // The next node in the list public: - RefInfoListNode(RefPosition* r, GenTree* t) : RefInfo(r, t) + RefInfoListNode(RefPosition* r, GenTree* t) + : RefInfo(r, t) { } @@ -134,11 +137,15 @@ class RefInfoList final RefInfoListNode* m_tail; // The tail of the list public: - RefInfoList() : m_head(nullptr), m_tail(nullptr) + RefInfoList() + : m_head(nullptr) + , m_tail(nullptr) { } - RefInfoList(RefInfoListNode* node) : m_head(node), m_tail(node) + RefInfoList(RefInfoListNode* node) + : m_head(node) + , m_tail(node) { assert(m_head->m_next == nullptr); } @@ -365,7 +372,7 @@ class RefInfoListNodePool final public: RefInfoListNodePool(Compiler* compiler, unsigned preallocate = defaultPreallocation); RefInfoListNode* GetNode(RefPosition* r, GenTree* t); - void ReturnNode(RefInfoListNode* listNode); + void ReturnNode(RefInfoListNode* listNode); }; #if TRACK_LSRA_STATS @@ -374,7 +381,7 @@ enum LsraStat #define LSRA_STAT_DEF(enum_name, enum_str) enum_name, #include "lsra_stats.h" #undef LSRA_STAT_DEF -#define REG_SEL_DEF(enum_name, value, short_str, orderSeqId) STAT_##enum_name, +#define REG_SEL_DEF(enum_name, value, short_str, orderSeqId) STAT_##enum_name, #define BUSY_REG_SEL_DEF(enum_name, value, short_str, orderSeqId) REG_SEL_DEF(enum_name, value, short_str, orderSeqId) #include "lsra_score.h" COUNT @@ -387,11 +394,11 @@ struct LsraBlockInfo // 0 for fgFirstBB. 
unsigned int predBBNum; weight_t weight; - bool hasCriticalInEdge : 1; + bool hasCriticalInEdge : 1; bool hasCriticalOutEdge : 1; - bool hasEHBoundaryIn : 1; - bool hasEHBoundaryOut : 1; - bool hasEHPred : 1; + bool hasEHBoundaryIn : 1; + bool hasEHBoundaryOut : 1; + bool hasEHPred : 1; #if TRACK_LSRA_STATS // Per block maintained LSRA statistics. @@ -401,7 +408,7 @@ struct LsraBlockInfo enum RegisterScore { -#define REG_SEL_DEF(enum_name, value, short_str, orderSeqId) enum_name = value, +#define REG_SEL_DEF(enum_name, value, short_str, orderSeqId) enum_name = value, #define BUSY_REG_SEL_DEF(enum_name, value, short_str, orderSeqId) REG_SEL_DEF(enum_name, value, short_str, orderSeqId) #include "lsra_score.h" NONE = 0 @@ -635,7 +642,7 @@ class LinearScan : public LinearScanInterface // This does the dataflow analysis and builds the intervals template - void buildIntervals(); + void buildIntervals(); // This is where the actual assignment is done for scenarios where // no local var enregistration is done. @@ -648,7 +655,7 @@ class LinearScan : public LinearScanInterface void allocateRegisters(); // This is the resolution phase, where cross-block mismatches are fixed up template - void resolveRegisters(); + void resolveRegisters(); void writeRegisters(RefPosition* currentRefPosition, GenTree* tree); @@ -658,7 +665,7 @@ class LinearScan : public LinearScanInterface void insertCopyOrReload(BasicBlock* block, GenTree* tree, unsigned multiRegIdx, RefPosition* refPosition); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE - void makeUpperVectorInterval(unsigned varIndex); + void makeUpperVectorInterval(unsigned varIndex); Interval* getUpperVectorInterval(unsigned varIndex); // Save the upper half of a vector that lives in a callee-save register at the point of a call. @@ -693,20 +700,20 @@ class LinearScan : public LinearScanInterface }; #ifdef TARGET_ARM - void addResolutionForDouble(BasicBlock* block, - GenTree* insertionPoint, - Interval** sourceIntervals, - regNumberSmall* location, - regNumber toReg, - regNumber fromReg, + void addResolutionForDouble(BasicBlock* block, + GenTree* insertionPoint, + Interval** sourceIntervals, + regNumberSmall* location, + regNumber toReg, + regNumber fromReg, ResolveType resolveType DEBUG_ARG(BasicBlock* fromBlock) DEBUG_ARG(BasicBlock* toBlock)); #endif - void addResolution(BasicBlock* block, - GenTree* insertionPoint, - Interval* interval, - regNumber outReg, + void addResolution(BasicBlock* block, + GenTree* insertionPoint, + Interval* interval, + regNumber outReg, regNumber inReg DEBUG_ARG(BasicBlock* fromBlock) DEBUG_ARG(BasicBlock* toBlock) DEBUG_ARG(const char* reason)); @@ -816,8 +823,14 @@ class LinearScan : public LinearScanInterface // This controls the heuristics used to select registers // These can be combined. 
- enum LsraSelect{LSRA_SELECT_DEFAULT = 0, LSRA_SELECT_REVERSE_HEURISTICS = 0x04, - LSRA_SELECT_REVERSE_CALLER_CALLEE = 0x08, LSRA_SELECT_NEAREST = 0x10, LSRA_SELECT_MASK = 0x1c}; + enum LsraSelect + { + LSRA_SELECT_DEFAULT = 0, + LSRA_SELECT_REVERSE_HEURISTICS = 0x04, + LSRA_SELECT_REVERSE_CALLER_CALLEE = 0x08, + LSRA_SELECT_NEAREST = 0x10, + LSRA_SELECT_MASK = 0x1c + }; LsraSelect getSelectionHeuristics() { return (LsraSelect)(lsraStressMask & LSRA_SELECT_MASK); @@ -836,9 +849,14 @@ class LinearScan : public LinearScanInterface } // This controls the order in which basic blocks are visited during allocation - enum LsraTraversalOrder{LSRA_TRAVERSE_LAYOUT = 0x20, LSRA_TRAVERSE_PRED_FIRST = 0x40, - LSRA_TRAVERSE_RANDOM = 0x60, // NYI - LSRA_TRAVERSE_DEFAULT = LSRA_TRAVERSE_PRED_FIRST, LSRA_TRAVERSE_MASK = 0x60}; + enum LsraTraversalOrder + { + LSRA_TRAVERSE_LAYOUT = 0x20, + LSRA_TRAVERSE_PRED_FIRST = 0x40, + LSRA_TRAVERSE_RANDOM = 0x60, // NYI + LSRA_TRAVERSE_DEFAULT = LSRA_TRAVERSE_PRED_FIRST, + LSRA_TRAVERSE_MASK = 0x60 + }; LsraTraversalOrder getLsraTraversalOrder() { if ((lsraStressMask & LSRA_TRAVERSE_MASK) == 0) @@ -858,7 +876,12 @@ class LinearScan : public LinearScanInterface // This controls whether lifetimes should be extended to the entire method. // Note that this has no effect under MinOpts - enum LsraExtendLifetimes{LSRA_DONT_EXTEND = 0, LSRA_EXTEND_LIFETIMES = 0x80, LSRA_EXTEND_LIFETIMES_MASK = 0x80}; + enum LsraExtendLifetimes + { + LSRA_DONT_EXTEND = 0, + LSRA_EXTEND_LIFETIMES = 0x80, + LSRA_EXTEND_LIFETIMES_MASK = 0x80 + }; LsraExtendLifetimes getLsraExtendLifeTimes() { return (LsraExtendLifetimes)(lsraStressMask & LSRA_EXTEND_LIFETIMES_MASK); @@ -871,8 +894,13 @@ class LinearScan : public LinearScanInterface // This controls whether variables locations should be set to the previous block in layout order // (LSRA_BLOCK_BOUNDARY_LAYOUT), or to that of the highest-weight predecessor (LSRA_BLOCK_BOUNDARY_PRED - // the default), or rotated (LSRA_BLOCK_BOUNDARY_ROTATE). 
- enum LsraBlockBoundaryLocations{LSRA_BLOCK_BOUNDARY_PRED = 0, LSRA_BLOCK_BOUNDARY_LAYOUT = 0x100, - LSRA_BLOCK_BOUNDARY_ROTATE = 0x200, LSRA_BLOCK_BOUNDARY_MASK = 0x300}; + enum LsraBlockBoundaryLocations + { + LSRA_BLOCK_BOUNDARY_PRED = 0, + LSRA_BLOCK_BOUNDARY_LAYOUT = 0x100, + LSRA_BLOCK_BOUNDARY_ROTATE = 0x200, + LSRA_BLOCK_BOUNDARY_MASK = 0x300 + }; LsraBlockBoundaryLocations getLsraBlockBoundaryLocations() { return (LsraBlockBoundaryLocations)(lsraStressMask & LSRA_BLOCK_BOUNDARY_MASK); @@ -881,7 +909,12 @@ class LinearScan : public LinearScanInterface // This controls whether we always insert a GT_RELOAD instruction after a spill // Note that this can be combined with LSRA_SPILL_ALWAYS (or not) - enum LsraReload{LSRA_NO_RELOAD_IF_SAME = 0, LSRA_ALWAYS_INSERT_RELOAD = 0x400, LSRA_RELOAD_MASK = 0x400}; + enum LsraReload + { + LSRA_NO_RELOAD_IF_SAME = 0, + LSRA_ALWAYS_INSERT_RELOAD = 0x400, + LSRA_RELOAD_MASK = 0x400 + }; LsraReload getLsraReload() { return (LsraReload)(lsraStressMask & LSRA_RELOAD_MASK); @@ -892,7 +925,12 @@ class LinearScan : public LinearScanInterface } // This controls whether we spill everywhere - enum LsraSpill{LSRA_DONT_SPILL_ALWAYS = 0, LSRA_SPILL_ALWAYS = 0x800, LSRA_SPILL_MASK = 0x800}; + enum LsraSpill + { + LSRA_DONT_SPILL_ALWAYS = 0, + LSRA_SPILL_ALWAYS = 0x800, + LSRA_SPILL_MASK = 0x800 + }; LsraSpill getLsraSpill() { return (LsraSpill)(lsraStressMask & LSRA_SPILL_MASK); @@ -904,8 +942,12 @@ class LinearScan : public LinearScanInterface // This controls whether RefPositions that lower/codegen indicated as reg optional be // allocated a reg at all. - enum LsraRegOptionalControl{LSRA_REG_OPTIONAL_DEFAULT = 0, LSRA_REG_OPTIONAL_NO_ALLOC = 0x1000, - LSRA_REG_OPTIONAL_MASK = 0x1000}; + enum LsraRegOptionalControl + { + LSRA_REG_OPTIONAL_DEFAULT = 0, + LSRA_REG_OPTIONAL_NO_ALLOC = 0x1000, + LSRA_REG_OPTIONAL_MASK = 0x1000 + }; LsraRegOptionalControl getLsraRegOptionalControl() { @@ -988,7 +1030,7 @@ class LinearScan : public LinearScanInterface private: // Determine which locals are candidates for allocation template - void identifyCandidates(); + void identifyCandidates(); // determine which locals are used in EH constructs we don't want to deal with void identifyCandidatesExceptionDataflow(); @@ -997,8 +1039,8 @@ class LinearScan : public LinearScanInterface #ifdef DEBUG void checkLastUses(BasicBlock* block); - int ComputeOperandDstCount(GenTree* operand); - int ComputeAvailableSrcCount(GenTree* node); + int ComputeOperandDstCount(GenTree* operand); + int ComputeAvailableSrcCount(GenTree* node); #endif // DEBUG void setFrameType(); @@ -1014,20 +1056,20 @@ class LinearScan : public LinearScanInterface void resetAllRegistersState(); #ifdef TARGET_ARM - bool isSecondHalfReg(RegRecord* regRec, Interval* interval); + bool isSecondHalfReg(RegRecord* regRec, Interval* interval); RegRecord* getSecondHalfRegRec(RegRecord* regRec); RegRecord* findAnotherHalfRegRec(RegRecord* regRec); - regNumber findAnotherHalfRegNum(regNumber regNum); - bool canSpillDoubleReg(RegRecord* physRegRecord, LsraLocation refLocation); - void unassignDoublePhysReg(RegRecord* doubleRegRecord); + regNumber findAnotherHalfRegNum(regNumber regNum); + bool canSpillDoubleReg(RegRecord* physRegRecord, LsraLocation refLocation); + void unassignDoublePhysReg(RegRecord* doubleRegRecord); #endif - void clearAssignedInterval(RegRecord* reg ARM_ARG(RegisterType regType)); - void updateAssignedInterval(RegRecord* reg, Interval* interval ARM_ARG(RegisterType regType)); - void 
updatePreviousInterval(RegRecord* reg, Interval* interval ARM_ARG(RegisterType regType)); - bool canRestorePreviousInterval(RegRecord* regRec, Interval* assignedInterval); - bool isAssignedToInterval(Interval* interval, RegRecord* regRec); - bool isRefPositionActive(RefPosition* refPosition, LsraLocation refLocation); - bool canSpillReg(RegRecord* physRegRecord, LsraLocation refLocation); + void clearAssignedInterval(RegRecord* reg ARM_ARG(RegisterType regType)); + void updateAssignedInterval(RegRecord* reg, Interval* interval ARM_ARG(RegisterType regType)); + void updatePreviousInterval(RegRecord* reg, Interval* interval ARM_ARG(RegisterType regType)); + bool canRestorePreviousInterval(RegRecord* regRec, Interval* assignedInterval); + bool isAssignedToInterval(Interval* interval, RegRecord* regRec); + bool isRefPositionActive(RefPosition* refPosition, LsraLocation refLocation); + bool canSpillReg(RegRecord* physRegRecord, LsraLocation refLocation); weight_t getSpillWeight(RegRecord* physRegRecord); // insert refpositions representing prolog zero-inits which will be added later @@ -1214,13 +1256,13 @@ class LinearScan : public LinearScanInterface void spillGCRefs(RefPosition* killRefPosition); -/***************************************************************************** -* Register selection -****************************************************************************/ + /***************************************************************************** + * Register selection + ****************************************************************************/ #if defined(TARGET_ARM64) - bool canAssignNextConsecutiveRegisters(RefPosition* firstRefPosition, regNumber firstRegAssigned); - void assignConsecutiveRegisters(RefPosition* firstRefPosition, regNumber firstRegAssigned); + bool canAssignNextConsecutiveRegisters(RefPosition* firstRefPosition, regNumber firstRegAssigned); + void assignConsecutiveRegisters(RefPosition* firstRefPosition, regNumber firstRegAssigned); regMaskTP getConsecutiveCandidates(regMaskTP candidates, RefPosition* refPosition, regMaskTP* busyCandidates); regMaskTP filterConsecutiveCandidates(regMaskTP candidates, unsigned int registersNeeded, @@ -1258,10 +1300,10 @@ class LinearScan : public LinearScanInterface // Perform register selection and update currentInterval or refPosition template - FORCEINLINE regMaskTP select(Interval* currentInterval, + FORCEINLINE regMaskTP select(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)); - FORCEINLINE regMaskTP selectMinimal(Interval* currentInterval, + FORCEINLINE regMaskTP selectMinimal(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)); // If the register is from unassigned set such that it was not already @@ -1344,14 +1386,14 @@ class LinearScan : public LinearScanInterface return (prevRegBit & preferences) == foundRegBit; } - bool applySelection(int selectionScore, regMaskTP selectionCandidates); - bool applySingleRegSelection(int selectionScore, regMaskTP selectionCandidate); + bool applySelection(int selectionScore, regMaskTP selectionCandidates); + bool applySingleRegSelection(int selectionScore, regMaskTP selectionCandidate); FORCEINLINE void calculateCoversSets(); FORCEINLINE void calculateUnassignedSets(); FORCEINLINE void reset(Interval* interval, RefPosition* refPosition); FORCEINLINE void resetMinimal(Interval* interval, RefPosition* refPosition); -#define REG_SEL_DEF(stat, value, shortname, orderSeqId) FORCEINLINE void try_##stat(); 
+#define REG_SEL_DEF(stat, value, shortname, orderSeqId) FORCEINLINE void try_##stat(); #define BUSY_REG_SEL_DEF(stat, value, shortname, orderSeqId) REG_SEL_DEF(stat, value, shortname, orderSeqId) #include "lsra_score.h" }; @@ -1379,8 +1421,8 @@ class LinearScan : public LinearScanInterface unsigned toBBNum; }; typedef JitHashTable, SplitEdgeInfo> SplitBBNumToTargetBBNumMap; - SplitBBNumToTargetBBNumMap* splitBBNumToTargetBBNumMap; - SplitBBNumToTargetBBNumMap* getSplitBBNumToTargetBBNumMap() + SplitBBNumToTargetBBNumMap* splitBBNumToTargetBBNumMap; + SplitBBNumToTargetBBNumMap* getSplitBBNumToTargetBBNumMap() { if (splitBBNumToTargetBBNumMap == nullptr) { @@ -1391,13 +1433,13 @@ class LinearScan : public LinearScanInterface } SplitEdgeInfo getSplitEdgeInfo(unsigned int bbNum); - void initVarRegMaps(); - void setInVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg); - void setOutVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg); + void initVarRegMaps(); + void setInVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg); + void setOutVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg); VarToRegMap getInVarToRegMap(unsigned int bbNum); VarToRegMap getOutVarToRegMap(unsigned int bbNum); - void setVarReg(VarToRegMap map, unsigned int trackedVarIndex, regNumber reg); - regNumber getVarReg(VarToRegMap map, unsigned int trackedVarIndex); + void setVarReg(VarToRegMap map, unsigned int trackedVarIndex, regNumber reg); + regNumber getVarReg(VarToRegMap map, unsigned int trackedVarIndex); // Initialize the incoming VarToRegMap to the given map values (generally a predecessor of // the block) VarToRegMap setInVarToRegMap(unsigned int bbNum, VarToRegMap srcVarToRegMap); @@ -1410,8 +1452,8 @@ class LinearScan : public LinearScanInterface #ifdef TARGET_ARM64 typedef JitHashTable, RefPosition*> NextConsecutiveRefPositionsMap; - NextConsecutiveRefPositionsMap* nextConsecutiveRefPositionMap; - NextConsecutiveRefPositionsMap* getNextConsecutiveRefPositionsMap() + NextConsecutiveRefPositionsMap* nextConsecutiveRefPositionMap; + NextConsecutiveRefPositionsMap* getNextConsecutiveRefPositionsMap() { if (nextConsecutiveRefPositionMap == nullptr) { @@ -1439,7 +1481,12 @@ class LinearScan : public LinearScanInterface // - In LSRA_DUMP_POST, which is after register allocation, the registers are // shown. 
- enum LsraTupleDumpMode{LSRA_DUMP_PRE, LSRA_DUMP_REFPOS, LSRA_DUMP_POST}; + enum LsraTupleDumpMode + { + LSRA_DUMP_PRE, + LSRA_DUMP_REFPOS, + LSRA_DUMP_POST + }; void lsraGetOperandString(GenTree* tree, LsraTupleDumpMode mode, char* operandString, unsigned operandStringLength); void lsraDispNode(GenTree* tree, LsraTupleDumpMode mode, bool hasDest); void DumpOperandDefs( @@ -1477,7 +1524,7 @@ class LinearScan : public LinearScanInterface regMaskTP lastDumpedRegisters; regMaskTP registersToDump; int lastUsedRegNumIndex; - bool shouldDumpReg(regNumber regNum) + bool shouldDumpReg(regNumber regNum) { return (registersToDump & genRegMask(regNum)) != 0; } @@ -1498,29 +1545,54 @@ class LinearScan : public LinearScanInterface void dumpIntervalName(Interval* interval); // Events during the allocation phase that cause some dump output - enum LsraDumpEvent{ + enum LsraDumpEvent + { // Conflicting def/use - LSRA_EVENT_DEFUSE_CONFLICT, LSRA_EVENT_DEFUSE_FIXED_DELAY_USE, LSRA_EVENT_DEFUSE_CASE1, LSRA_EVENT_DEFUSE_CASE2, - LSRA_EVENT_DEFUSE_CASE3, LSRA_EVENT_DEFUSE_CASE4, LSRA_EVENT_DEFUSE_CASE5, LSRA_EVENT_DEFUSE_CASE6, + LSRA_EVENT_DEFUSE_CONFLICT, + LSRA_EVENT_DEFUSE_FIXED_DELAY_USE, + LSRA_EVENT_DEFUSE_CASE1, + LSRA_EVENT_DEFUSE_CASE2, + LSRA_EVENT_DEFUSE_CASE3, + LSRA_EVENT_DEFUSE_CASE4, + LSRA_EVENT_DEFUSE_CASE5, + LSRA_EVENT_DEFUSE_CASE6, // Spilling - LSRA_EVENT_SPILL, LSRA_EVENT_SPILL_EXTENDED_LIFETIME, LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL, - LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL, LSRA_EVENT_DONE_KILL_GC_REFS, LSRA_EVENT_NO_GC_KILLS, + LSRA_EVENT_SPILL, + LSRA_EVENT_SPILL_EXTENDED_LIFETIME, + LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL, + LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL, + LSRA_EVENT_DONE_KILL_GC_REFS, + LSRA_EVENT_NO_GC_KILLS, // Block boundaries - LSRA_EVENT_START_BB, LSRA_EVENT_END_BB, + LSRA_EVENT_START_BB, + LSRA_EVENT_END_BB, // Miscellaneous - LSRA_EVENT_FREE_REGS, LSRA_EVENT_UPPER_VECTOR_SAVE, LSRA_EVENT_UPPER_VECTOR_RESTORE, + LSRA_EVENT_FREE_REGS, + LSRA_EVENT_UPPER_VECTOR_SAVE, + LSRA_EVENT_UPPER_VECTOR_RESTORE, // Characteristics of the current RefPosition LSRA_EVENT_INCREMENT_RANGE_END, // ??? 
- LSRA_EVENT_LAST_USE, LSRA_EVENT_LAST_USE_DELAYED, LSRA_EVENT_NEEDS_NEW_REG, + LSRA_EVENT_LAST_USE, + LSRA_EVENT_LAST_USE_DELAYED, + LSRA_EVENT_NEEDS_NEW_REG, // Allocation decisions - LSRA_EVENT_FIXED_REG, LSRA_EVENT_EXP_USE, LSRA_EVENT_ZERO_REF, LSRA_EVENT_NO_ENTRY_REG_ALLOCATED, - LSRA_EVENT_KEPT_ALLOCATION, LSRA_EVENT_COPY_REG, LSRA_EVENT_MOVE_REG, LSRA_EVENT_ALLOC_REG, - LSRA_EVENT_NO_REG_ALLOCATED, LSRA_EVENT_RELOAD, LSRA_EVENT_SPECIAL_PUTARG, LSRA_EVENT_REUSE_REG, + LSRA_EVENT_FIXED_REG, + LSRA_EVENT_EXP_USE, + LSRA_EVENT_ZERO_REF, + LSRA_EVENT_NO_ENTRY_REG_ALLOCATED, + LSRA_EVENT_KEPT_ALLOCATION, + LSRA_EVENT_COPY_REG, + LSRA_EVENT_MOVE_REG, + LSRA_EVENT_ALLOC_REG, + LSRA_EVENT_NO_REG_ALLOCATED, + LSRA_EVENT_RELOAD, + LSRA_EVENT_SPECIAL_PUTARG, + LSRA_EVENT_REUSE_REG, }; void dumpLsraAllocationEvent(LsraDumpEvent event, Interval* interval = nullptr, @@ -1533,14 +1605,14 @@ class LinearScan : public LinearScanInterface #if TRACK_LSRA_STATS unsigned regCandidateVarCount; - void updateLsraStat(LsraStat stat, unsigned currentBBNum); - void dumpLsraStats(FILE* file); + void updateLsraStat(LsraStat stat, unsigned currentBBNum); + void dumpLsraStats(FILE* file); LsraStat getLsraStatFromScore(RegisterScore registerScore); LsraStat firstRegSelStat = STAT_FREE; public: - virtual void dumpLsraStatsCsv(FILE* file); - virtual void dumpLsraStatsSummary(FILE* file); + virtual void dumpLsraStatsCsv(FILE* file); + virtual void dumpLsraStatsSummary(FILE* file); static const char* getStatName(unsigned stat); #define INTRACK_STATS(x) x @@ -1576,7 +1648,7 @@ class LinearScan : public LinearScanInterface // Set of blocks that have been visited. BlockSet bbVisitedSet; - void markBlockVisited(BasicBlock* block) + void markBlockVisited(BasicBlock* block) { BlockSetOps::AddElemD(compiler, bbVisitedSet, block->bbNum); } @@ -1603,17 +1675,17 @@ class LinearScan : public LinearScanInterface BasicBlock** blockSequence; // The verifiedAllBBs flag indicates whether we have verified that all BBs have been // included in the blockSeuqence above, during setBlockSequence(). - bool verifiedAllBBs; - void setBlockSequence(); - int compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights); + bool verifiedAllBBs; + void setBlockSequence(); + int compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights); BasicBlockList* blockSequenceWorkList; bool blockSequencingDone; #ifdef DEBUG // LSRA must not change number of blocks and blockEpoch that it initializes at start. unsigned blockEpoch; #endif // DEBUG - void addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block, BlockSet& predSet); - void removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode); + void addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block, BlockSet& predSet); + void removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode); BasicBlock* getNextCandidateFromWorkList(); // Indicates whether the allocation pass has been completed. @@ -1714,7 +1786,7 @@ class LinearScan : public LinearScanInterface #if defined(TARGET_AMD64) static const var_types LargeVectorSaveType = TYP_SIMD16; #elif defined(TARGET_ARM64) - static const var_types LargeVectorSaveType = TYP_DOUBLE; + static const var_types LargeVectorSaveType = TYP_DOUBLE; #endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) // Set of large vector (TYP_SIMD32 on AVX) variables. 
VARSET_TP largeVectorVars; @@ -1790,14 +1862,14 @@ class LinearScan : public LinearScanInterface void clearSpillCost(regNumber reg, var_types regType); void updateSpillCost(regNumber reg, Interval* interval); - FORCEINLINE void updateRegsFreeBusyState(RefPosition& refPosition, - regMaskTP regsBusy, - regMaskTP* regsToFree, + FORCEINLINE void updateRegsFreeBusyState(RefPosition& refPosition, + regMaskTP regsBusy, + regMaskTP* regsToFree, regMaskTP* delayRegsToFree DEBUG_ARG(Interval* interval) DEBUG_ARG(regNumber assignedReg)); regMaskTP m_RegistersWithConstants; - void clearConstantReg(regNumber reg, var_types regType) + void clearConstantReg(regNumber reg, var_types regType) { m_RegistersWithConstants &= ~getRegMask(reg, regType); } @@ -1815,7 +1887,7 @@ class LinearScan : public LinearScanInterface regMaskTP fixedRegs; LsraLocation nextFixedRef[REG_COUNT]; - void updateNextFixedRef(RegRecord* regRecord, RefPosition* nextRefPosition); + void updateNextFixedRef(RegRecord* regRecord, RefPosition* nextRefPosition); LsraLocation getNextFixedRef(regNumber regNum, var_types regType) { LsraLocation loc = nextFixedRef[regNum]; @@ -1932,11 +2004,11 @@ class LinearScan : public LinearScanInterface bool checkContainedOrCandidateLclVar(GenTreeLclVar* lclNode); RefPosition* BuildUse(GenTree* operand, regMaskTP candidates = RBM_NONE, int multiRegIdx = 0); - void setDelayFree(RefPosition* use); - int BuildBinaryUses(GenTreeOp* node, regMaskTP candidates = RBM_NONE); - int BuildCastUses(GenTreeCast* cast, regMaskTP candidates); + void setDelayFree(RefPosition* use); + int BuildBinaryUses(GenTreeOp* node, regMaskTP candidates = RBM_NONE); + int BuildCastUses(GenTreeCast* cast, regMaskTP candidates); #ifdef TARGET_XARCH - int BuildRMWUses(GenTree* node, GenTree* op1, GenTree* op2, regMaskTP candidates = RBM_NONE); + int BuildRMWUses(GenTree* node, GenTree* op1, GenTree* op2, regMaskTP candidates = RBM_NONE); inline regMaskTP BuildEvexIncompatibleMask(GenTree* tree); #endif // !TARGET_XARCH int BuildSelect(GenTreeOp* select); @@ -1948,19 +2020,19 @@ class LinearScan : public LinearScanInterface void getTgtPrefOperands(GenTree* tree, GenTree* op1, GenTree* op2, bool* prefOp1, bool* prefOp2); bool supportsSpecialPutArg(); - int BuildSimple(GenTree* tree); - int BuildOperandUses(GenTree* node, regMaskTP candidates = RBM_NONE); - void AddDelayFreeUses(RefPosition* refPosition, GenTree* rmwNode); - int BuildDelayFreeUses(GenTree* node, - GenTree* rmwNode = nullptr, - regMaskTP candidates = RBM_NONE, - RefPosition** useRefPosition = nullptr); - int BuildIndirUses(GenTreeIndir* indirTree, regMaskTP candidates = RBM_NONE); - int BuildAddrUses(GenTree* addr, regMaskTP candidates = RBM_NONE); - void HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs); + int BuildSimple(GenTree* tree); + int BuildOperandUses(GenTree* node, regMaskTP candidates = RBM_NONE); + void AddDelayFreeUses(RefPosition* refPosition, GenTree* rmwNode); + int BuildDelayFreeUses(GenTree* node, + GenTree* rmwNode = nullptr, + regMaskTP candidates = RBM_NONE, + RefPosition** useRefPosition = nullptr); + int BuildIndirUses(GenTreeIndir* indirTree, regMaskTP candidates = RBM_NONE); + int BuildAddrUses(GenTree* addr, regMaskTP candidates = RBM_NONE); + void HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs); RefPosition* BuildDef(GenTree* tree, regMaskTP dstCandidates = RBM_NONE, int multiRegIdx = 0); - void BuildDefs(GenTree* tree, int dstCount, regMaskTP dstCandidates = RBM_NONE); - void 
BuildDefsWithKills(GenTree* tree, int dstCount, regMaskTP dstCandidates, regMaskTP killMask); + void BuildDefs(GenTree* tree, int dstCount, regMaskTP dstCandidates = RBM_NONE); + void BuildDefsWithKills(GenTree* tree, int dstCount, regMaskTP dstCandidates, regMaskTP killMask); int BuildReturn(GenTree* tree); #ifdef TARGET_XARCH @@ -1971,24 +2043,24 @@ class LinearScan : public LinearScanInterface #ifdef TARGET_ARM int BuildShiftLongCarry(GenTree* tree); #endif - int BuildPutArgReg(GenTreeUnOp* node); - int BuildCall(GenTreeCall* call); - int BuildCmp(GenTree* tree); - int BuildCmpOperands(GenTree* tree); - int BuildBlockStore(GenTreeBlk* blkNode); - int BuildModDiv(GenTree* tree); - int BuildIntrinsic(GenTree* tree); + int BuildPutArgReg(GenTreeUnOp* node); + int BuildCall(GenTreeCall* call); + int BuildCmp(GenTree* tree); + int BuildCmpOperands(GenTree* tree); + int BuildBlockStore(GenTreeBlk* blkNode); + int BuildModDiv(GenTree* tree); + int BuildIntrinsic(GenTree* tree); void BuildStoreLocDef(GenTreeLclVarCommon* storeLoc, LclVarDsc* varDsc, RefPosition* singleUseRef, int index); - int BuildMultiRegStoreLoc(GenTreeLclVar* storeLoc); - int BuildStoreLoc(GenTreeLclVarCommon* tree); - int BuildIndir(GenTreeIndir* indirTree); - int BuildGCWriteBarrier(GenTree* tree); - int BuildCast(GenTreeCast* cast); + int BuildMultiRegStoreLoc(GenTreeLclVar* storeLoc); + int BuildStoreLoc(GenTreeLclVarCommon* tree); + int BuildIndir(GenTreeIndir* indirTree); + int BuildGCWriteBarrier(GenTree* tree); + int BuildCast(GenTreeCast* cast); #if defined(TARGET_XARCH) // returns true if the tree can use the read-modify-write memory instruction form bool isRMWRegOper(GenTree* tree); - int BuildMul(GenTree* tree); + int BuildMul(GenTree* tree); void SetContainsAVXFlags(unsigned sizeOfSIMDVector = 0); #endif // defined(TARGET_XARCH) @@ -2017,7 +2089,7 @@ class LinearScan : public LinearScanInterface #ifdef FEATURE_HW_INTRINSICS int BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree, int* pDstCount); #ifdef TARGET_ARM64 - int BuildConsecutiveRegistersForUse(GenTree* treeNode, GenTree* rmwNode = nullptr); + int BuildConsecutiveRegistersForUse(GenTree* treeNode, GenTree* rmwNode = nullptr); void BuildConsecutiveRegistersForDef(GenTree* treeNode, int fieldCount); #endif // TARGET_ARM64 #endif // FEATURE_HW_INTRINSICS @@ -2487,8 +2559,8 @@ class RefPosition // we need an explicit move. // - copyReg and moveReg must not exist with each other. - unsigned char reload : 1; - unsigned char spillAfter : 1; + unsigned char reload : 1; + unsigned char spillAfter : 1; unsigned char singleDefSpill : 1; unsigned char writeThru : 1; // true if this var is defined in a register and also spilled. spillAfter must NOT be // set. 
@@ -2496,7 +2568,7 @@ class RefPosition unsigned char copyReg : 1; unsigned char moveReg : 1; // true if this var is moved to a new register - unsigned char isPhysRegRef : 1; // true if 'referent' points of a RegRecord, false if it points to an Interval + unsigned char isPhysRegRef : 1; // true if 'referent' points of a RegRecord, false if it points to an Interval unsigned char isFixedRegRef : 1; unsigned char isLocalDefUse : 1; @@ -2538,9 +2610,9 @@ class RefPosition GenTree* buildNode; #endif // DEBUG - RefPosition(unsigned int bbNum, - LsraLocation nodeLocation, - GenTree* treeNode, + RefPosition(unsigned int bbNum, + LsraLocation nodeLocation, + GenTree* treeNode, RefType refType DEBUG_ARG(GenTree* buildNode)) : referent(nullptr) , nextRefPosition(nullptr) diff --git a/src/coreclr/jit/lsraarmarch.cpp b/src/coreclr/jit/lsraarmarch.cpp index 4738fcf33725e..c2b8b74406584 100644 --- a/src/coreclr/jit/lsraarmarch.cpp +++ b/src/coreclr/jit/lsraarmarch.cpp @@ -212,7 +212,7 @@ int LinearScan::BuildCall(GenTreeCall* call) RegisterType registerType = call->TypeGet(); -// Set destination candidates for return value of the call. + // Set destination candidates for return value of the call. #ifdef TARGET_ARM if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME)) @@ -224,22 +224,22 @@ int LinearScan::BuildCall(GenTreeCall* call) else #endif // TARGET_ARM if (hasMultiRegRetVal) - { - assert(retTypeDesc != nullptr); - dstCandidates = retTypeDesc->GetABIReturnRegs(call->GetUnmanagedCallConv()); - } - else if (varTypeUsesFloatArgReg(registerType)) - { - dstCandidates = RBM_FLOATRET; - } - else if (registerType == TYP_LONG) - { - dstCandidates = RBM_LNGRET; - } - else - { - dstCandidates = RBM_INTRET; - } + { + assert(retTypeDesc != nullptr); + dstCandidates = retTypeDesc->GetABIReturnRegs(call->GetUnmanagedCallConv()); + } + else if (varTypeUsesFloatArgReg(registerType)) + { + dstCandidates = RBM_FLOATRET; + } + else if (registerType == TYP_LONG) + { + dstCandidates = RBM_LNGRET; + } + else + { + dstCandidates = RBM_INTRET; + } // First, count reg args // Each register argument corresponds to one source. diff --git a/src/coreclr/jit/lsrabuild.cpp b/src/coreclr/jit/lsrabuild.cpp index 0eba0d6b19bdc..4f3d39c76d3ad 100644 --- a/src/coreclr/jit/lsrabuild.cpp +++ b/src/coreclr/jit/lsrabuild.cpp @@ -78,7 +78,8 @@ RefInfoListNode* RefInfoList::removeListNode(GenTree* node, unsigned multiRegIdx // compiler - The compiler context. // preallocate - The number of nodes to preallocate. // -RefInfoListNodePool::RefInfoListNodePool(Compiler* compiler, unsigned preallocate) : m_compiler(compiler) +RefInfoListNodePool::RefInfoListNodePool(Compiler* compiler, unsigned preallocate) + : m_compiler(compiler) { if (preallocate > 0) { @@ -1155,9 +1156,9 @@ bool LinearScan::buildKillPositionsForNode(GenTree* tree, LsraLocation currentLo #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE if (varTypeIsFloating(varDsc) && !VarSetOps::IsMember(compiler, fpCalleeSaveCandidateVars, varIndex)) - { - continue; - } + { + continue; + } Interval* interval = getIntervalForLocalVar(varIndex); const bool isCallKill = ((killMask == RBM_INT_CALLEE_TRASH) || (killMask == RBM_CALLEE_TRASH)); @@ -2217,7 +2218,7 @@ template void LinearScan::buildIntervals(); // which we will do register allocation. 
// template -void LinearScan::buildIntervals() +void LinearScan::buildIntervals() { BasicBlock* block; @@ -2473,7 +2474,7 @@ void LinearScan::buildIntervals() assert(isCandidateVar(varDsc)); Interval* interval = getIntervalForLocalVar(varIndex); RefPosition* pos = newRefPosition(interval, currentLoc, RefTypeDummyDef, nullptr, - allRegs(interval->registerType)); + allRegs(interval->registerType)); pos->setRegOptional(true); } JITDUMP("Finished creating dummy definitions\n\n"); @@ -3691,7 +3692,7 @@ void LinearScan::BuildStoreLocDef(GenTreeLclVarCommon* storeLoc, defCandidates = allRegs(type); } #else - defCandidates = allRegs(type); + defCandidates = allRegs(type); #endif // TARGET_X86 RefPosition* def = newRefPosition(varDefInterval, currentLoc + 1, RefTypeDef, storeLoc, defCandidates, index); @@ -3965,114 +3966,114 @@ int LinearScan::BuildReturn(GenTree* tree) else #endif // !defined(TARGET_64BIT) if ((tree->TypeGet() != TYP_VOID) && !op1->isContained()) - { - regMaskTP useCandidates = RBM_NONE; + { + regMaskTP useCandidates = RBM_NONE; #if FEATURE_MULTIREG_RET #ifdef TARGET_ARM64 - if (varTypeIsSIMD(tree) && !op1->IsMultiRegLclVar()) - { - BuildUse(op1, RBM_DOUBLERET); - return 1; - } -#endif // TARGET_ARM64 - - if (varTypeIsStruct(tree)) - { - // op1 has to be either a lclvar or a multi-reg returning call - if ((op1->OperGet() == GT_LCL_VAR) && !op1->IsMultiRegLclVar()) + if (varTypeIsSIMD(tree) && !op1->IsMultiRegLclVar()) { - BuildUse(op1, useCandidates); + BuildUse(op1, RBM_DOUBLERET); + return 1; } - else +#endif // TARGET_ARM64 + + if (varTypeIsStruct(tree)) { - noway_assert(op1->IsMultiRegCall() || (op1->IsMultiRegLclVar() && compiler->lvaEnregMultiRegVars)); + // op1 has to be either a lclvar or a multi-reg returning call + if ((op1->OperGet() == GT_LCL_VAR) && !op1->IsMultiRegLclVar()) + { + BuildUse(op1, useCandidates); + } + else + { + noway_assert(op1->IsMultiRegCall() || (op1->IsMultiRegLclVar() && compiler->lvaEnregMultiRegVars)); - ReturnTypeDesc retTypeDesc = compiler->compRetTypeDesc; - const int srcCount = retTypeDesc.GetReturnRegCount(); - assert(op1->GetMultiRegCount(compiler) == static_cast(srcCount)); + ReturnTypeDesc retTypeDesc = compiler->compRetTypeDesc; + const int srcCount = retTypeDesc.GetReturnRegCount(); + assert(op1->GetMultiRegCount(compiler) == static_cast(srcCount)); - // For any source that's coming from a different register file, we need to ensure that - // we reserve the specific ABI register we need. - bool hasMismatchedRegTypes = false; - if (op1->IsMultiRegLclVar()) - { - for (int i = 0; i < srcCount; i++) + // For any source that's coming from a different register file, we need to ensure that + // we reserve the specific ABI register we need. 
+ bool hasMismatchedRegTypes = false; + if (op1->IsMultiRegLclVar()) { - RegisterType srcType = regType(op1->AsLclVar()->GetFieldTypeByIndex(compiler, i)); - RegisterType dstType = regType(retTypeDesc.GetReturnRegType(i)); - if (srcType != dstType) + for (int i = 0; i < srcCount; i++) { - hasMismatchedRegTypes = true; - regMaskTP dstRegMask = - genRegMask(retTypeDesc.GetABIReturnReg(i, compiler->info.compCallConv)); - - if (varTypeUsesIntReg(dstType)) + RegisterType srcType = regType(op1->AsLclVar()->GetFieldTypeByIndex(compiler, i)); + RegisterType dstType = regType(retTypeDesc.GetReturnRegType(i)); + if (srcType != dstType) { - buildInternalIntRegisterDefForNode(tree, dstRegMask); - } + hasMismatchedRegTypes = true; + regMaskTP dstRegMask = + genRegMask(retTypeDesc.GetABIReturnReg(i, compiler->info.compCallConv)); + + if (varTypeUsesIntReg(dstType)) + { + buildInternalIntRegisterDefForNode(tree, dstRegMask); + } #if defined(TARGET_XARCH) && defined(FEATURE_SIMD) - else if (varTypeUsesMaskReg(dstType)) - { - buildInternalMaskRegisterDefForNode(tree, dstRegMask); - } + else if (varTypeUsesMaskReg(dstType)) + { + buildInternalMaskRegisterDefForNode(tree, dstRegMask); + } #endif // TARGET_XARCH && FEATURE_SIMD - else - { - assert(varTypeUsesFloatReg(dstType)); - buildInternalFloatRegisterDefForNode(tree, dstRegMask); + else + { + assert(varTypeUsesFloatReg(dstType)); + buildInternalFloatRegisterDefForNode(tree, dstRegMask); + } } } } - } - for (int i = 0; i < srcCount; i++) - { - // We will build uses of the type of the operand registers/fields, and the codegen - // for return will move as needed. - if (!hasMismatchedRegTypes || (regType(op1->AsLclVar()->GetFieldTypeByIndex(compiler, i)) == - regType(retTypeDesc.GetReturnRegType(i)))) + for (int i = 0; i < srcCount; i++) { - BuildUse(op1, genRegMask(retTypeDesc.GetABIReturnReg(i, compiler->info.compCallConv)), i); + // We will build uses of the type of the operand registers/fields, and the codegen + // for return will move as needed. + if (!hasMismatchedRegTypes || (regType(op1->AsLclVar()->GetFieldTypeByIndex(compiler, i)) == + regType(retTypeDesc.GetReturnRegType(i)))) + { + BuildUse(op1, genRegMask(retTypeDesc.GetABIReturnReg(i, compiler->info.compCallConv)), i); + } + else + { + BuildUse(op1, RBM_NONE, i); + } } - else + if (hasMismatchedRegTypes) { - BuildUse(op1, RBM_NONE, i); + buildInternalRegisterUses(); } + return srcCount; } - if (hasMismatchedRegTypes) - { - buildInternalRegisterUses(); - } - return srcCount; } - } - else + else #endif // FEATURE_MULTIREG_RET - { - // Non-struct type return - determine useCandidates - switch (tree->TypeGet()) { - case TYP_VOID: - useCandidates = RBM_NONE; - break; - case TYP_FLOAT: - useCandidates = RBM_FLOATRET; - break; - case TYP_DOUBLE: - // We ONLY want the valid double register in the RBM_DOUBLERET mask. - useCandidates = (RBM_DOUBLERET & RBM_ALLDOUBLE); - break; - case TYP_LONG: - useCandidates = RBM_LNGRET; - break; - default: - useCandidates = RBM_INTRET; - break; + // Non-struct type return - determine useCandidates + switch (tree->TypeGet()) + { + case TYP_VOID: + useCandidates = RBM_NONE; + break; + case TYP_FLOAT: + useCandidates = RBM_FLOATRET; + break; + case TYP_DOUBLE: + // We ONLY want the valid double register in the RBM_DOUBLERET mask. 
+ useCandidates = (RBM_DOUBLERET & RBM_ALLDOUBLE); + break; + case TYP_LONG: + useCandidates = RBM_LNGRET; + break; + default: + useCandidates = RBM_INTRET; + break; + } + BuildUse(op1, useCandidates); + return 1; } - BuildUse(op1, useCandidates); - return 1; } - } // No kills or defs. return 0; diff --git a/src/coreclr/jit/lsraxarch.cpp b/src/coreclr/jit/lsraxarch.cpp index 1e7935ee5a215..ad7d25709ee30 100644 --- a/src/coreclr/jit/lsraxarch.cpp +++ b/src/coreclr/jit/lsraxarch.cpp @@ -1182,33 +1182,33 @@ int LinearScan::BuildCall(GenTreeCall* call) else #endif // TARGET_X86 if (hasMultiRegRetVal) - { - assert(retTypeDesc != nullptr); - dstCandidates = retTypeDesc->GetABIReturnRegs(call->GetUnmanagedCallConv()); - assert((int)genCountBits(dstCandidates) == dstCount); - } - else if (varTypeUsesFloatReg(registerType)) - { + { + assert(retTypeDesc != nullptr); + dstCandidates = retTypeDesc->GetABIReturnRegs(call->GetUnmanagedCallConv()); + assert((int)genCountBits(dstCandidates) == dstCount); + } + else if (varTypeUsesFloatReg(registerType)) + { #ifdef TARGET_X86 - // The return value will be on the X87 stack, and we will need to move it. - dstCandidates = allRegs(registerType); + // The return value will be on the X87 stack, and we will need to move it. + dstCandidates = allRegs(registerType); #else // !TARGET_X86 dstCandidates = RBM_FLOATRET; #endif // !TARGET_X86 - } - else - { - assert(varTypeUsesIntReg(registerType)); - - if (registerType == TYP_LONG) - { - dstCandidates = RBM_LNGRET; } else { - dstCandidates = RBM_INTRET; + assert(varTypeUsesIntReg(registerType)); + + if (registerType == TYP_LONG) + { + dstCandidates = RBM_LNGRET; + } + else + { + dstCandidates = RBM_INTRET; + } } - } // number of args to a call = // callRegArgs + (callargs - placeholders, setup, etc) diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index e7f202e21fa73..2144869b1ce40 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -344,7 +344,7 @@ GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree) // x86: src = float, dst = uint32/int64/uint64 or overflow conversion. && (tree->gtOverflow() || varTypeIsLong(dstType) || (dstType == TYP_UINT)) #endif - ) + ) { oper = gtNewCastNode(TYP_DOUBLE, oper, false, TYP_DOUBLE); } @@ -436,7 +436,7 @@ GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree) #ifdef TARGET_ARM && !varTypeIsLong(oper->AsCast()->CastOp()) #endif - ) + ) { oper->gtType = TYP_FLOAT; oper->CastToType() = TYP_FLOAT; @@ -2109,8 +2109,8 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call unsigned numArgs = CountArgs(); #ifdef TARGET_X86 -// Compute the maximum number of arguments that can be passed in registers. -// For X86 we handle the varargs and unmanaged calling conventions + // Compute the maximum number of arguments that can be passed in registers. 
+ // For X86 we handle the varargs and unmanaged calling conventions #ifndef UNIX_X86_ABI if (call->gtFlags & GTF_CALL_POP_ARGS) @@ -2513,7 +2513,7 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call #elif defined(TARGET_X86) || (isStructArg && comp->isTrivialPointerSizedStruct(argSigClass)) #endif - ) + ) { #ifdef TARGET_ARM if (passUsingFloatRegs) @@ -2936,7 +2936,7 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call // we skip the corresponding floating point register argument intArgRegNum = min(intArgRegNum + size, (unsigned)MAX_REG_ARG); #endif // WINDOWS_AMD64_ABI - // No supported architecture supports partial structs using float registers. + // No supported architecture supports partial structs using float registers. assert(fltArgRegNum <= MAX_FLOAT_REG_ARG); } else @@ -3242,12 +3242,12 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) assert(arg.AbiInfo.GetStackSlotsNumber() == 1); makeOutArgCopy = true; #else // UNIX_AMD64_ABI - // On Unix, structs are always passed by value. - // We only need a copy if we have one of the following: - // - The sizes don't match for a non-lclVar argument. - // - We have a known struct type (e.g. SIMD) that requires multiple registers. - // TODO-Amd64-Unix-Throughput: We don't need to keep the structDesc in the argEntry if it's not - // actually passed in registers. + // On Unix, structs are always passed by value. + // We only need a copy if we have one of the following: + // - The sizes don't match for a non-lclVar argument. + // - We have a known struct type (e.g. SIMD) that requires multiple registers. + // TODO-Amd64-Unix-Throughput: We don't need to keep the structDesc in the argEntry if it's not + // actually passed in registers. if (arg.AbiInfo.IsPassedInRegisters()) { if (argObj->OperIs(GT_BLK)) @@ -3332,9 +3332,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) } #ifdef TARGET_AMD64 else if (!argObj->OperIs(GT_LCL_VAR) || !argObj->TypeIs(TYP_SIMD8)) // Handled by lowering. -#else // !TARGET_ARM64 +#else // !TARGET_ARM64 else -#endif // !TARGET_ARM64 +#endif // !TARGET_ARM64 { // TODO-CQ: perform this transformation in lowering instead of here and // avoid marking enregisterable structs DNER. @@ -3965,18 +3965,18 @@ void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, CallArg* arg) if (!opts.MinOpts()) { found = ForEachHbvBitSet(*fgAvailableOutgoingArgTemps, [&](indexType lclNum) { - LclVarDsc* varDsc = lvaGetDesc((unsigned)lclNum); - ClassLayout* layout = varDsc->GetLayout(); - if (!layout->IsBlockLayout() && (layout->GetClassHandle() == copyBlkClass)) - { - tmp = (unsigned)lclNum; - JITDUMP("reusing outgoing struct arg V%02u\n", tmp); - fgAvailableOutgoingArgTemps->clearBit(lclNum); - return HbvWalk::Abort; - } + LclVarDsc* varDsc = lvaGetDesc((unsigned)lclNum); + ClassLayout* layout = varDsc->GetLayout(); + if (!layout->IsBlockLayout() && (layout->GetClassHandle() == copyBlkClass)) + { + tmp = (unsigned)lclNum; + JITDUMP("reusing outgoing struct arg V%02u\n", tmp); + fgAvailableOutgoingArgTemps->clearBit(lclNum); + return HbvWalk::Abort; + } - return HbvWalk::Continue; - }) == HbvWalk::Abort; + return HbvWalk::Continue; + }) == HbvWalk::Abort; } // Create the CopyBlk tree and insert it. 
@@ -4019,7 +4019,7 @@ void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, CallArg* arg) GenTree* argNode = call->gtArgs.MakeTmpArgNode(this, arg); // Change the expression to "(tmp=val),tmp" - argNode = gtNewOperNode(GT_COMMA, argNode->TypeGet(), copyBlk, argNode); + argNode = gtNewOperNode(GT_COMMA, argNode->TypeGet(), copyBlk, argNode); #endif // !FEATURE_FIXED_OUT_ARGS @@ -4520,7 +4520,7 @@ GenTree* Compiler::fgMorphLeafLocal(GenTreeLclVarCommon* lclNode) #if FEATURE_IMPLICIT_BYREFS || varDsc->lvIsLastUseCopyOmissionCandidate #endif - ) + ) { lclNode->gtFlags |= GTF_GLOB_REF; } @@ -4594,7 +4594,7 @@ GenTree* Compiler::fgMorphExpandStackArgForVarArgs(GenTreeLclVarCommon* lclNode) { GenTree* data = lclNode->Data(); argNode = lclNode->TypeIs(TYP_STRUCT) ? gtNewStoreBlkNode(lclNode->GetLayout(this), argAddr, data) - : gtNewStoreIndNode(lclNode->TypeGet(), argAddr, data)->AsIndir(); + : gtNewStoreIndNode(lclNode->TypeGet(), argAddr, data)->AsIndir(); } else if (lclNode->OperIsLocalRead()) { @@ -6317,7 +6317,10 @@ void Compiler::fgValidateIRForTailCall(GenTreeCall* call) }; TailCallIRValidatorVisitor(Compiler* comp, GenTreeCall* tailcall) - : GenTreeVisitor(comp), m_tailcall(tailcall), m_lclNum(BAD_VAR_NUM), m_active(false) + : GenTreeVisitor(comp) + , m_tailcall(tailcall) + , m_lclNum(BAD_VAR_NUM) + , m_active(false) { } @@ -7903,7 +7906,7 @@ GenTree* Compiler::fgExpandVirtualVtableCallTarget(GenTreeCall* call) // [tmp + vtabOffsOfIndirection] GenTree* tmpTree1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL), gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL)); - tmpTree1 = gtNewIndir(TYP_I_IMPL, tmpTree1, GTF_IND_NONFAULTING | GTF_IND_INVARIANT); + tmpTree1 = gtNewIndir(TYP_I_IMPL, tmpTree1, GTF_IND_NONFAULTING | GTF_IND_INVARIANT); // var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection GenTree* tmpTree2 = @@ -8321,7 +8324,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA #if FEATURE_IMPLICIT_BYREFS || lclDsc->lvIsLastUseCopyOmissionCandidate #endif - ) + ) { tree->AddAllEffectsFlags(GTF_GLOB_REF); } @@ -8566,8 +8569,8 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA // Note for TARGET_ARMARCH we don't have a remainder instruction, so we don't do this optimization // #else // TARGET_XARCH - // If this is an unsigned long mod with a constant divisor, - // then don't morph to a helper call - it can be done faster inline using idiv. + // If this is an unsigned long mod with a constant divisor, + // then don't morph to a helper call - it can be done faster inline using idiv. noway_assert(op2); if ((typ == TYP_LONG) && opts.OptimizationEnabled()) @@ -11545,8 +11548,8 @@ GenTree* Compiler::fgMorphRetInd(GenTreeUnOp* ret) #if defined(TARGET_64BIT) bool canFold = (indSize == lclVarSize); #else // !TARGET_64BIT - // TODO: improve 32 bit targets handling for LONG returns if necessary, nowadays we do not support `BITCAST - // long<->double` there. + // TODO: improve 32 bit targets handling for LONG returns if necessary, nowadays we do not support `BITCAST + // long<->double` there. bool canFold = (indSize == lclVarSize) && (lclVarSize <= REGSIZE_BYTES); #endif @@ -12571,12 +12574,12 @@ GenTree* Compiler::fgMorphTree(GenTree* tree, MorphAddrContext* mac) bool optAssertionPropDone = false; -/*------------------------------------------------------------------------- - * fgMorphTree() can potentially replace a tree with another, and the - * caller has to store the return value correctly. 
- * Turn this on to always make copy of "tree" here to shake out - * hidden/unupdated references. - */ + /*------------------------------------------------------------------------- + * fgMorphTree() can potentially replace a tree with another, and the + * caller has to store the return value correctly. + * Turn this on to always make copy of "tree" here to shake out + * hidden/unupdated references. + */ #ifdef DEBUG @@ -13499,7 +13502,8 @@ void Compiler::fgMorphStmtBlockOps(BasicBlock* block, Statement* stmt) DoPostOrder = true, }; - Visitor(Compiler* comp) : GenTreeVisitor(comp) + Visitor(Compiler* comp) + : GenTreeVisitor(comp) { } @@ -14975,7 +14979,8 @@ PhaseStatus Compiler::fgMarkImplicitByRefCopyOmissionCandidates() UseExecutionOrder = true, }; - Visitor(Compiler* comp) : GenTreeVisitor(comp) + Visitor(Compiler* comp) + : GenTreeVisitor(comp) { } @@ -15187,9 +15192,9 @@ PhaseStatus Compiler::fgRetypeImplicitByRefArgs() { // Insert IR that initializes the temp from the parameter. fgEnsureFirstBBisScratch(); - GenTree* addr = gtNewLclvNode(lclNum, TYP_BYREF); - GenTree* data = (varDsc->TypeGet() == TYP_STRUCT) ? gtNewBlkIndir(varDsc->GetLayout(), addr) - : gtNewIndir(varDsc->TypeGet(), addr); + GenTree* addr = gtNewLclvNode(lclNum, TYP_BYREF); + GenTree* data = (varDsc->TypeGet() == TYP_STRUCT) ? gtNewBlkIndir(varDsc->GetLayout(), addr) + : gtNewIndir(varDsc->TypeGet(), addr); GenTree* store = gtNewStoreLclVarNode(newLclNum, data); fgNewStmtAtBeg(fgFirstBB, store); } @@ -15560,7 +15565,10 @@ bool Compiler::fgMorphArrayOpsStmt(MorphMDArrayTempCache* pTempCache, BasicBlock }; MorphMDArrayVisitor(Compiler* compiler, BasicBlock* block, MorphMDArrayTempCache* pTempCache) - : GenTreeVisitor(compiler), m_changed(false), m_block(block), m_pTempCache(pTempCache) + : GenTreeVisitor(compiler) + , m_changed(false) + , m_block(block) + , m_pTempCache(pTempCache) { } diff --git a/src/coreclr/jit/morphblock.cpp b/src/coreclr/jit/morphblock.cpp index d7fa5821eb9db..84c30e64621a2 100644 --- a/src/coreclr/jit/morphblock.cpp +++ b/src/coreclr/jit/morphblock.cpp @@ -92,7 +92,8 @@ GenTree* MorphInitBlockHelper::MorphInitBlock(Compiler* comp, GenTree* tree) // Most class members are initialized via in-class member initializers. // MorphInitBlockHelper::MorphInitBlockHelper(Compiler* comp, GenTree* store, bool initBlock = true) - : m_comp(comp), m_initBlock(initBlock) + : m_comp(comp) + , m_initBlock(initBlock) { assert(store->OperIsStore()); assert((m_initBlock == store->OperIsInitBlkOp()) && (!m_initBlock == store->OperIsCopyBlkOp())); @@ -530,8 +531,8 @@ GenTree* MorphInitBlockHelper::EliminateCommas(GenTree** commaPool) { *commaPool = nullptr; - GenTree* sideEffects = nullptr; - auto addSideEffect = [&sideEffects](GenTree* sideEff) { + GenTree* sideEffects = nullptr; + auto addSideEffect = [&sideEffects](GenTree* sideEff) { sideEff->gtNext = sideEffects; sideEffects = sideEff; }; @@ -645,7 +646,8 @@ GenTree* MorphCopyBlockHelper::MorphCopyBlock(Compiler* comp, GenTree* tree) // Notes: // Most class members are initialized via in-class member initializers. 
// -MorphCopyBlockHelper::MorphCopyBlockHelper(Compiler* comp, GenTree* store) : MorphInitBlockHelper(comp, store, false) +MorphCopyBlockHelper::MorphCopyBlockHelper(Compiler* comp, GenTree* store) + : MorphInitBlockHelper(comp, store, false) { } diff --git a/src/coreclr/jit/objectalloc.cpp b/src/coreclr/jit/objectalloc.cpp index a86039dc33384..0af5f4ba7a992 100644 --- a/src/coreclr/jit/objectalloc.cpp +++ b/src/coreclr/jit/objectalloc.cpp @@ -163,7 +163,8 @@ void ObjectAllocator::MarkEscapingVarsAndBuildConnGraph() }; BuildConnGraphVisitor(ObjectAllocator* allocator) - : GenTreeVisitor(allocator->comp), m_allocator(allocator) + : GenTreeVisitor(allocator->comp) + , m_allocator(allocator) { } @@ -504,8 +505,8 @@ unsigned int ObjectAllocator::MorphAllocObjNodeIntoStackAlloc(GenTreeAllocObj* a assert(m_AnalysisDone); const bool shortLifetime = false; - const unsigned int lclNum = comp->lvaGrabTemp(shortLifetime DEBUGARG("MorphAllocObjNodeIntoStackAlloc temp")); - const int unsafeValueClsCheck = true; + const unsigned int lclNum = comp->lvaGrabTemp(shortLifetime DEBUGARG("MorphAllocObjNodeIntoStackAlloc temp")); + const int unsafeValueClsCheck = true; comp->lvaSetStruct(lclNum, allocObj->gtAllocObjClsHnd, unsafeValueClsCheck); // Initialize the object memory if necessary. @@ -766,7 +767,8 @@ void ObjectAllocator::RewriteUses() }; RewriteUsesVisitor(ObjectAllocator* allocator) - : GenTreeVisitor(allocator->comp), m_allocator(allocator) + : GenTreeVisitor(allocator->comp) + , m_allocator(allocator) { } diff --git a/src/coreclr/jit/objectalloc.h b/src/coreclr/jit/objectalloc.h index f4a56cb4ca39d..07307161da002 100644 --- a/src/coreclr/jit/objectalloc.h +++ b/src/coreclr/jit/objectalloc.h @@ -47,21 +47,21 @@ class ObjectAllocator final : public Phase virtual PhaseStatus DoPhase() override; private: - bool CanAllocateLclVarOnStack(unsigned int lclNum, CORINFO_CLASS_HANDLE clsHnd); - bool CanLclVarEscape(unsigned int lclNum); - void MarkLclVarAsPossiblyStackPointing(unsigned int lclNum); - void MarkLclVarAsDefinitelyStackPointing(unsigned int lclNum); - bool MayLclVarPointToStack(unsigned int lclNum); - bool DoesLclVarPointToStack(unsigned int lclNum); - void DoAnalysis(); - void MarkLclVarAsEscaping(unsigned int lclNum); - void MarkEscapingVarsAndBuildConnGraph(); - void AddConnGraphEdge(unsigned int sourceLclNum, unsigned int targetLclNum); - void ComputeEscapingNodes(BitVecTraits* bitVecTraits, BitVec& escapingNodes); - void ComputeStackObjectPointers(BitVecTraits* bitVecTraits); - bool MorphAllocObjNodes(); - void RewriteUses(); - GenTree* MorphAllocObjNodeIntoHelperCall(GenTreeAllocObj* allocObj); + bool CanAllocateLclVarOnStack(unsigned int lclNum, CORINFO_CLASS_HANDLE clsHnd); + bool CanLclVarEscape(unsigned int lclNum); + void MarkLclVarAsPossiblyStackPointing(unsigned int lclNum); + void MarkLclVarAsDefinitelyStackPointing(unsigned int lclNum); + bool MayLclVarPointToStack(unsigned int lclNum); + bool DoesLclVarPointToStack(unsigned int lclNum); + void DoAnalysis(); + void MarkLclVarAsEscaping(unsigned int lclNum); + void MarkEscapingVarsAndBuildConnGraph(); + void AddConnGraphEdge(unsigned int sourceLclNum, unsigned int targetLclNum); + void ComputeEscapingNodes(BitVecTraits* bitVecTraits, BitVec& escapingNodes); + void ComputeStackObjectPointers(BitVecTraits* bitVecTraits); + bool MorphAllocObjNodes(); + void RewriteUses(); + GenTree* MorphAllocObjNodeIntoHelperCall(GenTreeAllocObj* allocObj); unsigned int MorphAllocObjNodeIntoStackAlloc(GenTreeAllocObj* allocObj, BasicBlock* block, 
Statement* stmt); struct BuildConnGraphVisitorCallbackData; bool CanLclVarEscapeViaParentStack(ArrayStack* parentStack, unsigned int lclNum); diff --git a/src/coreclr/jit/optcse.cpp b/src/coreclr/jit/optcse.cpp index cb17b65035cd5..acaed299aad42 100644 --- a/src/coreclr/jit/optcse.cpp +++ b/src/coreclr/jit/optcse.cpp @@ -204,7 +204,9 @@ void Compiler::optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData) DoPreOrder = true, }; - MaskDataWalker(Compiler* comp, optCSE_MaskData* maskData) : GenTreeVisitor(comp), m_maskData(maskData) + MaskDataWalker(Compiler* comp, optCSE_MaskData* maskData) + : GenTreeVisitor(comp) + , m_maskData(maskData) { } @@ -396,7 +398,9 @@ void CSEdsc::ComputeNumLocals(Compiler* compiler) }; LocalCountingVisitor(Compiler* compiler) - : GenTreeVisitor(compiler), m_count(0), m_occurrences(0) + : GenTreeVisitor(compiler) + , m_count(0) + , m_occurrences(0) { } @@ -1186,7 +1190,9 @@ class CSE_DataFlow EXPSET_TP m_preMergeOut; public: - CSE_DataFlow(Compiler* pCompiler) : m_comp(pCompiler), m_preMergeOut(BitVecOps::UninitVal()) + CSE_DataFlow(Compiler* pCompiler) + : m_comp(pCompiler) + , m_preMergeOut(BitVecOps::UninitVal()) { } @@ -1742,7 +1748,8 @@ void Compiler::optValnumCSE_Availability() // Notes: // This creates the basic CSE heuristic. It never does any CSEs. // -CSE_HeuristicCommon::CSE_HeuristicCommon(Compiler* pCompiler) : m_pCompiler(pCompiler) +CSE_HeuristicCommon::CSE_HeuristicCommon(Compiler* pCompiler) + : m_pCompiler(pCompiler) { m_addCSEcount = 0; /* Count of the number of LclVars for CSEs that we added */ sortTab = nullptr; @@ -2074,7 +2081,8 @@ void CSE_HeuristicCommon::DumpMetrics() // This creates the random CSE heuristic. It does CSEs randomly, with some // predetermined likelihood (set by config or by stress). // -CSE_HeuristicRandom::CSE_HeuristicRandom(Compiler* pCompiler) : CSE_HeuristicCommon(pCompiler) +CSE_HeuristicRandom::CSE_HeuristicRandom(Compiler* pCompiler) + : CSE_HeuristicCommon(pCompiler) { m_cseRNG.Init(m_pCompiler->info.compMethodHash() ^ JitConfig.JitRandomCSE()); } @@ -2200,7 +2208,8 @@ void CSE_HeuristicRandom::ConsiderCandidates() // This creates the replay CSE heuristic. It does CSEs specifed by // the ArrayConfig parsing of JitReplayCSE. // -CSE_HeuristicReplay::CSE_HeuristicReplay(Compiler* pCompiler) : CSE_HeuristicCommon(pCompiler) +CSE_HeuristicReplay::CSE_HeuristicReplay(Compiler* pCompiler) + : CSE_HeuristicCommon(pCompiler) { } @@ -2292,7 +2301,8 @@ double CSE_HeuristicParameterized::s_defaultParameters[CSE_HeuristicParameterize // Arguments; // pCompiler - compiler instance // -CSE_HeuristicParameterized::CSE_HeuristicParameterized(Compiler* pCompiler) : CSE_HeuristicCommon(pCompiler) +CSE_HeuristicParameterized::CSE_HeuristicParameterized(Compiler* pCompiler) + : CSE_HeuristicCommon(pCompiler) { // Default parameter values... 
// @@ -2605,7 +2615,7 @@ void CSE_HeuristicParameterized::GetFeatures(CSEdsc* cse, double* features) if (!isLiveAcrossCallLSRA) { unsigned count = 0; - for (BasicBlock *block = minPostorderBlock; + for (BasicBlock* block = minPostorderBlock; block != nullptr && block != maxPostorderBlock && count < blockSpread; block = block->Next(), count++) { if (block->HasFlag(BBF_HAS_CALL)) @@ -2986,7 +2996,10 @@ void CSE_HeuristicParameterized::DumpChoices(ArrayStack& choices, CSEdsc // Uses parameters from JitRLCSE to drive a deterministic greedy policy // CSE_HeuristicRL::CSE_HeuristicRL(Compiler* pCompiler) - : CSE_HeuristicParameterized(pCompiler), m_alpha(0.0), m_updateParameters(false), m_greedy(false) + : CSE_HeuristicParameterized(pCompiler) + , m_alpha(0.0) + , m_updateParameters(false) + , m_greedy(false) { // Set up the random state // @@ -3656,7 +3669,8 @@ CSE_HeuristicRL::Choice* CSE_HeuristicRL::FindChoice(CSEdsc* dsc, ArrayStack& choices); + void BuildChoices(ArrayStack& choices); Choice& ChooseGreedy(ArrayStack& choices, bool recompute); @@ -227,12 +231,12 @@ class CSE_HeuristicRL : public CSE_HeuristicParameterized bool m_updateParameters; bool m_greedy; - Choice& ChooseSoftmax(ArrayStack& choices); - void Softmax(ArrayStack& choices); - void SoftmaxPolicy(); - void UpdateParametersStep(CSEdsc* dsc, ArrayStack& choices, double reward, double* delta); - void UpdateParameters(); - Choice* FindChoice(CSEdsc* dsc, ArrayStack& choices); + Choice& ChooseSoftmax(ArrayStack& choices); + void Softmax(ArrayStack& choices); + void SoftmaxPolicy(); + void UpdateParametersStep(CSEdsc* dsc, ArrayStack& choices, double reward, double* delta); + void UpdateParameters(); + Choice* FindChoice(CSEdsc* dsc, ArrayStack& choices); const char* Name() const; public: diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index d456cb3793f5e..1e5d5a00b107c 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -74,10 +74,10 @@ class OptBoolsDsc private: Statement* optOptimizeBoolsChkBlkCond(); - GenTree* optIsBoolComp(OptTestInfo* pOptTest); - bool optOptimizeBoolsChkTypeCostCond(); - void optOptimizeBoolsUpdateTrees(); - bool FindCompareChain(GenTree* condition, bool* isTestCondition); + GenTree* optIsBoolComp(OptTestInfo* pOptTest); + bool optOptimizeBoolsChkTypeCostCond(); + void optOptimizeBoolsUpdateTrees(); + bool FindCompareChain(GenTree* condition, bool* isTestCondition); }; //----------------------------------------------------------------------------- diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index c7760c4241c36..289e37b16fc4e 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -37,7 +37,8 @@ void Compiler::optInit() optCSEunmarks = 0; } -DataFlow::DataFlow(Compiler* pCompiler) : m_pCompiler(pCompiler) +DataFlow::DataFlow(Compiler* pCompiler) + : m_pCompiler(pCompiler) { } @@ -889,7 +890,7 @@ bool Compiler::optComputeLoopRep(int constInit, switch (iterOperType) { -// For small types, the iteration operator will narrow these values if big + // For small types, the iteration operator will narrow these values if big #define INIT_ITER_BY_TYPE(type) \ constInitX = (type)constInit; \ @@ -908,7 +909,7 @@ bool Compiler::optComputeLoopRep(int constInit, INIT_ITER_BY_TYPE(unsigned short); break; - // For the big types, 32 bit arithmetic is performed + // For the big types, 32 bit arithmetic is performed case TYP_INT: if (unsTest) @@ -1795,7 +1796,9 @@ void 
Compiler::optReplaceScalarUsesWithConst(BasicBlock* block, unsigned lclNum, bool MadeChanges = false; ReplaceVisitor(Compiler* comp, unsigned lclNum, ssize_t cnsVal) - : GenTreeVisitor(comp), m_lclNum(lclNum), m_cnsVal(cnsVal) + : GenTreeVisitor(comp) + , m_lclNum(lclNum) + , m_cnsVal(cnsVal) { } @@ -1841,7 +1844,8 @@ Compiler::OptInvertCountTreeInfoType Compiler::optInvertCountTreeInfo(GenTree* t Compiler::OptInvertCountTreeInfoType Result = {}; - CountTreeInfoVisitor(Compiler* comp) : GenTreeVisitor(comp) + CountTreeInfoVisitor(Compiler* comp) + : GenTreeVisitor(comp) { } @@ -3516,8 +3520,8 @@ bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, Valu return true; - /* Operands that are in memory can usually be narrowed - simply by changing their gtType */ + /* Operands that are in memory can usually be narrowed + simply by changing their gtType */ case GT_LCL_VAR: /* We only allow narrowing long -> int for a GT_LCL_VAR */ @@ -3775,7 +3779,8 @@ void Compiler::optRecordSsaUses(GenTree* tree, BasicBlock* block) }; SsaRecordingVisitor(Compiler* compiler, BasicBlock* block) - : GenTreeVisitor(compiler), m_block(block) + : GenTreeVisitor(compiler) + , m_block(block) { } @@ -4612,7 +4617,11 @@ void Compiler::optHoistLoopBlocks(FlowGraphNaturalLoop* loop, const char* m_failReason; #endif - Value(GenTree* node) : m_node(node), m_hoistable(false), m_cctorDependent(false), m_invariant(false) + Value(GenTree* node) + : m_node(node) + , m_hoistable(false) + , m_cctorDependent(false) + , m_invariant(false) { #ifdef DEBUG m_failReason = "unset"; @@ -4812,9 +4821,9 @@ void Compiler::optHoistLoopBlocks(FlowGraphNaturalLoop* loop, // To be invariant the variable must be in SSA ... bool isInvariant = lclVar->HasSsaName(); // and the SSA definition must be outside the loop we're hoisting from ... - isInvariant = isInvariant && - !m_loop->ContainsBlock( - m_compiler->lvaGetDesc(lclNum)->GetPerSsaData(lclVar->GetSsaNum())->GetBlock()); + isInvariant = + isInvariant && !m_loop->ContainsBlock( + m_compiler->lvaGetDesc(lclNum)->GetPerSsaData(lclVar->GetSsaNum())->GetBlock()); // and the VN of the tree is considered invariant as well. 
// @@ -5467,7 +5476,9 @@ PhaseStatus Compiler::fgCanonicalizeFirstBB() return PhaseStatus::MODIFIED_EVERYTHING; } -LoopSideEffects::LoopSideEffects() : VarInOut(VarSetOps::UninitVal()), VarUseDef(VarSetOps::UninitVal()) +LoopSideEffects::LoopSideEffects() + : VarInOut(VarSetOps::UninitVal()) + , VarUseDef(VarSetOps::UninitVal()) { for (MemoryKind mk : allMemoryKinds()) { diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp index 5a27367906774..71622ecfc3d75 100644 --- a/src/coreclr/jit/patchpoint.cpp +++ b/src/coreclr/jit/patchpoint.cpp @@ -34,7 +34,9 @@ class PatchpointTransformer Compiler* compiler; public: - PatchpointTransformer(Compiler* compiler) : ppCounterLclNum(BAD_VAR_NUM), compiler(compiler) + PatchpointTransformer(Compiler* compiler) + : ppCounterLclNum(BAD_VAR_NUM) + , compiler(compiler) { } diff --git a/src/coreclr/jit/phase.h b/src/coreclr/jit/phase.h index 6288d596729da..0f3d461c2b13f 100644 --- a/src/coreclr/jit/phase.h +++ b/src/coreclr/jit/phase.h @@ -34,14 +34,17 @@ class Phase virtual void Run(); protected: - Phase(Compiler* _compiler, Phases _phase) : comp(_compiler), m_name(nullptr), m_phase(_phase) + Phase(Compiler* _compiler, Phases _phase) + : comp(_compiler) + , m_name(nullptr) + , m_phase(_phase) { m_name = PhaseNames[_phase]; } virtual void PrePhase(); virtual PhaseStatus DoPhase() = 0; - virtual void PostPhase(PhaseStatus status); + virtual void PostPhase(PhaseStatus status); Compiler* comp; const char* m_name; @@ -54,7 +57,9 @@ template class ActionPhase final : public Phase { public: - ActionPhase(Compiler* _compiler, Phases _phase, A _action) : Phase(_compiler, _phase), action(_action) + ActionPhase(Compiler* _compiler, Phases _phase, A _action) + : Phase(_compiler, _phase) + , action(_action) { } @@ -84,7 +89,8 @@ class CompilerPhase final : public Phase { public: CompilerPhase(Compiler* _compiler, Phases _phase, void (Compiler::*_action)()) - : Phase(_compiler, _phase), action(_action) + : Phase(_compiler, _phase) + , action(_action) { } @@ -114,7 +120,8 @@ class CompilerPhaseWithStatus final : public Phase { public: CompilerPhaseWithStatus(Compiler* _compiler, Phases _phase, PhaseStatus (Compiler::*_action)()) - : Phase(_compiler, _phase), action(_action) + : Phase(_compiler, _phase) + , action(_action) { } diff --git a/src/coreclr/jit/promotion.cpp b/src/coreclr/jit/promotion.cpp index 2f7b1e0b31372..e02a5f0e06bab 100644 --- a/src/coreclr/jit/promotion.cpp +++ b/src/coreclr/jit/promotion.cpp @@ -80,7 +80,9 @@ struct Access #endif Access(unsigned offset, var_types accessType, ClassLayout* layout) - : Layout(layout), Offset(offset), AccessType(accessType) + : Layout(layout) + , Offset(offset) + , AccessType(accessType) { } @@ -220,7 +222,8 @@ bool AggregateInfo::OverlappingReplacements(unsigned offset, // numLocals - Number of locals to support in the map // AggregateInfoMap::AggregateInfoMap(CompAllocator allocator, unsigned numLocals) - : m_aggregates(allocator), m_numLocals(numLocals) + : m_aggregates(allocator) + , m_numLocals(numLocals) { m_lclNumToAggregateIndex = new (allocator) unsigned[numLocals]; for (unsigned i = 0; i < numLocals; i++) @@ -277,7 +280,9 @@ struct PrimitiveAccess unsigned Offset; var_types AccessType; - PrimitiveAccess(unsigned offset, var_types accessType) : Offset(offset), AccessType(accessType) + PrimitiveAccess(unsigned offset, var_types accessType) + : Offset(offset) + , AccessType(accessType) { } }; @@ -290,7 +295,8 @@ class LocalUses public: LocalUses(Compiler* comp) - : 
m_accesses(comp->getAllocator(CMK_Promotion)), m_inducedAccesses(comp->getAllocator(CMK_Promotion)) + : m_accesses(comp->getAllocator(CMK_Promotion)) + , m_inducedAccesses(comp->getAllocator(CMK_Promotion)) { } @@ -973,7 +979,7 @@ class LocalsUseVisitor : public GenTreeVisitor , m_prom(prom) , m_candidateStores(prom->m_compiler->getAllocator(CMK_Promotion)) { - m_uses = new (prom->m_compiler, CMK_Promotion) LocalUses*[prom->m_compiler->lvaCount]{}; + m_uses = new (prom->m_compiler, CMK_Promotion) LocalUses* [prom->m_compiler->lvaCount] {}; } //------------------------------------------------------------------------ @@ -2269,7 +2275,9 @@ void ReplaceVisitor::InsertPreStatementWriteBacks() DoPreOrder = true, }; - Visitor(Compiler* comp, ReplaceVisitor* replacer) : GenTreeVisitor(comp), m_replacer(replacer) + Visitor(Compiler* comp, ReplaceVisitor* replacer) + : GenTreeVisitor(comp) + , m_replacer(replacer) { } @@ -2716,8 +2724,8 @@ void ReplaceVisitor::WriteBackBeforeUse(GenTree** use, unsigned lcl, unsigned of GenTreeOp* comma = m_compiler->gtNewOperNode(GT_COMMA, (*use)->TypeGet(), Promotion::CreateWriteBack(m_compiler, lcl, rep), *use); - *use = comma; - use = &comma->gtOp2; + *use = comma; + use = &comma->gtOp2; ClearNeedsWriteBack(rep); m_madeChanges = true; diff --git a/src/coreclr/jit/promotion.h b/src/coreclr/jit/promotion.h index c421b019bc8f9..89097d78cd106 100644 --- a/src/coreclr/jit/promotion.h +++ b/src/coreclr/jit/promotion.h @@ -31,7 +31,9 @@ struct Replacement const char* Description = ""; #endif - Replacement(unsigned offset, var_types accessType) : Offset(offset), AccessType(accessType) + Replacement(unsigned offset, var_types accessType) + : Offset(offset) + , AccessType(accessType) { } @@ -55,7 +57,9 @@ class StructSegments { } - Segment(unsigned start, unsigned end) : Start(start), End(end) + Segment(unsigned start, unsigned end) + : Start(start) + , End(end) { } @@ -69,7 +73,8 @@ class StructSegments jitstd::vector m_segments; public: - explicit StructSegments(CompAllocator allocator) : m_segments(allocator) + explicit StructSegments(CompAllocator allocator) + : m_segments(allocator) { } @@ -96,7 +101,10 @@ struct AggregateInfo // Max offset in the struct local of the unpromoted part. 
unsigned UnpromotedMax = 0; - AggregateInfo(CompAllocator alloc, unsigned lclNum) : Replacements(alloc), LclNum(lclNum), Unpromoted(alloc) + AggregateInfo(CompAllocator alloc, unsigned lclNum) + : Replacements(alloc) + , LclNum(lclNum) + , Unpromoted(alloc) { } @@ -115,7 +123,7 @@ class AggregateInfoMap public: AggregateInfoMap(CompAllocator allocator, unsigned numLocals); - void Add(AggregateInfo* agg); + void Add(AggregateInfo* agg); AggregateInfo* Lookup(unsigned lclNum); jitstd::vector::iterator begin() @@ -146,10 +154,10 @@ class Promotion StructSegments SignificantSegments(ClassLayout* layout); - void ExplicitlyZeroInitReplacementLocals(unsigned lclNum, - const jitstd::vector& replacements, - Statement** prevStmt); - void InsertInitStatement(Statement** prevStmt, GenTree* tree); + void ExplicitlyZeroInitReplacementLocals(unsigned lclNum, + const jitstd::vector& replacements, + Statement** prevStmt); + void InsertInitStatement(Statement** prevStmt, GenTree* tree); static GenTree* CreateWriteBack(Compiler* compiler, unsigned structLclNum, const Replacement& replacement); static GenTree* CreateReadBack(Compiler* compiler, unsigned structLclNum, const Replacement& replacement); @@ -198,11 +206,12 @@ class Promotion bool HaveCandidateLocals(); - static bool IsCandidateForPhysicalPromotion(LclVarDsc* dsc); + static bool IsCandidateForPhysicalPromotion(LclVarDsc* dsc); static GenTree* EffectiveUser(Compiler::GenTreeStack& ancestors); public: - explicit Promotion(Compiler* compiler) : m_compiler(compiler) + explicit Promotion(Compiler* compiler) + : m_compiler(compiler) { } @@ -218,12 +227,15 @@ class StructDeaths friend class PromotionLiveness; private: - StructDeaths(BitVec deaths, AggregateInfo* agg) : m_deaths(deaths), m_aggregate(agg) + StructDeaths(BitVec deaths, AggregateInfo* agg) + : m_deaths(deaths) + , m_aggregate(agg) { } public: - StructDeaths() : m_deaths(BitVecOps::UninitVal()) + StructDeaths() + : m_deaths(BitVecOps::UninitVal()) { } @@ -236,26 +248,28 @@ struct BasicBlockLiveness; // Class to compute and track liveness information pertaining promoted structs. 
class PromotionLiveness { - Compiler* m_compiler; - AggregateInfoMap& m_aggregates; - BitVecTraits* m_bvTraits = nullptr; - unsigned* m_structLclToTrackedIndex = nullptr; - unsigned m_numVars = 0; - BasicBlockLiveness* m_bbInfo = nullptr; - bool m_hasPossibleBackEdge = false; - BitVec m_liveIn; - BitVec m_ehLiveVars; + Compiler* m_compiler; + AggregateInfoMap& m_aggregates; + BitVecTraits* m_bvTraits = nullptr; + unsigned* m_structLclToTrackedIndex = nullptr; + unsigned m_numVars = 0; + BasicBlockLiveness* m_bbInfo = nullptr; + bool m_hasPossibleBackEdge = false; + BitVec m_liveIn; + BitVec m_ehLiveVars; JitHashTable, BitVec> m_aggDeaths; public: PromotionLiveness(Compiler* compiler, AggregateInfoMap& aggregates) - : m_compiler(compiler), m_aggregates(aggregates), m_aggDeaths(compiler->getAllocator(CMK_Promotion)) + : m_compiler(compiler) + , m_aggregates(aggregates) + , m_aggDeaths(compiler->getAllocator(CMK_Promotion)) { } - void Run(); - bool IsReplacementLiveIn(BasicBlock* bb, unsigned structLcl, unsigned replacement); - bool IsReplacementLiveOut(BasicBlock* bb, unsigned structLcl, unsigned replacement); + void Run(); + bool IsReplacementLiveIn(BasicBlock* bb, unsigned structLcl, unsigned replacement); + bool IsReplacementLiveOut(BasicBlock* bb, unsigned structLcl, unsigned replacement); StructDeaths GetDeathsForStructLocal(GenTreeLclVarCommon* use); private: @@ -297,7 +311,10 @@ class ReplaceVisitor : public GenTreeVisitor }; ReplaceVisitor(Promotion* prom, AggregateInfoMap& aggregates, PromotionLiveness* liveness) - : GenTreeVisitor(prom->m_compiler), m_promotion(prom), m_aggregates(aggregates), m_liveness(liveness) + : GenTreeVisitor(prom->m_compiler) + , m_promotion(prom) + , m_aggregates(aggregates) + , m_liveness(liveness) { } diff --git a/src/coreclr/jit/promotiondecomposition.cpp b/src/coreclr/jit/promotiondecomposition.cpp index 18ac84c58e4f2..d4f71b9983520 100644 --- a/src/coreclr/jit/promotiondecomposition.cpp +++ b/src/coreclr/jit/promotiondecomposition.cpp @@ -275,7 +275,9 @@ class DecompositionPlan var_types PrimitiveType; RemainderStrategy(int type, unsigned primitiveOffset = 0, var_types primitiveType = TYP_UNDEF) - : Type(type), PrimitiveOffset(primitiveOffset), PrimitiveType(primitiveType) + : Type(type) + , PrimitiveOffset(primitiveOffset) + , PrimitiveType(primitiveType) { } }; @@ -727,8 +729,8 @@ class DecompositionPlan // remainderStrategy - The strategy we are using for the remainder // dump - Whether to JITDUMP decisions made // - bool CanSkipEntry(const Entry& entry, - const StructDeaths& deaths, + bool CanSkipEntry(const Entry& entry, + const StructDeaths& deaths, const RemainderStrategy& remainderStrategy DEBUGARG(bool dump = false)) { if (entry.ToReplacement != nullptr) diff --git a/src/coreclr/jit/rangecheck.cpp b/src/coreclr/jit/rangecheck.cpp index 475df2d659cab..eae6d62793583 100644 --- a/src/coreclr/jit/rangecheck.cpp +++ b/src/coreclr/jit/rangecheck.cpp @@ -461,7 +461,9 @@ bool RangeCheck::IsMonotonicallyIncreasing(GenTree* expr, bool rejectNegativeCon } // Remove hashtable entry for expr when we exit the present scope. - auto code = [this, expr] { m_pSearchPath->Remove(expr); }; + auto code = [this, expr] { + m_pSearchPath->Remove(expr); + }; jitstd::utility::scoped_code finally(code); if (m_pSearchPath->GetCount() > MAX_SEARCH_DEPTH) @@ -1123,7 +1125,7 @@ Range RangeCheck::GetRangeFromType(var_types type) // Compute the range for a local var definition. 
Range RangeCheck::ComputeRangeForLocalDef(BasicBlock* block, GenTreeLclVarCommon* lcl, - bool monIncreasing DEBUGARG(int indent)) + bool monIncreasing DEBUGARG(int indent)) { LclSsaVarDsc* ssaDef = GetSsaDefStore(lcl); if (ssaDef == nullptr) @@ -1566,7 +1568,10 @@ struct MapMethodDefsData BasicBlock* block; Statement* stmt; - MapMethodDefsData(RangeCheck* rc, BasicBlock* block, Statement* stmt) : rc(rc), block(block), stmt(stmt) + MapMethodDefsData(RangeCheck* rc, BasicBlock* block, Statement* stmt) + : rc(rc) + , block(block) + , stmt(stmt) { } }; diff --git a/src/coreclr/jit/rangecheck.h b/src/coreclr/jit/rangecheck.h index 098e1cc62b0d7..cd4193f1e2fb7 100644 --- a/src/coreclr/jit/rangecheck.h +++ b/src/coreclr/jit/rangecheck.h @@ -83,20 +83,28 @@ struct Limit keUnknown, // The limit could not be determined. }; - Limit() : type(keUndef) + Limit() + : type(keUndef) { } - Limit(LimitType type) : type(type) + Limit(LimitType type) + : type(type) { } - Limit(LimitType type, int cns) : cns(cns), vn(ValueNumStore::NoVN), type(type) + Limit(LimitType type, int cns) + : cns(cns) + , vn(ValueNumStore::NoVN) + , type(type) { assert(type == keConstant); } - Limit(LimitType type, ValueNum vn, int cns) : cns(cns), vn(vn), type(type) + Limit(LimitType type, ValueNum vn, int cns) + : cns(cns) + , vn(vn) + , type(type) { assert(type == keBinOpArray); } @@ -242,11 +250,15 @@ struct Range Limit uLimit; Limit lLimit; - Range(const Limit& limit) : uLimit(limit), lLimit(limit) + Range(const Limit& limit) + : uLimit(limit) + , lLimit(limit) { } - Range(const Limit& lLimit, const Limit& uLimit) : uLimit(uLimit), lLimit(lLimit) + Range(const Limit& lLimit, const Limit& uLimit) + : uLimit(uLimit) + , lLimit(lLimit) { } @@ -586,7 +598,10 @@ class RangeCheck BasicBlock* block; Statement* stmt; GenTreeLclVarCommon* tree; - Location(BasicBlock* block, Statement* stmt, GenTreeLclVarCommon* tree) : block(block), stmt(stmt), tree(tree) + Location(BasicBlock* block, Statement* stmt, GenTreeLclVarCommon* tree) + : block(block) + , stmt(stmt) + , tree(tree) { } diff --git a/src/coreclr/jit/rationalize.cpp b/src/coreclr/jit/rationalize.cpp index cb54b617a6a9c..d9b69b8df5aa2 100644 --- a/src/coreclr/jit/rationalize.cpp +++ b/src/coreclr/jit/rationalize.cpp @@ -383,7 +383,8 @@ PhaseStatus Rationalizer::DoPhase() }; RationalizeVisitor(Rationalizer& rationalizer) - : GenTreeVisitor(rationalizer.comp), m_rationalizer(rationalizer) + : GenTreeVisitor(rationalizer.comp) + , m_rationalizer(rationalizer) { } diff --git a/src/coreclr/jit/rationalize.h b/src/coreclr/jit/rationalize.h index 65264f8294582..a8651b2e5b8c7 100644 --- a/src/coreclr/jit/rationalize.h +++ b/src/coreclr/jit/rationalize.h @@ -55,7 +55,8 @@ class Rationalizer final : public Phase Compiler::fgWalkResult RewriteNode(GenTree** useEdge, Compiler::GenTreeStack& parents); }; -inline Rationalizer::Rationalizer(Compiler* _comp) : Phase(_comp, PHASE_RATIONALIZE) +inline Rationalizer::Rationalizer(Compiler* _comp) + : Phase(_comp, PHASE_RATIONALIZE) { } diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp index e8b346faccc37..e7569e86c2ed3 100644 --- a/src/coreclr/jit/redundantbranchopts.cpp +++ b/src/coreclr/jit/redundantbranchopts.cpp @@ -24,7 +24,9 @@ PhaseStatus Compiler::optRedundantBranches() public: bool madeChanges; - OptRedundantBranchesDomTreeVisitor(Compiler* compiler) : DomTreeVisitor(compiler), madeChanges(false) + OptRedundantBranchesDomTreeVisitor(Compiler* compiler) + : DomTreeVisitor(compiler) + , 
madeChanges(false) { } diff --git a/src/coreclr/jit/regset.cpp b/src/coreclr/jit/regset.cpp index efec31a78f5bd..5f5c80a4a19d6 100644 --- a/src/coreclr/jit/regset.cpp +++ b/src/coreclr/jit/regset.cpp @@ -233,7 +233,9 @@ void RegSet::SetMaskVars(regMaskTP newMaskVars) /*****************************************************************************/ -RegSet::RegSet(Compiler* compiler, GCInfo& gcInfo) : m_rsCompiler(compiler), m_rsGCInfo(gcInfo) +RegSet::RegSet(Compiler* compiler, GCInfo& gcInfo) + : m_rsCompiler(compiler) + , m_rsGCInfo(gcInfo) { /* Initialize the spill logic */ @@ -425,9 +427,9 @@ void RegSet::rsSpillTree(regNumber reg, GenTree* tree, unsigned regIdx /* =0 */) #if defined(TARGET_X86) /***************************************************************************** -* -* Spill the top of the FP x87 stack. -*/ + * + * Spill the top of the FP x87 stack. + */ void RegSet::rsSpillFPStack(GenTreeCall* call) { SpillDsc* spill; diff --git a/src/coreclr/jit/regset.h b/src/coreclr/jit/regset.h index 73eb08aa943eb..0924c410e3b85 100644 --- a/src/coreclr/jit/regset.h +++ b/src/coreclr/jit/regset.h @@ -58,7 +58,7 @@ class RegSet TempDsc* spillTemp; // the temp holding the spilled value static SpillDsc* alloc(Compiler* pComp, RegSet* regSet, var_types type); - static void freeDsc(RegSet* regSet, SpillDsc* spillDsc); + static void freeDsc(RegSet* regSet, SpillDsc* spillDsc); }; //------------------------------------------------------------------------- @@ -179,14 +179,14 @@ class RegSet }; static var_types tmpNormalizeType(var_types type); - TempDsc* tmpGetTemp(var_types type); // get temp for the given type - void tmpRlsTemp(TempDsc* temp); - TempDsc* tmpFindNum(int temp, TEMP_USAGE_TYPE usageType = TEMP_USAGE_FREE) const; + TempDsc* tmpGetTemp(var_types type); // get temp for the given type + void tmpRlsTemp(TempDsc* temp); + TempDsc* tmpFindNum(int temp, TEMP_USAGE_TYPE usageType = TEMP_USAGE_FREE) const; void tmpEnd(); TempDsc* tmpListBeg(TEMP_USAGE_TYPE usageType = TEMP_USAGE_FREE) const; TempDsc* tmpListNxt(TempDsc* curTemp, TEMP_USAGE_TYPE usageType = TEMP_USAGE_FREE) const; - void tmpDone(); + void tmpDone(); #ifdef DEBUG bool tmpAllFree() const; diff --git a/src/coreclr/jit/scev.cpp b/src/coreclr/jit/scev.cpp index 5819b56bdfd3a..491ee4ab06f04 100644 --- a/src/coreclr/jit/scev.cpp +++ b/src/coreclr/jit/scev.cpp @@ -206,7 +206,9 @@ void Scev::Dump(Compiler* comp) // ResetForLoop. // ScalarEvolutionContext::ScalarEvolutionContext(Compiler* comp) - : m_comp(comp), m_cache(comp->getAllocator(CMK_LoopIVOpts)), m_ephemeralCache(comp->getAllocator(CMK_LoopIVOpts)) + : m_comp(comp) + , m_cache(comp->getAllocator(CMK_LoopIVOpts)) + , m_ephemeralCache(comp->getAllocator(CMK_LoopIVOpts)) { } @@ -967,8 +969,8 @@ Scev* ScalarEvolutionContext::Simplify(Scev* scev) ScevAddRec* addRec = (ScevAddRec*)op1; Scev* newStart = Simplify(NewBinop(binop->Oper, addRec->Start, op2)); Scev* newStep = scev->OperIs(ScevOper::Mul, ScevOper::Lsh) - ? Simplify(NewBinop(binop->Oper, addRec->Step, op2)) - : addRec->Step; + ? 
Simplify(NewBinop(binop->Oper, addRec->Step, op2)) + : addRec->Step; return NewAddRec(newStart, newStep); } diff --git a/src/coreclr/jit/scev.h b/src/coreclr/jit/scev.h index 0800be905503a..1aab39e3d3a5d 100644 --- a/src/coreclr/jit/scev.h +++ b/src/coreclr/jit/scev.h @@ -48,7 +48,9 @@ struct Scev const ScevOper Oper; const var_types Type; - Scev(ScevOper oper, var_types type) : Oper(oper), Type(type) + Scev(ScevOper oper, var_types type) + : Oper(oper) + , Type(type) { } @@ -74,7 +76,9 @@ struct Scev struct ScevConstant : Scev { - ScevConstant(var_types type, int64_t value) : Scev(ScevOper::Constant, type), Value(value) + ScevConstant(var_types type, int64_t value) + : Scev(ScevOper::Constant, type) + , Value(value) { } @@ -84,7 +88,9 @@ struct ScevConstant : Scev struct ScevLocal : Scev { ScevLocal(var_types type, unsigned lclNum, unsigned ssaNum) - : Scev(ScevOper::Local, type), LclNum(lclNum), SsaNum(ssaNum) + : Scev(ScevOper::Local, type) + , LclNum(lclNum) + , SsaNum(ssaNum) { } @@ -96,7 +102,9 @@ struct ScevLocal : Scev struct ScevUnop : Scev { - ScevUnop(ScevOper oper, var_types type, Scev* op1) : Scev(oper, type), Op1(op1) + ScevUnop(ScevOper oper, var_types type, Scev* op1) + : Scev(oper, type) + , Op1(op1) { } @@ -105,7 +113,9 @@ struct ScevUnop : Scev struct ScevBinop : ScevUnop { - ScevBinop(ScevOper oper, var_types type, Scev* op1, Scev* op2) : ScevUnop(oper, type, op1), Op2(op2) + ScevBinop(ScevOper oper, var_types type, Scev* op1, Scev* op2) + : ScevUnop(oper, type, op1) + , Op2(op2) { } @@ -118,7 +128,9 @@ struct ScevBinop : ScevUnop struct ScevAddRec : Scev { ScevAddRec(var_types type, Scev* start, Scev* step DEBUGARG(FlowGraphNaturalLoop* loop)) - : Scev(ScevOper::AddRec, type), Start(start), Step(step) DEBUGARG(Loop(loop)) + : Scev(ScevOper::AddRec, type) + , Start(start) + , Step(step) DEBUGARG(Loop(loop)) { } @@ -204,7 +216,7 @@ class ScalarEvolutionContext Scev* MakeAddRecFromRecursiveScev(Scev* start, Scev* scev, Scev* recursiveScev); Scev* CreateSimpleInvariantScev(GenTree* tree); Scev* CreateScevForConstant(GenTreeIntConCommon* tree); - void ExtractAddOperands(ScevBinop* add, ArrayStack& operands); + void ExtractAddOperands(ScevBinop* add, ArrayStack& operands); public: ScalarEvolutionContext(Compiler* comp); @@ -212,10 +224,10 @@ class ScalarEvolutionContext void ResetForLoop(FlowGraphNaturalLoop* loop); ScevConstant* NewConstant(var_types type, int64_t value); - ScevLocal* NewLocal(unsigned lclNum, unsigned ssaNum); - ScevUnop* NewExtension(ScevOper oper, var_types targetType, Scev* op); - ScevBinop* NewBinop(ScevOper oper, Scev* op1, Scev* op2); - ScevAddRec* NewAddRec(Scev* start, Scev* step); + ScevLocal* NewLocal(unsigned lclNum, unsigned ssaNum); + ScevUnop* NewExtension(ScevOper oper, var_types targetType, Scev* op); + ScevBinop* NewBinop(ScevOper oper, Scev* op1, Scev* op2); + ScevAddRec* NewAddRec(Scev* start, Scev* step); Scev* Analyze(BasicBlock* block, GenTree* tree); Scev* Simplify(Scev* scev); diff --git a/src/coreclr/jit/scopeinfo.cpp b/src/coreclr/jit/scopeinfo.cpp index 7a1290f9ac785..ddb766e94a0de 100644 --- a/src/coreclr/jit/scopeinfo.cpp +++ b/src/coreclr/jit/scopeinfo.cpp @@ -790,11 +790,9 @@ void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::startLiveRang else { JITDUMP("Debug: New V%02u debug range: %s\n", m_varNum, - m_VariableLiveRanges->empty() - ? "first" - : siVarLoc::Equals(&varLocation, &(m_VariableLiveRanges->back().m_VarLocation)) - ? "new var or location" - : "not adjacent"); + m_VariableLiveRanges->empty() ? 
"first" + : siVarLoc::Equals(&varLocation, &(m_VariableLiveRanges->back().m_VarLocation)) ? "new var or location" + : "not adjacent"); // Creates new live range with invalid end m_VariableLiveRanges->emplace_back(varLocation, emitLocation(), emitLocation()); m_VariableLiveRanges->back().m_StartEmitLocation.CaptureLocation(emit); @@ -1685,9 +1683,9 @@ NATIVE_OFFSET CodeGen::psiGetVarStackOffset(const LclVarDsc* lclVarDsc) const } /*============================================================================ -* INTERFACE (public) Functions for PrologScopeInfo -*============================================================================ -*/ + * INTERFACE (public) Functions for PrologScopeInfo + *============================================================================ + */ //------------------------------------------------------------------------ // psiBegProlog: Initializes the PrologScopeInfo creating open psiScopes or diff --git a/src/coreclr/jit/sideeffects.cpp b/src/coreclr/jit/sideeffects.cpp index e39bf596c4770..4a9b1899b24b8 100644 --- a/src/coreclr/jit/sideeffects.cpp +++ b/src/coreclr/jit/sideeffects.cpp @@ -8,7 +8,10 @@ #include "sideeffects.h" -LclVarSet::LclVarSet() : m_bitVector(nullptr), m_hasAnyLcl(false), m_hasBitVector(false) +LclVarSet::LclVarSet() + : m_bitVector(nullptr) + , m_hasAnyLcl(false) + , m_hasBitVector(false) { } @@ -121,7 +124,10 @@ void LclVarSet::Clear() } AliasSet::AliasSet() - : m_lclVarReads(), m_lclVarWrites(), m_readsAddressableLocation(false), m_writesAddressableLocation(false) + : m_lclVarReads() + , m_lclVarWrites() + , m_readsAddressableLocation(false) + , m_writesAddressableLocation(false) { } @@ -136,7 +142,11 @@ AliasSet::AliasSet() // node - The node in question. // AliasSet::NodeInfo::NodeInfo(Compiler* compiler, GenTree* node) - : m_compiler(compiler), m_node(node), m_flags(0), m_lclNum(0), m_lclOffs(0) + : m_compiler(compiler) + , m_node(node) + , m_flags(0) + , m_lclNum(0) + , m_lclOffs(0) { if (node->IsCall()) { @@ -444,7 +454,9 @@ void AliasSet::Clear() m_lclVarWrites.Clear(); } -SideEffectSet::SideEffectSet() : m_sideEffectFlags(0), m_aliasSet() +SideEffectSet::SideEffectSet() + : m_sideEffectFlags(0) + , m_aliasSet() { } @@ -460,7 +472,9 @@ SideEffectSet::SideEffectSet() : m_sideEffectFlags(0), m_aliasSet() // compiler - The compiler context. // node - The node to use for initialization. 
// -SideEffectSet::SideEffectSet(Compiler* compiler, GenTree* node) : m_sideEffectFlags(0), m_aliasSet() +SideEffectSet::SideEffectSet(Compiler* compiler, GenTree* node) + : m_sideEffectFlags(0) + , m_aliasSet() { AddNode(compiler, node); } diff --git a/src/coreclr/jit/sideeffects.h b/src/coreclr/jit/sideeffects.h index d94622d9f0ca8..0fef277532cf1 100644 --- a/src/coreclr/jit/sideeffects.h +++ b/src/coreclr/jit/sideeffects.h @@ -13,7 +13,8 @@ // class LclVarSet final { - union { + union + { hashBv* m_bitVector; unsigned m_lclNum; }; diff --git a/src/coreclr/jit/simd.h b/src/coreclr/jit/simd.h index aec72eaab542e..3a5311aaaa79d 100644 --- a/src/coreclr/jit/simd.h +++ b/src/coreclr/jit/simd.h @@ -6,7 +6,8 @@ struct simd8_t { - union { + union + { float f32[2]; double f64[1]; int8_t i8[8]; @@ -58,7 +59,8 @@ static_assert_no_msg(sizeof(simd8_t) == 8); #include struct simd12_t { - union { + union + { float f32[3]; int8_t i8[12]; int16_t i16[6]; @@ -116,7 +118,8 @@ static_assert_no_msg(sizeof(simd12_t) == 12); struct simd16_t { - union { + union + { float f32[4]; double f64[2]; int8_t i8[16]; @@ -170,7 +173,8 @@ static_assert_no_msg(sizeof(simd16_t) == 16); #if defined(TARGET_XARCH) struct simd32_t { - union { + union + { float f32[8]; double f64[4]; int8_t i8[32]; @@ -224,7 +228,8 @@ static_assert_no_msg(sizeof(simd32_t) == 32); struct simd64_t { - union { + union + { float f32[16]; double f64[8]; int8_t i8[64]; @@ -279,7 +284,8 @@ static_assert_no_msg(sizeof(simd64_t) == 64); struct simdmask_t { - union { + union + { int8_t i8[8]; int16_t i16[4]; int32_t i32[2]; diff --git a/src/coreclr/jit/simdashwintrinsic.cpp b/src/coreclr/jit/simdashwintrinsic.cpp index f06b38736ddad..c22ebc7b63544 100644 --- a/src/coreclr/jit/simdashwintrinsic.cpp +++ b/src/coreclr/jit/simdashwintrinsic.cpp @@ -399,7 +399,7 @@ GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic, { argType = isInstanceMethod ? simdType : JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); - op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); + op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); return gtNewSimdAsHWIntrinsicNode(retType, op1, hwIntrinsic, simdBaseJitType, simdSize); } @@ -421,7 +421,7 @@ GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic, argType = isInstanceMethod ? simdType : JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); - op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); + op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, simdBaseJitType, simdSize); } @@ -954,7 +954,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, argType = isInstanceMethod ? simdType : JITtype2varType(strip(info.compCompHnd->getArgType(sig, argList, &argClass))); - op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); + op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod); switch (intrinsic) { diff --git a/src/coreclr/jit/sm.cpp b/src/coreclr/jit/sm.cpp index 5cd6e9879c78d..5e9b97699b841 100644 --- a/src/coreclr/jit/sm.cpp +++ b/src/coreclr/jit/sm.cpp @@ -130,8 +130,8 @@ SM_STATE_ID CodeSeqSM::GetDestState(SM_STATE_ID srcState, SM_OPCODE opcode) if (cell->srcState != srcState) { - assert(cell->srcState == 0 || - cell->srcState != srcState); // Either way means there is not outgoing edge from srcState. 
+ assert(cell->srcState == 0 || cell->srcState != srcState); // Either way means there is not outgoing edge from + // srcState. return 0; } else diff --git a/src/coreclr/jit/smallhash.h b/src/coreclr/jit/smallhash.h index f16905c995fbd..5bbf58e99a4bd 100644 --- a/src/coreclr/jit/smallhash.h +++ b/src/coreclr/jit/smallhash.h @@ -338,7 +338,10 @@ class HashTableBase protected: HashTableBase(TAllocator alloc, Bucket* buckets, unsigned numBuckets) - : m_alloc(alloc), m_buckets(buckets), m_numBuckets(numBuckets), m_numFullBuckets(0) + : m_alloc(alloc) + , m_buckets(buckets) + , m_numBuckets(numBuckets) + , m_numFullBuckets(0) { if (numBuckets > 0) { @@ -359,13 +362,15 @@ class HashTableBase Bucket* m_bucket; - KeyValuePair(Bucket* bucket) : m_bucket(bucket) + KeyValuePair(Bucket* bucket) + : m_bucket(bucket) { assert(m_bucket != nullptr); } public: - KeyValuePair() : m_bucket(nullptr) + KeyValuePair() + : m_bucket(nullptr) { } @@ -392,7 +397,9 @@ class HashTableBase unsigned m_index; Iterator(Bucket* buckets, unsigned numBuckets, unsigned index) - : m_buckets(buckets), m_numBuckets(numBuckets), m_index(index) + : m_buckets(buckets) + , m_numBuckets(numBuckets) + , m_index(index) { assert((buckets != nullptr) || (numBuckets == 0)); assert(index <= numBuckets); @@ -405,7 +412,10 @@ class HashTableBase } public: - Iterator() : m_buckets(nullptr), m_numBuckets(0), m_index(0) + Iterator() + : m_buckets(nullptr) + , m_numBuckets(0) + , m_index(0) { } @@ -636,7 +646,8 @@ class HashTable final : public HashTableBase } public: - HashTable(TAllocator alloc) : TBase(alloc, nullptr, 0) + HashTable(TAllocator alloc) + : TBase(alloc, nullptr, 0) { } @@ -670,7 +681,8 @@ class SmallHashTable final : public HashTableBase> 1)); // Parameters: // info - Info about the method being classified. // -Arm32Classifier::Arm32Classifier(const ClassifierInfo& info) : m_info(info) +Arm32Classifier::Arm32Classifier(const ClassifierInfo& info) + : m_info(info) { } diff --git a/src/coreclr/jit/targetarm64.cpp b/src/coreclr/jit/targetarm64.cpp index 4d4d6ae12aa06..a0e4dfb5c3cf4 100644 --- a/src/coreclr/jit/targetarm64.cpp +++ b/src/coreclr/jit/targetarm64.cpp @@ -32,7 +32,9 @@ const regMaskTP fltArgMasks[] = {RBM_V0, RBM_V1, RBM_V2, RBM_V3, RBM_V4, RBM_V5, // info - Info about the method being classified. // Arm64Classifier::Arm64Classifier(const ClassifierInfo& info) - : m_info(info), m_intRegs(intArgRegs, ArrLen(intArgRegs)), m_floatRegs(fltArgRegs, ArrLen(fltArgRegs)) + : m_info(info) + , m_intRegs(intArgRegs, ArrLen(intArgRegs)) + , m_floatRegs(fltArgRegs, ArrLen(fltArgRegs)) { } @@ -88,7 +90,7 @@ ABIPassingInformation Arm64Classifier::Classify(Compiler* comp, compAppleArm64Abi() ? min(elemSize, (unsigned)TARGET_POINTER_SIZE) : TARGET_POINTER_SIZE; m_stackArgSize = roundUp(m_stackArgSize, alignment); info = ABIPassingInformation::FromSegment(comp, ABIPassingSegment::OnStack(m_stackArgSize, 0, - structLayout->GetSize())); + structLayout->GetSize())); m_stackArgSize += roundUp(structLayout->GetSize(), alignment); // After passing any float value on the stack, we should not enregister more float values. m_floatRegs.Clear(); diff --git a/src/coreclr/jit/targetx86.cpp b/src/coreclr/jit/targetx86.cpp index 1c3e91be1bd2f..5c2702d472889 100644 --- a/src/coreclr/jit/targetx86.cpp +++ b/src/coreclr/jit/targetx86.cpp @@ -28,7 +28,8 @@ const regMaskTP intArgMasks[] = {RBM_ECX, RBM_EDX}; // Parameters: // info - Info about the method being classified. 
// -X86Classifier::X86Classifier(const ClassifierInfo& info) : m_regs(nullptr, 0) +X86Classifier::X86Classifier(const ClassifierInfo& info) + : m_regs(nullptr, 0) { switch (info.CallConv) { diff --git a/src/coreclr/jit/treelifeupdater.cpp b/src/coreclr/jit/treelifeupdater.cpp index 9ae6d3cd02f74..31563b4d501cc 100644 --- a/src/coreclr/jit/treelifeupdater.cpp +++ b/src/coreclr/jit/treelifeupdater.cpp @@ -339,7 +339,7 @@ void TreeLifeUpdater::UpdateLifeBit(VARSET_TP& set, LclVarDsc* dsc, // can be dumped after potential updates. // template -void TreeLifeUpdater::StoreCurrentLifeForDump() +void TreeLifeUpdater::StoreCurrentLifeForDump() { #ifdef DEBUG if (compiler->verbose) diff --git a/src/coreclr/jit/typelist.h b/src/coreclr/jit/typelist.h index 1a9a8c4072f6b..bf5acb5ee014a 100644 --- a/src/coreclr/jit/typelist.h +++ b/src/coreclr/jit/typelist.h @@ -4,7 +4,7 @@ #define GCS EA_GCREF #define BRS EA_BYREF #define EPS EA_PTRSIZE -#define PS TARGET_POINTER_SIZE +#define PS TARGET_POINTER_SIZE #define PST (TARGET_POINTER_SIZE / sizeof(int)) #ifdef TARGET_64BIT diff --git a/src/coreclr/jit/unwind.cpp b/src/coreclr/jit/unwind.cpp index a927e73c02b9f..e1ff9bc464a16 100644 --- a/src/coreclr/jit/unwind.cpp +++ b/src/coreclr/jit/unwind.cpp @@ -128,7 +128,7 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, assert(func->funKind == FUNC_HANDLER); *ppStartLoc = new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndBeg)); *ppEndLoc = HBtab->ebdHndLast->IsLast() ? nullptr - : new (this, CMK_UnwindInfo) + : new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndLast->Next())); } } diff --git a/src/coreclr/jit/unwind.h b/src/coreclr/jit/unwind.h index 4d1b540f06062..8b7fcaa5a103d 100644 --- a/src/coreclr/jit/unwind.h +++ b/src/coreclr/jit/unwind.h @@ -21,46 +21,51 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #if defined(TARGET_ARM) const unsigned MAX_PROLOG_SIZE_BYTES = 44; const unsigned MAX_EPILOG_SIZE_BYTES = 44; -#define UWC_END 0xFF // "end" unwind code +#define UWC_END 0xFF // "end" unwind code #define UW_MAX_FRAGMENT_SIZE_BYTES (1U << 19) -#define UW_MAX_CODE_WORDS_COUNT 15 // Max number that can be encoded in the "Code Words" field of the .pdata record -#define UW_MAX_EPILOG_START_INDEX 0xFFU // Max number that can be encoded in the "Epilog Start Index" field - // of the .pdata record +#define UW_MAX_CODE_WORDS_COUNT 15 // Max number that can be encoded in the "Code Words" field of the .pdata record +#define UW_MAX_EPILOG_START_INDEX \ + 0xFFU // Max number that can be encoded in the "Epilog Start Index" field + // of the .pdata record #elif defined(TARGET_ARM64) const unsigned MAX_PROLOG_SIZE_BYTES = 100; const unsigned MAX_EPILOG_SIZE_BYTES = 100; -#define UWC_END 0xE4 // "end" unwind code -#define UWC_END_C 0xE5 // "end_c" unwind code +#define UWC_END 0xE4 // "end" unwind code +#define UWC_END_C 0xE5 // "end_c" unwind code #define UW_MAX_FRAGMENT_SIZE_BYTES (1U << 20) -#define UW_MAX_CODE_WORDS_COUNT 31 -#define UW_MAX_EPILOG_START_INDEX 0x3FFU +#define UW_MAX_CODE_WORDS_COUNT 31 +#define UW_MAX_EPILOG_START_INDEX 0x3FFU #elif defined(TARGET_LOONGARCH64) const unsigned MAX_PROLOG_SIZE_BYTES = 200; const unsigned MAX_EPILOG_SIZE_BYTES = 200; -#define UWC_END 0xE4 // "end" unwind code -#define UWC_END_C 0xE5 // "end_c" unwind code +#define UWC_END 0xE4 // "end" unwind code +#define UWC_END_C 0xE5 // "end_c" unwind code #define UW_MAX_FRAGMENT_SIZE_BYTES (1U << 20) -#define UW_MAX_CODE_WORDS_COUNT 31 -#define 
UW_MAX_EPILOG_START_INDEX 0x3FFU +#define UW_MAX_CODE_WORDS_COUNT 31 +#define UW_MAX_EPILOG_START_INDEX 0x3FFU #elif defined(TARGET_RISCV64) const unsigned MAX_PROLOG_SIZE_BYTES = 200; const unsigned MAX_EPILOG_SIZE_BYTES = 200; -#define UWC_END 0xE4 // "end" unwind code -#define UWC_END_C 0xE5 // "end_c" unwind code +#define UWC_END 0xE4 // "end" unwind code +#define UWC_END_C 0xE5 // "end_c" unwind code #define UW_MAX_FRAGMENT_SIZE_BYTES (1U << 20) -#define UW_MAX_CODE_WORDS_COUNT 31 -#define UW_MAX_EPILOG_START_INDEX 0x3FFU +#define UW_MAX_CODE_WORDS_COUNT 31 +#define UW_MAX_EPILOG_START_INDEX 0x3FFU #endif // TARGET_RISCV64 -#define UW_MAX_EPILOG_COUNT 31 // Max number that can be encoded in the "Epilog count" field - // of the .pdata record -#define UW_MAX_EXTENDED_CODE_WORDS_COUNT 0xFFU // Max number that can be encoded in the "Extended Code Words" - // field of the .pdata record -#define UW_MAX_EXTENDED_EPILOG_COUNT 0xFFFFU // Max number that can be encoded in the "Extended Epilog Count" - // field of the .pdata record -#define UW_MAX_EPILOG_START_OFFSET 0x3FFFFU // Max number that can be encoded in the "Epilog Start Offset" - // field of the .pdata record +#define UW_MAX_EPILOG_COUNT \ + 31 // Max number that can be encoded in the "Epilog count" field + // of the .pdata record +#define UW_MAX_EXTENDED_CODE_WORDS_COUNT \ + 0xFFU // Max number that can be encoded in the "Extended Code Words" + // field of the .pdata record +#define UW_MAX_EXTENDED_EPILOG_COUNT \ + 0xFFFFU // Max number that can be encoded in the "Extended Epilog Count" + // field of the .pdata record +#define UW_MAX_EPILOG_START_OFFSET \ + 0x3FFFFU // Max number that can be encoded in the "Epilog Start Offset" + // field of the .pdata record // // Forward declaration of class defined in emit.h @@ -85,7 +90,8 @@ class UnwindInfo; class UnwindBase { protected: - UnwindBase(Compiler* comp) : uwiComp(comp) + UnwindBase(Compiler* comp) + : uwiComp(comp) { } @@ -107,9 +113,9 @@ class UnwindCodesBase public: // Add a single unwind code. - virtual void AddCode(BYTE b1) = 0; - virtual void AddCode(BYTE b1, BYTE b2) = 0; - virtual void AddCode(BYTE b1, BYTE b2, BYTE b3) = 0; + virtual void AddCode(BYTE b1) = 0; + virtual void AddCode(BYTE b1, BYTE b2) = 0; + virtual void AddCode(BYTE b1, BYTE b2, BYTE b3) = 0; virtual void AddCode(BYTE b1, BYTE b2, BYTE b3, BYTE b4) = 0; // Get access to the unwind codes @@ -139,7 +145,9 @@ class UnwindCodesBase // information for a function, including unwind info header, the prolog codes, // and any epilog codes. -class UnwindPrologCodes : public UnwindBase, public UnwindCodesBase +class UnwindPrologCodes + : public UnwindBase + , public UnwindCodesBase { // UPC_LOCAL_COUNT is the amount of memory local to this class. For ARM CoreLib, the maximum size is 34. // Here is a histogram of other interesting sizes: @@ -303,7 +311,9 @@ class UnwindPrologCodes : public UnwindBase, public UnwindCodesBase // Epilog unwind codes arrive in the order they will be emitted. Store them as an array, // adding new ones to the end of the array. -class UnwindEpilogCodes : public UnwindBase, public UnwindCodesBase +class UnwindEpilogCodes + : public UnwindBase + , public UnwindCodesBase { // UEC_LOCAL_COUNT is the amount of memory local to this class. For ARM CoreLib, the maximum size is 6, // while 89% of epilogs fit in 4. So, set it to 4 to maintain array alignment and hit most cases. 
diff --git a/src/coreclr/jit/unwindamd64.cpp b/src/coreclr/jit/unwindamd64.cpp index 549c4e9910567..e42a4368581fb 100644 --- a/src/coreclr/jit/unwindamd64.cpp +++ b/src/coreclr/jit/unwindamd64.cpp @@ -199,7 +199,7 @@ void Compiler::unwindPushWindows(regNumber reg) // since it is pushed as a frame register. || (reg == REG_FPBASE) #endif // ETW_EBP_FRAMED - ) + ) { code->UnwindOp = UWOP_PUSH_NONVOL; code->OpInfo = (BYTE)reg; diff --git a/src/coreclr/jit/unwindarm64.cpp b/src/coreclr/jit/unwindarm64.cpp index 0725eb41dfdba..f842737171c0b 100644 --- a/src/coreclr/jit/unwindarm64.cpp +++ b/src/coreclr/jit/unwindarm64.cpp @@ -461,8 +461,8 @@ void Compiler::unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int o pu->AddCode(0x80 | (BYTE)z); } - else if ((reg1 == REG_R19) && - (-256 <= offset)) // If the offset is between -512 and -256, we use the save_regp_x unwind code. + else if ((reg1 == REG_R19) && (-256 <= offset)) // If the offset is between -512 and -256, we use the save_regp_x + // unwind code. { // save_r19r20_x: 001zzzzz: save pair at [sp-#Z*8]!, pre-indexed offset >= -248 // NOTE: I'm not sure why we allow Z==0 here; seems useless, and the calculation of offset is different from the @@ -758,7 +758,7 @@ void DumpUnwindInfo(Compiler* comp, // pHeader is not guaranteed to be aligned. We put four 0xFF end codes at the end // to provide padding, and round down to get a multiple of 4 bytes in size. DWORD UNALIGNED* pdw = (DWORD UNALIGNED*)pHeader; - DWORD dw; + DWORD dw; dw = *pdw++; diff --git a/src/coreclr/jit/unwindarmarch.cpp b/src/coreclr/jit/unwindarmarch.cpp index 445b2581ca0ab..bdc7663bde7ed 100644 --- a/src/coreclr/jit/unwindarmarch.cpp +++ b/src/coreclr/jit/unwindarmarch.cpp @@ -243,9 +243,8 @@ void Compiler::unwindPushPopMaskInt(regMaskTP maskInt, bool useOpsize16) } else { - assert((maskInt & - ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | RBM_R10 | - RBM_R11 | RBM_R12 | RBM_LR)) == 0); + assert((maskInt & ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | + RBM_R10 | RBM_R11 | RBM_R12 | RBM_LR)) == 0); bool shortFormat = false; BYTE val = 0; @@ -321,9 +320,8 @@ void Compiler::unwindPushPopMaskFloat(regMaskTP maskFloat) void Compiler::unwindPushMaskInt(regMaskTP maskInt) { // Only r0-r12 and lr are supported - assert((maskInt & - ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | RBM_R10 | - RBM_R11 | RBM_R12 | RBM_LR)) == 0); + assert((maskInt & ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | + RBM_R10 | RBM_R11 | RBM_R12 | RBM_LR)) == 0); #if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) @@ -364,9 +362,8 @@ void Compiler::unwindPopMaskInt(regMaskTP maskInt) #endif // FEATURE_CFI_SUPPORT // Only r0-r12 and lr and pc are supported (pc is mapped to lr when encoding) - assert((maskInt & - ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | RBM_R10 | - RBM_R11 | RBM_R12 | RBM_LR | RBM_PC)) == 0); + assert((maskInt & ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | + RBM_R10 | RBM_R11 | RBM_R12 | RBM_LR | RBM_PC)) == 0); bool useOpsize16 = ((maskInt & (RBM_LOW_REGS | RBM_PC)) == maskInt); // Can POP use the 16-bit encoding? 
@@ -721,8 +718,8 @@ unsigned GetOpcodeSizeFromUnwindHeader(BYTE b1) }; BYTE opsize = s_UnwindOpsize[b1]; - assert(opsize == 2 || - opsize == 4); // We shouldn't get a code with no opsize (the 0xFF end code is handled specially) + assert(opsize == 2 || opsize == 4); // We shouldn't get a code with no opsize (the 0xFF end code is handled + // specially) return opsize; } @@ -887,9 +884,9 @@ void UnwindPrologCodes::AppendEpilog(UnwindEpilogInfo* pEpi) int epiSize = pEpi->Size(); memcpy_s(&upcMem[upcEpilogSlot], upcMemSize - upcEpilogSlot - 3, pEpi->GetCodes(), - epiSize); // -3 to avoid writing to the alignment padding - assert(pEpi->GetStartIndex() == - upcEpilogSlot - upcCodeSlot); // Make sure we copied it where we expected to copy it. + epiSize); // -3 to avoid writing to the alignment padding + assert(pEpi->GetStartIndex() == upcEpilogSlot - upcCodeSlot); // Make sure we copied it where we expected to copy + // it. upcEpilogSlot += epiSize; assert(upcEpilogSlot <= upcMemSize - 3); @@ -1455,7 +1452,7 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength) } #endif -// Compute the header + // Compute the header #if defined(TARGET_ARM) noway_assert((functionLength & 1) == 0); @@ -1504,8 +1501,8 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength) // Start writing the header - noway_assert(headerFunctionLength <= - 0x3FFFFU); // We create fragments to prevent this from firing, so if it hits, we have an internal error + noway_assert(headerFunctionLength <= 0x3FFFFU); // We create fragments to prevent this from firing, so if it hits, + // we have an internal error if ((headerEpilogCount > UW_MAX_EPILOG_COUNT) || (headerCodeWords > UW_MAX_CODE_WORDS_COUNT)) { @@ -1516,7 +1513,7 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength) DWORD header = headerFunctionLength | (headerVers << 18) | (headerXBit << 20) | (headerEBit << 21) | (headerFBit << 22) | (headerEpilogCount << 23) | (headerCodeWords << 28); #elif defined(TARGET_ARM64) - DWORD header = headerFunctionLength | (headerVers << 18) | (headerXBit << 20) | (headerEBit << 21) | + DWORD header = headerFunctionLength | (headerVers << 18) | (headerXBit << 20) | (headerEBit << 21) | (headerEpilogCount << 22) | (headerCodeWords << 27); #endif // defined(TARGET_ARM64) @@ -2203,7 +2200,7 @@ DWORD DumpRegSetRange(const char* const rtype, DWORD start, DWORD end, DWORD lr) DWORD DumpOpsize(DWORD padding, DWORD opsize) { if (padding > 100) // underflow? - padding = 4; + padding = 4; DWORD printed = padding; for (; padding > 0; padding--) printf(" "); @@ -2231,7 +2228,7 @@ void DumpUnwindInfo(Compiler* comp, // pHeader is not guaranteed to be aligned. We put four 0xFF end codes at the end // to provide padding, and round down to get a multiple of 4 bytes in size. DWORD UNALIGNED* pdw = (DWORD UNALIGNED*)pHeader; - DWORD dw; + DWORD dw; dw = *pdw++; diff --git a/src/coreclr/jit/unwindloongarch64.cpp b/src/coreclr/jit/unwindloongarch64.cpp index 3aa5fd668d40c..1b561eaaaae66 100644 --- a/src/coreclr/jit/unwindloongarch64.cpp +++ b/src/coreclr/jit/unwindloongarch64.cpp @@ -516,7 +516,7 @@ void DumpUnwindInfo(Compiler* comp, // pHeader is not guaranteed to be aligned. We put four 0xFF end codes at the end // to provide padding, and round down to get a multiple of 4 bytes in size. 
DWORD UNALIGNED* pdw = (DWORD UNALIGNED*)pHeader; - DWORD dw; + DWORD dw; dw = *pdw++; @@ -1149,9 +1149,9 @@ void UnwindPrologCodes::AppendEpilog(UnwindEpilogInfo* pEpi) int epiSize = pEpi->Size(); memcpy_s(&upcMem[upcEpilogSlot], upcMemSize - upcEpilogSlot - 3, pEpi->GetCodes(), - epiSize); // -3 to avoid writing to the alignment padding - assert(pEpi->GetStartIndex() == - upcEpilogSlot - upcCodeSlot); // Make sure we copied it where we expected to copy it. + epiSize); // -3 to avoid writing to the alignment padding + assert(pEpi->GetStartIndex() == upcEpilogSlot - upcCodeSlot); // Make sure we copied it where we expected to copy + // it. upcEpilogSlot += epiSize; assert(upcEpilogSlot <= upcMemSize - 3); @@ -1772,8 +1772,8 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength) // Start writing the header - noway_assert(headerFunctionLength <= - 0x3FFFFU); // We create fragments to prevent this from firing, so if it hits, we have an internal error + noway_assert(headerFunctionLength <= 0x3FFFFU); // We create fragments to prevent this from firing, so if it hits, + // we have an internal error if ((headerEpilogCount > UW_MAX_EPILOG_COUNT) || (headerCodeWords > UW_MAX_CODE_WORDS_COUNT)) { diff --git a/src/coreclr/jit/unwindriscv64.cpp b/src/coreclr/jit/unwindriscv64.cpp index b78eb04c228e9..f9db0d433c6f1 100644 --- a/src/coreclr/jit/unwindriscv64.cpp +++ b/src/coreclr/jit/unwindriscv64.cpp @@ -327,7 +327,7 @@ void DumpUnwindInfo(Compiler* comp, // pHeader is not guaranteed to be aligned. We put four 0xFF end codes at the end // to provide padding, and round down to get a multiple of 4 bytes in size. DWORD UNALIGNED* pdw = (DWORD UNALIGNED*)pHeader; - DWORD dw; + DWORD dw; dw = *pdw++; diff --git a/src/coreclr/jit/utils.cpp b/src/coreclr/jit/utils.cpp index 63a91e8160a7f..c3234e5524dea 100644 --- a/src/coreclr/jit/utils.cpp +++ b/src/coreclr/jit/utils.cpp @@ -1107,7 +1107,8 @@ void Counter::dump(FILE* output) * Histogram class. */ -Histogram::Histogram(const unsigned* const sizeTable) : m_sizeTable(sizeTable) +Histogram::Histogram(const unsigned* const sizeTable) + : m_sizeTable(sizeTable) { unsigned sizeCount = 0; do @@ -1839,7 +1840,8 @@ void HelperCallProperties::init() // // You must use ';' as a separator; whitespace no longer works -AssemblyNamesList2::AssemblyNamesList2(const WCHAR* list, HostAllocator alloc) : m_alloc(alloc) +AssemblyNamesList2::AssemblyNamesList2(const WCHAR* list, HostAllocator alloc) + : m_alloc(alloc) { WCHAR prevChar = '?'; // dummy LPWSTR nameStart = nullptr; // start of the name currently being processed. 
nullptr if no current name @@ -1926,7 +1928,9 @@ bool AssemblyNamesList2::IsInList(const char* assemblyName) // MethodSet //============================================================================= -MethodSet::MethodSet(const WCHAR* filename, HostAllocator alloc) : m_pInfos(nullptr), m_alloc(alloc) +MethodSet::MethodSet(const WCHAR* filename, HostAllocator alloc) + : m_pInfos(nullptr) + , m_alloc(alloc) { FILE* methodSetFile = _wfopen(filename, W("r")); if (methodSetFile == nullptr) @@ -2155,7 +2159,8 @@ double CachedCyclesPerSecond() } #ifdef FEATURE_JIT_METHOD_PERF -CycleCount::CycleCount() : cps(CachedCyclesPerSecond()) +CycleCount::CycleCount() + : cps(CachedCyclesPerSecond()) { } @@ -2299,7 +2304,7 @@ unsigned __int64 FloatingPointUtils::convertDoubleToUInt64(double d) u64 = UINT64(INT64(d)); #else - u64 = UINT64(d); + u64 = UINT64(d); #endif // TARGET_XARCH return u64; @@ -4099,7 +4104,7 @@ int64_t GetSigned64Magic(int64_t d, int* shift /*out*/) return GetSignedMagic(d, shift); } #endif -} +} // namespace MagicDivide namespace CheckedOps { @@ -4293,4 +4298,4 @@ bool CastFromDoubleOverflows(double fromValue, var_types toType) unreached(); } } -} +} // namespace CheckedOps diff --git a/src/coreclr/jit/utils.h b/src/coreclr/jit/utils.h index 549922ad15840..6a0362bbbf067 100644 --- a/src/coreclr/jit/utils.h +++ b/src/coreclr/jit/utils.h @@ -88,7 +88,9 @@ class IteratorPair TIterator m_end; public: - IteratorPair(TIterator begin, TIterator end) : m_begin(begin), m_end(end) + IteratorPair(TIterator begin, TIterator end) + : m_begin(begin) + , m_end(end) { } @@ -116,7 +118,8 @@ struct ConstLog2 { enum { - value = ConstLog2::value + value = ConstLog2 < val / 2, + acc + 1 > ::value }; }; @@ -246,7 +249,9 @@ class ConfigMethodRange class ConfigIntArray { public: - ConfigIntArray() : m_values(nullptr), m_length(0) + ConfigIntArray() + : m_values(nullptr) + , m_length(0) { } @@ -270,7 +275,7 @@ class ConfigIntArray } private: - void Init(const WCHAR* str); + void Init(const WCHAR* str); int* m_values; unsigned m_length; }; @@ -280,7 +285,9 @@ class ConfigIntArray class ConfigDoubleArray { public: - ConfigDoubleArray() : m_values(nullptr), m_length(0) + ConfigDoubleArray() + : m_values(nullptr) + , m_length(0) { } @@ -304,7 +311,7 @@ class ConfigDoubleArray } private: - void Init(const WCHAR* str); + void Init(const WCHAR* str); double* m_values; unsigned m_length; }; @@ -404,7 +411,8 @@ template class ScopedSetVariable { public: - ScopedSetVariable(T* pVariable, T value) : m_pVariable(pVariable) + ScopedSetVariable(T* pVariable, T value) + : m_pVariable(pVariable) { m_oldValue = *m_pVariable; *m_pVariable = value; @@ -442,7 +450,8 @@ class PhasedVar public: PhasedVar() #ifdef DEBUG - : m_initialized(false), m_writePhase(true) + : m_initialized(false) + , m_writePhase(true) #endif // DEBUG { } @@ -704,7 +713,9 @@ class MethodSet MethodInfo* m_next; MethodInfo(char* methodName, int methodHash) - : m_MethodName(methodName), m_MethodHash(methodHash), m_next(nullptr) + : m_MethodName(methodName) + , m_MethodHash(methodHash) + , m_next(nullptr) { } }; @@ -786,8 +797,8 @@ unsigned CountDigits(double num, unsigned base = 10); #endif // DEBUG /***************************************************************************** -* Floating point utility class -*/ + * Floating point utility class + */ class FloatingPointUtils { public: @@ -1019,7 +1030,7 @@ class CritSecObject CRITSEC_COOKIE m_pCs; // No copying or assignment allowed. 
- CritSecObject(const CritSecObject&) = delete; + CritSecObject(const CritSecObject&) = delete; CritSecObject& operator=(const CritSecObject&) = delete; }; @@ -1029,7 +1040,8 @@ class CritSecObject class CritSecHolder { public: - CritSecHolder(CritSecObject& critSec) : m_CritSec(critSec) + CritSecHolder(CritSecObject& critSec) + : m_CritSec(critSec) { ClrEnterCriticalSection(m_CritSec.Val()); } @@ -1043,7 +1055,7 @@ class CritSecHolder CritSecObject& m_CritSec; // No copying or assignment allowed. - CritSecHolder(const CritSecHolder&) = delete; + CritSecHolder(const CritSecHolder&) = delete; CritSecHolder& operator=(const CritSecHolder&) = delete; }; @@ -1059,7 +1071,7 @@ int32_t GetSigned32Magic(int32_t d, int* shift /*out*/); #ifdef TARGET_64BIT int64_t GetSigned64Magic(int64_t d, int* shift /*out*/); #endif -} +} // namespace MagicDivide // // Profiling helpers @@ -1160,6 +1172,6 @@ bool CastFromIntOverflows(int32_t fromValue, var_types toType, bool fromUnsigned bool CastFromLongOverflows(int64_t fromValue, var_types toType, bool fromUnsigned); bool CastFromFloatOverflows(float fromValue, var_types toType); bool CastFromDoubleOverflows(double fromValue, var_types toType); -} +} // namespace CheckedOps #endif // _UTILS_H_ diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 7ebde5995cf73..889e0227e992d 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -45,7 +45,7 @@ struct FloatTraits #if defined(TARGET_XARCH) unsigned bits = 0xFFC00000u; #elif defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) - unsigned bits = 0x7FC00000u; + unsigned bits = 0x7FC00000u; #else #error Unsupported or unset target architecture #endif @@ -1647,7 +1647,11 @@ bool ValueNumStore::IsSharedStatic(ValueNum vn) } ValueNumStore::Chunk::Chunk(CompAllocator alloc, ValueNum* pNextBaseVN, var_types typ, ChunkExtraAttribs attribs) - : m_defs(nullptr), m_numUsed(0), m_baseVN(*pNextBaseVN), m_typ(typ), m_attribs(attribs) + : m_defs(nullptr) + , m_numUsed(0) + , m_baseVN(*pNextBaseVN) + , m_typ(typ) + , m_attribs(attribs) { // Allocate "m_defs" here, according to the typ/attribs pair. 
switch (attribs) @@ -2971,7 +2975,8 @@ typedef JitHashTable, bool> ValueN class SmallValueNumSet { - union { + union + { ValueNum m_inlineElements[4]; ValueNumSet* m_set; }; @@ -3416,7 +3421,7 @@ ValueNum ValueNumStore::VNForMapSelectWork(ValueNumKind vnk, { bool usedRecursiveVN = false; ValueNum curResult = VNForMapSelectWork(vnk, type, phiArgVN, index, pBudget, - &usedRecursiveVN, recMemoryDependencies); + &usedRecursiveVN, recMemoryDependencies); *pUsedRecursiveVN |= usedRecursiveVN; if (sameSelResult == ValueNumStore::RecursiveVN) @@ -3449,8 +3454,9 @@ ValueNum ValueNumStore::VNForMapSelectWork(ValueNumKind vnk, GetMapSelectWorkCache()->Set(fstruct, entry); } - recMemoryDependencies.ForEach( - [this, &memoryDependencies](ValueNum vn) { memoryDependencies.Add(m_pComp, vn); }); + recMemoryDependencies.ForEach([this, &memoryDependencies](ValueNum vn) { + memoryDependencies.Add(m_pComp, vn); + }); return sameSelResult; } @@ -3485,7 +3491,9 @@ ValueNum ValueNumStore::VNForMapSelectWork(ValueNumKind vnk, GetMapSelectWorkCache()->Set(fstruct, entry); } - recMemoryDependencies.ForEach([this, &memoryDependencies](ValueNum vn) { memoryDependencies.Add(m_pComp, vn); }); + recMemoryDependencies.ForEach([this, &memoryDependencies](ValueNum vn) { + memoryDependencies.Add(m_pComp, vn); + }); return entry.Result; } @@ -5610,7 +5618,7 @@ ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeq* fldSeq, ssize_t offs { fldSeq = m_pComp->GetFieldSeqStore()->Append(FieldSeqVNToFieldSeq(funcApp.m_args[1]), fldSeq); res = VNForFunc(TYP_BYREF, VNF_PtrToStatic, funcApp.m_args[0], VNForFieldSeq(fldSeq), - VNForIntPtrCon(ConstantValue(funcApp.m_args[2]) + offset)); + VNForIntPtrCon(ConstantValue(funcApp.m_args[2]) + offset)); } else if (funcApp.m_func == VNF_PtrToArrElem) { @@ -5653,7 +5661,6 @@ void Compiler::fgValueNumberLocalStore(GenTree* storeNode, auto processDef = [=](unsigned defLclNum, unsigned defSsaNum, ssize_t defOffset, unsigned defSize, ValueNumPair defValue) { - LclVarDsc* defVarDsc = lvaGetDesc(defLclNum); if (defSsaNum != SsaConfig::RESERVED_SSA_NUM) @@ -12189,8 +12196,8 @@ void Compiler::fgValueNumberCastTree(GenTree* tree) ValueNum ValueNumStore::VNForCast(ValueNum srcVN, var_types castToType, var_types castFromType, - bool srcIsUnsigned, /* = false */ - bool hasOverflowCheck) /* = false */ + bool srcIsUnsigned, /* = false */ + bool hasOverflowCheck) /* = false */ { if ((castFromType == TYP_I_IMPL) && (castToType == TYP_BYREF) && IsVNHandle(srcVN)) @@ -12235,8 +12242,8 @@ ValueNum ValueNumStore::VNForCast(ValueNum srcVN, ValueNumPair ValueNumStore::VNPairForCast(ValueNumPair srcVNPair, var_types castToType, var_types castFromType, - bool srcIsUnsigned, /* = false */ - bool hasOverflowCheck) /* = false */ + bool srcIsUnsigned, /* = false */ + bool hasOverflowCheck) /* = false */ { ValueNum srcLibVN = srcVNPair.GetLiberal(); ValueNum srcConVN = srcVNPair.GetConservative(); @@ -13742,7 +13749,6 @@ void Compiler::fgDebugCheckExceptionSets() ValueNumPair operandsExcSet = vnStore->VNPForEmptyExcSet(); tree->VisitOperands([&](GenTree* operand) -> GenTree::VisitResult { - CheckTree(operand, vnStore); ValueNumPair operandVNP = operand->gtVNPair.BothDefined() ? operand->gtVNPair : vnStore->VNPForVoid(); @@ -13796,7 +13802,7 @@ void Compiler::JitTestCheckVN() // First we have to know which nodes in the tree are reachable. 
typedef JitHashTable, int> NodeToIntMap; - NodeToIntMap* reachable = FindReachableNodesInNodeTestData(); + NodeToIntMap* reachable = FindReachableNodesInNodeTestData(); LabelToVNMap* labelToVN = new (getAllocatorDebugOnly()) LabelToVNMap(getAllocatorDebugOnly()); VNToLabelMap* vnToLabel = new (getAllocatorDebugOnly()) VNToLabelMap(getAllocatorDebugOnly()); @@ -13931,7 +13937,9 @@ void Compiler::vnPrint(ValueNum vn, unsigned level) #endif // DEBUG // Methods of ValueNumPair. -ValueNumPair::ValueNumPair() : m_liberal(ValueNumStore::NoVN), m_conservative(ValueNumStore::NoVN) +ValueNumPair::ValueNumPair() + : m_liberal(ValueNumStore::NoVN) + , m_conservative(ValueNumStore::NoVN) { } diff --git a/src/coreclr/jit/valuenum.h b/src/coreclr/jit/valuenum.h index 7cd6c27aec206..1f9171e13cef2 100644 --- a/src/coreclr/jit/valuenum.h +++ b/src/coreclr/jit/valuenum.h @@ -238,7 +238,8 @@ class ValueNumStore class VNMap : public JitHashTable { public: - VNMap(CompAllocator alloc) : JitHashTable(alloc) + VNMap(CompAllocator alloc) + : JitHashTable(alloc) { } @@ -306,7 +307,7 @@ class ValueNumStore bool illegalAsVNFunc, GenTreeOperKind kind); static constexpr uint8_t GetOpAttribsForFunc(int arity, bool commute, bool knownNonNull, bool sharedStatic); - static const uint8_t s_vnfOpAttribs[]; + static const uint8_t s_vnfOpAttribs[]; // Returns "true" iff gtOper is a legal value number function. // (Requires InitValueNumStoreStatics to have been run.) @@ -355,18 +356,18 @@ class ValueNumStore public: // Given an constant value number return its value. - int GetConstantInt32(ValueNum argVN); - INT64 GetConstantInt64(ValueNum argVN); + int GetConstantInt32(ValueNum argVN); + INT64 GetConstantInt64(ValueNum argVN); double GetConstantDouble(ValueNum argVN); - float GetConstantSingle(ValueNum argVN); + float GetConstantSingle(ValueNum argVN); #if defined(FEATURE_SIMD) - simd8_t GetConstantSimd8(ValueNum argVN); + simd8_t GetConstantSimd8(ValueNum argVN); simd12_t GetConstantSimd12(ValueNum argVN); simd16_t GetConstantSimd16(ValueNum argVN); #if defined(TARGET_XARCH) - simd32_t GetConstantSimd32(ValueNum argVN); - simd64_t GetConstantSimd64(ValueNum argVN); + simd32_t GetConstantSimd32(ValueNum argVN); + simd64_t GetConstantSimd64(ValueNum argVN); simdmask_t GetConstantSimdMask(ValueNum argVN); #endif // TARGET_XARCH #endif // FEATURE_SIMD @@ -560,7 +561,7 @@ class ValueNumStore // Create or return the existimg value number representing a singleton exception set // for the exception value "x". - ValueNum VNExcSetSingleton(ValueNum x); + ValueNum VNExcSetSingleton(ValueNum x); ValueNumPair VNPExcSetSingleton(ValueNumPair x); // Returns true if the current pair of items are in ascending order and they are not duplicates. @@ -814,7 +815,7 @@ class ValueNumStore return ValueNumPair(liberalFuncVN, conservativeFuncVN); } - ValueNum VNForExpr(BasicBlock* block, var_types type = TYP_UNKNOWN); + ValueNum VNForExpr(BasicBlock* block, var_types type = TYP_UNKNOWN); ValueNumPair VNPairForExpr(BasicBlock* block, var_types type); // This controls extra tracing of the "evaluation" of "VNF_MapSelect" functions. 
@@ -916,7 +917,10 @@ class ValueNumStore ValueNum vnIdx; ValueNum vnBound; - UnsignedCompareCheckedBoundInfo() : cmpOper(GT_NONE), vnIdx(NoVN), vnBound(NoVN) + UnsignedCompareCheckedBoundInfo() + : cmpOper(GT_NONE) + , vnIdx(NoVN) + , vnBound(NoVN) { } }; @@ -930,7 +934,12 @@ class ValueNumStore ValueNum arrOp; unsigned cmpOper; ValueNum cmpOp; - CompareCheckedBoundArithInfo() : vnBound(NoVN), arrOper(GT_NONE), arrOp(NoVN), cmpOper(GT_NONE), cmpOp(NoVN) + CompareCheckedBoundArithInfo() + : vnBound(NoVN) + , arrOper(GT_NONE) + , arrOp(NoVN) + , cmpOper(GT_NONE) + , cmpOp(NoVN) { } #ifdef DEBUG @@ -958,7 +967,11 @@ class ValueNumStore ValueNum cmpOpVN; bool isUnsigned; - ConstantBoundInfo() : constVal(0), cmpOper(GT_NONE), cmpOpVN(NoVN), isUnsigned(false) + ConstantBoundInfo() + : constVal(0) + , cmpOper(GT_NONE) + , cmpOpVN(NoVN) + , isUnsigned(false) { } @@ -1307,7 +1320,8 @@ class ValueNumStore VNFunc m_func; ValueNum m_args[NumArgs]; - VNDefFuncApp() : m_func(VNF_COUNT) + VNDefFuncApp() + : m_func(VNF_COUNT) { for (size_t i = 0; i < NumArgs; i++) { @@ -1316,7 +1330,9 @@ class ValueNumStore } template - VNDefFuncApp(VNFunc func, VNs... vns) : m_func(func), m_args{vns...} + VNDefFuncApp(VNFunc func, VNs... vns) + : m_func(func) + , m_args{vns...} { static_assert_no_msg(NumArgs == sizeof...(VNs)); } @@ -1477,7 +1493,7 @@ class ValueNumStore static const int SmallIntConstMin = -1; static const int SmallIntConstMax = 10; static const unsigned SmallIntConstNum = SmallIntConstMax - SmallIntConstMin + 1; - static bool IsSmallIntConst(int i) + static bool IsSmallIntConst(int i) { return SmallIntConstMin <= i && i <= SmallIntConstMax; } @@ -1487,7 +1503,9 @@ class ValueNumStore { ValueNum vn; ValueNumList* next; - ValueNumList(const ValueNum& v, ValueNumList* n = nullptr) : vn(v), next(n) + ValueNumList(const ValueNum& v, ValueNumList* n = nullptr) + : vn(v) + , next(n) { } }; @@ -1518,8 +1536,8 @@ class ValueNumStore } typedef VNMap HandleToValueNumMap; - HandleToValueNumMap* m_handleMap; - HandleToValueNumMap* GetHandleMap() + HandleToValueNumMap* m_handleMap; + HandleToValueNumMap* GetHandleMap() { if (m_handleMap == nullptr) { @@ -1529,10 +1547,10 @@ class ValueNumStore } typedef SmallHashTable EmbeddedToCompileTimeHandleMap; - EmbeddedToCompileTimeHandleMap m_embeddedToCompileTimeHandleMap; + EmbeddedToCompileTimeHandleMap m_embeddedToCompileTimeHandleMap; typedef SmallHashTable FieldAddressToFieldSeqMap; - FieldAddressToFieldSeqMap m_fieldAddressToFieldSeqMap; + FieldAddressToFieldSeqMap m_fieldAddressToFieldSeqMap; struct LargePrimitiveKeyFuncsFloat : public JitLargePrimitiveKeyFuncs { @@ -1543,8 +1561,8 @@ class ValueNumStore }; typedef VNMap FloatToValueNumMap; - FloatToValueNumMap* m_floatCnsMap; - FloatToValueNumMap* GetFloatCnsMap() + FloatToValueNumMap* m_floatCnsMap; + FloatToValueNumMap* GetFloatCnsMap() { if (m_floatCnsMap == nullptr) { @@ -1563,8 +1581,8 @@ class ValueNumStore }; typedef VNMap DoubleToValueNumMap; - DoubleToValueNumMap* m_doubleCnsMap; - DoubleToValueNumMap* GetDoubleCnsMap() + DoubleToValueNumMap* m_doubleCnsMap; + DoubleToValueNumMap* GetDoubleCnsMap() { if (m_doubleCnsMap == nullptr) { @@ -1604,8 +1622,8 @@ class ValueNumStore }; typedef VNMap Simd8ToValueNumMap; - Simd8ToValueNumMap* m_simd8CnsMap; - Simd8ToValueNumMap* GetSimd8CnsMap() + Simd8ToValueNumMap* m_simd8CnsMap; + Simd8ToValueNumMap* GetSimd8CnsMap() { if (m_simd8CnsMap == nullptr) { @@ -1634,8 +1652,8 @@ class ValueNumStore }; typedef VNMap Simd12ToValueNumMap; - Simd12ToValueNumMap* 
m_simd12CnsMap; - Simd12ToValueNumMap* GetSimd12CnsMap() + Simd12ToValueNumMap* m_simd12CnsMap; + Simd12ToValueNumMap* GetSimd12CnsMap() { if (m_simd12CnsMap == nullptr) { @@ -1665,8 +1683,8 @@ class ValueNumStore }; typedef VNMap Simd16ToValueNumMap; - Simd16ToValueNumMap* m_simd16CnsMap; - Simd16ToValueNumMap* GetSimd16CnsMap() + Simd16ToValueNumMap* m_simd16CnsMap; + Simd16ToValueNumMap* GetSimd16CnsMap() { if (m_simd16CnsMap == nullptr) { @@ -1701,8 +1719,8 @@ class ValueNumStore }; typedef VNMap Simd32ToValueNumMap; - Simd32ToValueNumMap* m_simd32CnsMap; - Simd32ToValueNumMap* GetSimd32CnsMap() + Simd32ToValueNumMap* m_simd32CnsMap; + Simd32ToValueNumMap* GetSimd32CnsMap() { if (m_simd32CnsMap == nullptr) { @@ -1744,8 +1762,8 @@ class ValueNumStore }; typedef VNMap Simd64ToValueNumMap; - Simd64ToValueNumMap* m_simd64CnsMap; - Simd64ToValueNumMap* GetSimd64CnsMap() + Simd64ToValueNumMap* m_simd64CnsMap; + Simd64ToValueNumMap* GetSimd64CnsMap() { if (m_simd64CnsMap == nullptr) { @@ -1773,8 +1791,8 @@ class ValueNumStore }; typedef VNMap SimdMaskToValueNumMap; - SimdMaskToValueNumMap* m_simdMaskCnsMap; - SimdMaskToValueNumMap* GetSimdMaskCnsMap() + SimdMaskToValueNumMap* m_simdMaskCnsMap; + SimdMaskToValueNumMap* GetSimdMaskCnsMap() { if (m_simdMaskCnsMap == nullptr) { @@ -1813,8 +1831,8 @@ class ValueNumStore } typedef VNMap, VNDefFuncAppKeyFuncs<1>> VNFunc1ToValueNumMap; - VNFunc1ToValueNumMap* m_VNFunc1Map; - VNFunc1ToValueNumMap* GetVNFunc1Map() + VNFunc1ToValueNumMap* m_VNFunc1Map; + VNFunc1ToValueNumMap* GetVNFunc1Map() { if (m_VNFunc1Map == nullptr) { @@ -1824,8 +1842,8 @@ class ValueNumStore } typedef VNMap, VNDefFuncAppKeyFuncs<2>> VNFunc2ToValueNumMap; - VNFunc2ToValueNumMap* m_VNFunc2Map; - VNFunc2ToValueNumMap* GetVNFunc2Map() + VNFunc2ToValueNumMap* m_VNFunc2Map; + VNFunc2ToValueNumMap* GetVNFunc2Map() { if (m_VNFunc2Map == nullptr) { @@ -1835,8 +1853,8 @@ class ValueNumStore } typedef VNMap, VNDefFuncAppKeyFuncs<3>> VNFunc3ToValueNumMap; - VNFunc3ToValueNumMap* m_VNFunc3Map; - VNFunc3ToValueNumMap* GetVNFunc3Map() + VNFunc3ToValueNumMap* m_VNFunc3Map; + VNFunc3ToValueNumMap* GetVNFunc3Map() { if (m_VNFunc3Map == nullptr) { @@ -1846,8 +1864,8 @@ class ValueNumStore } typedef VNMap, VNDefFuncAppKeyFuncs<4>> VNFunc4ToValueNumMap; - VNFunc4ToValueNumMap* m_VNFunc4Map; - VNFunc4ToValueNumMap* GetVNFunc4Map() + VNFunc4ToValueNumMap* m_VNFunc4Map; + VNFunc4ToValueNumMap* GetVNFunc4Map() { if (m_VNFunc4Map == nullptr) { @@ -1858,7 +1876,8 @@ class ValueNumStore class MapSelectWorkCacheEntry { - union { + union + { ValueNum* m_memoryDependencies; ValueNum m_inlineMemoryDependencies[sizeof(ValueNum*) / sizeof(ValueNum)]; }; diff --git a/src/coreclr/jit/valuenumtype.h b/src/coreclr/jit/valuenumtype.h index 2eb3254e3e18b..e41db97267543 100644 --- a/src/coreclr/jit/valuenumtype.h +++ b/src/coreclr/jit/valuenumtype.h @@ -115,7 +115,9 @@ struct ValueNumPair // Initializes both elements to "NoVN". Defined in ValueNum.cpp. 
ValueNumPair(); - ValueNumPair(ValueNum lib, ValueNum cons) : m_liberal(lib), m_conservative(cons) + ValueNumPair(ValueNum lib, ValueNum cons) + : m_liberal(lib) + , m_conservative(cons) { } diff --git a/src/coreclr/jit/varset.h b/src/coreclr/jit/varset.h index 465ab146cbaca..b9e4cab1a0c45 100644 --- a/src/coreclr/jit/varset.h +++ b/src/coreclr/jit/varset.h @@ -108,7 +108,7 @@ typedef BitSetOpsWithCounter VarSetOps; #else -typedef VarSetOpsRaw VarSetOps; +typedef VarSetOpsRaw VarSetOps; #endif #define ALLVARSET_REP BSShortLong diff --git a/src/coreclr/jit/vartype.h b/src/coreclr/jit/vartype.h index 1623addb69b07..642ab15936035 100644 --- a/src/coreclr/jit/vartype.h +++ b/src/coreclr/jit/vartype.h @@ -225,7 +225,7 @@ inline bool varTypeIsIntOrI(T vt) #ifdef TARGET_64BIT || (TypeGet(vt) == TYP_I_IMPL) #endif // TARGET_64BIT - ); + ); } template @@ -321,11 +321,11 @@ inline bool varTypeUsesFloatReg(T vt) template inline bool varTypeUsesMaskReg(T vt) { -// The technically correct check is: -// return varTypeRegister[TypeGet(vt)] == VTR_MASK; -// -// However, we only have one type that uses VTR_MASK today -// and so its quite a bit cheaper to just check that directly + // The technically correct check is: + // return varTypeRegister[TypeGet(vt)] == VTR_MASK; + // + // However, we only have one type that uses VTR_MASK today + // and so its quite a bit cheaper to just check that directly #if defined(FEATURE_SIMD) && (defined(TARGET_XARCH) || defined(TARGET_ARM64)) assert((TypeGet(vt) == TYP_MASK) || (varTypeRegister[TypeGet(vt)] != VTR_MASK));