diff --git a/Net/Net_vs150.vcxproj b/Net/Net_vs150.vcxproj
index 65bf927cba9..dae8fe7d1ed 100644
--- a/Net/Net_vs150.vcxproj
+++ b/Net/Net_vs150.vcxproj
@@ -157,7 +157,7 @@
- <_ProjectFileVersion>16.0.32002.118
+ <_ProjectFileVersion>16.0.32602.291
PocoNetd
PocoNetmdd
PocoNetmtd
@@ -663,6 +663,7 @@
+
@@ -983,6 +984,9 @@
true
+
+ true
+
diff --git a/Net/Net_vs150.vcxproj.filters b/Net/Net_vs150.vcxproj.filters
index 684f4537bdb..9e13392d50d 100644
--- a/Net/Net_vs150.vcxproj.filters
+++ b/Net/Net_vs150.vcxproj.filters
@@ -2,166 +2,166 @@
- {8508f58c-3fb5-4d69-87a3-2c8482d06074}
+ {b8c63d64-5b02-433d-bef2-027cccbd58d8}
- {ed13593e-8ef5-4095-b19f-7fc285eab593}
+ {92d9b6d8-55d3-4d7d-a134-0f1ad80d0aba}
- {e5a47c76-1ca1-4851-bfa8-46c9382064df}
+ {9316ac69-9fb1-44e7-81ef-bb6cc8786c29}
- {314468ce-eec5-455d-acfa-4931c2933a6c}
+ {073a6f9f-77d0-4014-a35f-bc8f6fa77b3f}
- {5f1e8a1a-be59-400a-bcb3-789083996e51}
+ {de56c23c-6fc8-4a27-8f52-e3c882cc893e}
- {0ede78f6-a2d1-49b2-a863-50a53c83ec9f}
+ {b7a6e20b-0d70-4ad5-9d3c-667985ce6365}
- {fad03a2a-6d88-44d0-ba95-f2f7548e0999}
+ {6e027a83-7a5e-405b-95ea-b1a961affdfc}
- {c2268dc6-1ade-45d0-81da-904b91c8a6ec}
+ {b6c57ff9-ba08-40b0-8439-439ad34b4c99}
- {c6cdf518-af9a-4389-8897-22d2dd92dc78}
+ {b20c4351-21ad-44f2-a7a2-56df5db2ec20}
- {d9117b4a-9a8e-47e5-b07f-1e84102bfb9d}
+ {81597908-7722-466f-a15c-91375b842121}
- {9063d481-d1da-4532-8fcd-16fa74da9787}
+ {387094b4-54e3-47ef-97b7-1a8eb4820748}
- {366ffb86-c38c-43ac-8aea-5604ccc339a2}
+ {19deb356-b16f-4150-988f-87fcf7dcc4e7}
- {7a3f9cdb-959b-4669-8eaa-43281dfb3d6d}
+ {3908d4a8-ea46-451e-8331-c25835d68dcc}
- {1f1df559-bcd9-4637-a647-980fabd6d333}
+ {c5b695fc-0128-4175-8092-af37c6445e46}
- {6f05d453-cc20-4492-b48d-f1a1a479d8f5}
+ {193d479d-63e2-4108-b289-aef66c9bd341}
- {ea5f87b3-9366-40a7-aad6-ac0cee734746}
+ {1c89c733-c23e-4d5d-b28b-b5bca77fe5a3}
- {6c31788f-799c-4186-ae5e-fb5efee9ec7b}
+ {750bd091-a4bb-40e0-b799-60d986771eb6}
- {49f79eca-489a-4e41-9cb4-5cb3efa345ce}
+ {f21884f2-7b88-410a-bda0-bb3906d14132}
- {9b6ecadc-87b6-4e76-b302-5cd1e9339e53}
+ {8725aa4a-c95b-4170-8694-bbe43fa449cc}
- {71d351ab-d39b-402f-bfff-34f98d6e65f3}
+ {e0a96c6f-365b-4f3b-9831-c2f3736410dc}
- {1d2f2aba-f9c7-4edc-b302-a3143ee6a63f}
+ {c1258b82-48bb-413f-a640-6a5f3b8009b6}
- {8363348d-3499-445f-93f2-acb1d34db1ee}
+ {34a48a2c-e7e4-4907-ab72-e6ad39204201}
- {44711a22-ed48-4d62-b6c3-203b4312dc6a}
+ {847d9163-1d13-4a8a-9ab7-b877f7d03f32}
- {c1d81fd5-48dd-42f9-8f95-fe1300a5f9b6}
+ {9d1c2935-657a-4038-9982-348ceb1728a0}
- {ab0016f0-7810-44bf-a62d-721ee58c9a42}
+ {7ef89cb9-b2a4-4b45-bd66-9ee4b741dde0}
- {12a8ade4-619a-4d42-8b38-84d60997b8cf}
+ {24388354-6762-4736-8a60-66e72fa9d28d}
- {2a6a25f3-90c1-4ca9-b46a-d1e4bdffa6e0}
+ {66f6abab-86ea-4551-acd7-2f35ba187396}
- {6d2e622f-8236-4443-a280-4425af2dc018}
+ {e4e70d92-bf3b-4d0e-a8d6-3129c42bcfc3}
- {1b7044c7-9e8f-47c6-8d63-cd426aea4224}
+ {77b7f740-8919-4d45-823e-b31a76c0213a}
- {df87c970-043d-4654-9719-478910cc1dd6}
+ {db13d11d-2e27-42ae-b571-1a315582d581}
- {ee898654-6a57-4836-84d3-020fe8691181}
+ {a5dde971-c63a-4b13-a8b9-6177cd747956}
- {4a538045-ae84-4017-983b-b6b037434619}
+ {838c31dc-3903-4bce-a15d-6bb23f8d7069}
- {7ff1433e-13fa-4335-975a-7b3a1c51bfd7}
+ {7bcf2803-51fa-44f7-a584-853a12046a59}
- {12bf45e4-01e8-47ac-a2fd-85801e0bd515}
+ {74eaa622-ad33-4b56-bc28-a86b178e65c9}
- {a0a92bd4-b801-49d2-8743-f36f6bc971cb}
+ {c32f0bf7-5572-413b-b010-2c5e240ccb61}
- {53fd855e-d66d-46a8-8790-0dd5f6d7b32b}
+ {daafe222-ddeb-4595-a51b-eede70483ae0}
- {f096a4aa-4e73-402d-869b-f66562678d66}
+ {e8684710-074f-4497-a57b-6f5423287575}
- {ee7e3c59-2630-452d-ad3e-52f28a738e40}
+ {6416e74a-35f1-49e9-8c2f-946f5e61b3e4}
- {125f3f04-e591-4f38-9f73-250362647b50}
+ {7644a59a-4df5-4943-a112-07c9741884b2}
- {1382c0c6-f61a-48b3-87df-83e495bb0e58}
+ {524820f5-3aae-49b4-a777-ce253665aed0}
- {69c9f1c1-ab24-48fd-a560-3b1cbadf0a72}
+ {753d9aff-c451-4781-b334-a19caae19dc2}
- {c9a36242-f6cd-4138-812c-6ad76264bbe7}
+ {197b3bf9-1b52-4412-8522-672158419798}
- {07a93113-2a00-4807-a3e2-47c86506a9b6}
+ {392872f0-bcbd-4632-bbc1-ec1911a22287}
- {69b7dc5d-1585-4641-9065-f0f03f42a3d8}
+ {53e59c70-7d9a-48bf-a1bb-abbc34863678}
- {f321a8e4-e677-466c-abec-f6939f9c80df}
+ {cb8c9840-7cb2-40cc-bf52-f0c13607e682}
- {f7d6ecfd-b045-438e-b715-b7355fb4cf5b}
+ {7cee0380-df5f-402e-92d9-06bf6c19b5fd}
- {3238c255-6ee3-486a-8e3d-9bcb8f22b3a0}
+ {879eb00b-446c-4197-b206-4b69e9df956e}
- {f8668051-3cfe-424e-b417-82a88370c8c2}
+ {b2809bf3-ec33-4d71-8364-6946edc60179}
- {ac68c9fa-0fa8-41e1-a993-72067654866b}
+ {86f6f016-042f-4f9b-988b-42a060d39780}
- {98fa619f-f7f8-45d8-9d6b-e50b111b77c4}
+ {07cb0252-f4c0-4124-a592-f75e6a5f0772}
- {d648920e-76fc-4d3d-8337-64a95f751ab3}
+ {4843933d-0a02-441b-a35f-caecb581dad9}
- {1241adb6-6ae2-454b-82aa-46f8f460b141}
+ {2a30768e-effb-48cb-ad87-e58f84452d00}
- {74814059-de13-47bc-9519-80f59cdb3325}
+ {68c86030-86bd-4b54-ad81-f690c1c522a0}
- {7886d8d8-6c25-4523-861a-b0f9ab85b5aa}
+ {972ff0f7-abf3-4137-88b4-ce7d3ff774e8}
@@ -237,6 +237,9 @@
Sockets\Header Files
+
+ Sockets\Header Files
+
Messages\Header Files
@@ -587,6 +590,9 @@
Sockets\Source Files
+
+ Sockets\Source Files
+
Messages\Source Files
diff --git a/Net/Net_vs160.vcxproj b/Net/Net_vs160.vcxproj
index 51092d0f619..c2dff6f6a72 100644
--- a/Net/Net_vs160.vcxproj
+++ b/Net/Net_vs160.vcxproj
@@ -157,7 +157,7 @@
- <_ProjectFileVersion>16.0.32002.118
+ <_ProjectFileVersion>16.0.32602.291
PocoNetd
PocoNetmdd
PocoNetmtd
@@ -663,6 +663,7 @@
+
@@ -983,6 +984,9 @@
true
+
+ true
+
diff --git a/Net/Net_vs160.vcxproj.filters b/Net/Net_vs160.vcxproj.filters
index d02b1585177..9a45287d157 100644
--- a/Net/Net_vs160.vcxproj.filters
+++ b/Net/Net_vs160.vcxproj.filters
@@ -2,166 +2,166 @@
- {402148df-1810-4d87-a41d-c73539eeaf77}
+ {9a2c8a82-eb0e-4d8c-b33b-fe489515e400}
- {d7b25211-c15e-4f0f-b59c-093d798f8a72}
+ {d81bee8f-10fd-4958-b1d2-284a9c6e491a}
- {1ad3017c-7bef-4f41-9cc1-d8b974a27bbe}
+ {58bedd56-0c49-4329-909a-b99e3ce82aca}
- {6e94bb39-e1da-49bf-aa66-bab24f593c8f}
+ {e4095762-3214-4809-b3f6-493d89941884}
- {8ed0098e-a47f-41ad-b4fe-9c89d8922c2d}
+ {a173e87d-5573-455f-8a8c-74fc78823372}
- {729ab1f0-ab39-47c9-8766-7df6d56edaaf}
+ {bd44ff9f-79b9-44c4-a91e-f479d7237418}
- {ecdec0fc-16e3-495f-95d0-2388c34cedcb}
+ {73c6b732-c774-45be-b36a-3f0b24790461}
- {a8abfb8b-636f-4f99-a92a-053ea7cc691d}
+ {fa1379d4-455d-4a8c-b018-8e0550f606cf}
- {4043898e-b26c-4315-a624-025217a1e251}
+ {0fa10301-bbc1-4b8d-bf7c-b3bc33697d8b}
- {61cb93b4-2823-450f-81a1-a6055c99ba6c}
+ {9ab3bdb0-d44b-4337-9f33-981df8b7ebfc}
- {010de15b-3ee4-43f2-8bf2-028f0ace569f}
+ {9691abd4-e708-45e8-b474-ea967318b49f}
- {5a0daeb7-4f18-4bc3-8717-9814f8928d90}
+ {74582044-e142-42d6-8ff2-19a138577086}
- {288eb163-621d-426e-9142-8bd76969efa7}
+ {10318be3-a741-4443-816d-1d5374f7e3bf}
- {8377bf46-27f1-4a7b-8235-aadd080774c4}
+ {40eb84df-752d-439d-bc10-59ec9692fa3f}
- {b7d5c4d5-74f1-4ad9-aaf0-4302042766d7}
+ {d87aaf33-f7eb-4684-99c7-764a09a6ca5b}
- {7cc4c6b7-5321-4b59-b118-f258571eda13}
+ {a3cc9f7a-b98a-4c99-b349-7688db5c557e}
- {c5fd5cf8-846e-4a7d-af3d-ed1d7fadb25f}
+ {54609c9a-4d21-4b2b-b4f4-63269cbe1579}
- {b00197e9-ef09-4fd4-9695-7cb89bd0cadc}
+ {3ded7039-2cd9-4369-b08c-d4afdae2d7ce}
- {1d947826-d4aa-418b-b982-c731e1878b43}
+ {3428a473-84d5-4cdd-aa83-8a714655f4b4}
- {042479be-24c4-4977-9ce0-1c157e00f8c1}
+ {9d47c366-84e8-407a-b02a-bdf1c7aec77d}
- {e1fa2656-e63a-486e-a000-3e1173b0019b}
+ {90eb1556-ca29-4183-948f-a636168cb39b}
- {62682b9e-f622-45b1-861d-9d9da51ab87d}
+ {1fa4c202-2dc8-454a-ab13-35057bc7f4e7}
- {494ee506-0f56-4e44-93e3-9f21e7a77a38}
+ {89806bbd-bac6-46e9-a9df-bfdb5d7ea1ea}
- {66c9cfdb-2542-46be-8f23-512b37e83432}
+ {30bed2b9-f18d-41af-8305-9ea82e23de03}
- {38b8105f-36cb-4bc4-8884-5020e2a3c028}
+ {745f2738-08bc-41a5-97d8-a95d0f856cab}
- {6a64e643-e97e-4415-b3c5-be4a80405912}
+ {58687afd-23e7-4e95-a3b3-e959e8e3114e}
- {f64a10af-6e42-4645-8f98-3482fd7766f6}
+ {45229f00-7870-4788-ae11-7e8d0d77894b}
- {8059b691-1b7e-4a0b-b96e-e15c1d9517ac}
+ {efbbd6c7-781c-4b3d-a702-8553a0704492}
- {80feb7dc-c893-4799-a143-2f36648f1bab}
+ {98e38235-6e6d-415f-aaa5-e1a28476c884}
- {7a524320-d713-495c-86db-6672632c5aa6}
+ {39e76b38-81d6-4eda-a325-22861608692e}
- {99de76a7-fe6b-4e1d-8cf3-be76ca0b512d}
+ {898b6fe1-1b80-44e4-92c8-f84f12a34dec}
- {5bbb1bd8-d7a4-4d10-ab14-63cafef6f612}
+ {89a98188-7956-4f20-9ae7-b62204706cdb}
- {9b640510-2c67-457f-9f00-ec9654e6af3a}
+ {b2e08e75-2e3f-4601-b736-e5c513434bbe}
- {c2245dba-8bf4-4ffc-83c9-913db49fa34d}
+ {d740cc8f-d9ea-43fe-a691-c8643a3e31e1}
- {2a4a8750-284b-40eb-9c11-6aa13b0864c8}
+ {09bc7880-eb27-49c0-83d0-5f66af27990f}
- {93327379-9b7e-43d2-92b0-ceab04c041e1}
+ {b44b965f-79ca-4fac-9bc5-61156b14bb1e}
- {d2058c83-1230-481c-9cc7-239c24e3fcee}
+ {cb11e2d8-4b26-4d4e-8150-dd9e305ca351}
- {3ac5bf39-2024-4835-b904-4f0055a8b099}
+ {c863e4e4-ae9a-4808-a13e-fd5949b873cd}
- {d854d7dc-6a74-49e0-8fe0-b1788ee510d2}
+ {f56c75b7-c685-4781-92d4-93af04a95691}
- {2dae2b61-9fb8-48d7-ac36-54848e6dc3b5}
+ {358096bf-3545-4fb5-992f-b82983f71277}
- {7c9114dd-b8f6-438d-b165-fb01b7f0ca95}
+ {bf6385d0-e71f-4960-aeeb-1a43bb49b46e}
- {f05fd66a-5747-41f6-9ec9-6ae7e5850eff}
+ {367bf4e1-7cb3-4994-90c7-fc665e6a6a4b}
- {5fad2fc1-b98b-4b4c-ac12-a9bea7001e77}
+ {e67593cf-b2b1-498a-aa31-e6be0fb44c55}
- {fa33e5eb-ee72-494a-89f2-7ef26eac63a1}
+ {2152c458-b313-41f8-a969-69bf6606816a}
- {472030c3-9bb7-4fd6-b65b-9e2ef683d58a}
+ {5aa91948-1749-4de9-92b8-243a32290ef3}
- {bdd08486-fbfb-48fc-ba3a-7dc44df5ce3b}
+ {cc8371c6-5e1a-4934-a932-d73adf69a167}
- {09297207-28d3-40b2-9e20-6fd9c7d84c34}
+ {d7906815-ab17-49ca-85ae-682c7fdff3ea}
- {bfdbeb4e-fb50-4c94-a579-3095e55af174}
+ {391215de-5821-4154-9cea-85fbe91270b5}
- {0a00644c-8a92-475c-aa79-b02581f63bd8}
+ {eb70aa6b-2fac-46cc-99d4-673b50a4741f}
- {c181f9f4-5725-4d15-8dac-059ec06fe660}
+ {736d8534-16a7-4677-99e9-c330e6e952d6}
- {b051aceb-84c2-4385-a22a-cd77702efa52}
+ {40ae99ac-68e5-4558-936a-0439691eaa36}
- {d4e799db-92df-48b1-a820-ea264e118e57}
+ {6f05ce95-be6d-4c15-a33c-a2b9ab7f5aa0}
- {3ef21170-bef8-4a68-9422-da4c7bdcec3f}
+ {67b8971e-ac91-4b1e-98fe-27578f06ab42}
- {eabff0fa-1618-4cf0-a0a2-f2c580b9851e}
+ {abc18680-0a87-40c6-8d41-f07ee36f44cd}
@@ -237,6 +237,9 @@
Sockets\Header Files
+
+ Sockets\Header Files
+
Messages\Header Files
@@ -587,6 +590,9 @@
Sockets\Source Files
+
+ Sockets\Source Files
+
Messages\Source Files
diff --git a/Net/Net_vs170.vcxproj b/Net/Net_vs170.vcxproj
index 8999e123b41..0db667cf892 100644
--- a/Net/Net_vs170.vcxproj
+++ b/Net/Net_vs170.vcxproj
@@ -1,10 +1,6 @@
-
+
-
- debug_shared
- ARM64
-
debug_shared
Win32
@@ -13,10 +9,6 @@
debug_shared
x64
-
- debug_static_md
- ARM64
-
debug_static_md
Win32
@@ -25,10 +17,6 @@
debug_static_md
x64
-
- debug_static_mt
- ARM64
-
debug_static_mt
Win32
@@ -37,10 +25,6 @@
debug_static_mt
x64
-
- release_shared
- ARM64
-
release_shared
Win32
@@ -49,10 +33,6 @@
release_shared
x64
-
- release_static_md
- ARM64
-
release_static_md
Win32
@@ -61,10 +41,6 @@
release_static_md
x64
-
- release_static_mt
- ARM64
-
release_static_mt
Win32
@@ -75,7 +51,6 @@
- 17.0
Net
{B057A1FE-09F7-465E-B8B5-E1B659051D76}
Net
@@ -112,36 +87,6 @@
MultiByte
v143
-
- StaticLibrary
- MultiByte
- v143
-
-
- StaticLibrary
- MultiByte
- v143
-
-
- StaticLibrary
- MultiByte
- v143
-
-
- StaticLibrary
- MultiByte
- v143
-
-
- DynamicLibrary
- MultiByte
- v143
-
-
- DynamicLibrary
- MultiByte
- v143
-
StaticLibrary
MultiByte
@@ -192,24 +137,6 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
@@ -230,7 +157,7 @@
- <_ProjectFileVersion>17.0.32505.173
+ <_ProjectFileVersion>16.0.32602.291
PocoNetA64d
PocoNetmdd
PocoNetmtd
@@ -250,32 +177,6 @@
PocoNetmd
PocoNetmt
-
- ..\binA64\
- objA64\Net\$(Configuration)\
- true
-
-
- ..\binA64\
- objA64\Net\$(Configuration)\
- false
-
-
- ..\libA64\
- objA64\Net\$(Configuration)\
-
-
- ..\libA64\
- objA64\Net\$(Configuration)\
-
-
- ..\libA64\
- objA64\Net\$(Configuration)\
-
-
- ..\libA64\
- objA64\Net\$(Configuration)\
-
..\bin\
obj\Net\$(Configuration)\
@@ -328,166 +229,6 @@
..\lib64\
obj64\Net\$(Configuration)\
-
-
- Disabled
- .\include;..\Foundation\include;%(AdditionalIncludeDirectories)
- WIN32;_DEBUG;_WINDOWS;_USRDLL;Net_EXPORTS;%(PreprocessorDefinitions)
- true
- EnableFastChecks
- MultiThreadedDebugDLL
- true
- true
- true
- true
-
- Level3
- ProgramDatabase
- Default
- true
-
-
- ws2_32.lib;iphlpapi.lib;%(AdditionalDependencies)
- ..\binA64\PocoNetA64d.dll
- true
- true
- ..\binA64\PocoNetA64d.pdb
- ..\libA64;%(AdditionalLibraryDirectories)
- Console
- ..\libA64\PocoNetd.lib
- MachineARM64
-
-
-
-
- MaxSpeed
- OnlyExplicitInline
- true
- Speed
- true
- .\include;..\Foundation\include;%(AdditionalIncludeDirectories)
- WIN32;NDEBUG;_WINDOWS;_USRDLL;Net_EXPORTS;%(PreprocessorDefinitions)
- true
- MultiThreadedDLL
- false
- true
- true
- true
-
- Level3
-
- Default
- true
-
-
- ws2_32.lib;iphlpapi.lib;%(AdditionalDependencies)
- ..\binA64\PocoNetA64.dll
- true
- false
- ..\libA64;%(AdditionalLibraryDirectories)
- Console
- true
- true
- ..\libA64\PocoNet.lib
- MachineARM64
-
-
-
-
- Disabled
- .\include;..\Foundation\include;%(AdditionalIncludeDirectories)
- WIN32;_DEBUG;_WINDOWS;POCO_STATIC;%(PreprocessorDefinitions)
- true
- EnableFastChecks
- MultiThreadedDebug
- true
- true
- true
- true
-
- ..\libA64\PocoNetmtd.pdb
- Level3
- ProgramDatabase
- Default
- true
-
-
- ..\libA64\PocoNetmtd.lib
-
-
-
-
- MaxSpeed
- OnlyExplicitInline
- true
- Speed
- true
- .\include;..\Foundation\include;%(AdditionalIncludeDirectories)
- WIN32;NDEBUG;_WINDOWS;POCO_STATIC;%(PreprocessorDefinitions)
- true
- MultiThreaded
- false
- true
- true
- true
-
- Level3
-
- Default
- true
-
-
- ..\libA64\PocoNetmt.lib
-
-
-
-
- Disabled
- .\include;..\Foundation\include;%(AdditionalIncludeDirectories)
- WIN32;_DEBUG;_WINDOWS;POCO_STATIC;%(PreprocessorDefinitions)
- true
- EnableFastChecks
- MultiThreadedDebugDLL
- true
- true
- true
- true
-
- ..\libA64\PocoNetmdd.pdb
- Level3
- ProgramDatabase
- Default
- true
-
-
- ..\libA64\PocoNetmdd.lib
-
-
-
-
- MaxSpeed
- OnlyExplicitInline
- true
- Speed
- true
- .\include;..\Foundation\include;%(AdditionalIncludeDirectories)
- WIN32;NDEBUG;_WINDOWS;POCO_STATIC;%(PreprocessorDefinitions)
- true
- MultiThreadedDLL
- false
- true
- true
- true
-
- Level3
-
- Default
- true
-
-
- ..\libA64\PocoNetmd.lib
-
-
Disabled
@@ -928,6 +669,7 @@
+
@@ -1248,19 +990,18 @@
true
+
+ true
+
- true
true
true
- true
true
true
- true
true
true
- true
true
true
diff --git a/Net/Net_vs170.vcxproj.filters b/Net/Net_vs170.vcxproj.filters
index e8681c8e47a..f0e0559e270 100644
--- a/Net/Net_vs170.vcxproj.filters
+++ b/Net/Net_vs170.vcxproj.filters
@@ -2,166 +2,166 @@
- {911f56bb-049d-4d4d-a0d5-7f938744fc02}
+ {f58d9efa-d05a-48e1-8aec-a97010b7b822}
- {b3a5feb8-1e61-4684-93b2-e6e2729f8f59}
+ {84c114ac-91dc-49ca-9cfc-a9d048ef75ab}
- {acb399c5-f26f-47fa-ae8b-2570e98fc6b9}
+ {89a92b00-4da1-4576-99f9-e854801a8ad6}
- {0d3e56ff-4aae-41e7-a383-c640af227a92}
+ {ad0416ac-e596-4a02-9589-bc151cfe70ee}
- {a33c960c-5992-4770-9228-d803934a6f7e}
+ {5103d67b-7236-4d64-bb35-fb489002f48a}
- {035f2f4c-6b45-45b6-92ad-80609486fb63}
+ {2a884a8c-b06f-4d4e-a938-f76fe49e788a}
- {801e176c-77da-4180-91bf-fbe0ba5777b9}
+ {5faaf052-d668-4ae8-852d-ee09708052db}
- {8398e24c-7330-4b20-a610-52070c69f3b3}
+ {4e4f124d-7dce-4050-9f20-bb0ea5ebcbdd}
- {2d6e12d0-4024-4fe6-9b2f-c174efe3cea0}
+ {77144e7f-57ea-4ab8-9486-64f5a5c0ab19}
- {1b40f490-ba0b-4f0b-8def-9270a19b4aa6}
+ {f097d0e8-6248-491d-bc6f-bea8c4c906a5}
- {df2c3e2b-730a-4c92-b084-ba4d680bb196}
+ {1fa052d4-a3ed-4d0c-932d-53849af37a08}
- {7ca5f53a-9e26-4600-a44d-5d32483dec8b}
+ {2290bc80-eed1-463f-94be-e87576358377}
- {c8d8d59a-1fe7-4317-8f5f-c3b8195f0eac}
+ {50f96dcb-629d-4d8f-a6cf-39dfd7649815}
- {63058a84-fcdc-48f2-816a-1011c8c1b4fc}
+ {b06ed62c-bcae-43ab-b854-58d94d5cce53}
- {9df64c47-e030-42e2-b244-3d53f3e75fcd}
+ {ce98d46b-71b3-41ed-b515-902c904f8ab8}
- {e9b7c2f2-04a3-4bb6-bb80-e31d0fab6173}
+ {96c16ce2-a10b-4e3b-bae3-78518a3fd0cb}
- {07e92825-c445-4add-b93d-b6f019a103c8}
+ {2706244f-7364-41ad-959f-745a907d5c8b}
- {b886e732-7fce-4910-8681-e352fb440b7f}
+ {1f673592-d537-436c-907d-d1232baef575}
- {c33c3241-ea4c-4279-bf67-407b0e61c5a7}
+ {b69fd951-b10f-4a24-865f-480ffa0c2f45}
- {0190ed6f-cc86-4218-9d17-f5717b6cf6d9}
+ {5d338ea3-3eed-468d-873d-9962de7e4c45}
- {15a6d403-4a28-4c2a-a2e6-0d6e80deadfa}
+ {6173fb9b-1250-41ca-b73c-af348f64c423}
- {dd132ecb-6d2c-4b5c-b4d6-5fd8302348e1}
+ {b0df56b9-3b1f-4b9c-a018-f44c69153872}
- {8ba0d9dc-036b-46b5-a0ba-0ed450f82293}
+ {766486f2-83c1-44c2-a9d8-7ed09553fd8c}
- {d152a8b3-c193-4b2e-af7f-b24e0c8f2d0d}
+ {e615b27d-8887-40e4-b975-376e671eabe0}
- {5501c616-f154-4401-9a21-cdca08ee50c7}
+ {02d20e52-e22e-430a-866f-3d9a1e57cc44}
- {6da235cd-b41f-4ecb-a9d5-a338a7213736}
+ {90f7c4fc-9cca-4dc9-9c7a-31cf5e02f4cc}
- {3dfcda0f-7d4a-4f52-8efc-2307230d83f6}
+ {98047641-7f03-4202-829d-cf3517f84f75}
- {989410e5-21d1-42e7-bfa9-c44acaebc029}
+ {1942a16b-ca01-42b1-b737-9774ea8e96c9}
- {08bf7247-0028-44e5-918e-2ed119a8234b}
+ {45ca22aa-18a3-4637-b64c-d380c0e6e5ae}
- {9c5dc8c9-aabb-4cd8-8b74-5a17da821f02}
+ {951f2871-cb71-4cd5-bcbb-9086f69de7e9}
- {f938b867-060d-4737-9f49-25ccea5b957a}
+ {eeb06bca-a9d0-4aae-b881-b3ad100a2e73}
- {dd698354-21c3-4309-a998-e37acf21a217}
+ {5e86b963-bee6-4f51-9d2a-bc01b73e607e}
- {1c10f00f-d776-4da4-83e7-b41182e08a28}
+ {59b03923-3f21-4e31-b11a-367ffb161376}
- {fce5e7ec-6fd9-439e-8df8-6b908390c741}
+ {e14278dc-9177-4daa-bec1-6370b8a83182}
- {c181296a-fc76-4b0e-a31b-418fe46a1360}
+ {c6744fe4-f4de-4b73-a615-0c1a19d936a5}
- {5648a9a9-bc9b-42b5-9cdd-9c17e1437178}
+ {4f97b31b-0aba-4f43-9b9a-d431e4bcebd5}
- {be4fd9fd-85d4-4586-9e59-2b0622b04505}
+ {40833b28-954b-425b-8bf4-281230a36ab6}
- {54237d6d-6d2e-4f79-ab14-58e8dede50cc}
+ {6a481c7e-9e1c-42a9-a48d-4ec373f1be60}
- {ff1c12e6-cd02-4643-8c60-b7e651413022}
+ {4414a2a0-7855-4a5f-9a5b-dd6c585b5fef}
- {78b905a0-35ff-46f7-8062-13555ff1894f}
+ {99a73839-24fe-49b1-8b2b-edf2085090aa}
- {7d1ea7cf-6355-420b-99a7-addeeca4370c}
+ {0405b0a6-18d6-46fa-8544-3dc715cedb97}
- {b4c88a7d-7bea-4f64-a1bb-c3c9b8e24245}
+ {a2a6c15c-d109-4b01-bf53-3584a492022d}
- {02df74a1-bdc6-4076-b273-fc43fd4975da}
+ {142e7f8f-175a-4df2-8d9b-312b75926968}
- {56b6fa19-3510-4cdb-b80e-6a31d62a2aa6}
+ {4862a9d8-de90-4624-9c45-646eafea14c2}
- {e6cf0135-83ac-4cb5-98c0-11491870d830}
+ {257719f6-73c4-4472-9166-91a91db3e09b}
- {ecdb88cc-206a-407e-9f06-addbf0b0728d}
+ {b7c23e9f-bad8-48ef-b249-72caa5fedd3a}
- {b543122a-99ec-4dcb-b84f-db57f26fdeca}
+ {b9845d72-bef3-403f-af76-8dfc9fc28eaf}
- {fbced114-043b-4618-9cac-9257023a2ccb}
+ {14466342-01a9-4473-b024-b7dd9ffa9419}
- {3ffb60fe-8666-4130-ae45-84381cacb212}
+ {a17a7981-4d1a-4b79-9fff-9f1888e3cc85}
- {abed7288-dfb0-4381-994d-62683940ca49}
+ {710c1cae-bf5b-4976-8d22-6fb02efb0310}
- {049036d6-7e2b-451f-9f8b-b665375bb258}
+ {724ba623-a072-4b1a-9207-3edfb6a0af36}
- {d9e0f5f8-1057-49f5-ae66-4a9db6e73d67}
+ {1d32a542-189a-4ea4-b059-338d7151e3b4}
- {f9788d4b-3688-4a29-8856-b6246c49da88}
+ {fc30d3c3-ef0c-4a0f-9e84-0307900d3735}
- {169fb8fb-c41c-480e-a4a6-e13f271cc754}
+ {890432aa-24b6-4a75-ba70-fbe57c5ab36e}
@@ -237,6 +237,9 @@
Sockets\Header Files
+
+ Sockets\Header Files
+
Messages\Header Files
@@ -587,6 +590,9 @@
Sockets\Source Files
+
+ Sockets\Source Files
+
Messages\Source Files
diff --git a/Net/Net_vs90.vcproj b/Net/Net_vs90.vcproj
index f6b339ecc7a..0e874a3030d 100644
--- a/Net/Net_vs90.vcproj
+++ b/Net/Net_vs90.vcproj
@@ -643,6 +643,10 @@
RelativePath=".\include\Poco\Net\StreamSocketImpl.h"
>
+
+
+
+
= 0) ::close(_epollfd);
+ }
+
+ void add(const Socket& socket, int mode)
+ {
+ Poco::FastMutex::ScopedLock lock(_mutex);
+
+ SocketImpl* sockImpl = socket.impl();
+
+ int err = addImpl(static_cast(sockImpl->sockfd()), mode, sockImpl);
+
+ if (err)
+ {
+ if (errno == EEXIST) update(socket, mode);
+ else SocketImpl::error();
+ }
+
+ if (_socketMap.find(sockImpl) == _socketMap.end())
+ _socketMap[sockImpl] = socket;
+ }
+
+ void remove(const Socket& socket)
+ {
+ Poco::FastMutex::ScopedLock lock(_mutex);
+
+ poco_socket_t fd = socket.impl()->sockfd();
+ struct epoll_event ev;
+ ev.events = 0;
+ ev.data.ptr = 0;
+ int err = epoll_ctl(_epollfd, EPOLL_CTL_DEL, fd, &ev);
+ if (err) SocketImpl::error();
+
+ _socketMap.erase(socket.impl());
+ }
+
+ bool has(const Socket& socket) const
+ {
+ Poco::FastMutex::ScopedLock lock(_mutex);
+ SocketImpl* sockImpl = socket.impl();
+ return sockImpl &&
+ (_socketMap.find(sockImpl) != _socketMap.end());
+ }
+
+ bool empty() const
+ {
+ Poco::FastMutex::ScopedLock lock(_mutex);
+ return _socketMap.empty();
+ }
+
+ void update(const Socket& socket, int mode)
+ {
+ poco_socket_t fd = socket.impl()->sockfd();
+ struct epoll_event ev;
+ ev.events = 0;
+ if (mode & PollSet::POLL_READ)
+ ev.events |= EPOLLIN;
+ if (mode & PollSet::POLL_WRITE)
+ ev.events |= EPOLLOUT;
+ if (mode & PollSet::POLL_ERROR)
+ ev.events |= EPOLLERR;
+ ev.data.ptr = socket.impl();
+ int err = epoll_ctl(_epollfd, EPOLL_CTL_MOD, fd, &ev);
+ if (err)
+ {
+ SocketImpl::error();
+ }
+ }
+
+ void clear()
+ {
+ Poco::FastMutex::ScopedLock lock(_mutex);
+
+ ::close(_epollfd);
+ _socketMap.clear();
+ _epollfd = epoll_create(1);
+ if (_epollfd < 0)
+ {
+ SocketImpl::error();
+ }
+ }
+
+ PollSet::SocketModeMap poll(const Poco::Timespan& timeout)
+ {
+ PollSet::SocketModeMap result;
+ Poco::Timespan remainingTime(timeout);
+ int rc;
+ do
+ {
+ Poco::Timestamp start;
+ rc = epoll_wait(_epollfd, &_events[0],
+ static_cast(_events.size()),
+ static_cast(remainingTime.totalMilliseconds()));
+ if (rc == 0) return result;
+ if (rc < 0 && SocketImpl::lastError() == POCO_EINTR)
+ {
+ Poco::Timestamp end;
+ Poco::Timespan waited = end - start;
+ if (waited < remainingTime)
+ remainingTime -= waited;
+ else
+ remainingTime = 0;
+ }
+ } while (rc < 0 && SocketImpl::lastError() == POCO_EINTR);
+ if (rc < 0) SocketImpl::error();
+
+ Poco::FastMutex::ScopedLock lock(_mutex);
+
+ for (int i = 0; i < rc; i++)
+ {
+ if (_events[i].data.ptr) // skip eventfd
+ {
+ std::map::iterator it = _socketMap.find(_events[i].data.ptr);
+ if (it != _socketMap.end())
+ {
+ if (_events[i].events & EPOLLIN)
+ result[it->second] |= PollSet::POLL_READ;
+ if (_events[i].events & EPOLLOUT)
+ result[it->second] |= PollSet::POLL_WRITE;
+ if (_events[i].events & EPOLLERR)
+ result[it->second] |= PollSet::POLL_ERROR;
+ }
+ }
+ }
+
+ return result;
+ }
+
+ void wakeUp()
+ {
+ }
+
+ int count() const
+ {
+ Poco::FastMutex::ScopedLock lock(_mutex);
+ return static_cast(_socketMap.size());
+ }
+
+private:
+ int addImpl(int fd, int mode, void* ptr)
+ {
+ struct epoll_event ev;
+ ev.events = 0;
+ if (mode & PollSet::POLL_READ)
+ ev.events |= EPOLLIN;
+ if (mode & PollSet::POLL_WRITE)
+ ev.events |= EPOLLOUT;
+ if (mode & PollSet::POLL_ERROR)
+ ev.events |= EPOLLERR;
+ ev.data.ptr = ptr;
+ return epoll_ctl(_epollfd, EPOLL_CTL_ADD, fd, &ev);
+ }
+
+ mutable Poco::FastMutex _mutex;
+ HANDLE _epollfd;
+ std::map _socketMap;
+ std::vector _events;
+ int _eventfd;
+};
+
+#endif // POCO_OS_FAMILY_WINDOWS
#elif defined(POCO_HAVE_FD_POLL)
diff --git a/Net/src/wepoll.c b/Net/src/wepoll.c
new file mode 100644
index 00000000000..186d3f2d485
--- /dev/null
+++ b/Net/src/wepoll.c
@@ -0,0 +1,2253 @@
+/*
+ * wepoll - epoll for Windows
+ * https://github.com/piscisaureus/wepoll
+ *
+ * Copyright 2012-2020, Bert Belder
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WEPOLL_EXPORT
+#define WEPOLL_EXPORT
+#endif
+
+#include
+
+enum EPOLL_EVENTS {
+ EPOLLIN = (int) (1U << 0),
+ EPOLLPRI = (int) (1U << 1),
+ EPOLLOUT = (int) (1U << 2),
+ EPOLLERR = (int) (1U << 3),
+ EPOLLHUP = (int) (1U << 4),
+ EPOLLRDNORM = (int) (1U << 6),
+ EPOLLRDBAND = (int) (1U << 7),
+ EPOLLWRNORM = (int) (1U << 8),
+ EPOLLWRBAND = (int) (1U << 9),
+ EPOLLMSG = (int) (1U << 10), /* Never reported. */
+ EPOLLRDHUP = (int) (1U << 13),
+ EPOLLONESHOT = (int) (1U << 31)
+};
+
+#define EPOLLIN (1U << 0)
+#define EPOLLPRI (1U << 1)
+#define EPOLLOUT (1U << 2)
+#define EPOLLERR (1U << 3)
+#define EPOLLHUP (1U << 4)
+#define EPOLLRDNORM (1U << 6)
+#define EPOLLRDBAND (1U << 7)
+#define EPOLLWRNORM (1U << 8)
+#define EPOLLWRBAND (1U << 9)
+#define EPOLLMSG (1U << 10)
+#define EPOLLRDHUP (1U << 13)
+#define EPOLLONESHOT (1U << 31)
+
+#define EPOLL_CTL_ADD 1
+#define EPOLL_CTL_MOD 2
+#define EPOLL_CTL_DEL 3
+
+typedef void* HANDLE;
+typedef uintptr_t SOCKET;
+
+typedef union epoll_data {
+ void* ptr;
+ int fd;
+ uint32_t u32;
+ uint64_t u64;
+ SOCKET sock; /* Windows specific */
+ HANDLE hnd; /* Windows specific */
+} epoll_data_t;
+
+struct epoll_event {
+ uint32_t events; /* Epoll events and flags */
+ epoll_data_t data; /* User data variable */
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+WEPOLL_EXPORT HANDLE epoll_create(int size);
+WEPOLL_EXPORT HANDLE epoll_create1(int flags);
+
+WEPOLL_EXPORT int epoll_close(HANDLE ephnd);
+
+WEPOLL_EXPORT int epoll_ctl(HANDLE ephnd,
+ int op,
+ SOCKET sock,
+ struct epoll_event* event);
+
+WEPOLL_EXPORT int epoll_wait(HANDLE ephnd,
+ struct epoll_event* events,
+ int maxevents,
+ int timeout);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#include
+
+#include
+
+#define WEPOLL_INTERNAL static
+#define WEPOLL_INTERNAL_EXTERN static
+
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wnonportable-system-include-path"
+#pragma clang diagnostic ignored "-Wreserved-id-macro"
+#elif defined(_MSC_VER)
+#pragma warning(push, 1)
+#endif
+
+#undef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+
+#undef _WIN32_WINNT
+#define _WIN32_WINNT 0x0600
+
+#include
+#include
+#include
+
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#elif defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
+WEPOLL_INTERNAL int nt_global_init(void);
+
+typedef LONG NTSTATUS;
+typedef NTSTATUS* PNTSTATUS;
+
+#ifndef NT_SUCCESS
+#define NT_SUCCESS(status) (((NTSTATUS)(status)) >= 0)
+#endif
+
+#ifndef STATUS_SUCCESS
+#define STATUS_SUCCESS ((NTSTATUS) 0x00000000L)
+#endif
+
+#ifndef STATUS_PENDING
+#define STATUS_PENDING ((NTSTATUS) 0x00000103L)
+#endif
+
+#ifndef STATUS_CANCELLED
+#define STATUS_CANCELLED ((NTSTATUS) 0xC0000120L)
+#endif
+
+#ifndef STATUS_NOT_FOUND
+#define STATUS_NOT_FOUND ((NTSTATUS) 0xC0000225L)
+#endif
+
+typedef struct _IO_STATUS_BLOCK {
+ NTSTATUS Status;
+ ULONG_PTR Information;
+} IO_STATUS_BLOCK, *PIO_STATUS_BLOCK;
+
+typedef VOID(NTAPI* PIO_APC_ROUTINE)(PVOID ApcContext,
+ PIO_STATUS_BLOCK IoStatusBlock,
+ ULONG Reserved);
+
+typedef struct _UNICODE_STRING {
+ USHORT Length;
+ USHORT MaximumLength;
+ PWSTR Buffer;
+} UNICODE_STRING, *PUNICODE_STRING;
+
+#define RTL_CONSTANT_STRING(s) \
+ { sizeof(s) - sizeof((s)[0]), sizeof(s), s }
+
+typedef struct _OBJECT_ATTRIBUTES {
+ ULONG Length;
+ HANDLE RootDirectory;
+ PUNICODE_STRING ObjectName;
+ ULONG Attributes;
+ PVOID SecurityDescriptor;
+ PVOID SecurityQualityOfService;
+} OBJECT_ATTRIBUTES, *POBJECT_ATTRIBUTES;
+
+#define RTL_CONSTANT_OBJECT_ATTRIBUTES(ObjectName, Attributes) \
+ { sizeof(OBJECT_ATTRIBUTES), NULL, ObjectName, Attributes, NULL, NULL }
+
+#ifndef FILE_OPEN
+#define FILE_OPEN 0x00000001UL
+#endif
+
+#define KEYEDEVENT_WAIT 0x00000001UL
+#define KEYEDEVENT_WAKE 0x00000002UL
+#define KEYEDEVENT_ALL_ACCESS \
+ (STANDARD_RIGHTS_REQUIRED | KEYEDEVENT_WAIT | KEYEDEVENT_WAKE)
+
+#define NT_NTDLL_IMPORT_LIST(X) \
+ X(NTSTATUS, \
+ NTAPI, \
+ NtCancelIoFileEx, \
+ (HANDLE FileHandle, \
+ PIO_STATUS_BLOCK IoRequestToCancel, \
+ PIO_STATUS_BLOCK IoStatusBlock)) \
+ \
+ X(NTSTATUS, \
+ NTAPI, \
+ NtCreateFile, \
+ (PHANDLE FileHandle, \
+ ACCESS_MASK DesiredAccess, \
+ POBJECT_ATTRIBUTES ObjectAttributes, \
+ PIO_STATUS_BLOCK IoStatusBlock, \
+ PLARGE_INTEGER AllocationSize, \
+ ULONG FileAttributes, \
+ ULONG ShareAccess, \
+ ULONG CreateDisposition, \
+ ULONG CreateOptions, \
+ PVOID EaBuffer, \
+ ULONG EaLength)) \
+ \
+ X(NTSTATUS, \
+ NTAPI, \
+ NtCreateKeyedEvent, \
+ (PHANDLE KeyedEventHandle, \
+ ACCESS_MASK DesiredAccess, \
+ POBJECT_ATTRIBUTES ObjectAttributes, \
+ ULONG Flags)) \
+ \
+ X(NTSTATUS, \
+ NTAPI, \
+ NtDeviceIoControlFile, \
+ (HANDLE FileHandle, \
+ HANDLE Event, \
+ PIO_APC_ROUTINE ApcRoutine, \
+ PVOID ApcContext, \
+ PIO_STATUS_BLOCK IoStatusBlock, \
+ ULONG IoControlCode, \
+ PVOID InputBuffer, \
+ ULONG InputBufferLength, \
+ PVOID OutputBuffer, \
+ ULONG OutputBufferLength)) \
+ \
+ X(NTSTATUS, \
+ NTAPI, \
+ NtReleaseKeyedEvent, \
+ (HANDLE KeyedEventHandle, \
+ PVOID KeyValue, \
+ BOOLEAN Alertable, \
+ PLARGE_INTEGER Timeout)) \
+ \
+ X(NTSTATUS, \
+ NTAPI, \
+ NtWaitForKeyedEvent, \
+ (HANDLE KeyedEventHandle, \
+ PVOID KeyValue, \
+ BOOLEAN Alertable, \
+ PLARGE_INTEGER Timeout)) \
+ \
+ X(ULONG, WINAPI, RtlNtStatusToDosError, (NTSTATUS Status))
+
+#define X(return_type, attributes, name, parameters) \
+ WEPOLL_INTERNAL_EXTERN return_type(attributes* name) parameters;
+NT_NTDLL_IMPORT_LIST(X)
+#undef X
+
+#define AFD_POLL_RECEIVE 0x0001
+#define AFD_POLL_RECEIVE_EXPEDITED 0x0002
+#define AFD_POLL_SEND 0x0004
+#define AFD_POLL_DISCONNECT 0x0008
+#define AFD_POLL_ABORT 0x0010
+#define AFD_POLL_LOCAL_CLOSE 0x0020
+#define AFD_POLL_ACCEPT 0x0080
+#define AFD_POLL_CONNECT_FAIL 0x0100
+
+typedef struct _AFD_POLL_HANDLE_INFO {
+ HANDLE Handle;
+ ULONG Events;
+ NTSTATUS Status;
+} AFD_POLL_HANDLE_INFO, *PAFD_POLL_HANDLE_INFO;
+
+typedef struct _AFD_POLL_INFO {
+ LARGE_INTEGER Timeout;
+ ULONG NumberOfHandles;
+ ULONG Exclusive;
+ AFD_POLL_HANDLE_INFO Handles[1];
+} AFD_POLL_INFO, *PAFD_POLL_INFO;
+
+WEPOLL_INTERNAL int afd_create_device_handle(HANDLE iocp_handle,
+ HANDLE* afd_device_handle_out);
+
+WEPOLL_INTERNAL int afd_poll(HANDLE afd_device_handle,
+ AFD_POLL_INFO* poll_info,
+ IO_STATUS_BLOCK* io_status_block);
+WEPOLL_INTERNAL int afd_cancel_poll(HANDLE afd_device_handle,
+ IO_STATUS_BLOCK* io_status_block);
+
+#define return_map_error(value) \
+ do { \
+ err_map_win_error(); \
+ return (value); \
+ } while (0)
+
+#define return_set_error(value, error) \
+ do { \
+ err_set_win_error(error); \
+ return (value); \
+ } while (0)
+
+WEPOLL_INTERNAL void err_map_win_error(void);
+WEPOLL_INTERNAL void err_set_win_error(DWORD error);
+WEPOLL_INTERNAL int err_check_handle(HANDLE handle);
+
+#define IOCTL_AFD_POLL 0x00012024
+
+static UNICODE_STRING afd__device_name =
+ RTL_CONSTANT_STRING(L"\\Device\\Afd\\Wepoll");
+
+static OBJECT_ATTRIBUTES afd__device_attributes =
+ RTL_CONSTANT_OBJECT_ATTRIBUTES(&afd__device_name, 0);
+
+/* Opens a fresh handle to the AFD driver, associates it with `iocp_handle` so
+ * completions are posted to that port, and enables
+ * FILE_SKIP_SET_EVENT_ON_HANDLE. On success stores the new handle in
+ * *afd_device_handle_out and returns 0; on failure returns -1 with
+ * errno/LastError set and no handle leaked. */
+int afd_create_device_handle(HANDLE iocp_handle,
+ HANDLE* afd_device_handle_out) {
+ HANDLE afd_device_handle;
+ IO_STATUS_BLOCK iosb;
+ NTSTATUS status;
+
+ /* By opening \Device\Afd without specifying any extended attributes, we'll
+ * get a handle that lets us talk to the AFD driver, but that doesn't have an
+ * associated endpoint (so it's not a socket). */
+ status = NtCreateFile(&afd_device_handle,
+ SYNCHRONIZE,
+ &afd__device_attributes,
+ &iosb,
+ NULL,
+ 0,
+ FILE_SHARE_READ | FILE_SHARE_WRITE,
+ FILE_OPEN,
+ 0,
+ NULL,
+ 0);
+ if (status != STATUS_SUCCESS)
+ return_set_error(-1, RtlNtStatusToDosError(status));
+
+ if (CreateIoCompletionPort(afd_device_handle, iocp_handle, 0, 0) == NULL)
+ goto error;
+
+ if (!SetFileCompletionNotificationModes(afd_device_handle,
+ FILE_SKIP_SET_EVENT_ON_HANDLE))
+ goto error;
+
+ *afd_device_handle_out = afd_device_handle;
+ return 0;
+
+error:
+ CloseHandle(afd_device_handle);
+ return_map_error(-1);
+}
+
+/* Submits an asynchronous IOCTL_AFD_POLL request. `io_status_block` is passed
+ * both as the APC context and as the status block, so it must stay valid until
+ * the operation completes or is cancelled. `poll_info` is both the input and
+ * the output buffer. Returns 0 on synchronous completion, -1 with
+ * ERROR_IO_PENDING while the request is in flight, or -1 with a mapped error
+ * code on failure. */
+int afd_poll(HANDLE afd_device_handle,
+ AFD_POLL_INFO* poll_info,
+ IO_STATUS_BLOCK* io_status_block) {
+ NTSTATUS status;
+
+ /* Blocking operation is not supported. */
+ assert(io_status_block != NULL);
+
+ io_status_block->Status = STATUS_PENDING;
+ status = NtDeviceIoControlFile(afd_device_handle,
+ NULL,
+ NULL,
+ io_status_block,
+ io_status_block,
+ IOCTL_AFD_POLL,
+ poll_info,
+ sizeof *poll_info,
+ poll_info,
+ sizeof *poll_info);
+
+ if (status == STATUS_SUCCESS)
+ return 0;
+ else if (status == STATUS_PENDING)
+ return_set_error(-1, ERROR_IO_PENDING);
+ else
+ return_set_error(-1, RtlNtStatusToDosError(status));
+}
+
+/* Cancels a pending afd_poll() request identified by its io_status_block.
+ * An already-completed or already-cancelled request (Status != STATUS_PENDING)
+ * and a completion race (STATUS_NOT_FOUND) are both treated as success. */
+int afd_cancel_poll(HANDLE afd_device_handle,
+ IO_STATUS_BLOCK* io_status_block) {
+ NTSTATUS cancel_status;
+ IO_STATUS_BLOCK cancel_iosb;
+
+ /* If the poll operation has already completed or has been cancelled earlier,
+ * there's nothing left for us to do. */
+ if (io_status_block->Status != STATUS_PENDING)
+ return 0;
+
+ cancel_status =
+ NtCancelIoFileEx(afd_device_handle, io_status_block, &cancel_iosb);
+
+ /* NtCancelIoFileEx() may return STATUS_NOT_FOUND if the operation completed
+ * just before calling NtCancelIoFileEx(). This is not an error. */
+ if (cancel_status == STATUS_SUCCESS || cancel_status == STATUS_NOT_FOUND)
+ return 0;
+ else
+ return_set_error(-1, RtlNtStatusToDosError(cancel_status));
+}
+
+WEPOLL_INTERNAL int epoll_global_init(void);
+
+WEPOLL_INTERNAL int init(void);
+
+typedef struct port_state port_state_t;
+typedef struct queue queue_t;
+typedef struct sock_state sock_state_t;
+typedef struct ts_tree_node ts_tree_node_t;
+
+WEPOLL_INTERNAL port_state_t* port_new(HANDLE* iocp_handle_out);
+WEPOLL_INTERNAL int port_close(port_state_t* port_state);
+WEPOLL_INTERNAL int port_delete(port_state_t* port_state);
+
+WEPOLL_INTERNAL int port_wait(port_state_t* port_state,
+ struct epoll_event* events,
+ int maxevents,
+ int timeout);
+
+WEPOLL_INTERNAL int port_ctl(port_state_t* port_state,
+ int op,
+ SOCKET sock,
+ struct epoll_event* ev);
+
+WEPOLL_INTERNAL int port_register_socket(port_state_t* port_state,
+ sock_state_t* sock_state,
+ SOCKET socket);
+WEPOLL_INTERNAL void port_unregister_socket(port_state_t* port_state,
+ sock_state_t* sock_state);
+WEPOLL_INTERNAL sock_state_t* port_find_socket(port_state_t* port_state,
+ SOCKET socket);
+
+WEPOLL_INTERNAL void port_request_socket_update(port_state_t* port_state,
+ sock_state_t* sock_state);
+WEPOLL_INTERNAL void port_cancel_socket_update(port_state_t* port_state,
+ sock_state_t* sock_state);
+
+WEPOLL_INTERNAL void port_add_deleted_socket(port_state_t* port_state,
+ sock_state_t* sock_state);
+WEPOLL_INTERNAL void port_remove_deleted_socket(port_state_t* port_state,
+ sock_state_t* sock_state);
+
+WEPOLL_INTERNAL HANDLE port_get_iocp_handle(port_state_t* port_state);
+WEPOLL_INTERNAL queue_t* port_get_poll_group_queue(port_state_t* port_state);
+
+WEPOLL_INTERNAL port_state_t* port_state_from_handle_tree_node(
+ ts_tree_node_t* tree_node);
+WEPOLL_INTERNAL ts_tree_node_t* port_state_to_handle_tree_node(
+ port_state_t* port_state);
+
+/* The reflock is a special kind of lock that normally prevents a chunk of
+ * memory from being freed, but does allow the chunk of memory to eventually be
+ * released in a coordinated fashion.
+ *
+ * Under normal operation, threads increase and decrease the reference count,
+ * which are wait-free operations.
+ *
+ * Exactly once during the reflock's lifecycle, a thread holding a reference to
+ * the lock may "destroy" the lock; this operation blocks until all other
+ * threads holding a reference to the lock have dereferenced it. After
+ * "destroy" returns, the calling thread may assume that no other threads have
+ * a reference to the lock.
+ *
+ * Attempting to lock or destroy a lock after reflock_unref_and_destroy() has
+ * been called is invalid and results in undefined behavior. Therefore the user
+ * should use another lock to guarantee that this can't happen.
+ */
+
+/* See the comment block above for the reflock contract. */
+typedef struct reflock {
+ volatile long state; /* 32-bit Interlocked APIs operate on `long` values. */
+} reflock_t;
+
+WEPOLL_INTERNAL int reflock_global_init(void);
+
+WEPOLL_INTERNAL void reflock_init(reflock_t* reflock);
+WEPOLL_INTERNAL void reflock_ref(reflock_t* reflock);
+WEPOLL_INTERNAL void reflock_unref(reflock_t* reflock);
+WEPOLL_INTERNAL void reflock_unref_and_destroy(reflock_t* reflock);
+
+#include /* FIXME(review): include target lost in transcription (angle-bracket text stripped) — restore from upstream wepoll.c */
+
+/* N.b.: the tree functions do not set errno or LastError when they fail. Each
+ * of the API functions has at most one failure mode. It is up to the caller to
+ * set an appropriate error code when necessary. */
+
+typedef struct tree tree_t;
+typedef struct tree_node tree_node_t;
+
+/* Intrusive binary search tree keyed by uintptr_t; the `red` flag on nodes
+ * indicates a red-black balancing scheme (implementation not in this view). */
+typedef struct tree {
+ tree_node_t* root;
+} tree_t;
+
+typedef struct tree_node {
+ tree_node_t* left;
+ tree_node_t* right;
+ tree_node_t* parent;
+ uintptr_t key;
+ bool red;
+} tree_node_t;
+
+WEPOLL_INTERNAL void tree_init(tree_t* tree);
+WEPOLL_INTERNAL void tree_node_init(tree_node_t* node);
+
+WEPOLL_INTERNAL int tree_add(tree_t* tree, tree_node_t* node, uintptr_t key);
+WEPOLL_INTERNAL void tree_del(tree_t* tree, tree_node_t* node);
+
+WEPOLL_INTERNAL tree_node_t* tree_find(const tree_t* tree, uintptr_t key);
+WEPOLL_INTERNAL tree_node_t* tree_root(const tree_t* tree);
+
+/* Thread-safe tree: the plain tree above guarded by an SRW lock, with nodes
+ * that carry a reflock so lookups can pin a node beyond the lock's scope. */
+typedef struct ts_tree {
+ tree_t tree;
+ SRWLOCK lock;
+} ts_tree_t;
+
+typedef struct ts_tree_node {
+ tree_node_t tree_node;
+ reflock_t reflock;
+} ts_tree_node_t;
+
+WEPOLL_INTERNAL void ts_tree_init(ts_tree_t* rtl);
+WEPOLL_INTERNAL void ts_tree_node_init(ts_tree_node_t* node);
+
+WEPOLL_INTERNAL int ts_tree_add(ts_tree_t* ts_tree,
+ ts_tree_node_t* node,
+ uintptr_t key);
+
+WEPOLL_INTERNAL ts_tree_node_t* ts_tree_del_and_ref(ts_tree_t* ts_tree,
+ uintptr_t key);
+WEPOLL_INTERNAL ts_tree_node_t* ts_tree_find_and_ref(ts_tree_t* ts_tree,
+ uintptr_t key);
+
+WEPOLL_INTERNAL void ts_tree_node_unref(ts_tree_node_t* node);
+WEPOLL_INTERNAL void ts_tree_node_unref_and_destroy(ts_tree_node_t* node);
+
+/* Global registry mapping epoll HANDLE values to their port_state_t. */
+static ts_tree_t epoll__handle_tree;
+
+/* One-time setup of the handle registry; invoked from the init() once-callback
+ * below. */
+int epoll_global_init(void) {
+ ts_tree_init(&epoll__handle_tree);
+ return 0;
+}
+
+/* Shared implementation of epoll_create()/epoll_create1(): creates a new port
+ * and registers it in the handle tree, keyed by its IOCP handle value.
+ * Returns the epoll handle, or NULL with errno/LastError set. */
+static HANDLE epoll__create(void) {
+ port_state_t* port_state;
+ HANDLE ephnd;
+ ts_tree_node_t* tree_node;
+
+ if (init() < 0)
+ return NULL;
+
+ port_state = port_new(&ephnd);
+ if (port_state == NULL)
+ return NULL;
+
+ tree_node = port_state_to_handle_tree_node(port_state);
+ if (ts_tree_add(&epoll__handle_tree, tree_node, (uintptr_t) ephnd) < 0) {
+ /* This should never happen. */
+ port_delete(port_state);
+ return_set_error(NULL, ERROR_ALREADY_EXISTS);
+ }
+
+ return ephnd;
+}
+
+/* Public epoll_create(): `size` is only validated (must be positive, as on
+ * Linux) and otherwise ignored. */
+HANDLE epoll_create(int size) {
+ if (size <= 0)
+ return_set_error(NULL, ERROR_INVALID_PARAMETER);
+
+ return epoll__create();
+}
+
+/* Public epoll_create1(): no flags are supported. */
+HANDLE epoll_create1(int flags) {
+ if (flags != 0)
+ return_set_error(NULL, ERROR_INVALID_PARAMETER);
+
+ return epoll__create();
+}
+
+/* Public epoll_close(): removes the port from the handle tree, waits (via the
+ * node's reflock) for concurrent users to drain, then tears the port down. */
+int epoll_close(HANDLE ephnd) {
+ ts_tree_node_t* tree_node;
+ port_state_t* port_state;
+
+ if (init() < 0)
+ return -1;
+
+ tree_node = ts_tree_del_and_ref(&epoll__handle_tree, (uintptr_t) ephnd);
+ if (tree_node == NULL) {
+ err_set_win_error(ERROR_INVALID_PARAMETER);
+ goto err;
+ }
+
+ port_state = port_state_from_handle_tree_node(tree_node);
+ port_close(port_state);
+
+ ts_tree_node_unref_and_destroy(tree_node);
+
+ return port_delete(port_state);
+
+err:
+ /* Upgrade to EBADF if `ephnd` is not a valid handle at all. */
+ err_check_handle(ephnd);
+ return -1;
+}
+
+/* Public epoll_ctl(): resolves `ephnd` to its port (taking a reference so the
+ * port can't be destroyed mid-call) and forwards to port_ctl(). */
+int epoll_ctl(HANDLE ephnd, int op, SOCKET sock, struct epoll_event* ev) {
+ ts_tree_node_t* tree_node;
+ port_state_t* port_state;
+ int r;
+
+ if (init() < 0)
+ return -1;
+
+ tree_node = ts_tree_find_and_ref(&epoll__handle_tree, (uintptr_t) ephnd);
+ if (tree_node == NULL) {
+ err_set_win_error(ERROR_INVALID_PARAMETER);
+ goto err;
+ }
+
+ port_state = port_state_from_handle_tree_node(tree_node);
+ r = port_ctl(port_state, op, sock, ev);
+
+ ts_tree_node_unref(tree_node);
+
+ if (r < 0)
+ goto err;
+
+ return 0;
+
+err:
+ /* On Linux, in the case of epoll_ctl(), EBADF takes priority over other
+ * errors. Wepoll mimics this behavior. */
+ err_check_handle(ephnd);
+ err_check_handle((HANDLE) sock);
+ return -1;
+}
+
+/* Public epoll_wait(): resolves `ephnd` to its port (holding a reference for
+ * the duration of the wait) and forwards to port_wait(). Returns the number
+ * of events stored in `events`, or -1 with errno/LastError set. */
+int epoll_wait(HANDLE ephnd,
+ struct epoll_event* events,
+ int maxevents,
+ int timeout) {
+ ts_tree_node_t* tree_node;
+ port_state_t* port_state;
+ int num_events;
+
+ if (maxevents <= 0)
+ return_set_error(-1, ERROR_INVALID_PARAMETER);
+
+ if (init() < 0)
+ return -1;
+
+ tree_node = ts_tree_find_and_ref(&epoll__handle_tree, (uintptr_t) ephnd);
+ if (tree_node == NULL) {
+ err_set_win_error(ERROR_INVALID_PARAMETER);
+ goto err;
+ }
+
+ port_state = port_state_from_handle_tree_node(tree_node);
+ num_events = port_wait(port_state, events, maxevents, timeout);
+
+ ts_tree_node_unref(tree_node);
+
+ if (num_events < 0)
+ goto err;
+
+ return num_events;
+
+err:
+ /* Report EBADF if `ephnd` is not a valid handle at all. */
+ err_check_handle(ephnd);
+ return -1;
+}
+
+#include <errno.h>
+
+#define ERR__ERRNO_MAPPINGS(X) \
+ X(ERROR_ACCESS_DENIED, EACCES) \
+ X(ERROR_ALREADY_EXISTS, EEXIST) \
+ X(ERROR_BAD_COMMAND, EACCES) \
+ X(ERROR_BAD_EXE_FORMAT, ENOEXEC) \
+ X(ERROR_BAD_LENGTH, EACCES) \
+ X(ERROR_BAD_NETPATH, ENOENT) \
+ X(ERROR_BAD_NET_NAME, ENOENT) \
+ X(ERROR_BAD_NET_RESP, ENETDOWN) \
+ X(ERROR_BAD_PATHNAME, ENOENT) \
+ X(ERROR_BROKEN_PIPE, EPIPE) \
+ X(ERROR_CANNOT_MAKE, EACCES) \
+ X(ERROR_COMMITMENT_LIMIT, ENOMEM) \
+ X(ERROR_CONNECTION_ABORTED, ECONNABORTED) \
+ X(ERROR_CONNECTION_ACTIVE, EISCONN) \
+ X(ERROR_CONNECTION_REFUSED, ECONNREFUSED) \
+ X(ERROR_CRC, EACCES) \
+ X(ERROR_DIR_NOT_EMPTY, ENOTEMPTY) \
+ X(ERROR_DISK_FULL, ENOSPC) \
+ X(ERROR_DUP_NAME, EADDRINUSE) \
+ X(ERROR_FILENAME_EXCED_RANGE, ENOENT) \
+ X(ERROR_FILE_NOT_FOUND, ENOENT) \
+ X(ERROR_GEN_FAILURE, EACCES) \
+ X(ERROR_GRACEFUL_DISCONNECT, EPIPE) \
+ X(ERROR_HOST_DOWN, EHOSTUNREACH) \
+ X(ERROR_HOST_UNREACHABLE, EHOSTUNREACH) \
+ X(ERROR_INSUFFICIENT_BUFFER, EFAULT) \
+ X(ERROR_INVALID_ADDRESS, EADDRNOTAVAIL) \
+ X(ERROR_INVALID_FUNCTION, EINVAL) \
+ X(ERROR_INVALID_HANDLE, EBADF) \
+ X(ERROR_INVALID_NETNAME, EADDRNOTAVAIL) \
+ X(ERROR_INVALID_PARAMETER, EINVAL) \
+ X(ERROR_INVALID_USER_BUFFER, EMSGSIZE) \
+ X(ERROR_IO_PENDING, EINPROGRESS) \
+ X(ERROR_LOCK_VIOLATION, EACCES) \
+ X(ERROR_MORE_DATA, EMSGSIZE) \
+ X(ERROR_NETNAME_DELETED, ECONNABORTED) \
+ X(ERROR_NETWORK_ACCESS_DENIED, EACCES) \
+ X(ERROR_NETWORK_BUSY, ENETDOWN) \
+ X(ERROR_NETWORK_UNREACHABLE, ENETUNREACH) \
+ X(ERROR_NOACCESS, EFAULT) \
+ X(ERROR_NONPAGED_SYSTEM_RESOURCES, ENOMEM) \
+ X(ERROR_NOT_ENOUGH_MEMORY, ENOMEM) \
+ X(ERROR_NOT_ENOUGH_QUOTA, ENOMEM) \
+ X(ERROR_NOT_FOUND, ENOENT) \
+ X(ERROR_NOT_LOCKED, EACCES) \
+ X(ERROR_NOT_READY, EACCES) \
+ X(ERROR_NOT_SAME_DEVICE, EXDEV) \
+ X(ERROR_NOT_SUPPORTED, ENOTSUP) \
+ X(ERROR_NO_MORE_FILES, ENOENT) \
+ X(ERROR_NO_SYSTEM_RESOURCES, ENOMEM) \
+ X(ERROR_OPERATION_ABORTED, EINTR) \
+ X(ERROR_OUT_OF_PAPER, EACCES) \
+ X(ERROR_PAGED_SYSTEM_RESOURCES, ENOMEM) \
+ X(ERROR_PAGEFILE_QUOTA, ENOMEM) \
+ X(ERROR_PATH_NOT_FOUND, ENOENT) \
+ X(ERROR_PIPE_NOT_CONNECTED, EPIPE) \
+ X(ERROR_PORT_UNREACHABLE, ECONNRESET) \
+ X(ERROR_PROTOCOL_UNREACHABLE, ENETUNREACH) \
+ X(ERROR_REM_NOT_LIST, ECONNREFUSED) \
+ X(ERROR_REQUEST_ABORTED, EINTR) \
+ X(ERROR_REQ_NOT_ACCEP, EWOULDBLOCK) \
+ X(ERROR_SECTOR_NOT_FOUND, EACCES) \
+ X(ERROR_SEM_TIMEOUT, ETIMEDOUT) \
+ X(ERROR_SHARING_VIOLATION, EACCES) \
+ X(ERROR_TOO_MANY_NAMES, ENOMEM) \
+ X(ERROR_TOO_MANY_OPEN_FILES, EMFILE) \
+ X(ERROR_UNEXP_NET_ERR, ECONNABORTED) \
+ X(ERROR_WAIT_NO_CHILDREN, ECHILD) \
+ X(ERROR_WORKING_SET_QUOTA, ENOMEM) \
+ X(ERROR_WRITE_PROTECT, EACCES) \
+ X(ERROR_WRONG_DISK, EACCES) \
+ X(WSAEACCES, EACCES) \
+ X(WSAEADDRINUSE, EADDRINUSE) \
+ X(WSAEADDRNOTAVAIL, EADDRNOTAVAIL) \
+ X(WSAEAFNOSUPPORT, EAFNOSUPPORT) \
+ X(WSAECONNABORTED, ECONNABORTED) \
+ X(WSAECONNREFUSED, ECONNREFUSED) \
+ X(WSAECONNRESET, ECONNRESET) \
+ X(WSAEDISCON, EPIPE) \
+ X(WSAEFAULT, EFAULT) \
+ X(WSAEHOSTDOWN, EHOSTUNREACH) \
+ X(WSAEHOSTUNREACH, EHOSTUNREACH) \
+ X(WSAEINPROGRESS, EBUSY) \
+ X(WSAEINTR, EINTR) \
+ X(WSAEINVAL, EINVAL) \
+ X(WSAEISCONN, EISCONN) \
+ X(WSAEMSGSIZE, EMSGSIZE) \
+ X(WSAENETDOWN, ENETDOWN) \
+ X(WSAENETRESET, EHOSTUNREACH) \
+ X(WSAENETUNREACH, ENETUNREACH) \
+ X(WSAENOBUFS, ENOMEM) \
+ X(WSAENOTCONN, ENOTCONN) \
+ X(WSAENOTSOCK, ENOTSOCK) \
+ X(WSAEOPNOTSUPP, EOPNOTSUPP) \
+ X(WSAEPROCLIM, ENOMEM) \
+ X(WSAESHUTDOWN, EPIPE) \
+ X(WSAETIMEDOUT, ETIMEDOUT) \
+ X(WSAEWOULDBLOCK, EWOULDBLOCK) \
+ X(WSANOTINITIALISED, ENETDOWN) \
+ X(WSASYSNOTREADY, ENETDOWN) \
+ X(WSAVERNOTSUPPORTED, ENOSYS)
+
+/* Translates a Win32/WSA error code to the closest errno value using the
+ * ERR__ERRNO_MAPPINGS table above; unknown codes become EINVAL. */
+static errno_t err__map_win_error_to_errno(DWORD error) {
+ switch (error) {
+#define X(error_sym, errno_sym) \
+ case error_sym: \
+ return errno_sym;
+ ERR__ERRNO_MAPPINGS(X)
+#undef X
+ }
+ return EINVAL;
+}
+
+/* Sets errno from the thread's current GetLastError() value. */
+void err_map_win_error(void) {
+ errno = err__map_win_error_to_errno(GetLastError());
+}
+
+/* Sets both LastError and the corresponding errno to `error`. */
+void err_set_win_error(DWORD error) {
+ SetLastError(error);
+ errno = err__map_win_error_to_errno(error);
+}
+
+/* Returns 0 if `handle` refers to a live kernel handle; otherwise returns -1
+ * with errno/LastError set (used to report EBADF with Linux-like priority). */
+int err_check_handle(HANDLE handle) {
+ DWORD flags;
+
+ /* GetHandleInformation() succeeds when passed INVALID_HANDLE_VALUE, so check
+ * for this condition explicitly. */
+ if (handle == INVALID_HANDLE_VALUE)
+ return_set_error(-1, ERROR_INVALID_HANDLE);
+
+ if (!GetHandleInformation(handle, &flags))
+ return_map_error(-1);
+
+ return 0;
+}
+
+#include /* FIXME(review): include target lost in transcription (angle-bracket text stripped) — restore from upstream wepoll.c */
+
+/* Element count of a true array (not valid for pointers/parameters). */
+#define array_count(a) (sizeof(a) / (sizeof((a)[0])))
+
+/* Recovers the address of the struct containing `member` at address `ptr`. */
+#define container_of(ptr, type, member) \
+ ((type*) ((uintptr_t) (ptr) - offsetof(type, member)))
+
+/* Silences unused-parameter warnings. */
+#define unused_var(v) ((void) (v))
+
+/* Polyfill `inline` for older versions of msvc (up to Visual Studio 2013) */
+#if defined(_MSC_VER) && _MSC_VER < 1900
+#define inline __inline
+#endif
+
+WEPOLL_INTERNAL int ws_global_init(void);
+WEPOLL_INTERNAL SOCKET ws_get_base_socket(SOCKET socket);
+
+/* Fast-path flag checked before taking the InitOnce slow path; only ever set
+ * to true after all global init functions succeeded. */
+static bool init__done = false;
+static INIT_ONCE init__once = INIT_ONCE_STATIC_INIT;
+
+/* InitOnce callback: runs the per-module global initializers exactly once.
+ * Returning FALSE keeps the INIT_ONCE unfinished so a later init() retries. */
+static BOOL CALLBACK init__once_callback(INIT_ONCE* once,
+ void* parameter,
+ void** context) {
+ unused_var(once);
+ unused_var(parameter);
+ unused_var(context);
+
+ /* N.b. that initialization order matters here. */
+ if (ws_global_init() < 0 || nt_global_init() < 0 ||
+ reflock_global_init() < 0 || epoll_global_init() < 0)
+ return FALSE;
+
+ init__done = true;
+ return TRUE;
+}
+
+/* Lazily performs one-time global initialization; called at every public API
+ * entry point. Returns 0 on success, -1 on failure. */
+int init(void) {
+ if (!init__done &&
+ !InitOnceExecuteOnce(&init__once, init__once_callback, NULL, NULL))
+ /* `InitOnceExecuteOnce()` itself is infallible, and it doesn't set any
+ * error code when the once-callback returns FALSE. We return -1 here to
+ * indicate that global initialization failed; the failing init function is
+ * responsible for setting `errno` and calling `SetLastError()`. */
+ return -1;
+
+ return 0;
+}
+
+/* Set up a workaround for the following problem:
+ * FARPROC addr = GetProcAddress(...);
+ * MY_FUNC func = (MY_FUNC) addr; <-- GCC 8 warning/error.
+ * MY_FUNC func = (MY_FUNC) (void*) addr; <-- MSVC warning/error.
+ * To compile cleanly with either compiler, do casts with this "bridge" type:
+ * MY_FUNC func = (MY_FUNC) (nt__fn_ptr_cast_t) addr; */
+#ifdef __GNUC__
+typedef void* nt__fn_ptr_cast_t;
+#else
+typedef FARPROC nt__fn_ptr_cast_t;
+#endif
+
+/* Define one NULL-initialized function pointer per entry in
+ * NT_NTDLL_IMPORT_LIST; nt_global_init() fills them in from ntdll.dll. */
+#define X(return_type, attributes, name, parameters) \
+ WEPOLL_INTERNAL return_type(attributes* name) parameters = NULL;
+NT_NTDLL_IMPORT_LIST(X)
+#undef X
+
+/* Resolves every NT function listed in NT_NTDLL_IMPORT_LIST from ntdll.dll
+ * into the pointers defined above. Returns 0 on success, -1 if the module or
+ * any symbol is missing (N.b.: does not set errno/LastError itself). */
+int nt_global_init(void) {
+ HMODULE ntdll;
+ FARPROC fn_ptr;
+
+ ntdll = GetModuleHandleW(L"ntdll.dll");
+ if (ntdll == NULL)
+ return -1;
+
+#define X(return_type, attributes, name, parameters) \
+ fn_ptr = GetProcAddress(ntdll, #name); \
+ if (fn_ptr == NULL) \
+ return -1; \
+ name = (return_type(attributes*) parameters)(nt__fn_ptr_cast_t) fn_ptr;
+ NT_NTDLL_IMPORT_LIST(X)
+#undef X
+
+ return 0;
+}
+
+#include /* FIXME(review): include target lost in transcription (angle-bracket text stripped) — restore from upstream wepoll.c */
+
+typedef struct poll_group poll_group_t;
+
+typedef struct queue_node queue_node_t;
+
+WEPOLL_INTERNAL poll_group_t* poll_group_acquire(port_state_t* port);
+WEPOLL_INTERNAL void poll_group_release(poll_group_t* poll_group);
+
+WEPOLL_INTERNAL void poll_group_delete(poll_group_t* poll_group);
+
+WEPOLL_INTERNAL poll_group_t* poll_group_from_queue_node(
+ queue_node_t* queue_node);
+WEPOLL_INTERNAL HANDLE
+ poll_group_get_afd_device_handle(poll_group_t* poll_group);
+
+typedef struct queue_node {
+ queue_node_t* prev;
+ queue_node_t* next;
+} queue_node_t;
+
+typedef struct queue {
+ queue_node_t head;
+} queue_t;
+
+WEPOLL_INTERNAL void queue_init(queue_t* queue);
+WEPOLL_INTERNAL void queue_node_init(queue_node_t* node);
+
+WEPOLL_INTERNAL queue_node_t* queue_first(const queue_t* queue);
+WEPOLL_INTERNAL queue_node_t* queue_last(const queue_t* queue);
+
+WEPOLL_INTERNAL void queue_prepend(queue_t* queue, queue_node_t* node);
+WEPOLL_INTERNAL void queue_append(queue_t* queue, queue_node_t* node);
+WEPOLL_INTERNAL void queue_move_to_start(queue_t* queue, queue_node_t* node);
+WEPOLL_INTERNAL void queue_move_to_end(queue_t* queue, queue_node_t* node);
+WEPOLL_INTERNAL void queue_remove(queue_node_t* node);
+
+WEPOLL_INTERNAL bool queue_is_empty(const queue_t* queue);
+WEPOLL_INTERNAL bool queue_is_enqueued(const queue_node_t* node);
+
+/* Maximum number of sockets that share one AFD device handle. */
+#define POLL_GROUP__MAX_GROUP_SIZE 32
+
+/* A poll group owns one AFD device handle and counts how many sockets
+ * currently use it; groups are linked into the port's poll_group_queue. */
+typedef struct poll_group {
+ port_state_t* port_state;
+ queue_node_t queue_node;
+ HANDLE afd_device_handle;
+ size_t group_size;
+} poll_group_t;
+
+/* Allocates a poll group, opens its AFD device handle bound to the port's
+ * IOCP, and appends it to the port's poll-group queue. Returns NULL with
+ * errno/LastError set on failure. */
+static poll_group_t* poll_group__new(port_state_t* port_state) {
+ HANDLE iocp_handle = port_get_iocp_handle(port_state);
+ queue_t* poll_group_queue = port_get_poll_group_queue(port_state);
+
+ poll_group_t* poll_group = malloc(sizeof *poll_group);
+ if (poll_group == NULL)
+ return_set_error(NULL, ERROR_NOT_ENOUGH_MEMORY);
+
+ memset(poll_group, 0, sizeof *poll_group);
+
+ queue_node_init(&poll_group->queue_node);
+ poll_group->port_state = port_state;
+
+ if (afd_create_device_handle(iocp_handle, &poll_group->afd_device_handle) <
+ 0) {
+ free(poll_group);
+ return NULL;
+ }
+
+ queue_append(poll_group_queue, &poll_group->queue_node);
+
+ return poll_group;
+}
+
+/* Destroys an (empty) poll group: closes its AFD handle, unlinks it from the
+ * port's queue, and frees it. */
+void poll_group_delete(poll_group_t* poll_group) {
+ assert(poll_group->group_size == 0);
+ CloseHandle(poll_group->afd_device_handle);
+ queue_remove(&poll_group->queue_node);
+ free(poll_group);
+}
+
+/* Recovers a poll_group_t from its embedded queue node. */
+poll_group_t* poll_group_from_queue_node(queue_node_t* queue_node) {
+ return container_of(queue_node, poll_group_t, queue_node);
+}
+
+HANDLE poll_group_get_afd_device_handle(poll_group_t* poll_group) {
+ return poll_group->afd_device_handle;
+}
+
+/* Finds a poll group with spare capacity (the queue keeps non-full groups at
+ * the back), creating a new one if the last group is absent or full, and
+ * bumps its usage count. Returns NULL with errno/LastError set on failure. */
+poll_group_t* poll_group_acquire(port_state_t* port_state) {
+ queue_t* poll_group_queue = port_get_poll_group_queue(port_state);
+ poll_group_t* poll_group =
+ !queue_is_empty(poll_group_queue)
+ ? container_of(
+ queue_last(poll_group_queue), poll_group_t, queue_node)
+ : NULL;
+
+ if (poll_group == NULL ||
+ poll_group->group_size >= POLL_GROUP__MAX_GROUP_SIZE)
+ poll_group = poll_group__new(port_state);
+ if (poll_group == NULL)
+ return NULL;
+
+ /* A group that just became full is moved to the front so it is no longer
+ * the first candidate for new acquisitions. */
+ if (++poll_group->group_size == POLL_GROUP__MAX_GROUP_SIZE)
+ queue_move_to_start(poll_group_queue, &poll_group->queue_node);
+
+ return poll_group;
+}
+
+/* Drops one usage count and moves the group to the back of the queue, where
+ * poll_group_acquire() looks for capacity. */
+void poll_group_release(poll_group_t* poll_group) {
+ port_state_t* port_state = poll_group->port_state;
+ queue_t* poll_group_queue = port_get_poll_group_queue(port_state);
+
+ poll_group->group_size--;
+ assert(poll_group->group_size < POLL_GROUP__MAX_GROUP_SIZE);
+
+ queue_move_to_end(poll_group_queue, &poll_group->queue_node);
+
+ /* Poll groups are currently only freed when the epoll port is closed. */
+}
+
+WEPOLL_INTERNAL sock_state_t* sock_new(port_state_t* port_state,
+ SOCKET socket);
+WEPOLL_INTERNAL void sock_delete(port_state_t* port_state,
+ sock_state_t* sock_state);
+WEPOLL_INTERNAL void sock_force_delete(port_state_t* port_state,
+ sock_state_t* sock_state);
+
+WEPOLL_INTERNAL int sock_set_event(port_state_t* port_state,
+ sock_state_t* sock_state,
+ const struct epoll_event* ev);
+
+WEPOLL_INTERNAL int sock_update(port_state_t* port_state,
+ sock_state_t* sock_state);
+WEPOLL_INTERNAL int sock_feed_event(port_state_t* port_state,
+ IO_STATUS_BLOCK* io_status_block,
+ struct epoll_event* ev);
+
+WEPOLL_INTERNAL sock_state_t* sock_state_from_queue_node(
+ queue_node_t* queue_node);
+WEPOLL_INTERNAL queue_node_t* sock_state_to_queue_node(
+ sock_state_t* sock_state);
+WEPOLL_INTERNAL sock_state_t* sock_state_from_tree_node(
+ tree_node_t* tree_node);
+WEPOLL_INTERNAL tree_node_t* sock_state_to_tree_node(sock_state_t* sock_state);
+
+/* Largest IOCP completion batch that port_wait() keeps on the stack. */
+#define PORT__MAX_ON_STACK_COMPLETIONS 256
+
+/* Per-epoll-port state: the IOCP handle, the tree of registered sockets, the
+ * pending-update and deleted-socket queues, the poll-group list, the node for
+ * the global handle tree, and a lock guarding it all. `active_poll_count`
+ * tracks threads currently inside GetQueuedCompletionStatusEx(). */
+typedef struct port_state {
+ HANDLE iocp_handle;
+ tree_t sock_tree;
+ queue_t sock_update_queue;
+ queue_t sock_deleted_queue;
+ queue_t poll_group_queue;
+ ts_tree_node_t handle_tree_node;
+ CRITICAL_SECTION lock;
+ size_t active_poll_count;
+} port_state_t;
+
+/* Allocates an uninitialized port_state_t; NULL with errno/LastError on OOM. */
+static inline port_state_t* port__alloc(void) {
+ port_state_t* port_state = malloc(sizeof *port_state);
+ if (port_state == NULL)
+ return_set_error(NULL, ERROR_NOT_ENOUGH_MEMORY);
+
+ return port_state;
+}
+
+static inline void port__free(port_state_t* port) {
+ assert(port != NULL);
+ free(port);
+}
+
+/* Creates the port's backing I/O completion port. */
+static inline HANDLE port__create_iocp(void) {
+ HANDLE iocp_handle =
+ CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0);
+ if (iocp_handle == NULL)
+ return_map_error(NULL);
+
+ return iocp_handle;
+}
+
+/* Allocates and fully initializes a new port. On success stores the IOCP
+ * handle (which doubles as the user-visible epoll handle) in
+ * *iocp_handle_out; on failure returns NULL with errno/LastError set. */
+port_state_t* port_new(HANDLE* iocp_handle_out) {
+ port_state_t* port_state;
+ HANDLE iocp_handle;
+
+ port_state = port__alloc();
+ if (port_state == NULL)
+ goto err1;
+
+ iocp_handle = port__create_iocp();
+ if (iocp_handle == NULL)
+ goto err2;
+
+ memset(port_state, 0, sizeof *port_state);
+
+ port_state->iocp_handle = iocp_handle;
+ tree_init(&port_state->sock_tree);
+ queue_init(&port_state->sock_update_queue);
+ queue_init(&port_state->sock_deleted_queue);
+ queue_init(&port_state->poll_group_queue);
+ ts_tree_node_init(&port_state->handle_tree_node);
+ InitializeCriticalSection(&port_state->lock);
+
+ *iocp_handle_out = iocp_handle;
+ return port_state;
+
+err2:
+ port__free(port_state);
+err1:
+ return NULL;
+}
+
+/* Closes the port's IOCP handle and marks it closed (NULL). */
+static inline int port__close_iocp(port_state_t* port_state) {
+ HANDLE iocp_handle = port_state->iocp_handle;
+ port_state->iocp_handle = NULL;
+
+ if (!CloseHandle(iocp_handle))
+ return_map_error(-1);
+
+ return 0;
+}
+
+/* Closes the port's IOCP under the port lock; the port_state itself is freed
+ * later by port_delete(). */
+int port_close(port_state_t* port_state) {
+ int result;
+
+ EnterCriticalSection(&port_state->lock);
+ result = port__close_iocp(port_state);
+ LeaveCriticalSection(&port_state->lock);
+
+ return result;
+}
+
+/* Final teardown: force-deletes all registered and pending-delete sockets,
+ * destroys all poll groups, and frees the port_state. Must run after
+ * port_close(). */
+int port_delete(port_state_t* port_state) {
+ tree_node_t* tree_node;
+ queue_node_t* queue_node;
+
+ /* At this point the IOCP port should have been closed. */
+ assert(port_state->iocp_handle == NULL);
+
+ while ((tree_node = tree_root(&port_state->sock_tree)) != NULL) {
+ sock_state_t* sock_state = sock_state_from_tree_node(tree_node);
+ sock_force_delete(port_state, sock_state);
+ }
+
+ while ((queue_node = queue_first(&port_state->sock_deleted_queue)) != NULL) {
+ sock_state_t* sock_state = sock_state_from_queue_node(queue_node);
+ sock_force_delete(port_state, sock_state);
+ }
+
+ while ((queue_node = queue_first(&port_state->poll_group_queue)) != NULL) {
+ poll_group_t* poll_group = poll_group_from_queue_node(queue_node);
+ poll_group_delete(poll_group);
+ }
+
+ assert(queue_is_empty(&port_state->sock_update_queue));
+
+ DeleteCriticalSection(&port_state->lock);
+
+ port__free(port_state);
+
+ return 0;
+}
+
+/* Drains the sock-update queue, submitting a fresh poll request for each
+ * queued socket. Caller must hold the port lock. */
+static int port__update_events(port_state_t* port_state) {
+ queue_t* sock_update_queue = &port_state->sock_update_queue;
+
+ /* Walk the queue, submitting new poll requests for every socket that needs
+ * it. */
+ while (!queue_is_empty(sock_update_queue)) {
+ queue_node_t* queue_node = queue_first(sock_update_queue);
+ sock_state_t* sock_state = sock_state_from_queue_node(queue_node);
+
+ if (sock_update(port_state, sock_state) < 0)
+ return -1;
+
+ /* sock_update() removes the socket from the update queue. */
+ }
+
+ return 0;
+}
+
+/* Only submit poll updates if some thread is currently blocked in
+ * port__poll(); otherwise updates are deferred until the next wait. */
+static inline void port__update_events_if_polling(port_state_t* port_state) {
+ if (port_state->active_poll_count > 0)
+ port__update_events(port_state);
+}
+
+/* Converts a batch of dequeued IOCP completions (each carrying the AFD
+ * request's IO_STATUS_BLOCK as its OVERLAPPED pointer) into epoll events.
+ * Returns the number of epoll events produced (can be fewer than
+ * iocp_event_count, since sock_feed_event() may yield 0 for a completion). */
+static inline int port__feed_events(port_state_t* port_state,
+ struct epoll_event* epoll_events,
+ OVERLAPPED_ENTRY* iocp_events,
+ DWORD iocp_event_count) {
+ int epoll_event_count = 0;
+ DWORD i;
+
+ for (i = 0; i < iocp_event_count; i++) {
+ IO_STATUS_BLOCK* io_status_block =
+ (IO_STATUS_BLOCK*) iocp_events[i].lpOverlapped;
+ struct epoll_event* ev = &epoll_events[epoll_event_count];
+
+ epoll_event_count += sock_feed_event(port_state, io_status_block, ev);
+ }
+
+ return epoll_event_count;
+}
+
+/* One dequeue pass: flush pending socket updates, then block in
+ * GetQueuedCompletionStatusEx() with the port lock RELEASED (re-acquired
+ * afterwards), and translate completions into epoll events. Returns the
+ * number of epoll events, or -1 with errno/LastError set (time-out surfaces
+ * as -1 with LastError == WAIT_TIMEOUT). Caller must hold the port lock. */
+static inline int port__poll(port_state_t* port_state,
+ struct epoll_event* epoll_events,
+ OVERLAPPED_ENTRY* iocp_events,
+ DWORD maxevents,
+ DWORD timeout) {
+ DWORD completion_count;
+
+ if (port__update_events(port_state) < 0)
+ return -1;
+
+ /* Advertise that a thread is polling so concurrent epoll_ctl() calls submit
+ * poll updates immediately (see port__update_events_if_polling()). */
+ port_state->active_poll_count++;
+
+ LeaveCriticalSection(&port_state->lock);
+
+ BOOL r = GetQueuedCompletionStatusEx(port_state->iocp_handle,
+ iocp_events,
+ maxevents,
+ &completion_count,
+ timeout,
+ FALSE);
+
+ EnterCriticalSection(&port_state->lock);
+
+ port_state->active_poll_count--;
+
+ if (!r)
+ return_map_error(-1);
+
+ return port__feed_events(
+ port_state, epoll_events, iocp_events, completion_count);
+}
+
+/* Waits for events on the port, retrying port__poll() until at least one
+ * epoll event is produced, an error occurs, or the deadline passes. Returns
+ * the event count (0 on time-out) or -1 with errno/LastError set. */
+int port_wait(port_state_t* port_state,
+ struct epoll_event* events,
+ int maxevents,
+ int timeout) {
+ OVERLAPPED_ENTRY stack_iocp_events[PORT__MAX_ON_STACK_COMPLETIONS];
+ OVERLAPPED_ENTRY* iocp_events;
+ uint64_t due = 0;
+ DWORD gqcs_timeout;
+ int result;
+
+ /* Check whether `maxevents` is in range. */
+ if (maxevents <= 0)
+ return_set_error(-1, ERROR_INVALID_PARAMETER);
+
+ /* Decide whether the IOCP completion list can live on the stack, or allocate
+ * memory for it on the heap. */
+ if ((size_t) maxevents <= array_count(stack_iocp_events)) {
+ iocp_events = stack_iocp_events;
+ } else if ((iocp_events =
+ malloc((size_t) maxevents * sizeof *iocp_events)) == NULL) {
+ /* Allocation failure degrades gracefully to the smaller stack buffer. */
+ iocp_events = stack_iocp_events;
+ maxevents = array_count(stack_iocp_events);
+ }
+
+ /* Compute the timeout for GetQueuedCompletionStatus, and the wait end
+ * time, if the user specified a timeout other than zero or infinite. */
+ if (timeout > 0) {
+ due = GetTickCount64() + (uint64_t) timeout;
+ gqcs_timeout = (DWORD) timeout;
+ } else if (timeout == 0) {
+ gqcs_timeout = 0;
+ } else {
+ gqcs_timeout = INFINITE;
+ }
+
+ EnterCriticalSection(&port_state->lock);
+
+ /* Dequeue completion packets until either at least one interesting event
+ * has been discovered, or the timeout is reached. */
+ for (;;) {
+ uint64_t now;
+
+ result = port__poll(
+ port_state, events, iocp_events, (DWORD) maxevents, gqcs_timeout);
+ if (result < 0 || result > 0)
+ break; /* Result, error, or time-out. */
+
+ if (timeout < 0)
+ continue; /* When timeout is negative, never time out. */
+
+ /* Update time. */
+ now = GetTickCount64();
+
+ /* Do not allow the due time to be in the past. */
+ if (now >= due) {
+ SetLastError(WAIT_TIMEOUT);
+ break;
+ }
+
+ /* Recompute time-out argument for GetQueuedCompletionStatus. */
+ gqcs_timeout = (DWORD)(due - now);
+ }
+
+ port__update_events_if_polling(port_state);
+
+ LeaveCriticalSection(&port_state->lock);
+
+ if (iocp_events != stack_iocp_events)
+ free(iocp_events);
+
+ if (result >= 0)
+ return result;
+ else if (GetLastError() == WAIT_TIMEOUT)
+ return 0;
+ else
+ return -1;
+}
+
+/* EPOLL_CTL_ADD: create sock state for `sock`, record the requested events,
+ * and submit a poll update if a wait is in progress. */
+static inline int port__ctl_add(port_state_t* port_state,
+ SOCKET sock,
+ struct epoll_event* ev) {
+ sock_state_t* sock_state = sock_new(port_state, sock);
+ if (sock_state == NULL)
+ return -1;
+
+ if (sock_set_event(port_state, sock_state, ev) < 0) {
+ sock_delete(port_state, sock_state);
+ return -1;
+ }
+
+ port__update_events_if_polling(port_state);
+
+ return 0;
+}
+
+/* EPOLL_CTL_MOD: update the event mask of an already-registered socket. */
+static inline int port__ctl_mod(port_state_t* port_state,
+ SOCKET sock,
+ struct epoll_event* ev) {
+ sock_state_t* sock_state = port_find_socket(port_state, sock);
+ if (sock_state == NULL)
+ return -1;
+
+ if (sock_set_event(port_state, sock_state, ev) < 0)
+ return -1;
+
+ port__update_events_if_polling(port_state);
+
+ return 0;
+}
+
+/* EPOLL_CTL_DEL: unregister a socket from the port. */
+static inline int port__ctl_del(port_state_t* port_state, SOCKET sock) {
+ sock_state_t* sock_state = port_find_socket(port_state, sock);
+ if (sock_state == NULL)
+ return -1;
+
+ sock_delete(port_state, sock_state);
+
+ return 0;
+}
+
+/* Dispatches an epoll_ctl() op; unknown ops yield ERROR_INVALID_PARAMETER. */
+static inline int port__ctl_op(port_state_t* port_state,
+ int op,
+ SOCKET sock,
+ struct epoll_event* ev) {
+ switch (op) {
+ case EPOLL_CTL_ADD:
+ return port__ctl_add(port_state, sock, ev);
+ case EPOLL_CTL_MOD:
+ return port__ctl_mod(port_state, sock, ev);
+ case EPOLL_CTL_DEL:
+ return port__ctl_del(port_state, sock);
+ default:
+ return_set_error(-1, ERROR_INVALID_PARAMETER);
+ }
+}
+
+/* Thread-safe entry point for epoll_ctl(): runs port__ctl_op() under the port
+ * lock. */
+int port_ctl(port_state_t* port_state,
+ int op,
+ SOCKET sock,
+ struct epoll_event* ev) {
+ int result;
+
+ EnterCriticalSection(&port_state->lock);
+ result = port__ctl_op(port_state, op, sock, ev);
+ LeaveCriticalSection(&port_state->lock);
+
+ return result;
+}
+
+/* Adds a socket to the port's lookup tree, keyed by its SOCKET value;
+ * ERROR_ALREADY_EXISTS if the socket is already registered. */
+int port_register_socket(port_state_t* port_state,
+ sock_state_t* sock_state,
+ SOCKET socket) {
+ if (tree_add(&port_state->sock_tree,
+ sock_state_to_tree_node(sock_state),
+ socket) < 0)
+ return_set_error(-1, ERROR_ALREADY_EXISTS);
+ return 0;
+}
+
+void port_unregister_socket(port_state_t* port_state,
+ sock_state_t* sock_state) {
+ tree_del(&port_state->sock_tree, sock_state_to_tree_node(sock_state));
+}
+
+/* Looks up a registered socket; NULL with ERROR_NOT_FOUND if absent. */
+sock_state_t* port_find_socket(port_state_t* port_state, SOCKET socket) {
+ tree_node_t* tree_node = tree_find(&port_state->sock_tree, socket);
+ if (tree_node == NULL)
+ return_set_error(NULL, ERROR_NOT_FOUND);
+ return sock_state_from_tree_node(tree_node);
+}
+
+/* Enqueues a socket for a poll-request refresh (idempotent: no-op if already
+ * queued). */
+void port_request_socket_update(port_state_t* port_state,
+ sock_state_t* sock_state) {
+ if (queue_is_enqueued(sock_state_to_queue_node(sock_state)))
+ return;
+ queue_append(&port_state->sock_update_queue,
+ sock_state_to_queue_node(sock_state));
+}
+
+/* Removes a socket from the update queue if present (idempotent). */
+void port_cancel_socket_update(port_state_t* port_state,
+ sock_state_t* sock_state) {
+ unused_var(port_state);
+ if (!queue_is_enqueued(sock_state_to_queue_node(sock_state)))
+ return;
+ queue_remove(sock_state_to_queue_node(sock_state));
+}
+
+/* Parks a socket on the deleted queue until it can be freed (idempotent).
+ * NOTE(review): a socket uses the same embedded queue node for the update and
+ * deleted queues, hence the shared queue_is_enqueued() check. */
+void port_add_deleted_socket(port_state_t* port_state,
+ sock_state_t* sock_state) {
+ if (queue_is_enqueued(sock_state_to_queue_node(sock_state)))
+ return;
+ queue_append(&port_state->sock_deleted_queue,
+ sock_state_to_queue_node(sock_state));
+}
+
+void port_remove_deleted_socket(port_state_t* port_state,
+ sock_state_t* sock_state) {
+ unused_var(port_state);
+ if (!queue_is_enqueued(sock_state_to_queue_node(sock_state)))
+ return;
+ queue_remove(sock_state_to_queue_node(sock_state));
+}
+
+HANDLE port_get_iocp_handle(port_state_t* port_state) {
+ assert(port_state->iocp_handle != NULL);
+ return port_state->iocp_handle;
+}
+
+queue_t* port_get_poll_group_queue(port_state_t* port_state) {
+ return &port_state->poll_group_queue;
+}
+
+port_state_t* port_state_from_handle_tree_node(ts_tree_node_t* tree_node) {
+ return container_of(tree_node, port_state_t, handle_tree_node);
+}
+
+ts_tree_node_t* port_state_to_handle_tree_node(port_state_t* port_state) {
+ return &port_state->handle_tree_node;
+}
+
+void queue_init(queue_t* queue) {
+ queue_node_init(&queue->head);
+}
+
+void queue_node_init(queue_node_t* node) {
+ node->prev = node;
+ node->next = node;
+}
+
+static inline void queue__detach_node(queue_node_t* node) {
+ node->prev->next = node->next;
+ node->next->prev = node->prev;
+}
+
+queue_node_t* queue_first(const queue_t* queue) {
+ return !queue_is_empty(queue) ? queue->head.next : NULL;
+}
+
+queue_node_t* queue_last(const queue_t* queue) {
+ return !queue_is_empty(queue) ? queue->head.prev : NULL;
+}
+
+void queue_prepend(queue_t* queue, queue_node_t* node) {
+ node->next = queue->head.next;
+ node->prev = &queue->head;
+ node->next->prev = node;
+ queue->head.next = node;
+}
+
+void queue_append(queue_t* queue, queue_node_t* node) {
+ node->next = &queue->head;
+ node->prev = queue->head.prev;
+ node->prev->next = node;
+ queue->head.prev = node;
+}
+
/* Detaches `node` from wherever it is and re-inserts it at the front. */
void queue_move_to_start(queue_t* queue, queue_node_t* node) {
  queue__detach_node(node);
  queue_prepend(queue, node);
}

/* Detaches `node` from wherever it is and re-inserts it at the back. */
void queue_move_to_end(queue_t* queue, queue_node_t* node) {
  queue__detach_node(node);
  queue_append(queue, node);
}

/* Unlinks `node` and resets it to the detached (self-linked) state. */
void queue_remove(queue_node_t* node) {
  queue__detach_node(node);
  queue_node_init(node);
}
+
/* A queue is empty when its sentinel head is not linked to another node. */
bool queue_is_empty(const queue_t* queue) {
  return !queue_is_enqueued(&queue->head);
}

/* A detached node links to itself (see queue_node_init), so a node is
 * enqueued exactly when its prev link points elsewhere. */
bool queue_is_enqueued(const queue_node_t* node) {
  return node->prev != node;
}
+
/* reflock state layout: the low 28 bits hold the reference count, bit 28
 * marks the lock as destroyed, and REFLOCK__POISON is stored after
 * destruction so use-after-destroy trips the asserts below. */
#define REFLOCK__REF ((long) 0x00000001UL)
#define REFLOCK__REF_MASK ((long) 0x0fffffffUL)
#define REFLOCK__DESTROY ((long) 0x10000000UL)
#define REFLOCK__DESTROY_MASK ((long) 0xf0000000UL)
#define REFLOCK__POISON ((long) 0x300dead0UL)

/* Process-wide NT keyed event used to park/wake the destroying thread;
 * created once by reflock_global_init(). */
static HANDLE reflock__keyed_event = NULL;
+
/* Creates the global keyed event that reflock_unref_and_destroy() blocks
 * on. Returns 0 on success; -1 with the last error set on failure. */
int reflock_global_init(void) {
  NTSTATUS status = NtCreateKeyedEvent(
      &reflock__keyed_event, KEYEDEVENT_ALL_ACCESS, NULL, 0);
  if (status != STATUS_SUCCESS)
    return_set_error(-1, RtlNtStatusToDosError(status));
  return 0;
}

/* Initializes a reflock: zero references, not destroyed. */
void reflock_init(reflock_t* reflock) {
  reflock->state = 0;
}
+
/* Wakes the thread waiting on `address` via the keyed event. Aborts on
 * failure: a lost wakeup would leave the destroyer blocked forever. */
static void reflock__signal_event(void* address) {
  NTSTATUS status =
      NtReleaseKeyedEvent(reflock__keyed_event, address, FALSE, NULL);
  if (status != STATUS_SUCCESS)
    abort();
}

/* Blocks until another thread signals `address`. Aborts on failure. */
static void reflock__await_event(void* address) {
  NTSTATUS status =
      NtWaitForKeyedEvent(reflock__keyed_event, address, FALSE, NULL);
  if (status != STATUS_SUCCESS)
    abort();
}
+
/* Atomically takes a reference. Must not be called on a lock that is
 * being, or has been, destroyed. */
void reflock_ref(reflock_t* reflock) {
  long state = InterlockedAdd(&reflock->state, REFLOCK__REF);

  /* Verify that the counter didn't overflow and the lock isn't destroyed. */
  assert((state & REFLOCK__DESTROY_MASK) == 0);
  unused_var(state);
}
+
/* Atomically drops a reference. If the lock is marked for destruction and
 * this was the last reference (state decays to exactly REFLOCK__DESTROY),
 * wake the thread blocked in reflock_unref_and_destroy(). */
void reflock_unref(reflock_t* reflock) {
  long state = InterlockedAdd(&reflock->state, -REFLOCK__REF);

  /* Verify that the lock was referenced and not already destroyed. */
  assert((state & REFLOCK__DESTROY_MASK & ~REFLOCK__DESTROY) == 0);

  if (state == REFLOCK__DESTROY)
    reflock__signal_event(reflock);
}
+
/* Drops the caller's reference and marks the lock destroyed in one atomic
 * step, then blocks until every other reference has been released. On
 * return the state is poisoned and the owning object may be freed. */
void reflock_unref_and_destroy(reflock_t* reflock) {
  long state =
      InterlockedAdd(&reflock->state, REFLOCK__DESTROY - REFLOCK__REF);
  long ref_count = state & REFLOCK__REF_MASK;

  /* Verify that the lock was referenced and not already destroyed. */
  assert((state & REFLOCK__DESTROY_MASK) == REFLOCK__DESTROY);

  if (ref_count != 0)
    reflock__await_event(reflock);

  /* Poison the state so later accidental use trips the asserts above. */
  state = InterlockedExchange(&reflock->state, REFLOCK__POISON);
  assert(state == REFLOCK__DESTROY);
}
+
/* The epoll event bits this implementation knows how to monitor. Flag bits
 * such as EPOLLONESHOT are handled separately. */
#define SOCK__KNOWN_EPOLL_EVENTS \
  (EPOLLIN | EPOLLPRI | EPOLLOUT | EPOLLERR | EPOLLHUP | EPOLLRDNORM | \
   EPOLLRDBAND | EPOLLWRNORM | EPOLLWRBAND | EPOLLMSG | EPOLLRDHUP)

/* Lifecycle of the (at most one) outstanding AFD poll request per socket. */
typedef enum sock__poll_status {
  SOCK__POLL_IDLE = 0,    /* No poll request submitted. */
  SOCK__POLL_PENDING,     /* A poll request is in flight. */
  SOCK__POLL_CANCELLED    /* In flight but cancelled; completion awaited. */
} sock__poll_status_t;
+
/* Per-socket bookkeeping for a socket registered with an epoll port. */
typedef struct sock_state {
  IO_STATUS_BLOCK io_status_block; /* Status block for the AFD poll request. */
  AFD_POLL_INFO poll_info;         /* Request/response buffer for AFD poll. */
  queue_node_t queue_node;         /* Link in the update or deleted queue. */
  tree_node_t tree_node;           /* Link in the port's socket tree. */
  poll_group_t* poll_group;        /* Poll group that owns the AFD handle. */
  SOCKET base_socket;              /* Base provider socket (LSPs unwrapped). */
  epoll_data_t user_data;          /* Opaque datum echoed back to the user. */
  uint32_t user_events;            /* Events the user currently wants. */
  uint32_t pending_events;         /* Events the in-flight poll monitors. */
  sock__poll_status_t poll_status; /* State of the outstanding poll request. */
  bool delete_pending;             /* Unregistered; free when poll returns. */
} sock_state_t;
+
+static inline sock_state_t* sock__alloc(void) {
+ sock_state_t* sock_state = malloc(sizeof *sock_state);
+ if (sock_state == NULL)
+ return_set_error(NULL, ERROR_NOT_ENOUGH_MEMORY);
+ return sock_state;
+}
+
+static inline void sock__free(sock_state_t* sock_state) {
+ assert(sock_state != NULL);
+ free(sock_state);
+}
+
/* Cancels the in-flight AFD poll request for this socket. On success the
 * socket transitions to SOCK__POLL_CANCELLED; the completion packet will
 * still be delivered later. Returns 0 on success, -1 on failure. */
static inline int sock__cancel_poll(sock_state_t* sock_state) {
  assert(sock_state->poll_status == SOCK__POLL_PENDING);

  if (afd_cancel_poll(poll_group_get_afd_device_handle(sock_state->poll_group),
                      &sock_state->io_status_block) < 0)
    return -1;

  sock_state->poll_status = SOCK__POLL_CANCELLED;
  sock_state->pending_events = 0;
  return 0;
}
+
/* Creates the tracking state for `socket` and registers it with the port.
 * Resolves the base provider socket, attaches the socket to a poll group,
 * and inserts it into the port's socket tree. Returns the new state, or
 * NULL with the last error set on failure (no resources leaked). */
sock_state_t* sock_new(port_state_t* port_state, SOCKET socket) {
  SOCKET base_socket;
  poll_group_t* poll_group;
  sock_state_t* sock_state;

  if (socket == 0 || socket == INVALID_SOCKET)
    return_set_error(NULL, ERROR_INVALID_HANDLE);

  /* AFD polls operate on the base (bottom-most provider) socket. */
  base_socket = ws_get_base_socket(socket);
  if (base_socket == INVALID_SOCKET)
    return NULL;

  poll_group = poll_group_acquire(port_state);
  if (poll_group == NULL)
    return NULL;

  sock_state = sock__alloc();
  if (sock_state == NULL)
    goto err1;

  memset(sock_state, 0, sizeof *sock_state);

  sock_state->base_socket = base_socket;
  sock_state->poll_group = poll_group;

  tree_node_init(&sock_state->tree_node);
  queue_node_init(&sock_state->queue_node);

  /* Keyed by the user's socket handle so epoll_ctl() can find it again. */
  if (port_register_socket(port_state, sock_state, socket) < 0)
    goto err2;

  return sock_state;

err2:
  sock__free(sock_state);
err1:
  poll_group_release(poll_group);

  return NULL;
}
+
/* Unregisters the socket and frees its state. If a poll request is still
 * outstanding and `force` is false, freeing is deferred until the request
 * completes (the state is parked on the deleted-sockets queue). */
static int sock__delete(port_state_t* port_state,
                        sock_state_t* sock_state,
                        bool force) {
  if (!sock_state->delete_pending) {
    /* First call: cancel any in-flight poll and detach from the port. */
    if (sock_state->poll_status == SOCK__POLL_PENDING)
      sock__cancel_poll(sock_state);

    port_cancel_socket_update(port_state, sock_state);
    port_unregister_socket(port_state, sock_state);

    sock_state->delete_pending = true;
  }

  /* If the poll request still needs to complete, the sock_state object can't
   * be free()d yet. `sock_feed_event()` or `port_close()` will take care
   * of this later. */
  if (force || sock_state->poll_status == SOCK__POLL_IDLE) {
    /* Free the sock_state now. */
    port_remove_deleted_socket(port_state, sock_state);
    poll_group_release(sock_state->poll_group);
    sock__free(sock_state);
  } else {
    /* Free the socket later. */
    port_add_deleted_socket(port_state, sock_state);
  }

  return 0;
}
+
/* Deferred delete: the state survives until any pending poll completes. */
void sock_delete(port_state_t* port_state, sock_state_t* sock_state) {
  sock__delete(port_state, sock_state, false);
}

/* Forced delete: frees the state even with a poll request outstanding;
 * used when the whole port is being torn down. */
void sock_force_delete(port_state_t* port_state, sock_state_t* sock_state) {
  sock__delete(port_state, sock_state, true);
}
+
/* Records the user's new event mask and datum for this socket (EPOLL_CTL_ADD
 * or EPOLL_CTL_MOD), and schedules a poll update when the in-flight request
 * does not cover all newly requested events. Always returns 0. */
int sock_set_event(port_state_t* port_state,
                   sock_state_t* sock_state,
                   const struct epoll_event* ev) {
  /* EPOLLERR and EPOLLHUP are always reported, even when not requested by the
   * caller. However they are disabled after a event has been reported for a
   * socket for which the EPOLLONESHOT flag was set. */
  uint32_t events = ev->events | EPOLLERR | EPOLLHUP;

  sock_state->user_events = events;
  sock_state->user_data = ev->data;

  if ((events & SOCK__KNOWN_EPOLL_EVENTS & ~sock_state->pending_events) != 0)
    port_request_socket_update(port_state, sock_state);

  return 0;
}
+
+static inline DWORD sock__epoll_events_to_afd_events(uint32_t epoll_events) {
+ /* Always monitor for AFD_POLL_LOCAL_CLOSE, which is triggered when the
+ * socket is closed with closesocket() or CloseHandle(). */
+ DWORD afd_events = AFD_POLL_LOCAL_CLOSE;
+
+ if (epoll_events & (EPOLLIN | EPOLLRDNORM))
+ afd_events |= AFD_POLL_RECEIVE | AFD_POLL_ACCEPT;
+ if (epoll_events & (EPOLLPRI | EPOLLRDBAND))
+ afd_events |= AFD_POLL_RECEIVE_EXPEDITED;
+ if (epoll_events & (EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND))
+ afd_events |= AFD_POLL_SEND;
+ if (epoll_events & (EPOLLIN | EPOLLRDNORM | EPOLLRDHUP))
+ afd_events |= AFD_POLL_DISCONNECT;
+ if (epoll_events & EPOLLHUP)
+ afd_events |= AFD_POLL_ABORT;
+ if (epoll_events & EPOLLERR)
+ afd_events |= AFD_POLL_CONNECT_FAIL;
+
+ return afd_events;
+}
+
+static inline uint32_t sock__afd_events_to_epoll_events(DWORD afd_events) {
+ uint32_t epoll_events = 0;
+
+ if (afd_events & (AFD_POLL_RECEIVE | AFD_POLL_ACCEPT))
+ epoll_events |= EPOLLIN | EPOLLRDNORM;
+ if (afd_events & AFD_POLL_RECEIVE_EXPEDITED)
+ epoll_events |= EPOLLPRI | EPOLLRDBAND;
+ if (afd_events & AFD_POLL_SEND)
+ epoll_events |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
+ if (afd_events & AFD_POLL_DISCONNECT)
+ epoll_events |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+ if (afd_events & AFD_POLL_ABORT)
+ epoll_events |= EPOLLHUP;
+ if (afd_events & AFD_POLL_CONNECT_FAIL)
+ /* Linux reports all these events after connect() has failed. */
+ epoll_events |=
+ EPOLLIN | EPOLLOUT | EPOLLERR | EPOLLRDNORM | EPOLLWRNORM | EPOLLRDHUP;
+
+ return epoll_events;
+}
+
/* Brings the socket's AFD poll request in line with the user's current
 * event mask: leaves an adequate pending request alone, cancels an
 * out-of-date one, or submits a new one when idle. On success the socket is
 * removed from the port's update queue. Returns 0, or -1 on failure. */
int sock_update(port_state_t* port_state, sock_state_t* sock_state) {
  assert(!sock_state->delete_pending);

  if ((sock_state->poll_status == SOCK__POLL_PENDING) &&
      (sock_state->user_events & SOCK__KNOWN_EPOLL_EVENTS &
       ~sock_state->pending_events) == 0) {
    /* All the events the user is interested in are already being monitored by
     * the pending poll operation. It might spuriously complete because of an
     * event that we're no longer interested in; when that happens we'll submit
     * a new poll operation with the updated event mask. */

  } else if (sock_state->poll_status == SOCK__POLL_PENDING) {
    /* A poll operation is already pending, but it's not monitoring for all the
     * events that the user is interested in. Therefore, cancel the pending
     * poll operation; when we receive it's completion package, a new poll
     * operation will be submitted with the correct event mask. */
    if (sock__cancel_poll(sock_state) < 0)
      return -1;

  } else if (sock_state->poll_status == SOCK__POLL_CANCELLED) {
    /* The poll operation has already been cancelled, we're still waiting for
     * it to return. For now, there's nothing that needs to be done. */

  } else if (sock_state->poll_status == SOCK__POLL_IDLE) {
    /* No poll operation is pending; start one. */
    sock_state->poll_info.Exclusive = FALSE;
    sock_state->poll_info.NumberOfHandles = 1;
    sock_state->poll_info.Timeout.QuadPart = INT64_MAX;
    sock_state->poll_info.Handles[0].Handle = (HANDLE) sock_state->base_socket;
    sock_state->poll_info.Handles[0].Status = 0;
    sock_state->poll_info.Handles[0].Events =
        sock__epoll_events_to_afd_events(sock_state->user_events);

    if (afd_poll(poll_group_get_afd_device_handle(sock_state->poll_group),
                 &sock_state->poll_info,
                 &sock_state->io_status_block) < 0) {
      switch (GetLastError()) {
        case ERROR_IO_PENDING:
          /* Overlapped poll operation in progress; this is expected. */
          break;
        case ERROR_INVALID_HANDLE:
          /* Socket closed; it'll be dropped from the epoll set. */
          return sock__delete(port_state, sock_state, false);
        default:
          /* Other errors are propagated to the caller. */
          return_map_error(-1);
      }
    }

    /* The poll request was successfully submitted. */
    sock_state->poll_status = SOCK__POLL_PENDING;
    sock_state->pending_events = sock_state->user_events;

  } else {
    /* Unreachable. */
    assert(false);
  }

  port_cancel_socket_update(port_state, sock_state);
  return 0;
}
+
/* Processes a completed AFD poll request for the socket that owns
 * `io_status_block`. Fills `ev` and returns 1 when an event should be
 * reported to the user; returns 0 when there is nothing to report; returns
 * the result of sock__delete() when the socket turns out to be gone. */
int sock_feed_event(port_state_t* port_state,
                    IO_STATUS_BLOCK* io_status_block,
                    struct epoll_event* ev) {
  sock_state_t* sock_state =
      container_of(io_status_block, sock_state_t, io_status_block);
  AFD_POLL_INFO* poll_info = &sock_state->poll_info;
  uint32_t epoll_events = 0;

  /* Whatever happened, the request is no longer in flight. */
  sock_state->poll_status = SOCK__POLL_IDLE;
  sock_state->pending_events = 0;

  if (sock_state->delete_pending) {
    /* Socket has been deleted earlier and can now be freed. */
    return sock__delete(port_state, sock_state, false);

  } else if (io_status_block->Status == STATUS_CANCELLED) {
    /* The poll request was cancelled by CancelIoEx. */

  } else if (!NT_SUCCESS(io_status_block->Status)) {
    /* The overlapped request itself failed in an unexpected way. */
    epoll_events = EPOLLERR;

  } else if (poll_info->NumberOfHandles < 1) {
    /* This poll operation succeeded but didn't report any socket events. */

  } else if (poll_info->Handles[0].Events & AFD_POLL_LOCAL_CLOSE) {
    /* The poll operation reported that the socket was closed. */
    return sock__delete(port_state, sock_state, false);

  } else {
    /* Events related to our socket were reported. */
    epoll_events =
        sock__afd_events_to_epoll_events(poll_info->Handles[0].Events);
  }

  /* Requeue the socket so a new poll request will be submitted. */
  port_request_socket_update(port_state, sock_state);

  /* Filter out events that the user didn't ask for. */
  epoll_events &= sock_state->user_events;

  /* Return if there are no epoll events to report. */
  if (epoll_events == 0)
    return 0;

  /* If the the socket has the EPOLLONESHOT flag set, unmonitor all events,
   * even EPOLLERR and EPOLLHUP. But always keep looking for closed sockets. */
  if (sock_state->user_events & EPOLLONESHOT)
    sock_state->user_events = 0;

  ev->data = sock_state->user_data;
  ev->events = epoll_events;
  return 1;
}
+
/* Maps an embedded queue node back to its containing sock_state_t. */
sock_state_t* sock_state_from_queue_node(queue_node_t* queue_node) {
  return container_of(queue_node, sock_state_t, queue_node);
}

/* Returns the socket's embedded queue node. */
queue_node_t* sock_state_to_queue_node(sock_state_t* sock_state) {
  return &sock_state->queue_node;
}

/* Maps an embedded tree node back to its containing sock_state_t. */
sock_state_t* sock_state_from_tree_node(tree_node_t* tree_node) {
  return container_of(tree_node, sock_state_t, tree_node);
}

/* Returns the socket's embedded tree node. */
tree_node_t* sock_state_to_tree_node(sock_state_t* sock_state) {
  return &sock_state->tree_node;
}
+
+void ts_tree_init(ts_tree_t* ts_tree) {
+ tree_init(&ts_tree->tree);
+ InitializeSRWLock(&ts_tree->lock);
+}
+
+void ts_tree_node_init(ts_tree_node_t* node) {
+ tree_node_init(&node->tree_node);
+ reflock_init(&node->reflock);
+}
+
+int ts_tree_add(ts_tree_t* ts_tree, ts_tree_node_t* node, uintptr_t key) {
+ int r;
+
+ AcquireSRWLockExclusive(&ts_tree->lock);
+ r = tree_add(&ts_tree->tree, &node->tree_node, key);
+ ReleaseSRWLockExclusive(&ts_tree->lock);
+
+ return r;
+}
+
+static inline ts_tree_node_t* ts_tree__find_node(ts_tree_t* ts_tree,
+ uintptr_t key) {
+ tree_node_t* tree_node = tree_find(&ts_tree->tree, key);
+ if (tree_node == NULL)
+ return NULL;
+
+ return container_of(tree_node, ts_tree_node_t, tree_node);
+}
+
/* Removes the node with `key` from the tree and returns it with a
 * reference taken, or NULL when not found. Taking the reference under the
 * write lock guarantees the node can't be destroyed before the caller
 * calls ts_tree_node_unref*(). */
ts_tree_node_t* ts_tree_del_and_ref(ts_tree_t* ts_tree, uintptr_t key) {
  ts_tree_node_t* ts_tree_node;

  AcquireSRWLockExclusive(&ts_tree->lock);

  ts_tree_node = ts_tree__find_node(ts_tree, key);
  if (ts_tree_node != NULL) {
    tree_del(&ts_tree->tree, &ts_tree_node->tree_node);
    reflock_ref(&ts_tree_node->reflock);
  }

  ReleaseSRWLockExclusive(&ts_tree->lock);

  return ts_tree_node;
}
+
/* Finds the node with `key` and returns it with a reference taken, or NULL
 * when not found. Only the read lock is needed: the tree is not modified
 * and reflock_ref() is atomic. */
ts_tree_node_t* ts_tree_find_and_ref(ts_tree_t* ts_tree, uintptr_t key) {
  ts_tree_node_t* ts_tree_node;

  AcquireSRWLockShared(&ts_tree->lock);

  ts_tree_node = ts_tree__find_node(ts_tree, key);
  if (ts_tree_node != NULL)
    reflock_ref(&ts_tree_node->reflock);

  ReleaseSRWLockShared(&ts_tree->lock);

  return ts_tree_node;
}
+
/* Releases a reference obtained from ts_tree_{find,del}_and_ref(). */
void ts_tree_node_unref(ts_tree_node_t* node) {
  reflock_unref(&node->reflock);
}

/* Releases the caller's reference and destroys the node's reflock; blocks
 * until all other references are gone (see reflock_unref_and_destroy). */
void ts_tree_node_unref_and_destroy(ts_tree_node_t* node) {
  reflock_unref_and_destroy(&node->reflock);
}
+
/* Initializes an empty red-black tree (root pointer NULL). */
void tree_init(tree_t* tree) {
  memset(tree, 0, sizeof *tree);
}

/* Zero-initializes a tree node; safe to insert or leave detached. */
void tree_node_init(tree_node_t* node) {
  memset(node, 0, sizeof *node);
}
+
/* Rotates `node` with its `trans`-side child: the child `q` takes node's
 * place (fixing the parent link or the tree root), node becomes q's `cis`
 * child, and q's former `cis` subtree is reattached under node. `cis` and
 * `trans` are the identifiers `left`/`right` in either order. */
#define TREE__ROTATE(cis, trans) \
  tree_node_t* p = node;         \
  tree_node_t* q = node->trans;  \
  tree_node_t* parent = p->parent; \
                                 \
  if (parent) {                  \
    if (parent->left == p)       \
      parent->left = q;          \
    else                         \
      parent->right = q;         \
  } else {                       \
    tree->root = q;              \
  }                              \
                                 \
  q->parent = parent;            \
  p->parent = q;                 \
  p->trans = q->cis;             \
  if (p->trans)                  \
    p->trans->parent = p;        \
  q->cis = p;

/* Left rotation: node's right child moves up. */
static inline void tree__rotate_left(tree_t* tree, tree_node_t* node) {
  TREE__ROTATE(left, right)
}

/* Right rotation: node's left child moves up. */
static inline void tree__rotate_right(tree_t* tree, tree_node_t* node) {
  TREE__ROTATE(right, left)
}
+
/* Descends one level down the `side` child, or attaches `node` there and
 * leaves the search loop when that child slot is empty. */
#define TREE__INSERT_OR_DESCEND(side) \
  if (parent->side) {                 \
    parent = parent->side;            \
  } else {                            \
    parent->side = node;              \
    break;                            \
  }

/* One step of the classic red-black insert fix-up, written for a parent
 * that is its grandparent's `cis` child. A red uncle is recolored and the
 * violation moves up; a black uncle is fixed with one or two rotations. */
#define TREE__REBALANCE_AFTER_INSERT(cis, trans) \
  tree_node_t* grandparent = parent->parent;     \
  tree_node_t* uncle = grandparent->trans;       \
                                                 \
  if (uncle && uncle->red) {                     \
    parent->red = uncle->red = false;            \
    grandparent->red = true;                     \
    node = grandparent;                          \
  } else {                                       \
    if (node == parent->trans) {                 \
      tree__rotate_##cis(tree, parent);          \
      node = parent;                             \
      parent = node->parent;                     \
    }                                            \
    parent->red = false;                         \
    grandparent->red = true;                     \
    tree__rotate_##trans(tree, grandparent);     \
  }
+
/* Inserts `node` with `key` into the red-black tree. Returns 0 on success,
 * or -1 when a node with the same key already exists (tree unchanged). */
int tree_add(tree_t* tree, tree_node_t* node, uintptr_t key) {
  tree_node_t* parent;

  /* Standard BST descent; attaches `node` as a leaf. */
  parent = tree->root;
  if (parent) {
    for (;;) {
      if (key < parent->key) {
        TREE__INSERT_OR_DESCEND(left)
      } else if (key > parent->key) {
        TREE__INSERT_OR_DESCEND(right)
      } else {
        return -1;
      }
    }
  } else {
    tree->root = node;
  }

  /* New nodes start red so black-height is preserved. */
  node->key = key;
  node->left = node->right = NULL;
  node->parent = parent;
  node->red = true;

  /* Restore the red-black invariants (no red node with a red parent). */
  for (; parent && parent->red; parent = node->parent) {
    if (parent == parent->parent->left) {
      TREE__REBALANCE_AFTER_INSERT(left, right)
    } else {
      TREE__REBALANCE_AFTER_INSERT(right, left)
    }
  }
  tree->root->red = false;

  return 0;
}
+
/* One step of the red-black delete fix-up for a "double black" `node` that
 * is its parent's `cis` child: a red sibling is rotated down to expose a
 * black one; a sibling with a red child is rotated to absorb the missing
 * black (terminating the loop); otherwise the sibling is recolored red and
 * the deficit moves up to the parent. */
#define TREE__REBALANCE_AFTER_REMOVE(cis, trans)   \
  tree_node_t* sibling = parent->trans;            \
                                                   \
  if (sibling->red) {                              \
    sibling->red = false;                          \
    parent->red = true;                            \
    tree__rotate_##cis(tree, parent);              \
    sibling = parent->trans;                       \
  }                                                \
  if ((sibling->left && sibling->left->red) ||     \
      (sibling->right && sibling->right->red)) {   \
    if (!sibling->trans || !sibling->trans->red) { \
      sibling->cis->red = false;                   \
      sibling->red = true;                         \
      tree__rotate_##trans(tree, sibling);         \
      sibling = parent->trans;                     \
    }                                              \
    sibling->red = parent->red;                    \
    parent->red = sibling->trans->red = false;     \
    tree__rotate_##cis(tree, parent);              \
    node = tree->root;                             \
    break;                                         \
  }                                                \
  sibling->red = true;
+
/* Removes `node` from the red-black tree. The node with both children is
 * replaced by its in-order successor (leftmost node of the right subtree),
 * after which the standard delete fix-up restores the invariants. */
void tree_del(tree_t* tree, tree_node_t* node) {
  tree_node_t* parent = node->parent;
  tree_node_t* left = node->left;
  tree_node_t* right = node->right;
  tree_node_t* next;
  bool red;

  /* Pick the node that will take `node`'s place in the tree. */
  if (!left) {
    next = right;
  } else if (!right) {
    next = left;
  } else {
    /* Two children: the in-order successor replaces the node. */
    next = right;
    while (next->left)
      next = next->left;
  }

  /* Point the parent (or the root) at the replacement. */
  if (parent) {
    if (parent->left == node)
      parent->left = next;
    else
      parent->right = next;
  } else {
    tree->root = next;
  }

  if (left && right) {
    /* The successor adopts `node`'s color and children; `red` records the
     * color removed from the tree, and `node`/`parent` are repointed at
     * the spot where the fix-up must start. */
    red = next->red;
    next->red = node->red;
    next->left = left;
    left->parent = next;
    if (next != right) {
      parent = next->parent;
      next->parent = node->parent;
      node = next->right;
      parent->left = node;
      next->right = right;
      right->parent = next;
    } else {
      next->parent = parent;
      parent = next;
      node = next->right;
    }
  } else {
    red = node->red;
    node = next;
  }

  if (node)
    node->parent = parent;
  /* Removing a red node never violates the black-height invariant. */
  if (red)
    return;
  /* A red replacement absorbs the missing black by turning black. */
  if (node && node->red) {
    node->red = false;
    return;
  }

  /* Otherwise propagate the "double black" up until it is absorbed. */
  do {
    if (node == tree->root)
      break;
    if (node == parent->left) {
      TREE__REBALANCE_AFTER_REMOVE(left, right)
    } else {
      TREE__REBALANCE_AFTER_REMOVE(right, left)
    }
    node = parent;
    parent = parent->parent;
  } while (!node->red);

  if (node)
    node->red = false;
}
+
+tree_node_t* tree_find(const tree_t* tree, uintptr_t key) {
+ tree_node_t* node = tree->root;
+ while (node) {
+ if (key < node->key)
+ node = node->left;
+ else if (key > node->key)
+ node = node->right;
+ else
+ return node;
+ }
+ return NULL;
+}
+
+tree_node_t* tree_root(const tree_t* tree) {
+ return tree->root;
+}
+
/* Winsock ioctls used to find the "base" provider socket underneath any
 * layered service providers; defined here because older SDK headers may
 * lack them. */
#ifndef SIO_BSP_HANDLE_POLL
#define SIO_BSP_HANDLE_POLL 0x4800001D
#endif

#ifndef SIO_BASE_HANDLE
#define SIO_BASE_HANDLE 0x48000022
#endif
+
/* Initializes Winsock (version 2.2). Returns 0 on success; -1 with the
 * WSAStartup error code stored as the last error on failure. */
int ws_global_init(void) {
  int r;
  WSADATA wsa_data;

  r = WSAStartup(MAKEWORD(2, 2), &wsa_data);
  if (r != 0)
    return_set_error(-1, (DWORD) r);

  return 0;
}
+
+static inline SOCKET ws__ioctl_get_bsp_socket(SOCKET socket, DWORD ioctl) {
+ SOCKET bsp_socket;
+ DWORD bytes;
+
+ if (WSAIoctl(socket,
+ ioctl,
+ NULL,
+ 0,
+ &bsp_socket,
+ sizeof bsp_socket,
+ &bytes,
+ NULL,
+ NULL) != SOCKET_ERROR)
+ return bsp_socket;
+ else
+ return INVALID_SOCKET;
+}
+
/* Resolves the base (bottom-most provider) socket for `socket`, unwrapping
 * any layered service providers. Returns INVALID_SOCKET with the last
 * error set on failure. */
SOCKET ws_get_base_socket(SOCKET socket) {
  SOCKET base_socket;
  DWORD error;

  for (;;) {
    base_socket = ws__ioctl_get_bsp_socket(socket, SIO_BASE_HANDLE);
    if (base_socket != INVALID_SOCKET)
      return base_socket;

    error = GetLastError();
    if (error == WSAENOTSOCK)
      return_set_error(INVALID_SOCKET, error);

    /* Even though Microsoft documentation clearly states that LSPs should
     * never intercept the `SIO_BASE_HANDLE` ioctl [1], Komodia based LSPs do
     * so anyway, breaking it, with the apparent intention of preventing LSP
     * bypass [2]. Fortunately they don't handle `SIO_BSP_HANDLE_POLL`, which
     * will at least let us obtain the socket associated with the next winsock
     * protocol chain entry. If this succeeds, loop around and call
     * `SIO_BASE_HANDLE` again with the returned BSP socket, to make sure that
     * we unwrap all layers and retrieve the actual base socket.
     * [1] https://docs.microsoft.com/en-us/windows/win32/winsock/winsock-ioctls
     * [2] https://www.komodia.com/newwiki/index.php?title=Komodia%27s_Redirector_bug_fixes#Version_2.2.2.6
     */
    base_socket = ws__ioctl_get_bsp_socket(socket, SIO_BSP_HANDLE_POLL);
    if (base_socket != INVALID_SOCKET && base_socket != socket)
      socket = base_socket;
    else
      return_set_error(INVALID_SOCKET, error);
  }
}
diff --git a/Net/src/wepoll.h b/Net/src/wepoll.h
new file mode 100644
index 00000000000..95a93ca4e65
--- /dev/null
+++ b/Net/src/wepoll.h
@@ -0,0 +1,116 @@
+/*
+ * wepoll - epoll for Windows
+ * https://github.com/piscisaureus/wepoll
+ *
+ * Copyright 2012-2020, Bert Belder
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WEPOLL_H_
+#define WEPOLL_H_
+
+#ifndef WEPOLL_EXPORT
+#define WEPOLL_EXPORT
+#endif
+
#include <stdint.h>
+
/* Epoll event and flag bits, numerically identical to the Linux
 * <sys/epoll.h> values so portable code can share masks. */
enum EPOLL_EVENTS {
  EPOLLIN = (int) (1U << 0),
  EPOLLPRI = (int) (1U << 1),
  EPOLLOUT = (int) (1U << 2),
  EPOLLERR = (int) (1U << 3),
  EPOLLHUP = (int) (1U << 4),
  EPOLLRDNORM = (int) (1U << 6),
  EPOLLRDBAND = (int) (1U << 7),
  EPOLLWRNORM = (int) (1U << 8),
  EPOLLWRBAND = (int) (1U << 9),
  EPOLLMSG = (int) (1U << 10), /* Never reported. */
  EPOLLRDHUP = (int) (1U << 13),
  EPOLLONESHOT = (int) (1U << 31)
};
+
/* Macro duplicates of the enum values above, so `#ifdef EPOLLIN` style
 * feature checks work as they do with Linux headers. */
#define EPOLLIN (1U << 0)
#define EPOLLPRI (1U << 1)
#define EPOLLOUT (1U << 2)
#define EPOLLERR (1U << 3)
#define EPOLLHUP (1U << 4)
#define EPOLLRDNORM (1U << 6)
#define EPOLLRDBAND (1U << 7)
#define EPOLLWRNORM (1U << 8)
#define EPOLLWRBAND (1U << 9)
#define EPOLLMSG (1U << 10)
#define EPOLLRDHUP (1U << 13)
#define EPOLLONESHOT (1U << 31)

/* Operations for epoll_ctl(), matching the Linux values. */
#define EPOLL_CTL_ADD 1
#define EPOLL_CTL_MOD 2
#define EPOLL_CTL_DEL 3
+
/* Minimal Windows typedefs so this header does not have to pull in
 * <windows.h>/<winsock2.h>; they must stay layout-compatible with the SDK
 * definitions. */
typedef void* HANDLE;
typedef uintptr_t SOCKET;

/* Per-socket user datum, returned verbatim in epoll_wait() events. */
typedef union epoll_data {
  void* ptr;
  int fd;
  uint32_t u32;
  uint64_t u64;
  SOCKET sock; /* Windows specific */
  HANDLE hnd;  /* Windows specific */
} epoll_data_t;

/* One reported (or requested) event for a socket. */
struct epoll_event {
  uint32_t events;   /* Epoll events and flags */
  epoll_data_t data; /* User data variable */
};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+WEPOLL_EXPORT HANDLE epoll_create(int size);
+WEPOLL_EXPORT HANDLE epoll_create1(int flags);
+
+WEPOLL_EXPORT int epoll_close(HANDLE ephnd);
+
+WEPOLL_EXPORT int epoll_ctl(HANDLE ephnd,
+ int op,
+ SOCKET sock,
+ struct epoll_event* event);
+
+WEPOLL_EXPORT int epoll_wait(HANDLE ephnd,
+ struct epoll_event* events,
+ int maxevents,
+ int timeout);
+
+#define close epoll_close
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* WEPOLL_H_ */