diff --git a/.github/workflows/makefile.yml b/.github/workflows/makefile.yml index efcad5302..c785dee80 100644 --- a/.github/workflows/makefile.yml +++ b/.github/workflows/makefile.yml @@ -2,9 +2,9 @@ name: Build Check on: push: - branches: [ "master" ] + branches: [ "master", "feat-*", "fix-*"] pull_request: - branches: [ "master" ] + branches: [ "master", "feat-*", "fix-*"] jobs: # ensure the toolchain is cached diff --git a/kernel/src/driver/net/e1000e/e1000e_driver.rs b/kernel/src/driver/net/e1000e/e1000e_driver.rs index fbac2f834..8fbbdc6cd 100644 --- a/kernel/src/driver/net/e1000e/e1000e_driver.rs +++ b/kernel/src/driver/net/e1000e/e1000e_driver.rs @@ -8,7 +8,7 @@ use crate::{ device::{bus::Bus, driver::Driver, Device, DeviceType, IdTable}, kobject::{KObjType, KObject, KObjectState}, }, - net::NetDevice, + net::{Iface, IfaceCommon}, }, libs::spinlock::SpinLock, net::{generate_iface_id, NET_DEVICES}, @@ -26,9 +26,9 @@ use core::{ use log::info; use smoltcp::{ phy, - wire::{self, HardwareAddress}, + wire::HardwareAddress, }; -use system_error::SystemError; +// use system_error::SystemError; use super::e1000e::{E1000EBuffer, E1000EDevice}; @@ -73,10 +73,10 @@ impl Debug for E1000EDriverWrapper { } } +#[derive(Debug)] pub struct E1000EInterface { driver: E1000EDriverWrapper, - iface_id: usize, - iface: SpinLock, + common: IfaceCommon, name: String, } impl phy::RxToken for E1000ERxToken { @@ -184,11 +184,9 @@ impl E1000EInterface { let iface = smoltcp::iface::Interface::new(iface_config, &mut driver, Instant::now().into()); - let driver: E1000EDriverWrapper = E1000EDriverWrapper(UnsafeCell::new(driver)); let result = Arc::new(E1000EInterface { - driver, - iface_id, - iface: SpinLock::new(iface), + driver: E1000EDriverWrapper(UnsafeCell::new(driver)), + common: IfaceCommon::new(iface_id, iface), name: format!("eth{}", iface_id), }); @@ -196,16 +194,6 @@ impl E1000EInterface { } } -impl Debug for E1000EInterface { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("E1000EInterface") - .field("iface_id", &self.iface_id) - .field("iface", &"smoltcp::iface::Interface") - .field("name", &self.name) - .finish() - } -} - impl Device for E1000EInterface { fn dev_type(&self) -> DeviceType { todo!() @@ -248,52 +236,23 @@ impl Device for E1000EInterface { } } -impl NetDevice for E1000EInterface { +impl Iface for E1000EInterface { + fn common(&self) -> &IfaceCommon { + return &self.common; + } + fn mac(&self) -> smoltcp::wire::EthernetAddress { let mac = self.driver.inner.lock().mac_address(); return smoltcp::wire::EthernetAddress::from_bytes(&mac); } - #[inline] - fn nic_id(&self) -> usize { - return self.iface_id; - } - #[inline] fn name(&self) -> String { return self.name.clone(); } - fn update_ip_addrs(&self, ip_addrs: &[wire::IpCidr]) -> Result<(), SystemError> { - if ip_addrs.len() != 1 { - return Err(SystemError::EINVAL); - } - - self.iface.lock().update_ip_addrs(|addrs| { - let dest = addrs.iter_mut().next(); - - if let Some(dest) = dest { - *dest = ip_addrs[0]; - } else { - addrs.push(ip_addrs[0]).expect("Push ipCidr failed: full"); - } - }); - return Ok(()); - } - - fn poll(&self, sockets: &mut smoltcp::iface::SocketSet) -> Result<(), SystemError> { - let timestamp: smoltcp::time::Instant = Instant::now().into(); - let mut guard = self.iface.lock(); - let poll_res = guard.poll(timestamp, self.driver.force_get_mut(), sockets); - if poll_res { - return Ok(()); - } - return Err(SystemError::EAGAIN_OR_EWOULDBLOCK); - } - - #[inline(always)] - fn 
inner_iface(&self) -> &SpinLock { - return &self.iface; + fn poll(&self) { + self.common.poll(self.driver.force_get_mut()) } } diff --git a/kernel/src/driver/net/irq_handle.rs b/kernel/src/driver/net/irq_handle.rs index 6a1a3a328..5436a349c 100644 --- a/kernel/src/driver/net/irq_handle.rs +++ b/kernel/src/driver/net/irq_handle.rs @@ -7,7 +7,8 @@ use crate::{ irqdesc::{IrqHandler, IrqReturn}, IrqNumber, }, - net::net_core::poll_ifaces_try_lock_onetime, + // net::net_core::poll_ifaces_try_lock_onetime, + net::net_core::poll_ifaces, }; /// 默认的网卡中断处理函数 @@ -21,7 +22,9 @@ impl IrqHandler for DefaultNetIrqHandler { _static_data: Option<&dyn IrqHandlerData>, _dynamic_data: Option>, ) -> Result { - poll_ifaces_try_lock_onetime().ok(); + // poll_ifaces_try_lock_onetime().ok(); + log::warn!("DefaultNetIrqHandler: poll_ifaces_try_lock_onetime -> poll_ifaces"); + poll_ifaces(); Ok(IrqReturn::Handled) } } diff --git a/kernel/src/driver/net/loopback.rs b/kernel/src/driver/net/loopback.rs index 582fcf8d2..707ba816d 100644 --- a/kernel/src/driver/net/loopback.rs +++ b/kernel/src/driver/net/loopback.rs @@ -22,7 +22,7 @@ use smoltcp::{ }; use system_error::SystemError; -use super::NetDevice; +use super::{Iface, IfaceCommon}; const DEVICE_NAME: &str = "loopback"; @@ -131,6 +131,7 @@ impl Loopback { /// 为实现获得不可变引用的Interface的内部可变性,故为Driver提供UnsafeCell包裹器 /// /// 参考virtio_net.rs +#[derive(Debug)] struct LoopbackDriverWapper(UnsafeCell); unsafe impl Send for LoopbackDriverWapper {} unsafe impl Sync for LoopbackDriverWapper {} @@ -235,11 +236,10 @@ impl phy::Device for LoopbackDriver { /// ## LoopbackInterface结构 /// 封装驱动包裹器和iface,设置接口名称 +#[derive(Debug)] pub struct LoopbackInterface { driver: LoopbackDriverWapper, - iface_id: usize, - iface: SpinLock, - name: String, + common: IfaceCommon, } impl LoopbackInterface { @@ -270,25 +270,14 @@ impl LoopbackInterface { .expect("Push ipCidr failed: full"); } }); - let driver = LoopbackDriverWapper(UnsafeCell::new(driver)); + Arc::new(LoopbackInterface { - driver, - iface_id, - iface: SpinLock::new(iface), - name: "lo".to_string(), + driver: LoopbackDriverWapper(UnsafeCell::new(driver)), + common: IfaceCommon::new(iface_id, iface), }) } } -impl Debug for LoopbackInterface { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("LoopbackInterface") - .field("iface_id", &self.iface_id) - .field("iface", &"smtoltcp::iface::Interface") - .field("name", &self.name) - .finish() - } -} //TODO: 向sysfs注册lo设备 impl KObject for LoopbackInterface { fn as_any_ref(&self) -> &dyn core::any::Any { @@ -324,7 +313,7 @@ impl KObject for LoopbackInterface { } fn name(&self) -> String { - self.name.clone() + "lo".to_string() } fn set_name(&self, _name: String) { @@ -394,79 +383,30 @@ impl Device for LoopbackInterface { } } -impl NetDevice for LoopbackInterface { - /// 由于lo网卡设备不是实际的物理设备,其mac地址需要手动设置为一个默认值,这里默认为0200000001 - fn mac(&self) -> smoltcp::wire::EthernetAddress { - let mac = [0x02, 0x00, 0x00, 0x00, 0x00, 0x01]; - smoltcp::wire::EthernetAddress(mac) - } - - #[inline] - fn nic_id(&self) -> usize { - self.iface_id +impl Iface for LoopbackInterface { + fn common(&self) -> &IfaceCommon { + &self.common } - #[inline] fn name(&self) -> String { - self.name.clone() + "lo".to_string() } - /// ## `update_ip_addrs` 用于更新接口的 IP 地址。 - /// - /// ## 参数 - /// - `&self` :自身引用 - /// - `ip_addrs` :一个包含 `smoltcp::wire::IpCidr` 的切片,表示要设置的 IP 地址和子网掩码 - /// - /// ## 返回值 - /// - 如果 `ip_addrs` 的长度不为 1,返回 `Err(SystemError::EINVAL)`,表示输入参数无效 - /// - 如果更新成功,返回 `Ok(())` - fn 
update_ip_addrs( - &self, - ip_addrs: &[smoltcp::wire::IpCidr], - ) -> Result<(), system_error::SystemError> { - if ip_addrs.len() != 1 { - return Err(SystemError::EINVAL); - } - self.iface.lock().update_ip_addrs(|addrs| { - let dest = addrs.iter_mut().next(); - - if let Some(dest) = dest { - *dest = ip_addrs[0]; - } else { - addrs.push(ip_addrs[0]).expect("Push ipCidr failed: full"); - } - }); - return Ok(()); - } - /// ## `poll` 用于轮询接口的状态。 - /// - /// ## 参数 - /// - `&self` :自身引用 - /// - `sockets` :一个可变引用到 `smoltcp::iface::SocketSet`,表示要轮询的套接字集 - /// - /// ## 返回值 - /// - 如果轮询成功,返回 `Ok(())` - /// - 如果轮询失败,返回 `Err(SystemError::EAGAIN_OR_EWOULDBLOCK)`,表示需要再次尝试或者操作会阻塞 - fn poll(&self, sockets: &mut smoltcp::iface::SocketSet) -> Result<(), SystemError> { - let timestamp: smoltcp::time::Instant = Instant::now().into(); - let mut guard = self.iface.lock(); - let poll_res = guard.poll(timestamp, self.driver.force_get_mut(), sockets); - if poll_res { - return Ok(()); - } - return Err(SystemError::EAGAIN_OR_EWOULDBLOCK); + /// 由于lo网卡设备不是实际的物理设备,其mac地址需要手动设置为一个默认值,这里默认为0200000001 + fn mac(&self) -> smoltcp::wire::EthernetAddress { + let mac = [0x02, 0x00, 0x00, 0x00, 0x00, 0x01]; + smoltcp::wire::EthernetAddress(mac) } - #[inline(always)] - fn inner_iface(&self) -> &SpinLock { - return &self.iface; + fn poll(&self) { + self.common.poll(self.driver.force_get_mut()) } } pub fn loopback_probe() { loopback_driver_init(); } -/// ## lo网卡设备初始化函数 +/// # lo网卡设备初始化函数 /// 创建驱动和iface,初始化一个lo网卡,添加到全局NET_DEVICES中 pub fn loopback_driver_init() { let driver = LoopbackDriver::new(); @@ -474,7 +414,7 @@ pub fn loopback_driver_init() { NET_DEVICES .write_irqsave() - .insert(iface.iface_id, iface.clone()); + .insert(iface.nic_id(), iface.clone()); } /// ## lo网卡设备的注册函数 diff --git a/kernel/src/driver/net/mod.rs b/kernel/src/driver/net/mod.rs index 18e5a3fe1..478bd6354 100644 --- a/kernel/src/driver/net/mod.rs +++ b/kernel/src/driver/net/mod.rs @@ -1,33 +1,189 @@ -use alloc::string::String; -use smoltcp::{ - iface, - wire::{self, EthernetAddress}, -}; - -use super::base::device::Device; -use crate::libs::spinlock::SpinLock; + +use alloc::{sync::Arc, string::String, fmt, vec::Vec}; + +use smoltcp; use system_error::SystemError; +use crate::net::socket::inet::InetSocket; +use crate::libs::{spinlock::SpinLock, rwlock::RwLock}; +use crate::net::socket::inet::common::PortManager; mod dma; pub mod e1000e; pub mod irq_handle; pub mod loopback; pub mod virtio_net; + #[allow(dead_code)] -pub trait NetDevice: Device { - /// @brief 获取网卡的MAC地址 - fn mac(&self) -> EthernetAddress; +pub trait Iface: crate::driver::base::device::Device { + /// # `common` + /// 获取网卡的公共信息 + fn common(&self) -> &IfaceCommon; + /// # `mac` + /// 获取网卡的MAC地址 + fn mac(&self) -> smoltcp::wire::EthernetAddress; + + /// # `name` + /// 获取网卡名 fn name(&self) -> String; - /// @brief 获取网卡的id - fn nic_id(&self) -> usize; + /// # `nic_id` + /// 获取网卡id + fn nic_id(&self) -> usize { + self.common().iface_id + } - fn poll(&self, sockets: &mut iface::SocketSet) -> Result<(), SystemError>; + /// # `poll` + /// 用于轮询接口的状态。 + /// ## 参数 + /// - `sockets` :一个可变引用到 `smoltcp::iface::SocketSet`,表示要轮询的套接字集 + /// ## 返回值 + /// - 成功返回 `Ok(())` + /// - 如果轮询失败,返回 `Err(SystemError::EAGAIN_OR_EWOULDBLOCK)`,表示需要再次尝试或者操作会阻塞 + fn poll(&self); - fn update_ip_addrs(&self, ip_addrs: &[wire::IpCidr]) -> Result<(), SystemError>; + /// # `update_ip_addrs` + /// 用于更新接口的 IP 地址 + /// ## 参数 + /// - `ip_addrs` :一个包含 `smoltcp::wire::IpCidr` 的切片,表示要设置的 IP 地址和子网掩码 + /// ## 返回值 + /// - 如果 `ip_addrs` 的长度不为 
1,返回 `Err(SystemError::EINVAL)`,表示输入参数无效 + fn update_ip_addrs(&self, ip_addrs: &[smoltcp::wire::IpCidr]) -> Result<(), SystemError> { + self.common().update_ip_addrs(ip_addrs) + } /// @brief 获取smoltcp的网卡接口类型 - fn inner_iface(&self) -> &SpinLock; + #[inline(always)] + fn smol_iface(&self) -> &SpinLock { + &self.common().smol_iface + } // fn as_any_ref(&'static self) -> &'static dyn core::any::Any; + + /// # `sockets` + /// 获取网卡的套接字集 + fn sockets(&self) -> &SpinLock> { + &self.common().sockets + } + + /// # `port_manager` + /// 用于管理网卡的端口 + fn port_manager(&self) -> &PortManager { + &self.common().port_manager + } +} + +pub struct IfaceCommon { + iface_id: usize, + smol_iface: SpinLock, + /// 存smoltcp网卡的套接字集 + sockets: SpinLock>, + /// 存 kernel wrap smoltcp socket 的集合 + bounds: RwLock>>, + /// 端口管理器 + port_manager: PortManager, + /// 下次轮询的时间 + poll_at_ms: core::sync::atomic::AtomicU64, +} + +impl fmt::Debug for IfaceCommon { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("IfaceCommon") + .field("iface_id", &self.iface_id) + .field("sockets", &self.sockets) + .field("bounds", &self.bounds) + .field("port_manager", &self.port_manager) + .field("poll_at_ms", &self.poll_at_ms) + .finish() + } +} + +impl IfaceCommon { + pub fn new(iface_id: usize, iface: smoltcp::iface::Interface) -> Self { + IfaceCommon { + iface_id, + smol_iface: SpinLock::new(iface), + sockets: SpinLock::new(smoltcp::iface::SocketSet::new(Vec::new())), + bounds: RwLock::new(Vec::new()), + port_manager: PortManager::new(), + poll_at_ms: core::sync::atomic::AtomicU64::new(0), + } + } + + pub fn poll(&self, device: &mut D) + where + D: smoltcp::phy::Device + ?Sized, + { + let timestamp = crate::time::Instant::now().into(); + let mut sockets = self.sockets.lock_no_preempt(); + let mut interface = self.smol_iface.lock_no_preempt(); + + let (has_events, poll_at) = { + let mut has_events = false; + let mut poll_at; + loop { + has_events |= interface.poll(timestamp, device, &mut sockets); + poll_at = interface.poll_at(timestamp, &sockets); + let Some(instant) = poll_at else { + break; + }; + if instant > timestamp { + break; + } + } + (has_events, poll_at) + }; + + // drop sockets here to avoid deadlock + drop(interface); + drop(sockets); + + use core::sync::atomic::Ordering; + if let Some(instant) = poll_at { + let _old_instant = self.poll_at_ms.load(Ordering::Relaxed); + let new_instant = instant.total_millis() as u64; + self.poll_at_ms.store(new_instant, Ordering::Relaxed); + + // if old_instant == 0 || new_instant < old_instant { + // self.polling_wait_queue.wake_all(); + // } + } else { + self.poll_at_ms.store(0, Ordering::Relaxed); + } + + if has_events { + // We never try to hold the write lock in the IRQ context, and we disable IRQ when + // holding the write lock. So we don't need to disable IRQ when holding the read lock. 
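+            // Notify every socket bound to this iface so it can wake its own wait queue / epoll items.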
+ self.bounds.read().iter().for_each(|bound_socket| { + bound_socket.on_iface_events(); + }); + + // let closed_sockets = self + // .closing_sockets + // .lock_irq_disabled() + // .extract_if(|closing_socket| closing_socket.is_closed()) + // .collect::>(); + // drop(closed_sockets); + } + } + + pub fn update_ip_addrs(&self, ip_addrs: &[smoltcp::wire::IpCidr]) -> Result<(), SystemError> { + if ip_addrs.len() != 1 { + return Err(SystemError::EINVAL); + } + + self.smol_iface.lock().update_ip_addrs(|addrs| { + let dest = addrs.iter_mut().next(); + + if let Some(dest) = dest { + *dest = ip_addrs[0]; + } else { + addrs.push(ip_addrs[0]).expect("Push ipCidr failed: full"); + } + }); + return Ok(()); + } + + pub fn bind_socket(&self, socket: Arc) { + self.bounds.write().push(socket); + } } diff --git a/kernel/src/driver/net/virtio_net.rs b/kernel/src/driver/net/virtio_net.rs index 58551e4a9..09311b109 100644 --- a/kernel/src/driver/net/virtio_net.rs +++ b/kernel/src/driver/net/virtio_net.rs @@ -15,7 +15,7 @@ use smoltcp::{iface, phy, wire}; use unified_init::macros::unified_init; use virtio_drivers::device::net::VirtIONet; -use super::NetDevice; +use super::Iface; use crate::{ arch::rand::rand, driver::{ @@ -44,7 +44,7 @@ use crate::{ rwlock::{RwLockReadGuard, RwLockWriteGuard}, spinlock::{SpinLock, SpinLockGuard}, }, - net::{generate_iface_id, net_core::poll_ifaces_try_lock_onetime, NET_DEVICES}, + net::{generate_iface_id, net_core::poll_ifaces, NET_DEVICES}, time::Instant, }; use system_error::SystemError; @@ -129,12 +129,12 @@ impl Debug for VirtIONicDeviceInner { #[cast_to([sync] VirtIODevice)] #[cast_to([sync] Device)] +#[derive(Debug)] pub struct VirtioInterface { device_inner: VirtIONicDeviceInnerWrapper, - iface_id: usize, iface_name: String, + iface_common: super::IfaceCommon, dev_id: Arc, - iface: SpinLock, inner: SpinLock, locked_kobj_state: LockedKObjectState, } @@ -147,18 +147,6 @@ struct InnerVirtIOInterface { kobj_common: KObjectCommonData, } -impl core::fmt::Debug for VirtioInterface { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("VirtioInterface") - .field("iface_id", &self.iface_id) - .field("iface_name", &self.iface_name) - .field("dev_id", &self.dev_id) - .field("inner", &self.inner) - .field("locked_kobj_state", &self.locked_kobj_state) - .finish() - } -} - impl VirtioInterface { pub fn new(mut device_inner: VirtIONicDeviceInner, dev_id: Arc) -> Arc { let iface_id = generate_iface_id(); @@ -171,10 +159,9 @@ impl VirtioInterface { let result = Arc::new(VirtioInterface { device_inner: VirtIONicDeviceInnerWrapper(UnsafeCell::new(device_inner)), - iface_id, locked_kobj_state: LockedKObjectState::default(), - iface: SpinLock::new(iface), iface_name: format!("eth{}", iface_id), + iface_common: super::IfaceCommon::new(iface_id, iface), dev_id, inner: SpinLock::new(InnerVirtIOInterface { name: None, @@ -203,7 +190,9 @@ impl VirtioInterface { impl VirtIODevice for VirtioInterface { fn handle_irq(&self, _irq: IrqNumber) -> Result { - poll_ifaces_try_lock_onetime().ok(); + // poll_ifaces_try_lock_onetime().ok(); + log::warn!("VirtioInterface: poll_ifaces_try_lock_onetime -> poll_ifaces"); + poll_ifaces(); return Ok(IrqReturn::Handled); } @@ -245,7 +234,7 @@ impl VirtIODevice for VirtioInterface { impl Drop for VirtioInterface { fn drop(&mut self) { // 从全局的网卡接口信息表中删除这个网卡的接口信息 - NET_DEVICES.write_irqsave().remove(&self.iface_id); + NET_DEVICES.write_irqsave().remove(&self.nic_id()); } } @@ -431,57 +420,25 @@ pub fn virtio_net(transport: 
VirtIOTransport, dev_id: Arc) { .expect("Add virtio net failed"); } -impl NetDevice for VirtioInterface { +impl Iface for VirtioInterface { + fn common(&self) -> &super::IfaceCommon { + &self.iface_common + } + fn mac(&self) -> wire::EthernetAddress { let mac: [u8; 6] = self.device_inner.inner.lock().mac_address(); return wire::EthernetAddress::from_bytes(&mac); } - #[inline] - fn nic_id(&self) -> usize { - return self.iface_id; - } - #[inline] fn name(&self) -> String { return self.iface_name.clone(); } - fn update_ip_addrs(&self, ip_addrs: &[wire::IpCidr]) -> Result<(), SystemError> { - if ip_addrs.len() != 1 { - return Err(SystemError::EINVAL); - } - - self.iface.lock().update_ip_addrs(|addrs| { - let dest = addrs.iter_mut().next(); - - if let Some(dest) = dest { - *dest = ip_addrs[0]; - } else { - addrs - .push(ip_addrs[0]) - .expect("Push wire::IpCidr failed: full"); - } - }); - return Ok(()); + fn poll(&self) { + self.iface_common.poll(self.device_inner.force_get_mut()) } - fn poll(&self, sockets: &mut iface::SocketSet) -> Result<(), SystemError> { - let timestamp: smoltcp::time::Instant = Instant::now().into(); - let mut guard = self.iface.lock(); - let poll_res = guard.poll(timestamp, self.device_inner.force_get_mut(), sockets); - // todo: notify!!! - // debug!("Virtio Interface poll:{poll_res}"); - if poll_res { - return Ok(()); - } - return Err(SystemError::EAGAIN_OR_EWOULDBLOCK); - } - - #[inline(always)] - fn inner_iface(&self) -> &SpinLock { - return &self.iface; - } // fn as_any_ref(&'static self) -> &'static dyn core::any::Any { // return self; // } diff --git a/kernel/src/driver/tty/tty_ldisc/ntty.rs b/kernel/src/driver/tty/tty_ldisc/ntty.rs index e5b63422f..4f2958064 100644 --- a/kernel/src/driver/tty/tty_ldisc/ntty.rs +++ b/kernel/src/driver/tty/tty_ldisc/ntty.rs @@ -388,9 +388,9 @@ impl NTtyData { continue; } - if self.char_map.get(c as usize).unwrap() { + if ((c as usize) < self.char_map.size()) && self.char_map.get(c as usize).unwrap() { // 特殊字符 - self.receive_special_char(c, tty.clone(), lookahead_done) + self.receive_special_char(c, tty.clone(), lookahead_done); } else { self.receive_char(c, tty.clone()); } diff --git a/kernel/src/filesystem/vfs/core.rs b/kernel/src/filesystem/vfs/core.rs index 2eb8222bb..5a1d55176 100644 --- a/kernel/src/filesystem/vfs/core.rs +++ b/kernel/src/filesystem/vfs/core.rs @@ -239,7 +239,7 @@ pub fn do_unlink_at(dirfd: i32, path: &str) -> Result { return Err(SystemError::EPERM); } - let (filename, parent_path) = rsplit_path(path); + let (filename, parent_path) = rsplit_path(&remain_path); // 查找父目录 let parent_inode: Arc = inode_begin .lookup_follow_symlink(parent_path.unwrap_or("/"), VFS_MAX_FOLLOW_SYMLINK_TIMES)?; diff --git a/kernel/src/filesystem/vfs/file.rs b/kernel/src/filesystem/vfs/file.rs index faf020ed9..b92cb25e5 100644 --- a/kernel/src/filesystem/vfs/file.rs +++ b/kernel/src/filesystem/vfs/file.rs @@ -18,11 +18,13 @@ use crate::{ libs::{rwlock::RwLock, spinlock::SpinLock}, net::{ event_poll::{EPollItem, EPollPrivateData, EventPoll}, - socket::SocketInode, + socket::Socket, }, process::{cred::Cred, ProcessManager}, }; +use crate::libs::casting::DowncastArc; + use super::{Dirent, FileType, IndexNode, InodeId, Metadata, SpecialNodeData}; /// 文件私有信息的枚举类型 @@ -486,10 +488,11 @@ impl File { pub fn add_epoll(&self, epitem: Arc) -> Result<(), SystemError> { match self.file_type { FileType::Socket => { - let inode = self.inode.downcast_ref::().unwrap(); - let mut socket = inode.inner(); + let inode = self.inode.downcast_ref::().unwrap(); + 
// let mut socket = inode.inner(); - return socket.add_epoll(epitem); + inode.epoll_items().add(epitem); + return Ok(()); } FileType::Pipe => { let inode = self.inode.downcast_ref::().unwrap(); @@ -510,10 +513,9 @@ impl File { pub fn remove_epoll(&self, epoll: &Weak>) -> Result<(), SystemError> { match self.file_type { FileType::Socket => { - let inode = self.inode.downcast_ref::().unwrap(); - let mut socket = inode.inner(); + let inode = self.inode.downcast_arc::().unwrap(); - return socket.remove_epoll(epoll); + return inode.epoll_items().remove(epoll); } _ => return Err(SystemError::ENOSYS), } diff --git a/kernel/src/libs/keyboard_parser.rs b/kernel/src/libs/keyboard_parser.rs index 4cbe3e00d..2cfe57b3e 100644 --- a/kernel/src/libs/keyboard_parser.rs +++ b/kernel/src/libs/keyboard_parser.rs @@ -313,7 +313,8 @@ impl TypeOneFSMState { } // shift被按下 - if scancode_status.shift_l || scancode_status.shift_r { + let shift = scancode_status.shift_l || scancode_status.shift_r; + if shift { col = true; } @@ -327,9 +328,8 @@ impl TypeOneFSMState { let mut ch = TYPE1_KEY_CODE_MAPTABLE[col as usize + 2 * index as usize]; if key != KeyFlag::NoneFlag { - // debug!("EMIT: ch is '{}', keyflag is {:?}\n", ch as char, key); if scancode_status.ctrl_l || scancode_status.ctrl_r { - ch = Self::to_ctrl(ch); + ch = Self::to_ctrl(ch, shift); } Self::emit(ch); } @@ -337,10 +337,16 @@ impl TypeOneFSMState { } #[inline] - fn to_ctrl(ch: u8) -> u8 { + fn to_ctrl(ch: u8, shift: bool) -> u8 { return match ch as char { - 'a'..='z' => ch - 0x40, - 'A'..='Z' => ch - 0x40, + 'a'..='z' => ch - 0x60, + 'A'..='Z' => { + if shift { + ch + } else { + ch - 0x40 + } + } '@'..='_' => ch - 0x40, _ => ch, }; diff --git a/kernel/src/net/mod.rs b/kernel/src/net/mod.rs index 4a0098c95..f642859fd 100644 --- a/kernel/src/net/mod.rs +++ b/kernel/src/net/mod.rs @@ -4,12 +4,11 @@ use core::{ }; use alloc::{collections::BTreeMap, sync::Arc}; +use socket::Socket; -use crate::{driver::net::NetDevice, libs::rwlock::RwLock}; +use crate::{driver::net::Iface, libs::rwlock::RwLock}; use smoltcp::wire::IpEndpoint; -use self::socket::SocketInode; - pub mod event_poll; pub mod net_core; pub mod socket; @@ -18,7 +17,7 @@ pub mod syscall; lazy_static! { /// # 所有网络接口的列表 /// 这个列表在中断上下文会使用到,因此需要irqsave - pub static ref NET_DEVICES: RwLock>> = RwLock::new(BTreeMap::new()); + pub static ref NET_DEVICES: RwLock>> = RwLock::new(BTreeMap::new()); } /// 生成网络接口的id (全局自增) @@ -27,24 +26,14 @@ pub fn generate_iface_id() -> usize { return IFACE_ID.fetch_add(1, core::sync::atomic::Ordering::SeqCst); } -bitflags! { - /// @brief 用于指定socket的关闭类型 - /// 参考:https://code.dragonos.org.cn/xref/linux-6.1.9/include/net/sock.h?fi=SHUTDOWN_MASK#1573 - pub struct ShutdownType: u8 { - const RCV_SHUTDOWN = 1; - const SEND_SHUTDOWN = 2; - const SHUTDOWN_MASK = 3; - } -} - #[derive(Debug, Clone)] pub enum Endpoint { /// 链路层端点 LinkLayer(LinkLayerEndpoint), /// 网络层端点 - Ip(Option), + Ip(IpEndpoint), /// inode端点 - Inode(Option>), + Inode(Arc), // todo: 增加NetLink机制后,增加NetLink端点 } @@ -66,127 +55,8 @@ impl LinkLayerEndpoint { } } -/// IP datagram encapsulated protocol. 
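The `to_ctrl` change in kernel/src/libs/keyboard_parser.rs above fixes the Ctrl-key mapping: lowercase letters now subtract 0x60 (yielding 0x01..0x1A), uppercase letters subtract 0x40 unless Shift is held, in which case the character is emitted unchanged. A quick sanity check of that mapping, as a standalone hypothetical snippet (not part of the patch):

```rust
// Re-statement of the new mapping, for illustration only.
fn to_ctrl(ch: u8, shift: bool) -> u8 {
    match ch as char {
        'a'..='z' => ch - 0x60,
        'A'..='Z' => {
            if shift {
                ch
            } else {
                ch - 0x40
            }
        }
        '@'..='_' => ch - 0x40,
        _ => ch,
    }
}

fn main() {
    assert_eq!(to_ctrl(b'a', false), 0x01); // Ctrl-A; the old `ch - 0x40` produced 0x21 ('!')
    assert_eq!(to_ctrl(b'C', false), 0x03); // uppercase code without Shift -> Ctrl-C
    assert_eq!(to_ctrl(b'C', true), b'C'); // Shift held: pass the character through
    assert_eq!(to_ctrl(b'_', false), 0x1f); // Ctrl-_ (unit separator)
}
```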
-#[derive(Debug, PartialEq, Eq, Clone, Copy)] -#[repr(u8)] -pub enum Protocol { - HopByHop = 0x00, - Icmp = 0x01, - Igmp = 0x02, - Tcp = 0x06, - Udp = 0x11, - Ipv6Route = 0x2b, - Ipv6Frag = 0x2c, - Icmpv6 = 0x3a, - Ipv6NoNxt = 0x3b, - Ipv6Opts = 0x3c, - Unknown(u8), -} - -impl fmt::Display for Protocol { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Protocol::HopByHop => write!(f, "Hop-by-Hop"), - Protocol::Icmp => write!(f, "ICMP"), - Protocol::Igmp => write!(f, "IGMP"), - Protocol::Tcp => write!(f, "TCP"), - Protocol::Udp => write!(f, "UDP"), - Protocol::Ipv6Route => write!(f, "IPv6-Route"), - Protocol::Ipv6Frag => write!(f, "IPv6-Frag"), - Protocol::Icmpv6 => write!(f, "ICMPv6"), - Protocol::Ipv6NoNxt => write!(f, "IPv6-NoNxt"), - Protocol::Ipv6Opts => write!(f, "IPv6-Opts"), - Protocol::Unknown(id) => write!(f, "0x{id:02x}"), - } - } -} - -impl From for Protocol { - fn from(value: smoltcp::wire::IpProtocol) -> Self { - let x: u8 = value.into(); - Protocol::from(x) +impl From for Endpoint { + fn from(endpoint: IpEndpoint) -> Self { + Self::Ip(endpoint) } -} - -impl From for Protocol { - fn from(value: u8) -> Self { - match value { - 0x00 => Protocol::HopByHop, - 0x01 => Protocol::Icmp, - 0x02 => Protocol::Igmp, - 0x06 => Protocol::Tcp, - 0x11 => Protocol::Udp, - 0x2b => Protocol::Ipv6Route, - 0x2c => Protocol::Ipv6Frag, - 0x3a => Protocol::Icmpv6, - 0x3b => Protocol::Ipv6NoNxt, - 0x3c => Protocol::Ipv6Opts, - _ => Protocol::Unknown(value), - } - } -} - -impl From for u8 { - fn from(value: Protocol) -> Self { - match value { - Protocol::HopByHop => 0x00, - Protocol::Icmp => 0x01, - Protocol::Igmp => 0x02, - Protocol::Tcp => 0x06, - Protocol::Udp => 0x11, - Protocol::Ipv6Route => 0x2b, - Protocol::Ipv6Frag => 0x2c, - Protocol::Icmpv6 => 0x3a, - Protocol::Ipv6NoNxt => 0x3b, - Protocol::Ipv6Opts => 0x3c, - Protocol::Unknown(id) => id, - } - } -} - -bitflags! { - pub struct SocketOptionsLevel: u32 { - const SOL_IP = 0; - // const SOL_ICMP = 1; // No-no-no! 
Due to Linux :-) we cannot - const SOL_SOCKET = 1; - const SOL_TCP = 6; - const SOL_UDP = 17; - const SOL_IPV6 = 41; - const SOL_ICMPV6 = 58; - const SOL_SCTP = 132; - const SOL_UDPLITE = 136; // UDP-Lite (RFC 3828) - const SOL_RAW = 255; - const SOL_IPX = 256; - const SOL_AX25 = 257; - const SOL_ATALK = 258; - const SOL_NETROM = 259; - const SOL_ROSE = 260; - const SOL_DECNET = 261; - const SOL_X25 = 262; - const SOL_PACKET = 263; - const SOL_ATM = 264; // ATM layer (cell level) - const SOL_AAL = 265; // ATM Adaption Layer (packet level) - const SOL_IRDA = 266; - const SOL_NETBEUI = 267; - const SOL_LLC = 268; - const SOL_DCCP = 269; - const SOL_NETLINK = 270; - const SOL_TIPC = 271; - const SOL_RXRPC = 272; - const SOL_PPPOL2TP = 273; - const SOL_BLUETOOTH = 274; - const SOL_PNPIPE = 275; - const SOL_RDS = 276; - const SOL_IUCV = 277; - const SOL_CAIF = 278; - const SOL_ALG = 279; - const SOL_NFC = 280; - const SOL_KCM = 281; - const SOL_TLS = 282; - const SOL_XDP = 283; - const SOL_MPTCP = 284; - const SOL_MCTP = 285; - const SOL_SMC = 286; - const SOL_VSOCK = 287; - } -} +} \ No newline at end of file diff --git a/kernel/src/net/net_core.rs b/kernel/src/net/net_core.rs index d3c474208..f81cea1ce 100644 --- a/kernel/src/net/net_core.rs +++ b/kernel/src/net/net_core.rs @@ -4,9 +4,9 @@ use smoltcp::{socket::dhcpv4, wire}; use system_error::SystemError; use crate::{ - driver::net::NetDevice, + driver::net::Iface, libs::rwlock::RwLockReadGuard, - net::{socket::SocketPollMethod, NET_DEVICES}, + net::NET_DEVICES, time::timer::{next_n_ms_timer_jiffies, Timer, TimerFunction}, }; @@ -80,19 +80,19 @@ fn dhcp_query() -> Result<(), SystemError> { if let Some(router) = config.router { net_face - .inner_iface() + .smol_iface() .lock() .routes_mut() .add_default_ipv4_route(router) .unwrap(); - let cidr = net_face.inner_iface().lock().ip_addrs().first().cloned(); + let cidr = net_face.smol_iface().lock().ip_addrs().first().cloned(); if let Some(cidr) = cidr { info!("Successfully allocated ip by Dhcpv4! 
Ip:{}", cidr); return Ok(()); } } else { net_face - .inner_iface() + .smol_iface() .lock() .routes_mut() .remove_default_ipv4_route(); @@ -108,7 +108,7 @@ fn dhcp_query() -> Result<(), SystemError> { ))]) .ok(); net_face - .inner_iface() + .smol_iface() .lock() .routes_mut() .remove_default_ipv4_route(); @@ -120,123 +120,108 @@ fn dhcp_query() -> Result<(), SystemError> { } pub fn poll_ifaces() { - let guard: RwLockReadGuard>> = NET_DEVICES.read_irqsave(); + let guard: RwLockReadGuard>> = NET_DEVICES.read_irqsave(); if guard.len() == 0 { warn!("poll_ifaces: No net driver found!"); return; } - let mut sockets = SOCKET_SET.lock_irqsave(); for (_, iface) in guard.iter() { - iface.poll(&mut sockets).ok(); + iface.poll(); } - let _ = send_event(&sockets); } -/// 对ifaces进行轮询,最多对SOCKET_SET尝试times次加锁。 -/// -/// @return 轮询成功,返回Ok(()) -/// @return 加锁超时,返回SystemError::EAGAIN_OR_EWOULDBLOCK -/// @return 没有网卡,返回SystemError::ENODEV -pub fn poll_ifaces_try_lock(times: u16) -> Result<(), SystemError> { - let mut i = 0; - while i < times { - let guard: RwLockReadGuard>> = - NET_DEVICES.read_irqsave(); - if guard.len() == 0 { - warn!("poll_ifaces: No net driver found!"); - // 没有网卡,返回错误 - return Err(SystemError::ENODEV); - } - let sockets = SOCKET_SET.try_lock_irqsave(); - // 加锁失败,继续尝试 - if sockets.is_err() { - i += 1; - continue; - } - - let mut sockets = sockets.unwrap(); - for (_, iface) in guard.iter() { - iface.poll(&mut sockets).ok(); - } - send_event(&sockets)?; - return Ok(()); - } - // 尝试次数用完,返回错误 - return Err(SystemError::EAGAIN_OR_EWOULDBLOCK); -} - -/// 对ifaces进行轮询,最多对SOCKET_SET尝试一次加锁。 -/// -/// @return 轮询成功,返回Ok(()) -/// @return 加锁超时,返回SystemError::EAGAIN_OR_EWOULDBLOCK -/// @return 没有网卡,返回SystemError::ENODEV -pub fn poll_ifaces_try_lock_onetime() -> Result<(), SystemError> { - let guard: RwLockReadGuard>> = NET_DEVICES.read_irqsave(); - if guard.len() == 0 { - warn!("poll_ifaces: No net driver found!"); - // 没有网卡,返回错误 - return Err(SystemError::ENODEV); - } - let mut sockets = SOCKET_SET.try_lock_irqsave()?; - for (_, iface) in guard.iter() { - iface.poll(&mut sockets).ok(); - } - send_event(&sockets)?; - return Ok(()); -} - -/// ### 处理轮询后的事件 -fn send_event(sockets: &smoltcp::iface::SocketSet) -> Result<(), SystemError> { - for (handle, socket_type) in sockets.iter() { - let handle_guard = HANDLE_MAP.read_irqsave(); - let global_handle = GlobalSocketHandle::new_smoltcp_handle(handle); - let item: Option<&super::socket::SocketHandleItem> = handle_guard.get(&global_handle); - if item.is_none() { - continue; - } - - let handle_item = item.unwrap(); - let posix_item = handle_item.posix_item(); - if posix_item.is_none() { - continue; - } - let posix_item = posix_item.unwrap(); - - // 获取socket上的事件 - let mut events = SocketPollMethod::poll(socket_type, handle_item).bits() as u64; - - // 分发到相应类型socket处理 - match socket_type { - smoltcp::socket::Socket::Raw(_) | smoltcp::socket::Socket::Udp(_) => { - posix_item.wakeup_any(events); - } - smoltcp::socket::Socket::Icmp(_) => unimplemented!("Icmp socket hasn't unimplemented"), - smoltcp::socket::Socket::Tcp(inner_socket) => { - if inner_socket.is_active() { - events |= TcpSocket::CAN_ACCPET; - } - if inner_socket.state() == smoltcp::socket::tcp::State::Established { - events |= TcpSocket::CAN_CONNECT; - } - if inner_socket.state() == smoltcp::socket::tcp::State::CloseWait { - events |= EPollEventType::EPOLLHUP.bits() as u64; - } - - posix_item.wakeup_any(events); - } - smoltcp::socket::Socket::Dhcpv4(_) => {} - smoltcp::socket::Socket::Dns(_) => 
unimplemented!("Dns socket hasn't unimplemented"), - } - EventPoll::wakeup_epoll( - &posix_item.epitems, - EPollEventType::from_bits_truncate(events as u32), - )?; - drop(handle_guard); - // crate::debug!( - // "{} send_event {:?}", - // handle, - // EPollEventType::from_bits_truncate(events as u32) - // ); - } - Ok(()) -} +// /// 对ifaces进行轮询,最多对SOCKET_SET尝试times次加锁。 +// /// +// /// @return 轮询成功,返回Ok(()) +// /// @return 加锁超时,返回SystemError::EAGAIN_OR_EWOULDBLOCK +// /// @return 没有网卡,返回SystemError::ENODEV +// pub fn poll_ifaces_try_lock(times: u16) -> Result<(), SystemError> { +// let mut i = 0; +// while i < times { +// let guard: RwLockReadGuard>> = +// NET_DEVICES.read_irqsave(); +// if guard.len() == 0 { +// warn!("poll_ifaces: No net driver found!"); +// // 没有网卡,返回错误 +// return Err(SystemError::ENODEV); +// } +// for (_, iface) in guard.iter() { +// iface.poll(); +// } +// return Ok(()); +// } +// // 尝试次数用完,返回错误 +// return Err(SystemError::EAGAIN_OR_EWOULDBLOCK); +// } + +// /// 对ifaces进行轮询,最多对SOCKET_SET尝试一次加锁。 +// /// +// /// @return 轮询成功,返回Ok(()) +// /// @return 加锁超时,返回SystemError::EAGAIN_OR_EWOULDBLOCK +// /// @return 没有网卡,返回SystemError::ENODEV +// pub fn poll_ifaces_try_lock_onetime() -> Result<(), SystemError> { +// let guard: RwLockReadGuard>> = NET_DEVICES.read_irqsave(); +// if guard.len() == 0 { +// warn!("poll_ifaces: No net driver found!"); +// // 没有网卡,返回错误 +// return Err(SystemError::ENODEV); +// } +// for (_, iface) in guard.iter() { +// let _ = iface.poll(); +// } +// send_event()?; +// return Ok(()); +// } + +// /// ### 处理轮询后的事件 +// fn send_event() -> Result<(), SystemError> { +// for (handle, socket_type) in .lock().iter() { + +// let global_handle = GlobalSocketHandle::new_smoltcp_handle(handle); + +// let handle_guard = HANDLE_MAP.read_irqsave(); +// let item: Option<&super::socket::SocketHandleItem> = handle_guard.get(&global_handle); +// if item.is_none() { +// continue; +// } + +// let handle_item = item.unwrap(); +// let posix_item = handle_item.posix_item(); +// if posix_item.is_none() { +// continue; +// } +// let posix_item = posix_item.unwrap(); + +// // 获取socket上的事件 +// let mut events = SocketPollMethod::poll(socket_type, handle_item).bits() as u64; + +// // 分发到相应类型socket处理 +// match socket_type { +// smoltcp::socket::Socket::Raw(_) | smoltcp::socket::Socket::Udp(_) => { +// posix_item.wakeup_any(events); +// } +// smoltcp::socket::Socket::Icmp(_) => unimplemented!("Icmp socket hasn't unimplemented"), +// smoltcp::socket::Socket::Tcp(inner_socket) => { +// if inner_socket.is_active() { +// events |= TcpSocket::CAN_ACCPET; +// } +// if inner_socket.state() == smoltcp::socket::tcp::State::Established { +// events |= TcpSocket::CAN_CONNECT; +// } +// if inner_socket.state() == smoltcp::socket::tcp::State::CloseWait { +// events |= EPollEventType::EPOLLHUP.bits() as u64; +// } + +// posix_item.wakeup_any(events); +// } +// smoltcp::socket::Socket::Dhcpv4(_) => {} +// smoltcp::socket::Socket::Dns(_) => unimplemented!("Dns socket hasn't unimplemented"), +// } +// EventPoll::wakeup_epoll( +// &posix_item.epitems, +// EPollEventType::from_bits_truncate(events as u32), +// )?; +// drop(handle_guard); +// } +// Ok(()) +// } diff --git a/kernel/src/net/socket/common/mod.rs b/kernel/src/net/socket/common/mod.rs new file mode 100644 index 000000000..fd50679b7 --- /dev/null +++ b/kernel/src/net/socket/common/mod.rs @@ -0,0 +1,18 @@ +pub mod shutdown; +pub mod poll_unit; + +#[allow(dead_code)] +pub use shutdown::Shutdown; + +// /// @brief 在trait Socket的metadata函数中返回该结构体供外部使用 
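With the `Iface` trait and `IfaceCommon` (kernel/src/driver/net/mod.rs above) in place, and `poll_ifaces` reduced to a plain loop over `NET_DEVICES`, the surface a caller sees is small: `common()`, `mac()`, `name()` and `poll()` come from the driver, while `nic_id()`, `update_ip_addrs()`, `sockets()` and `port_manager()` are default methods delegating to `IfaceCommon`. A minimal caller-side sketch; the `bring_up` helper and the example address are illustrative, not part of the patch:

```rust
use alloc::sync::Arc;
use smoltcp::wire::{IpAddress, IpCidr};
use system_error::SystemError;

use crate::driver::net::Iface;

/// Hypothetical helper; `iface` can be any entry of NET_DEVICES
/// (LoopbackInterface, E1000EInterface or VirtioInterface above).
fn bring_up(iface: &Arc<dyn Iface>) -> Result<(), SystemError> {
    // Default trait methods forward to IfaceCommon.
    let id = iface.nic_id();
    // update_ip_addrs() accepts exactly one CIDR, otherwise it returns EINVAL.
    iface.update_ip_addrs(&[IpCidr::new(IpAddress::v4(10, 0, 2, 15), 24)])?;
    log::info!("{} (id {}) mac {}", iface.name(), id, iface.mac());
    // poll() hands the driver's phy::Device to IfaceCommon::poll(), which
    // processes the per-iface SocketSet and notifies the bound sockets.
    iface.poll();
    Ok(())
}
```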
+// #[derive(Debug, Clone)] +// pub struct Metadata { +// /// 接收缓冲区的大小 +// pub rx_buf_size: usize, +// /// 发送缓冲区的大小 +// pub tx_buf_size: usize, +// /// 元数据的缓冲区的大小 +// pub metadata_buf_size: usize, +// /// socket的选项 +// pub options: SocketOptions, +// } diff --git a/kernel/src/net/socket/common/poll_unit.rs b/kernel/src/net/socket/common/poll_unit.rs new file mode 100644 index 000000000..69b6df9b9 --- /dev/null +++ b/kernel/src/net/socket/common/poll_unit.rs @@ -0,0 +1,123 @@ +use alloc::{collections::LinkedList, sync::Arc, vec::Vec}; +use system_error::SystemError; + +use crate::{libs::{spinlock::SpinLock, wait_queue::EventWaitQueue}, net::event_poll::{EPollEventType, EPollItem, EventPoll}, process::ProcessManager, sched::{schedule, SchedMode}}; + + + +#[derive(Debug)] +pub struct WaitQueue { + /// socket的waitqueue + wait_queue: Arc, +} + +impl Default for WaitQueue { + fn default() -> Self { + Self { + wait_queue: Default::default(), + } + } +} + +impl WaitQueue { + pub fn new(wait_queue: Arc) -> Self { + Self { + wait_queue, + } + } + + /// # `wakeup_any` + /// 唤醒该队列上等待events的进程 + /// ## 参数 + /// - events: 发生的事件 + /// 需要注意的是,只要触发了events中的任意一件事件,进程都会被唤醒 + pub fn wakeup_any(&self, events: EPollEventType) { + self.wait_queue.wakeup_any(events.bits() as u64); + } + + /// # `wait_for` + /// 等待events事件发生 + pub fn wait_for(&self, events: EPollEventType) { + unsafe { + ProcessManager::preempt_disable(); + self.wait_queue.sleep_without_schedule(events.bits() as u64); + ProcessManager::preempt_enable(); + } + schedule(SchedMode::SM_NONE); + } + + /// # `busy_wait` + /// 轮询一个会返回EPAGAIN_OR_EWOULDBLOCK的函数 + pub fn busy_wait(&self, events: EPollEventType, mut f: F) -> Result + where + F: FnMut() -> Result + { + loop { + match f() { + Ok(r) => return Ok(r), + Err(SystemError::EAGAIN_OR_EWOULDBLOCK) => { + self.wait_for(events); + }, + Err(e) => return Err(e), + } + } + } +} + +#[derive(Debug)] +pub struct EPollItems { + items: SpinLock>>, +} + +impl Default for EPollItems { + fn default() -> Self { + Self { + items: SpinLock::new(LinkedList::new()), + } + } +} + +impl EPollItems { + pub fn new() -> Self { + Self { + items: SpinLock::new(LinkedList::new()), + } + } + + pub fn add(&self, item: Arc) { + self.items.lock_irqsave().push_back(item); + } + + pub fn remove(&self, item: &Arc) -> Result<(), SystemError> { + let to_remove = self + .items + .lock_irqsave() + .extract_if(|x| Arc::ptr_eq(x, item)) + .collect::>(); + + let result = if !to_remove.is_empty() { + Ok(()) + } else { + Err(SystemError::ENOENT) + }; + + drop(to_remove); + return result; + } + + pub fn clear(&self) -> Result<(), SystemError> { + let mut guard = self.items.lock_irqsave(); + let mut result = Ok(()); + guard.iter().for_each(|item| { + if let Some(epoll) = item.epoll().upgrade() { + let _ = EventPoll::ep_remove(&mut epoll.lock_irqsave(), item.fd(), None) + .map_err(|e| { + result = Err(e); + } + ); + } + }); + guard.clear(); + return result; + } +} \ No newline at end of file diff --git a/kernel/src/net/socket/common/shutdown.rs b/kernel/src/net/socket/common/shutdown.rs new file mode 100644 index 000000000..cb4c6a4f6 --- /dev/null +++ b/kernel/src/net/socket/common/shutdown.rs @@ -0,0 +1,98 @@ +use core::sync::atomic::AtomicU8; + + +bitflags! 
{ + /// @brief 用于指定socket的关闭类型 + /// 参考:https://code.dragonos.org.cn/xref/linux-6.1.9/include/net/sock.h?fi=SHUTDOWN_MASK#1573 + pub struct ShutdownBit: u8 { + const SHUT_RD = 0; + const SHUT_WR = 1; + const SHUT_RDWR = 2; + } +} + +const RCV_SHUTDOWN: u8 = 0x01; +const SEND_SHUTDOWN: u8 = 0x02; +const SHUTDOWN_MASK: u8 = 0x03; + +#[derive(Debug, Default)] +pub struct Shutdown { + bit: AtomicU8 +} + +impl From for Shutdown { + fn from(shutdown_bit: ShutdownBit) -> Self { + match shutdown_bit { + ShutdownBit::SHUT_RD => Shutdown { + bit: AtomicU8::new(RCV_SHUTDOWN), + }, + ShutdownBit::SHUT_WR => Shutdown { + bit: AtomicU8::new(SEND_SHUTDOWN), + }, + ShutdownBit::SHUT_RDWR => Shutdown { + bit: AtomicU8::new(SHUTDOWN_MASK), + }, + _ => Shutdown::default(), + } + } +} + +impl Shutdown { + pub fn new() -> Self { + Self { + bit: AtomicU8::new(0), + } + } + + pub fn recv_shutdown(&self) { + self.bit.fetch_or(RCV_SHUTDOWN, core::sync::atomic::Ordering::SeqCst); + } + + pub fn send_shutdown(&self) { + self.bit.fetch_or(SEND_SHUTDOWN, core::sync::atomic::Ordering::SeqCst); + } + + // pub fn is_recv_shutdown(&self) -> bool { + // self.bit.load(core::sync::atomic::Ordering::SeqCst) & RCV_SHUTDOWN != 0 + // } + + // pub fn is_send_shutdown(&self) -> bool { + // self.bit.load(core::sync::atomic::Ordering::SeqCst) & SEND_SHUTDOWN != 0 + // } + + // pub fn is_both_shutdown(&self) -> bool { + // self.bit.load(core::sync::atomic::Ordering::SeqCst) & SHUTDOWN_MASK == SHUTDOWN_MASK + // } + + // pub fn is_empty(&self) -> bool { + // self.bit.load(core::sync::atomic::Ordering::SeqCst) == 0 + // } + + pub fn get(&self) -> ShutdownTemp { + ShutdownTemp { + bit: self.bit.load(core::sync::atomic::Ordering::SeqCst), + } + } +} + +pub struct ShutdownTemp { + bit: u8, +} + +impl ShutdownTemp { + pub fn is_recv_shutdown(&self) -> bool { + self.bit & RCV_SHUTDOWN != 0 + } + + pub fn is_send_shutdown(&self) -> bool { + self.bit & SEND_SHUTDOWN != 0 + } + + pub fn is_both_shutdown(&self) -> bool { + self.bit & SHUTDOWN_MASK == SHUTDOWN_MASK + } + + pub fn is_empty(&self) -> bool { + self.bit == 0 + } +} \ No newline at end of file diff --git a/kernel/src/net/socket/define.rs b/kernel/src/net/socket/define.rs index 7bdf23610..dc50e3bc4 100644 --- a/kernel/src/net/socket/define.rs +++ b/kernel/src/net/socket/define.rs @@ -1,105 +1,105 @@ bitflags! 
{ // #[derive(PartialEq, Eq, Debug, Clone, Copy)] - pub struct SockOp: u32 { - const SO_DEBUG = 1; - const SO_REUSEADDR = 2; - const SO_TYPE = 3; - const SO_ERROR = 4; - const SO_DONTROUTE = 5; - const SO_BROADCAST = 6; - const SO_SNDBUF = 7; - const SO_RCVBUF = 8; - const SO_SNDBUFFORCE = 32; - const SO_RCVBUFFORCE = 33; - const SO_KEEPALIVE = 9; - const SO_OOBINLINE = 10; - const SO_NO_CHECK = 11; - const SO_PRIORITY = 12; - const SO_LINGER = 13; - const SO_BSDCOMPAT = 14; - const SO_REUSEPORT = 15; - const SO_PASSCRED = 16; - const SO_PEERCRED = 17; - const SO_RCVLOWAT = 18; - const SO_SNDLOWAT = 19; - const SO_RCVTIMEO_OLD = 20; - const SO_SNDTIMEO_OLD = 21; - - const SO_SECURITY_AUTHENTICATION = 22; - const SO_SECURITY_ENCRYPTION_TRANSPORT = 23; - const SO_SECURITY_ENCRYPTION_NETWORK = 24; - - const SO_BINDTODEVICE = 25; - - /// 与SO_GET_FILTER相同 - const SO_ATTACH_FILTER = 26; - const SO_DETACH_FILTER = 27; - - const SO_PEERNAME = 28; - - const SO_ACCEPTCONN = 30; - - const SO_PEERSEC = 31; - const SO_PASSSEC = 34; - - const SO_MARK = 36; - - const SO_PROTOCOL = 38; - const SO_DOMAIN = 39; - - const SO_RXQ_OVFL = 40; + pub struct Options: u32 { + const DEBUG = 1; + const REUSEADDR = 2; + const TYPE = 3; + const ERROR = 4; + const DONTROUTE = 5; + const BROADCAST = 6; + const SNDBUF = 7; + const RCVBUF = 8; + const SNDBUFFORCE = 32; + const RCVBUFFORCE = 33; + const KEEPALIVE = 9; + const OOBINLINE = 10; + const NO_CHECK = 11; + const PRIORITY = 12; + const LINGER = 13; + const BSDCOMPAT = 14; + const REUSEPORT = 15; + const PASSCRED = 16; + const PEERCRED = 17; + const RCVLOWAT = 18; + const SNDLOWAT = 19; + const RCVTIMEO_OLD = 20; + const SNDTIMEO_OLD = 21; + + const SECURITY_AUTHENTICATION = 22; + const SECURITY_ENCRYPTION_TRANSPORT = 23; + const SECURITY_ENCRYPTION_NETWORK = 24; + + const BINDTODEVICE = 25; + + /// 与GET_FILTER相同 + const ATTACH_FILTER = 26; + const DETACH_FILTER = 27; + + const PEERNAME = 28; + + const ACCEPTCONN = 30; + + const PEERSEC = 31; + const PASSSEC = 34; + + const MARK = 36; + + const PROTOCOL = 38; + const DOMAIN = 39; + + const RXQ_OVFL = 40; /// 与SCM_WIFI_STATUS相同 - const SO_WIFI_STATUS = 41; - const SO_PEEK_OFF = 42; + const WIFI_STATUS = 41; + const PEEK_OFF = 42; /* Instruct lower device to use last 4-bytes of skb data as FCS */ - const SO_NOFCS = 43; - - const SO_LOCK_FILTER = 44; - const SO_SELECT_ERR_QUEUE = 45; - const SO_BUSY_POLL = 46; - const SO_MAX_PACING_RATE = 47; - const SO_BPF_EXTENSIONS = 48; - const SO_INCOMING_CPU = 49; - const SO_ATTACH_BPF = 50; - // SO_DETACH_BPF = SO_DETACH_FILTER; - const SO_ATTACH_REUSEPORT_CBPF = 51; - const SO_ATTACH_REUSEPORT_EBPF = 52; - - const SO_CNX_ADVICE = 53; + const NOFCS = 43; + + const LOCK_FILTER = 44; + const SELECT_ERR_QUEUE = 45; + const BUSY_POLL = 46; + const MAX_PACING_RATE = 47; + const BPF_EXTENSIONS = 48; + const INCOMING_CPU = 49; + const ATTACH_BPF = 50; + // DETACH_BPF = DETACH_FILTER; + const ATTACH_REUSEPORT_CBPF = 51; + const ATTACH_REUSEPORT_EBPF = 52; + + const CNX_ADVICE = 53; const SCM_TIMESTAMPING_OPT_STATS = 54; - const SO_MEMINFO = 55; - const SO_INCOMING_NAPI_ID = 56; - const SO_COOKIE = 57; + const MEMINFO = 55; + const INCOMING_NAPI_ID = 56; + const COOKIE = 57; const SCM_TIMESTAMPING_PKTINFO = 58; - const SO_PEERGROUPS = 59; - const SO_ZEROCOPY = 60; + const PEERGROUPS = 59; + const ZEROCOPY = 60; /// 与SCM_TXTIME相同 - const SO_TXTIME = 61; + const TXTIME = 61; - const SO_BINDTOIFINDEX = 62; + const BINDTOIFINDEX = 62; - const SO_TIMESTAMP_OLD = 29; - const SO_TIMESTAMPNS_OLD 
= 35; - const SO_TIMESTAMPING_OLD = 37; - const SO_TIMESTAMP_NEW = 63; - const SO_TIMESTAMPNS_NEW = 64; - const SO_TIMESTAMPING_NEW = 65; + const TIMESTAMP_OLD = 29; + const TIMESTAMPNS_OLD = 35; + const TIMESTAMPING_OLD = 37; + const TIMESTAMP_NEW = 63; + const TIMESTAMPNS_NEW = 64; + const TIMESTAMPING_NEW = 65; - const SO_RCVTIMEO_NEW = 66; - const SO_SNDTIMEO_NEW = 67; + const RCVTIMEO_NEW = 66; + const SNDTIMEO_NEW = 67; - const SO_DETACH_REUSEPORT_BPF = 68; + const DETACH_REUSEPORT_BPF = 68; - const SO_PREFER_BUSY_POLL = 69; - const SO_BUSY_POLL_BUDGET = 70; + const PREFER_BUSY_POLL = 69; + const BUSY_POLL_BUDGET = 70; - const SO_NETNS_COOKIE = 71; - const SO_BUF_LOCK = 72; - const SO_RESERVE_MEM = 73; - const SO_TXREHASH = 74; - const SO_RCVMARK = 75; + const NETNS_COOKIE = 71; + const BUF_LOCK = 72; + const RESERVE_MEM = 73; + const TXREHASH = 74; + const RCVMARK = 75; } } @@ -133,4 +133,172 @@ impl Types { pub fn types(&self) -> Types { Types::from_bits(self.bits() & 0b_1111).unwrap() } + + #[inline(always)] + pub fn is_nonblock(&self) -> bool { + self.contains(Types::NONBLOCK) + } + + #[inline(always)] + pub fn is_cloexec(&self) -> bool { + self.contains(Types::CLOEXEC) + } +} + +/// @brief 地址族的枚举 +/// +/// 参考:https://code.dragonos.org.cn/xref/linux-5.19.10/include/linux/socket.h#180 +#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)] +pub enum AddressFamily { + /// AF_UNSPEC 表示地址族未指定 + Unspecified = 0, + /// AF_UNIX 表示Unix域的socket (与AF_LOCAL相同) + Unix = 1, + /// AF_INET 表示IPv4的socket + INet = 2, + /// AF_AX25 表示AMPR AX.25的socket + AX25 = 3, + /// AF_IPX 表示IPX的socket + IPX = 4, + /// AF_APPLETALK 表示Appletalk的socket + Appletalk = 5, + /// AF_NETROM 表示AMPR NET/ROM的socket + Netrom = 6, + /// AF_BRIDGE 表示多协议桥接的socket + Bridge = 7, + /// AF_ATMPVC 表示ATM PVCs的socket + Atmpvc = 8, + /// AF_X25 表示X.25的socket + X25 = 9, + /// AF_INET6 表示IPv6的socket + INet6 = 10, + /// AF_ROSE 表示AMPR ROSE的socket + Rose = 11, + /// AF_DECnet Reserved for DECnet project + Decnet = 12, + /// AF_NETBEUI Reserved for 802.2LLC project + Netbeui = 13, + /// AF_SECURITY 表示Security callback的伪AF + Security = 14, + /// AF_KEY 表示Key management API + Key = 15, + /// AF_NETLINK 表示Netlink的socket + Netlink = 16, + /// AF_PACKET 表示Low level packet interface + Packet = 17, + /// AF_ASH 表示Ash + Ash = 18, + /// AF_ECONET 表示Acorn Econet + Econet = 19, + /// AF_ATMSVC 表示ATM SVCs + Atmsvc = 20, + /// AF_RDS 表示Reliable Datagram Sockets + Rds = 21, + /// AF_SNA 表示Linux SNA Project + Sna = 22, + /// AF_IRDA 表示IRDA sockets + Irda = 23, + /// AF_PPPOX 表示PPPoX sockets + Pppox = 24, + /// AF_WANPIPE 表示WANPIPE API sockets + WanPipe = 25, + /// AF_LLC 表示Linux LLC + Llc = 26, + /// AF_IB 表示Native InfiniBand address + /// 介绍:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/configuring_infiniband_and_rdma_networks/index#understanding-infiniband-and-rdma_configuring-infiniband-and-rdma-networks + Ib = 27, + /// AF_MPLS 表示MPLS + Mpls = 28, + /// AF_CAN 表示Controller Area Network + Can = 29, + /// AF_TIPC 表示TIPC sockets + Tipc = 30, + /// AF_BLUETOOTH 表示Bluetooth sockets + Bluetooth = 31, + /// AF_IUCV 表示IUCV sockets + Iucv = 32, + /// AF_RXRPC 表示RxRPC sockets + Rxrpc = 33, + /// AF_ISDN 表示mISDN sockets + Isdn = 34, + /// AF_PHONET 表示Phonet sockets + Phonet = 35, + /// AF_IEEE802154 表示IEEE 802.15.4 sockets + Ieee802154 = 36, + /// AF_CAIF 表示CAIF sockets + Caif = 37, + /// AF_ALG 表示Algorithm sockets + Alg = 38, + /// AF_NFC 表示NFC sockets + Nfc = 39, + /// AF_VSOCK 表示vSockets + Vsock = 40, 
+ /// AF_KCM 表示Kernel Connection Multiplexor + Kcm = 41, + /// AF_QIPCRTR 表示Qualcomm IPC Router + Qipcrtr = 42, + /// AF_SMC 表示SMC-R sockets. + /// reserve number for PF_SMC protocol family that reuses AF_INET address family + Smc = 43, + /// AF_XDP 表示XDP sockets + Xdp = 44, + /// AF_MCTP 表示Management Component Transport Protocol + Mctp = 45, + /// AF_MAX 表示最大的地址族 + Max = 46, +} + +impl TryFrom for AddressFamily { + type Error = system_error::SystemError; + fn try_from(x: u16) -> Result { + use num_traits::FromPrimitive; + return ::from_u16(x).ok_or(system_error::SystemError::EINVAL); + } +} + +bitflags! { + pub struct OptionsLevel: u32 { + const IP = 0; + // const SOL_ICMP = 1; // No-no-no! Due to Linux :-) we cannot + const SOCKET = 1; + const TCP = 6; + const UDP = 17; + const IPV6 = 41; + const ICMPV6 = 58; + const SCTP = 132; + const UDPLITE = 136; // UDP-Lite (RFC 3828) + const RAW = 255; + const IPX = 256; + const AX25 = 257; + const ATALK = 258; + const NETROM = 259; + const ROSE = 260; + const DECNET = 261; + const X25 = 262; + const PACKET = 263; + const ATM = 264; // ATM layer (cell level) + const AAL = 265; // ATM Adaption Layer (packet level) + const IRDA = 266; + const NETBEUI = 267; + const LLC = 268; + const DCCP = 269; + const NETLINK = 270; + const TIPC = 271; + const RXRPC = 272; + const PPPOL2TP = 273; + const BLUETOOTH = 274; + const PNPIPE = 275; + const RDS = 276; + const IUCV = 277; + const CAIF = 278; + const ALG = 279; + const NFC = 280; + const KCM = 281; + const TLS = 282; + const XDP = 283; + const MPTCP = 284; + const MCTP = 285; + const SMC = 286; + const VSOCK = 287; + } } diff --git a/kernel/src/net/socket/inet.rs b/kernel/src/net/socket/inet.rs deleted file mode 100644 index b7e1fc8fd..000000000 --- a/kernel/src/net/socket/inet.rs +++ /dev/null @@ -1,1600 +0,0 @@ -use alloc::{boxed::Box, sync::Arc, vec::Vec}; -use log::{debug, error, warn}; -use smoltcp::{ - socket::{raw, tcp, udp}, - wire, -}; -use system_error::SystemError; - -use crate::{ - driver::net::NetDevice, - libs::rwlock::RwLock, - net::{ - event_poll::EPollEventType, net_core::poll_ifaces, socket::tcp_def::TcpOptions, syscall::PosixSocketOption, Endpoint, Protocol, ShutdownType, NET_DEVICES, SocketOptionsLevel - }, -}; - -use super::{ - handle::GlobalSocketHandle, PosixSocketHandleItem, Socket, SocketHandleItem, SocketMetadata, - SocketOptions, SocketPollMethod, SocketType, HANDLE_MAP, PORT_MANAGER, SOCKET_SET, ip_def::IpOptions, -}; - -/// @brief 表示原始的socket。原始套接字绕过传输层协议(如 TCP 或 UDP)并提供对网络层协议(如 IP)的直接访问。 -/// -/// ref: https://man7.org/linux/man-pages/man7/raw.7.html -#[derive(Debug, Clone)] -pub struct RawSocket { - handle: GlobalSocketHandle, - /// 用户发送的数据包是否包含了IP头. 
- /// 如果是true,用户发送的数据包,必须包含IP头。(即用户要自行设置IP头+数据) - /// 如果是false,用户发送的数据包,不包含IP头。(即用户只要设置数据) - header_included: bool, - /// socket的metadata - metadata: SocketMetadata, - posix_item: Arc, -} - -impl RawSocket { - /// 元数据的缓冲区的大小 - pub const DEFAULT_METADATA_BUF_SIZE: usize = 1024; - /// 默认的接收缓冲区的大小 receive - pub const DEFAULT_RX_BUF_SIZE: usize = 64 * 1024; - /// 默认的发送缓冲区的大小 transmiss - pub const DEFAULT_TX_BUF_SIZE: usize = 64 * 1024; - - pub const ICMP_FILTER: usize = 1; - - /// @brief 创建一个原始的socket - /// - /// @param protocol 协议号 - /// @param options socket的选项 - /// - /// @return 返回创建的原始的socket - pub fn new(protocol: Protocol, options: SocketOptions) -> Self { - let rx_buffer = raw::PacketBuffer::new( - vec![raw::PacketMetadata::EMPTY; Self::DEFAULT_METADATA_BUF_SIZE], - vec![0; Self::DEFAULT_RX_BUF_SIZE], - ); - let tx_buffer = raw::PacketBuffer::new( - vec![raw::PacketMetadata::EMPTY; Self::DEFAULT_METADATA_BUF_SIZE], - vec![0; Self::DEFAULT_TX_BUF_SIZE], - ); - let protocol: u8 = protocol.into(); - let socket = raw::Socket::new( - wire::IpVersion::Ipv4, - wire::IpProtocol::from(protocol), - rx_buffer, - tx_buffer, - ); - - // 把socket添加到socket集合中,并得到socket的句柄 - let handle = GlobalSocketHandle::new_smoltcp_handle(SOCKET_SET.lock_irqsave().add(socket)); - - let metadata = SocketMetadata::new( - SocketType::Raw, - Self::DEFAULT_RX_BUF_SIZE, - Self::DEFAULT_TX_BUF_SIZE, - Self::DEFAULT_METADATA_BUF_SIZE, - options, - ); - - let posix_item = Arc::new(PosixSocketHandleItem::new(None)); - - return Self { - handle, - header_included: false, - metadata, - posix_item, - }; - } -} - -impl Socket for RawSocket { - fn posix_item(&self) -> Arc { - self.posix_item.clone() - } - - fn close(&mut self) { - let mut socket_set_guard = SOCKET_SET.lock_irqsave(); - if let smoltcp::socket::Socket::Udp(mut sock) = - socket_set_guard.remove(self.handle.smoltcp_handle().unwrap()) - { - sock.close(); - } - drop(socket_set_guard); - poll_ifaces(); - } - - fn read(&self, buf: &mut [u8]) -> (Result, Endpoint) { - poll_ifaces(); - loop { - // 如何优化这里? - let mut socket_set_guard = SOCKET_SET.lock_irqsave(); - let socket = - socket_set_guard.get_mut::(self.handle.smoltcp_handle().unwrap()); - - match socket.recv_slice(buf) { - Ok(len) => { - let packet = wire::Ipv4Packet::new_unchecked(buf); - return ( - Ok(len), - Endpoint::Ip(Some(wire::IpEndpoint { - addr: wire::IpAddress::Ipv4(packet.src_addr()), - port: 0, - })), - ); - } - Err(_) => { - if !self.metadata.options.contains(SocketOptions::BLOCK) { - // 如果是非阻塞的socket,就返回错误 - return (Err(SystemError::EAGAIN_OR_EWOULDBLOCK), Endpoint::Ip(None)); - } - } - } - drop(socket_set_guard); - self.posix_item.sleep(EPollEventType::EPOLLIN.bits() as u64); - } - } - - fn write(&self, buf: &[u8], to: Option) -> Result { - // 如果用户发送的数据包,包含IP头,则直接发送 - if self.header_included { - let mut socket_set_guard = SOCKET_SET.lock_irqsave(); - let socket = - socket_set_guard.get_mut::(self.handle.smoltcp_handle().unwrap()); - match socket.send_slice(buf) { - Ok(_) => { - return Ok(buf.len()); - } - Err(raw::SendError::BufferFull) => { - return Err(SystemError::ENOBUFS); - } - } - } else { - // 如果用户发送的数据包,不包含IP头,则需要自己构造IP头 - - if let Some(Endpoint::Ip(Some(endpoint))) = to { - let mut socket_set_guard = SOCKET_SET.lock_irqsave(); - let socket: &mut raw::Socket = - socket_set_guard.get_mut::(self.handle.smoltcp_handle().unwrap()); - - // 暴力解决方案:只考虑0号网卡。 TODO:考虑多网卡的情况!!! 
- let iface = NET_DEVICES.read_irqsave().get(&0).unwrap().clone(); - - // 构造IP头 - let ipv4_src_addr: Option = - iface.inner_iface().lock().ipv4_addr(); - if ipv4_src_addr.is_none() { - return Err(SystemError::ENETUNREACH); - } - let ipv4_src_addr = ipv4_src_addr.unwrap(); - - if let wire::IpAddress::Ipv4(ipv4_dst) = endpoint.addr { - let len = buf.len(); - - // 创建20字节的IPv4头部 - let mut buffer: Vec = vec![0u8; len + 20]; - let mut packet: wire::Ipv4Packet<&mut Vec> = - wire::Ipv4Packet::new_unchecked(&mut buffer); - - // 封装ipv4 header - packet.set_version(4); - packet.set_header_len(20); - packet.set_total_len((20 + len) as u16); - packet.set_src_addr(ipv4_src_addr); - packet.set_dst_addr(ipv4_dst); - - // 设置ipv4 header的protocol字段 - packet.set_next_header(socket.ip_protocol()); - - // 获取IP数据包的负载字段 - let payload: &mut [u8] = packet.payload_mut(); - payload.copy_from_slice(buf); - - // 填充checksum字段 - packet.fill_checksum(); - - // 发送数据包 - socket.send_slice(&buffer).unwrap(); - - iface.poll(&mut socket_set_guard).ok(); - - drop(socket_set_guard); - return Ok(len); - } else { - warn!("Unsupport Ip protocol type!"); - return Err(SystemError::EINVAL); - } - } else { - // 如果没有指定目的地址,则返回错误 - return Err(SystemError::ENOTCONN); - } - } - } - - fn connect(&mut self, _endpoint: Endpoint) -> Result<(), SystemError> { - Ok(()) - } - - fn metadata(&self) -> SocketMetadata { - self.metadata.clone() - } - - fn box_clone(&self) -> Box { - Box::new(self.clone()) - } - - /// @brief 设置socket的选项 - /// - /// @param level 选项的层次 - /// @param optname 选项的名称 - /// @param optval 选项的值 - /// - /// @return 返回设置是否成功, 如果不支持该选项,返回ENOSYS - /// - /// ## See - /// https://code.dragonos.org.cn/s?refs=sk_setsockopt&project=linux-6.6.21 - fn setsockopt( - &self, - _level: SocketOptionsLevel, - optname: usize, - _optval: &[u8], - ) -> Result<(), SystemError> { - if optname == Self::ICMP_FILTER { - todo!("setsockopt ICMP_FILTER"); - } - return Err(SystemError::ENOPROTOOPT); - } - - fn socket_handle(&self) -> GlobalSocketHandle { - self.handle - } - - fn as_any_ref(&self) -> &dyn core::any::Any { - self - } - - fn as_any_mut(&mut self) -> &mut dyn core::any::Any { - self - } -} - -bitflags! { - pub struct UdpSocketOptions: u32 { - const ZERO = 0; /* No UDP options */ - const UDP_CORK = 1; /* Never send partially complete segments */ - const UDP_ENCAP = 100; /* Set the socket to accept encapsulated packets */ - const UDP_NO_CHECK6_TX = 101; /* Disable sending checksum for UDP6X */ - const UDP_NO_CHECK6_RX = 102; /* Disable accepting checksum for UDP6 */ - const UDP_SEGMENT = 103; /* Set GSO segmentation size */ - const UDP_GRO = 104; /* This socket can receive UDP GRO packets */ - - const UDPLITE_SEND_CSCOV = 10; /* sender partial coverage (as sent) */ - const UDPLITE_RECV_CSCOV = 11; /* receiver partial coverage (threshold ) */ - } -} - -bitflags! { - pub struct UdpEncapTypes: u8 { - const ZERO = 0; - const ESPINUDP_NON_IKE = 1; // draft-ietf-ipsec-nat-t-ike-00/01 - const ESPINUDP = 2; // draft-ietf-ipsec-udp-encaps-06 - const L2TPINUDP = 3; // rfc2661 - const GTP0 = 4; // GSM TS 09.60 - const GTP1U = 5; // 3GPP TS 29.060 - const RXRPC = 6; - const ESPINTCP = 7; // Yikes, this is really xfrm encap types. 
- } -} - -/// @brief 表示udp socket -/// -/// https://man7.org/linux/man-pages/man7/udp.7.html -#[derive(Debug, Clone)] -pub struct UdpSocket { - pub handle: GlobalSocketHandle, - remote_endpoint: Option, // 记录远程endpoint提供给connect(), 应该使用IP地址。 - metadata: SocketMetadata, - posix_item: Arc, -} - -impl UdpSocket { - /// 元数据的缓冲区的大小 - pub const DEFAULT_METADATA_BUF_SIZE: usize = 1024; - /// 默认的接收缓冲区的大小 receive - pub const DEFAULT_RX_BUF_SIZE: usize = 64 * 1024; - /// 默认的发送缓冲区的大小 transmiss - pub const DEFAULT_TX_BUF_SIZE: usize = 64 * 1024; - - /// @brief 创建一个udp的socket - /// - /// @param options socket的选项 - /// - /// @return 返回创建的udp的socket - pub fn new(options: SocketOptions) -> Self { - let rx_buffer = udp::PacketBuffer::new( - vec![udp::PacketMetadata::EMPTY; Self::DEFAULT_METADATA_BUF_SIZE], - vec![0; Self::DEFAULT_RX_BUF_SIZE], - ); - let tx_buffer = udp::PacketBuffer::new( - vec![udp::PacketMetadata::EMPTY; Self::DEFAULT_METADATA_BUF_SIZE], - vec![0; Self::DEFAULT_TX_BUF_SIZE], - ); - let socket = udp::Socket::new(rx_buffer, tx_buffer); - - // 把socket添加到socket集合中,并得到socket的句柄 - let handle: GlobalSocketHandle = - GlobalSocketHandle::new_smoltcp_handle(SOCKET_SET.lock_irqsave().add(socket)); - - let metadata = SocketMetadata::new( - SocketType::Udp, - Self::DEFAULT_RX_BUF_SIZE, - Self::DEFAULT_TX_BUF_SIZE, - Self::DEFAULT_METADATA_BUF_SIZE, - options, - ); - - let posix_item = Arc::new(PosixSocketHandleItem::new(None)); - - return Self { - handle, - remote_endpoint: None, - metadata, - posix_item, - }; - } - - fn do_bind(&self, socket: &mut udp::Socket, endpoint: Endpoint) -> Result<(), SystemError> { - if let Endpoint::Ip(Some(mut ip)) = endpoint { - // 端口为0则分配随机端口 - if ip.port == 0 { - ip.port = PORT_MANAGER.get_ephemeral_port(self.metadata.socket_type)?; - } - // 检测端口是否已被占用 - PORT_MANAGER.bind_port(self.metadata.socket_type, ip.port)?; - - let bind_res = if ip.addr.is_unspecified() { - socket.bind(ip.port) - } else { - socket.bind(ip) - }; - - match bind_res { - Ok(()) => return Ok(()), - Err(_) => return Err(SystemError::EINVAL), - } - } else { - return Err(SystemError::EINVAL); - } - } - - fn sk_setsockopt( - &self, - _socket: &mut udp::Socket, - _level: SocketOptionsLevel, - optname: PosixSocketOption, - _optval: &[u8], - ) -> Result<(), SystemError> { - use PosixSocketOption::*; - use SystemError::*; - - if optname == SO_BINDTODEVICE { - todo!("SO_BINDTODEVICE"); - } - - match optname { - SO_DEBUG => { - todo!("SO_DEBUG"); - } - SO_REUSEADDR => { - todo!("SO_REUSEADDR"); - } - SO_REUSEPORT => { - todo!("SO_REUSEPORT"); - } - SO_TYPE => {} - SO_PROTOCOL => {} - SO_DOMAIN => {} - SO_ERROR => { - return Err(ENOPROTOOPT); - } - SO_DONTROUTE => { - todo!("SO_DONTROUTE"); - } - SO_BROADCAST => { - todo!("SO_BROADCAST"); - } - SO_SNDBUF => { - todo!("SO_SNDBUF"); - } - SO_SNDBUFFORCE => { - todo!("SO_SNDBUFFORCE"); - } - SO_RCVBUF => { - todo!("SO_RCVBUF"); - } - SO_RCVBUFFORCE => { - todo!("SO_RCVBUFFORCE"); - } - SO_KEEPALIVE => { - todo!("SO_KEEPALIVE"); - } - SO_OOBINLINE => { - todo!("SO_OOBINLINE"); - } - SO_NO_CHECK => { - todo!("SO_NO_CHECK"); - } - SO_PRIORITY => { - todo!("SO_PRIORITY"); - } - SO_LINGER => { - todo!("SO_LINGER"); - } - SO_BSDCOMPAT => { - todo!("SO_BSDCOMPAT"); - } - SO_PASSCRED => { - todo!("SO_PASSCRED"); - } - SO_PASSPIDFD => { - todo!("SO_PASSPIDFD"); - } - SO_TIMESTAMP_OLD => {} - SO_TIMESTAMP_NEW => {} - SO_TIMESTAMPNS_OLD => {} - SO_TIMESTAMPNS_NEW => { - todo!("SO_TIMESTAMPNS_NEW"); - } - SO_TIMESTAMPING_OLD => {} - SO_TIMESTAMPING_NEW => { - 
todo!("SO_TIMESTAMPING_NEW"); - } - SO_RCVLOWAT => { - todo!("SO_RCVLOWAT"); - } - SO_RCVTIMEO_OLD => {} - SO_RCVTIMEO_NEW => { - todo!("SO_RCVTIMEO_NEW"); - } - SO_SNDTIMEO_OLD => {} - SO_SNDTIMEO_NEW => { - todo!("SO_SNDTIMEO_NEW"); - } - SO_ATTACH_FILTER => { - todo!("SO_ATTACH_FILTER"); - } - SO_ATTACH_BPF => { - todo!("SO_ATTACH_BPF"); - } - SO_ATTACH_REUSEPORT_CBPF => { - todo!("SO_ATTACH_REUSEPORT_CBPF"); - } - SO_ATTACH_REUSEPORT_EBPF => { - todo!("SO_ATTACH_REUSEPORT_EBPF"); - } - SO_DETACH_REUSEPORT_BPF => { - todo!("SO_DETACH_REUSEPORT_BPF"); - } - SO_DETACH_FILTER => { - todo!("SO_DETACH_FILTER"); - } - SO_LOCK_FILTER => { - todo!("SO_LOCK_FILTER"); - } - SO_PASSSEC => { - todo!("SO_PASSSEC"); - } - SO_MARK => { - todo!("SO_MARK"); - } - SO_RCVMARK => { - todo!("SO_RCVMARK"); - } - SO_RXQ_OVFL => { - todo!("SO_RXQ_OVFL"); - } - SO_WIFI_STATUS => { - todo!("SO_WIFI_STATUS"); - } - SO_PEEK_OFF => { - todo!("SO_PEEK_OFF"); - } - SO_NOFCS => { - todo!("SO_NOFCS"); - } - SO_SELECT_ERR_QUEUE => { - todo!("SO_SELECT_ERR_QUEUE"); - } - // if define CONFIG_NET_RX_BUSY_POLL - SO_BUSY_POLL => { - todo!("SO_BUSY_POLL"); - } - SO_PREFER_BUSY_POLL => { - todo!("SO_PREFER_BUSY_POLL"); - } - SO_BUSY_POLL_BUDGET => { - todo!("SO_BUSY_POLL_BUDGET"); - } - // end if - SO_MAX_PACING_RATE => { - todo!("SO_MAX_PACING_RATE"); - } - SO_INCOMING_CPU => { - todo!("SO_INCOMING_CPU"); - } - SO_CNX_ADVICE => { - todo!("SO_CNX_ADVICE"); - } - SO_ZEROCOPY => { - todo!("SO_ZEROCOPY"); - } - SO_TXTIME => { - todo!("SO_TXTIME"); - } - SO_BINDTOIFINDEX => { - todo!("SO_BINDTOIFINDEX"); - } - SO_BUF_LOCK => { - todo!("SO_BUF_LOCK"); - } - SO_RESERVE_MEM => { - todo!("SO_RESERVE_MEM"); - } - SO_TXREHASH => { - todo!("SO_TXREHASH"); - } - _ => { - return Err(ENOPROTOOPT); - } - } - return Err(ENOPROTOOPT); - } - - fn udp_lib_setsockopt( - &self, - level: SocketOptionsLevel, - optname: usize, - optval: &[u8], - ) -> Result<(), SystemError> { - use PosixSocketOption::*; - - let mut socket_set_guard = SOCKET_SET.lock_irqsave(); - let socket = socket_set_guard.get_mut::(self.handle.smoltcp_handle().unwrap()); - - let so_opt_name = - PosixSocketOption::try_from(optname as i32) - .map_err(|_| SystemError::ENOPROTOOPT)?; - - if level == SocketOptionsLevel::SOL_SOCKET { - self.sk_setsockopt(socket, level, so_opt_name, optval)?; - if so_opt_name == SO_RCVBUF || so_opt_name == SO_RCVBUFFORCE { - todo!("SO_RCVBUF"); - } - } - - match UdpSocketOptions::from_bits_truncate(optname as u32) { - UdpSocketOptions::UDP_CORK => { - todo!("UDP_CORK"); - } - UdpSocketOptions::UDP_ENCAP => { - match UdpEncapTypes::from_bits_truncate(optval[0]) { - UdpEncapTypes::ESPINUDP_NON_IKE => { - todo!("ESPINUDP_NON_IKE"); - } - UdpEncapTypes::ESPINUDP => { - todo!("ESPINUDP"); - } - UdpEncapTypes::L2TPINUDP => { - todo!("L2TPINUDP"); - } - UdpEncapTypes::GTP0 => { - todo!("GTP0"); - } - UdpEncapTypes::GTP1U => { - todo!("GTP1U"); - } - UdpEncapTypes::RXRPC => { - todo!("RXRPC"); - } - UdpEncapTypes::ESPINTCP => { - todo!("ESPINTCP"); - } - UdpEncapTypes::ZERO => {} - _ => { - return Err(SystemError::ENOPROTOOPT); - } - } - } - UdpSocketOptions::UDP_NO_CHECK6_TX => { - todo!("UDP_NO_CHECK6_TX"); - } - UdpSocketOptions::UDP_NO_CHECK6_RX => { - todo!("UDP_NO_CHECK6_RX"); - } - UdpSocketOptions::UDP_SEGMENT => { - todo!("UDP_SEGMENT"); - } - UdpSocketOptions::UDP_GRO => { - todo!("UDP_GRO"); - } - - UdpSocketOptions::UDPLITE_RECV_CSCOV => { - todo!("UDPLITE_RECV_CSCOV"); - } - UdpSocketOptions::UDPLITE_SEND_CSCOV => { - todo!("UDPLITE_SEND_CSCOV"); - 
} - - UdpSocketOptions::ZERO => {} - _ => { - return Err(SystemError::ENOPROTOOPT); - } - } - return Ok(()); - } -} - -impl Socket for UdpSocket { - fn posix_item(&self) -> Arc { - self.posix_item.clone() - } - - fn close(&mut self) { - let mut socket_set_guard = SOCKET_SET.lock_irqsave(); - if let smoltcp::socket::Socket::Udp(mut sock) = - socket_set_guard.remove(self.handle.smoltcp_handle().unwrap()) - { - sock.close(); - } - drop(socket_set_guard); - poll_ifaces(); - } - - /// @brief 在read函数执行之前,请先bind到本地的指定端口 - fn read(&self, buf: &mut [u8]) -> (Result, Endpoint) { - loop { - // debug!("Wait22 to Read"); - poll_ifaces(); - let mut socket_set_guard = SOCKET_SET.lock_irqsave(); - let socket = - socket_set_guard.get_mut::(self.handle.smoltcp_handle().unwrap()); - - // debug!("Wait to Read"); - - if socket.can_recv() { - if let Ok((size, metadata)) = socket.recv_slice(buf) { - drop(socket_set_guard); - poll_ifaces(); - return (Ok(size), Endpoint::Ip(Some(metadata.endpoint))); - } - } else { - // 如果socket没有连接,则忙等 - // return (Err(SystemError::ENOTCONN), Endpoint::Ip(None)); - } - drop(socket_set_guard); - self.posix_item.sleep(EPollEventType::EPOLLIN.bits() as u64); - } - } - - fn write(&self, buf: &[u8], to: Option) -> Result { - // debug!("udp to send: {:?}, len={}", to, buf.len()); - let remote_endpoint: &wire::IpEndpoint = { - if let Some(Endpoint::Ip(Some(ref endpoint))) = to { - endpoint - } else if let Some(Endpoint::Ip(Some(ref endpoint))) = self.remote_endpoint { - endpoint - } else { - return Err(SystemError::ENOTCONN); - } - }; - // debug!("udp write: remote = {:?}", remote_endpoint); - - let mut socket_set_guard = SOCKET_SET.lock_irqsave(); - let socket = socket_set_guard.get_mut::(self.handle.smoltcp_handle().unwrap()); - // debug!("is open()={}", socket.is_open()); - // debug!("socket endpoint={:?}", socket.endpoint()); - if socket.can_send() { - // debug!("udp write: can send"); - match socket.send_slice(buf, *remote_endpoint) { - Ok(()) => { - // debug!("udp write: send ok"); - drop(socket_set_guard); - poll_ifaces(); - return Ok(buf.len()); - } - Err(_) => { - // debug!("udp write: send err"); - return Err(SystemError::ENOBUFS); - } - } - } else { - // debug!("udp write: can not send"); - return Err(SystemError::ENOBUFS); - }; - } - - fn bind(&mut self, endpoint: Endpoint) -> Result<(), SystemError> { - let mut sockets = SOCKET_SET.lock_irqsave(); - let socket = sockets.get_mut::(self.handle.smoltcp_handle().unwrap()); - // debug!("UDP Bind to {:?}", endpoint); - return self.do_bind(socket, endpoint); - } - - fn poll(&self) -> EPollEventType { - let sockets = SOCKET_SET.lock_irqsave(); - let socket = sockets.get::(self.handle.smoltcp_handle().unwrap()); - - return SocketPollMethod::udp_poll( - socket, - HANDLE_MAP - .read_irqsave() - .get(&self.socket_handle()) - .unwrap() - .shutdown_type(), - ); - } - - fn connect(&mut self, endpoint: Endpoint) -> Result<(), SystemError> { - if let Endpoint::Ip(_) = endpoint { - self.remote_endpoint = Some(endpoint); - Ok(()) - } else { - Err(SystemError::EINVAL) - } - } - - fn ioctl( - &self, - _cmd: usize, - _arg0: usize, - _arg1: usize, - _arg2: usize, - ) -> Result { - todo!() - } - - fn metadata(&self) -> SocketMetadata { - self.metadata.clone() - } - - fn box_clone(&self) -> Box { - return Box::new(self.clone()); - } - - fn setsockopt( - &self, - level: SocketOptionsLevel, - optname: usize, - optval: &[u8], - ) -> Result<(), SystemError> { - if level == SocketOptionsLevel::SOL_UDP || level == SocketOptionsLevel::SOL_UDPLITE || level 
== SocketOptionsLevel::SOL_SOCKET { - return self.udp_lib_setsockopt(level, optname, optval); - } - todo!("ip_setsockopt"); - } - - fn endpoint(&self) -> Option { - let sockets = SOCKET_SET.lock_irqsave(); - let socket = sockets.get::(self.handle.smoltcp_handle().unwrap()); - let listen_endpoint = socket.endpoint(); - - if listen_endpoint.port == 0 { - return None; - } else { - // 如果listen_endpoint的address是None,意味着“监听所有的地址”。 - // 这里假设所有的地址都是ipv4 - // TODO: 支持ipv6 - let result = wire::IpEndpoint::new( - listen_endpoint - .addr - .unwrap_or(wire::IpAddress::v4(0, 0, 0, 0)), - listen_endpoint.port, - ); - return Some(Endpoint::Ip(Some(result))); - } - } - - fn peer_endpoint(&self) -> Option { - return self.remote_endpoint.clone(); - } - - fn socket_handle(&self) -> GlobalSocketHandle { - self.handle - } - - fn as_any_ref(&self) -> &dyn core::any::Any { - self - } - - fn as_any_mut(&mut self) -> &mut dyn core::any::Any { - self - } -} - -/// @brief 表示 tcp socket -/// -/// https://man7.org/linux/man-pages/man7/tcp.7.html -#[derive(Debug, Clone)] -pub struct TcpSocket { - handles: Vec, - local_endpoint: Option, // save local endpoint for bind() - is_listening: bool, - metadata: SocketMetadata, - posix_item: Arc, -} - -impl TcpSocket { - /// 元数据的缓冲区的大小 - pub const DEFAULT_METADATA_BUF_SIZE: usize = 1024; - /// 默认的接收缓冲区的大小 receive - pub const DEFAULT_RX_BUF_SIZE: usize = 512 * 1024; - /// 默认的发送缓冲区的大小 transmiss - pub const DEFAULT_TX_BUF_SIZE: usize = 512 * 1024; - - /// TcpSocket的特殊事件,用于在事件等待队列上sleep - pub const CAN_CONNECT: u64 = 1u64 << 63; - pub const CAN_ACCPET: u64 = 1u64 << 62; - - /// @brief 创建一个tcp的socket - /// - /// @param options socket的选项 - /// - /// @return 返回创建的tcp的socket - pub fn new(options: SocketOptions) -> Self { - // 创建handles数组并把socket添加到socket集合中,并得到socket的句柄 - let handles: Vec = vec![GlobalSocketHandle::new_smoltcp_handle( - SOCKET_SET.lock_irqsave().add(Self::create_new_socket()), - )]; - - let metadata = SocketMetadata::new( - SocketType::Tcp, - Self::DEFAULT_RX_BUF_SIZE, - Self::DEFAULT_TX_BUF_SIZE, - Self::DEFAULT_METADATA_BUF_SIZE, - options, - ); - let posix_item = Arc::new(PosixSocketHandleItem::new(None)); - // debug!("when there's a new tcp socket,its'len: {}",handles.len()); - - return Self { - handles, - local_endpoint: None, - is_listening: false, - metadata, - posix_item, - }; - } - - fn do_listen( - &mut self, - socket: &mut tcp::Socket, - local_endpoint: wire::IpEndpoint, - ) -> Result<(), SystemError> { - let listen_result = if local_endpoint.addr.is_unspecified() { - socket.listen(local_endpoint.port) - } else { - socket.listen(local_endpoint) - }; - return match listen_result { - Ok(()) => { - // debug!( - // "Tcp Socket Listen on {local_endpoint}, open?:{}", - // socket.is_open() - // ); - self.is_listening = true; - - Ok(()) - } - Err(_) => Err(SystemError::EINVAL), - }; - } - - /// # create_new_socket - 创建新的TCP套接字 - /// - /// 该函数用于创建一个新的TCP套接字,并返回该套接字的引用。 - fn create_new_socket() -> tcp::Socket<'static> { - // 初始化tcp的buffer - let rx_buffer = tcp::SocketBuffer::new(vec![0; Self::DEFAULT_RX_BUF_SIZE]); - let tx_buffer = tcp::SocketBuffer::new(vec![0; Self::DEFAULT_TX_BUF_SIZE]); - tcp::Socket::new(rx_buffer, tx_buffer) - } - - /// listening状态的posix socket是需要特殊处理的 - fn tcp_poll_listening(&self) -> EPollEventType { - let socketset_guard = SOCKET_SET.lock_irqsave(); - - let can_accept = self.handles.iter().any(|h| { - if let Some(sh) = h.smoltcp_handle() { - let socket = socketset_guard.get::(sh); - socket.is_active() - } else { - false - } - }); - - if 
can_accept { - return EPollEventType::EPOLL_LISTEN_CAN_ACCEPT; - } else { - return EPollEventType::empty(); - } - } - - fn sk_setsockopt( - &self, - _socket: &mut tcp::Socket, - _level: SocketOptionsLevel, - optname: PosixSocketOption, - _optval: &[u8], - ) -> Result<(), SystemError> { - use PosixSocketOption::*; - use SystemError::*; - - debug!("[SYS] [TCP] [setsockopt: {:?}]", optname); - - if optname == SO_BINDTODEVICE { - todo!("SO_BINDTODEVICE"); - } - - match optname { - SO_DEBUG => { - todo!("SO_DEBUG"); - } - SO_REUSEADDR => { - return Ok(()); - } - SO_REUSEPORT => { - todo!("SO_REUSEPORT"); - } - SO_TYPE => {} - SO_PROTOCOL => {} - SO_DOMAIN => {} - SO_ERROR => { - return Err(ENOPROTOOPT); - } - SO_DONTROUTE => { - todo!("SO_DONTROUTE"); - } - SO_BROADCAST => { - todo!("SO_BROADCAST"); - } - SO_SNDBUF => { - todo!("SO_SNDBUF"); - } - SO_SNDBUFFORCE => { - todo!("SO_SNDBUFFORCE"); - } - SO_RCVBUF => { - todo!("SO_RCVBUF"); - } - SO_RCVBUFFORCE => { - todo!("SO_RCVBUFFORCE"); - } - SO_KEEPALIVE => { - todo!("SO_KEEPALIVE"); - } - SO_OOBINLINE => { - todo!("SO_OOBINLINE"); - } - SO_NO_CHECK => { - todo!("SO_NO_CHECK"); - } - SO_PRIORITY => { - todo!("SO_PRIORITY"); - } - SO_LINGER => { - todo!("SO_LINGER"); - } - SO_BSDCOMPAT => { - todo!("SO_BSDCOMPAT"); - } - SO_PASSCRED => { - todo!("SO_PASSCRED"); - } - SO_PASSPIDFD => { - todo!("SO_PASSPIDFD"); - } - SO_TIMESTAMP_OLD => {} - SO_TIMESTAMP_NEW => {} - SO_TIMESTAMPNS_OLD => {} - SO_TIMESTAMPNS_NEW => { - todo!("SO_TIMESTAMPNS_NEW"); - } - SO_TIMESTAMPING_OLD => {} - SO_TIMESTAMPING_NEW => { - todo!("SO_TIMESTAMPING_NEW"); - } - SO_RCVLOWAT => { - todo!("SO_RCVLOWAT"); - } - SO_RCVTIMEO_OLD => {} - SO_RCVTIMEO_NEW => { - todo!("SO_RCVTIMEO_NEW"); - } - SO_SNDTIMEO_OLD => {} - SO_SNDTIMEO_NEW => { - todo!("SO_SNDTIMEO_NEW"); - } - SO_ATTACH_FILTER => { - todo!("SO_ATTACH_FILTER"); - } - SO_ATTACH_BPF => { - todo!("SO_ATTACH_BPF"); - } - SO_ATTACH_REUSEPORT_CBPF => { - todo!("SO_ATTACH_REUSEPORT_CBPF"); - } - SO_ATTACH_REUSEPORT_EBPF => { - todo!("SO_ATTACH_REUSEPORT_EBPF"); - } - SO_DETACH_REUSEPORT_BPF => { - todo!("SO_DETACH_REUSEPORT_BPF"); - } - SO_DETACH_FILTER => { - todo!("SO_DETACH_FILTER"); - } - SO_LOCK_FILTER => { - todo!("SO_LOCK_FILTER"); - } - SO_PASSSEC => { - todo!("SO_PASSSEC"); - } - SO_MARK => { - todo!("SO_MARK"); - } - SO_RCVMARK => { - todo!("SO_RCVMARK"); - } - SO_RXQ_OVFL => { - todo!("SO_RXQ_OVFL"); - } - SO_WIFI_STATUS => { - todo!("SO_WIFI_STATUS"); - } - SO_PEEK_OFF => { - todo!("SO_PEEK_OFF"); - } - SO_NOFCS => { - todo!("SO_NOFCS"); - } - SO_SELECT_ERR_QUEUE => { - todo!("SO_SELECT_ERR_QUEUE"); - } - // if define CONFIG_NET_RX_BUSY_POLL - SO_BUSY_POLL => { - todo!("SO_BUSY_POLL"); - } - SO_PREFER_BUSY_POLL => { - todo!("SO_PREFER_BUSY_POLL"); - } - SO_BUSY_POLL_BUDGET => { - todo!("SO_BUSY_POLL_BUDGET"); - } - // end if - SO_MAX_PACING_RATE => { - todo!("SO_MAX_PACING_RATE"); - } - SO_INCOMING_CPU => { - todo!("SO_INCOMING_CPU"); - } - SO_CNX_ADVICE => { - todo!("SO_CNX_ADVICE"); - } - SO_ZEROCOPY => { - todo!("SO_ZEROCOPY"); - } - SO_TXTIME => { - todo!("SO_TXTIME"); - } - SO_BINDTOIFINDEX => { - todo!("SO_BINDTOIFINDEX"); - } - SO_BUF_LOCK => { - todo!("SO_BUF_LOCK"); - } - SO_RESERVE_MEM => { - todo!("SO_RESERVE_MEM"); - } - SO_TXREHASH => { - todo!("SO_TXREHASH"); - } - _ => { - return Err(ENOPROTOOPT); - } - } - return Err(ENOPROTOOPT); - } - - fn do_tcp_setsockopt( - &self, - socket: &mut tcp::Socket, - _level: SocketOptionsLevel, - optname: usize, - optval: &[u8], - ) -> Result<(), SystemError> { 
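// Added sketch (not in the diff): in the removed implementation below, boolean
// options (TCP_QUICKACK, TCP_NODELAY) are derived from the first byte of `optval`,
// while TCP_USER_TIMEOUT and TCP_KEEPINTVL carry a 4-byte native-endian millisecond
// value that is handed to smoltcp's `set_timeout` / `set_keep_alive`. A distilled
// version of that value parsing (`parse_millis` is an illustrative helper name,
// not project API):
fn parse_millis(optval: &[u8]) -> Result<smoltcp::time::Duration, SystemError> {
    // setsockopt passes the option value as raw bytes; reject anything that is
    // not exactly a 32-bit integer.
    let raw = u32::from_ne_bytes(optval.try_into().map_err(|_| SystemError::EINVAL)?);
    Ok(smoltcp::time::Duration::from_millis(raw as u64))
}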
- - let boolval = optval[0] != 0; - - match TcpOptions::from_bits_truncate(optname as u32) { - TcpOptions::TCP_CONGESTION => { - todo!("TCP_CONGESTION"); - } - TcpOptions::TCP_QUICKACK => { - if boolval { - socket.set_ack_delay(None); - } else { - socket.set_ack_delay(Some(smoltcp::time::Duration::from_millis(10))); - } - } - TcpOptions::TCP_NODELAY => { - socket.set_nagle_enabled(boolval); - } - TcpOptions::TCP_USER_TIMEOUT => { - let duration = u32::from_ne_bytes(optval.try_into().map_err(|_| SystemError::EINVAL)?) as u64; - socket.set_timeout(Some(smoltcp::time::Duration::from_millis(duration))); - } - TcpOptions::TCP_KEEPINTVL => { - let duration = u32::from_ne_bytes(optval.try_into().map_err(|_| SystemError::EINVAL)?) as u64; - socket.set_keep_alive(Some(smoltcp::time::Duration::from_millis(duration))); - } - // TcpOptions::TCP_NL - _ => { - return Err(SystemError::ENOPROTOOPT); - } - } - return Ok(()); - } - - fn do_ip_setsockopt( - &self, - _level: SocketOptionsLevel, - optname: usize, - _optval: &[u8], - ) -> Result<(), SystemError> { - debug!("ip_setsockopt: optname={}", optname); - match IpOptions::from_bits_truncate(optname as u32) { - IpOptions::IP_LOCAL_PORT_RANGE => {} - _ => {} - } - return Ok(()); - } -} - -impl Socket for TcpSocket { - fn posix_item(&self) -> Arc { - self.posix_item.clone() - } - - fn close(&mut self) { - for handle in self.handles.iter() { - { - let mut socket_set_guard = SOCKET_SET.lock_irqsave(); - let smoltcp_handle = handle.smoltcp_handle().unwrap(); - socket_set_guard - .get_mut::(smoltcp_handle) - .close(); - drop(socket_set_guard); - } - poll_ifaces(); - SOCKET_SET - .lock_irqsave() - .remove(handle.smoltcp_handle().unwrap()); - // debug!("[Socket] [TCP] Close: {:?}", handle); - } - } - - fn read(&self, buf: &mut [u8]) -> (Result, Endpoint) { - if HANDLE_MAP - .read_irqsave() - .get(&self.socket_handle()) - .unwrap() - .shutdown_type() - .contains(ShutdownType::RCV_SHUTDOWN) - { - return (Err(SystemError::ENOTCONN), Endpoint::Ip(None)); - } - // debug!("tcp socket: read, buf len={}", buf.len()); - // debug!("tcp socket:read, socket'len={}",self.handle.len()); - loop { - poll_ifaces(); - let mut socket_set_guard = SOCKET_SET.lock_irqsave(); - - let socket = socket_set_guard - .get_mut::(self.handles.first().unwrap().smoltcp_handle().unwrap()); - - // 如果socket已经关闭,返回错误 - if !socket.is_active() { - // debug!("Tcp Socket Read Error, socket is closed"); - return (Err(SystemError::ENOTCONN), Endpoint::Ip(None)); - } - - if socket.may_recv() { - match socket.recv_slice(buf) { - Ok(size) => { - if size > 0 { - let endpoint = if let Some(p) = socket.remote_endpoint() { - p - } else { - return (Err(SystemError::ENOTCONN), Endpoint::Ip(None)); - }; - - drop(socket_set_guard); - poll_ifaces(); - return (Ok(size), Endpoint::Ip(Some(endpoint))); - } - } - Err(tcp::RecvError::InvalidState) => { - warn!("Tcp Socket Read Error, InvalidState"); - return (Err(SystemError::ENOTCONN), Endpoint::Ip(None)); - } - Err(tcp::RecvError::Finished) => { - // 对端写端已关闭,我们应该关闭读端 - HANDLE_MAP - .write_irqsave() - .get_mut(&self.socket_handle()) - .unwrap() - .shutdown_type_writer() - .insert(ShutdownType::RCV_SHUTDOWN); - return (Err(SystemError::ENOTCONN), Endpoint::Ip(None)); - } - } - } else { - return (Err(SystemError::ENOTCONN), Endpoint::Ip(None)); - } - drop(socket_set_guard); - self.posix_item - .sleep((EPollEventType::EPOLLIN | EPollEventType::EPOLLHUP).bits() as u64); - } - } - - fn write(&self, buf: &[u8], _to: Option) -> Result { - if HANDLE_MAP - .read_irqsave() - 
.get(&self.socket_handle()) - .unwrap() - .shutdown_type() - .contains(ShutdownType::RCV_SHUTDOWN) - { - return Err(SystemError::ENOTCONN); - } - // debug!("tcp socket:write, socket'len={}",self.handle.len()); - - let mut socket_set_guard = SOCKET_SET.lock_irqsave(); - - let socket = socket_set_guard - .get_mut::(self.handles.first().unwrap().smoltcp_handle().unwrap()); - - if socket.is_open() { - if socket.can_send() { - match socket.send_slice(buf) { - Ok(size) => { - drop(socket_set_guard); - poll_ifaces(); - return Ok(size); - } - Err(e) => { - error!("Tcp Socket Write Error {e:?}"); - return Err(SystemError::ENOBUFS); - } - } - } else { - return Err(SystemError::ENOBUFS); - } - } - - return Err(SystemError::ENOTCONN); - } - - fn poll(&self) -> EPollEventType { - // 处理listen的快速路径 - if self.is_listening { - return self.tcp_poll_listening(); - } - // 由于上面处理了listening状态,所以这里只处理非listening状态,这种情况下只有一个handle - - assert!(self.handles.len() == 1); - - let mut socket_set_guard = SOCKET_SET.lock_irqsave(); - // debug!("tcp socket:poll, socket'len={}",self.handle.len()); - - let socket = socket_set_guard - .get_mut::(self.handles.first().unwrap().smoltcp_handle().unwrap()); - let handle_map_guard = HANDLE_MAP.read_irqsave(); - let handle_item = handle_map_guard.get(&self.socket_handle()).unwrap(); - let shutdown_type = handle_item.shutdown_type(); - let is_posix_listen = handle_item.is_posix_listen; - drop(handle_map_guard); - - return SocketPollMethod::tcp_poll(socket, shutdown_type, is_posix_listen); - } - - fn connect(&mut self, endpoint: Endpoint) -> Result<(), SystemError> { - let mut sockets = SOCKET_SET.lock_irqsave(); - // debug!("tcp socket:connect, socket'len={}", self.handles.len()); - - let socket = - sockets.get_mut::(self.handles.first().unwrap().smoltcp_handle().unwrap()); - - if let Endpoint::Ip(Some(ip)) = endpoint { - let temp_port = PORT_MANAGER.get_ephemeral_port(self.metadata.socket_type)?; - // 检测端口是否被占用 - PORT_MANAGER.bind_port(self.metadata.socket_type, temp_port)?; - - // debug!("temp_port: {}", temp_port); - let iface: Arc = NET_DEVICES.write_irqsave().get(&0).unwrap().clone(); - let mut inner_iface = iface.inner_iface().lock(); - // debug!("to connect: {ip:?}"); - - match socket.connect(inner_iface.context(), ip, temp_port) { - Ok(()) => { - // avoid deadlock - drop(inner_iface); - drop(iface); - drop(sockets); - loop { - poll_ifaces(); - let mut sockets = SOCKET_SET.lock_irqsave(); - let socket = sockets.get_mut::( - self.handles.first().unwrap().smoltcp_handle().unwrap(), - ); - - match socket.state() { - tcp::State::Established => { - return Ok(()); - } - tcp::State::SynSent => { - drop(sockets); - self.posix_item.sleep(Self::CAN_CONNECT); - } - _ => { - return Err(SystemError::ECONNREFUSED); - } - } - } - } - Err(e) => { - // error!("Tcp Socket Connect Error {e:?}"); - match e { - tcp::ConnectError::InvalidState => return Err(SystemError::EISCONN), - tcp::ConnectError::Unaddressable => return Err(SystemError::EADDRNOTAVAIL), - } - } - } - } else { - return Err(SystemError::EINVAL); - } - } - - /// @brief tcp socket 监听 local_endpoint 端口 - /// - /// @param backlog 未处理的连接队列的最大长度 - fn listen(&mut self, backlog: usize) -> Result<(), SystemError> { - if self.is_listening { - return Ok(()); - } - - // debug!( - // "tcp socket:listen, socket'len={}, backlog = {backlog}", - // self.handles.len() - // ); - - let local_endpoint = self.local_endpoint.ok_or(SystemError::EINVAL)?; - let mut sockets = SOCKET_SET.lock_irqsave(); - // 获取handle的数量 - let handlen = self.handles.len(); 
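// Added note (not in the diff): a smoltcp TCP socket only tracks a single
// connection, so the removed listen() below emulates a backlog by keeping up to
// `backlog` sockets (handles) listening on the same local endpoint; accept() then
// returns whichever handle has become active and replaces that slot with a fresh
// listening socket.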
- let backlog = handlen.max(backlog); - - // 添加剩余需要构建的socket - // debug!("tcp socket:before listen, socket'len={}", self.handle_list.len()); - let mut handle_guard = HANDLE_MAP.write_irqsave(); - let socket_handle_item_0 = handle_guard.get_mut(&self.socket_handle()).unwrap(); - socket_handle_item_0.is_posix_listen = true; - - self.handles.extend((handlen..backlog).map(|_| { - let socket = Self::create_new_socket(); - let handle = GlobalSocketHandle::new_smoltcp_handle(sockets.add(socket)); - let mut handle_item = SocketHandleItem::new(Arc::downgrade(&self.posix_item)); - handle_item.is_posix_listen = true; - handle_guard.insert(handle, handle_item); - handle - })); - - // debug!("tcp socket:listen, socket'len={}", self.handles.len()); - // debug!("tcp socket:listen, backlog={backlog}"); - - // 监听所有的socket - for i in 0..backlog { - let handle = self.handles.get(i).unwrap(); - - let socket = sockets.get_mut::(handle.smoltcp_handle().unwrap()); - - if !socket.is_listening() { - // debug!("Tcp Socket is already listening on {local_endpoint}"); - self.do_listen(socket, local_endpoint)?; - } - // debug!("Tcp Socket before listen, open={}", socket.is_open()); - } - - return Ok(()); - } - - fn bind(&mut self, endpoint: Endpoint) -> Result<(), SystemError> { - if let Endpoint::Ip(Some(mut ip)) = endpoint { - if ip.port == 0 { - ip.port = PORT_MANAGER.get_ephemeral_port(self.metadata.socket_type)?; - } - - // 检测端口是否已被占用 - PORT_MANAGER.bind_port(self.metadata.socket_type, ip.port)?; - // debug!("tcp socket:bind, socket'len={}",self.handle.len()); - - self.local_endpoint = Some(ip); - self.is_listening = false; - - return Ok(()); - } - return Err(SystemError::EINVAL); - } - - fn shutdown(&mut self, shutdown_type: super::ShutdownType) -> Result<(), SystemError> { - // TODO:目前只是在表层判断,对端不知晓,后续需使用tcp实现 - HANDLE_MAP - .write_irqsave() - .get_mut(&self.socket_handle()) - .unwrap() - .shutdown_type = RwLock::new(shutdown_type); - return Ok(()); - } - - fn accept(&mut self) -> Result<(Box, Endpoint), SystemError> { - if !self.is_listening { - return Err(SystemError::EINVAL); - } - let endpoint = self.local_endpoint.ok_or(SystemError::EINVAL)?; - loop { - // debug!("tcp accept: poll_ifaces()"); - poll_ifaces(); - // debug!("tcp socket:accept, socket'len={}", self.handle_list.len()); - - let mut sockset = SOCKET_SET.lock_irqsave(); - // Get the corresponding activated handler - let global_handle_index = self.handles.iter().position(|handle| { - let con_smol_sock = sockset.get::(handle.smoltcp_handle().unwrap()); - con_smol_sock.is_active() - }); - - if let Some(handle_index) = global_handle_index { - let con_smol_sock = sockset - .get::(self.handles[handle_index].smoltcp_handle().unwrap()); - - // debug!("[Socket] [TCP] Accept: {:?}", handle); - // handle is connected socket's handle - let remote_ep = con_smol_sock - .remote_endpoint() - .ok_or(SystemError::ENOTCONN)?; - - let tcp_socket = Self::create_new_socket(); - - let new_handle = GlobalSocketHandle::new_smoltcp_handle(sockset.add(tcp_socket)); - - // let handle in TcpSock be the new empty handle, and return the old connected handle - let old_handle = core::mem::replace(&mut self.handles[handle_index], new_handle); - - let metadata = SocketMetadata::new( - SocketType::Tcp, - Self::DEFAULT_TX_BUF_SIZE, - Self::DEFAULT_RX_BUF_SIZE, - Self::DEFAULT_METADATA_BUF_SIZE, - self.metadata.options, - ); - - let sock_ret = Box::new(TcpSocket { - handles: vec![old_handle], - local_endpoint: self.local_endpoint, - is_listening: false, - metadata, - posix_item: 
Arc::new(PosixSocketHandleItem::new(None)), - }); - - { - let mut handle_guard = HANDLE_MAP.write_irqsave(); - // 先删除原来的 - let item = handle_guard.remove(&old_handle).unwrap(); - item.reset_shutdown_type(); - assert!(item.is_posix_listen); - - // 按照smoltcp行为,将新的handle绑定到原来的item - let new_item = SocketHandleItem::new(Arc::downgrade(&sock_ret.posix_item)); - handle_guard.insert(old_handle, new_item); - // 插入新的item - handle_guard.insert(new_handle, item); - - let socket = sockset.get_mut::( - self.handles[handle_index].smoltcp_handle().unwrap(), - ); - - if !socket.is_listening() { - self.do_listen(socket, endpoint)?; - } - - drop(handle_guard); - } - - return Ok((sock_ret, Endpoint::Ip(Some(remote_ep)))); - } - - drop(sockset); - - // debug!("[TCP] [Accept] sleeping socket with handle: {:?}", self.handles.first().unwrap().smoltcp_handle().unwrap()); - self.posix_item.sleep(Self::CAN_ACCPET); - // debug!("tcp socket:after sleep, handle_guard'len={}",HANDLE_MAP.write_irqsave().len()); - } - } - - fn endpoint(&self) -> Option { - let mut result: Option = self.local_endpoint.map(|x| Endpoint::Ip(Some(x))); - - if result.is_none() { - let sockets = SOCKET_SET.lock_irqsave(); - // debug!("tcp socket:endpoint, socket'len={}",self.handle.len()); - - let socket = - sockets.get::(self.handles.first().unwrap().smoltcp_handle().unwrap()); - if let Some(ep) = socket.local_endpoint() { - result = Some(Endpoint::Ip(Some(ep))); - } - } - return result; - } - - fn peer_endpoint(&self) -> Option { - let sockets = SOCKET_SET.lock_irqsave(); - // debug!("tcp socket:peer_endpoint, socket'len={}",self.handle.len()); - - let socket = - sockets.get::(self.handles.first().unwrap().smoltcp_handle().unwrap()); - return socket.remote_endpoint().map(|x| Endpoint::Ip(Some(x))); - } - - fn metadata(&self) -> SocketMetadata { - self.metadata.clone() - } - - fn box_clone(&self) -> Box { - Box::new(self.clone()) - } - - fn setsockopt( - &self, - level: SocketOptionsLevel, - optname: usize, - optval: &[u8], - ) -> Result<(), SystemError> { - - let mut socket_set_guard = SOCKET_SET.lock_irqsave(); - let socket = socket_set_guard.get_mut::(self.handles[0].smoltcp_handle().unwrap()); - - if level == SocketOptionsLevel::SOL_SOCKET { - return self.sk_setsockopt(socket, level, PosixSocketOption::try_from(optname as i32)?, optval); - } - - if level != SocketOptionsLevel::SOL_TCP { - return self.do_ip_setsockopt(level, optname, optval); - } - return self.do_tcp_setsockopt(socket, level, optname, optval); - } - - fn socket_handle(&self) -> GlobalSocketHandle { - // debug!("tcp socket:socket_handle, socket'len={}",self.handle.len()); - - *self.handles.first().unwrap() - } - - fn as_any_ref(&self) -> &dyn core::any::Any { - self - } - - fn as_any_mut(&mut self) -> &mut dyn core::any::Any { - self - } -} diff --git a/kernel/src/net/socket/inet/common/mod.rs b/kernel/src/net/socket/inet/common/mod.rs new file mode 100644 index 000000000..f9170508c --- /dev/null +++ b/kernel/src/net/socket/inet/common/mod.rs @@ -0,0 +1,110 @@ +use system_error::SystemError::{self, *}; +use alloc::sync::Arc; +use crate::net::{Iface, NET_DEVICES}; + +pub mod port; +pub use port::PortManager; + +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum Types { + Raw, + Icmp, + Udp, + Tcp, + Dhcpv4, + Dns, +} + +#[derive(Debug)] +pub struct BoundInner { + handle: smoltcp::iface::SocketHandle, + iface: Arc, + // address: smoltcp::wire::IpAddress, +} + +impl BoundInner { + /// # `bind` + /// 将socket绑定到指定的地址上,置入指定的网络接口中 + pub fn bind( + socket: T, + // socket_type: 
Types, + address: &smoltcp::wire::IpAddress + ) -> Result + where + T: smoltcp::socket::AnySocket<'static> + { + let iface = get_iface_to_bind(address).ok_or(ENODEV)?; + let handle = iface.sockets().lock_no_preempt().add(socket); + Ok( Self { handle, iface } ) + } + + pub fn bind_ephemeral( + socket: T, + // socket_type: Types, + remote: smoltcp::wire::IpAddress + ) -> Result<(Self, smoltcp::wire::IpAddress), SystemError> + where + T: smoltcp::socket::AnySocket<'static> + { + let (iface, address) = get_ephemeral_iface(&remote); + // let bound_port = iface.port_manager().bind_ephemeral_port(socket_type)?; + let handle = iface.sockets().lock_no_preempt().add(socket); + // let endpoint = smoltcp::wire::IpEndpoint::new(local_addr, bound_port); + Ok( (Self { handle, iface }, address) ) + } + + pub fn port_manager(&self) -> &PortManager { + self.iface.port_manager() + } + + pub fn with_mut, R, F: FnMut(&mut T) -> R>(&self, mut f: F) -> R { + f(self.iface.sockets().lock().get_mut::(self.handle)) + } + + pub fn with, R, F: Fn(&T) -> R>(&self, f: F) -> R { + f(self.iface.sockets().lock().get::(self.handle)) + } + + pub fn iface(&self) -> &Arc { + &self.iface + } + + pub fn release(&self) { + self.iface.sockets().lock_no_preempt().remove(self.handle); + } +} + +#[inline] +pub fn get_iface_to_bind(ip_addr: &smoltcp::wire::IpAddress) -> Option> { + crate::net::NET_DEVICES + .read_irqsave() + .iter() + .find(|(_, iface)| { + iface.smol_iface().lock().has_ip_addr(*ip_addr) + }) + .map(|(_, iface)| iface.clone()) +} + +/// Get a suitable iface to deal with sendto/connect request if the socket is not bound to an iface. +/// If the remote address is the same as that of some iface, we will use the iface. +/// Otherwise, we will use a default interface. +fn get_ephemeral_iface(remote_ip_addr: &smoltcp::wire::IpAddress) -> (Arc, smoltcp::wire::IpAddress) { + get_iface_to_bind(remote_ip_addr) + .map(|iface| (iface, remote_ip_addr.clone())) + .or({ + let ifaces = NET_DEVICES.read_irqsave(); + ifaces + .iter() + .find_map(|(_, iface)| { + iface.smol_iface().lock().ip_addrs().iter().find(|cidr| { + cidr.contains_addr(remote_ip_addr) + }) + .map(|cidr| (iface.clone(), cidr.address())) + }) + }) + .or({ + NET_DEVICES.read_irqsave().values().next() + .map(|iface| (iface.clone(), iface.smol_iface().lock().ip_addrs()[0].address())) + }) + .expect("No network interface") +} diff --git a/kernel/src/net/socket/inet/common/port.rs b/kernel/src/net/socket/inet/common/port.rs new file mode 100644 index 000000000..25f8f5734 --- /dev/null +++ b/kernel/src/net/socket/inet/common/port.rs @@ -0,0 +1,106 @@ +use hashbrown::HashMap; +use system_error::SystemError; + +use crate::{arch::rand::rand, libs::spinlock::SpinLock, process::{Pid, ProcessManager}}; + +use super::Types::{self, *}; + +/// # TCP 和 UDP 的端口管理器。 +/// 如果 TCP/UDP 的 socket 绑定了某个端口,它会在对应的表中记录,以检测端口冲突。 +#[derive(Debug)] +pub struct PortManager { + // TCP 端口记录表 + tcp_port_table: SpinLock>, + // UDP 端口记录表 + udp_port_table: SpinLock>, +} + +impl PortManager { + pub fn new() -> Self { + return Self { + tcp_port_table: SpinLock::new(HashMap::new()), + udp_port_table: SpinLock::new(HashMap::new()), + }; + } + + /// @brief 自动分配一个相对应协议中未被使用的PORT,如果动态端口均已被占用,返回错误码 EADDRINUSE + pub fn get_ephemeral_port(&self, socket_type: Types) -> Result { + // TODO: selects non-conflict high port + + static mut EPHEMERAL_PORT: u16 = 0; + unsafe { + if EPHEMERAL_PORT == 0 { + EPHEMERAL_PORT = (49152 + rand() % (65536 - 49152)) as u16; + } + } + + let mut remaining = 65536 - 49152; // 
剩余尝试分配端口次数 + let mut port: u16; + while remaining > 0 { + unsafe { + if EPHEMERAL_PORT == 65535 { + EPHEMERAL_PORT = 49152; + } else { + EPHEMERAL_PORT += 1; + } + port = EPHEMERAL_PORT; + } + + // 使用 ListenTable 检查端口是否被占用 + let listen_table_guard = match socket_type { + Udp => self.udp_port_table.lock(), + Tcp => self.tcp_port_table.lock(), + _ => panic!("{:?} cann't get a port", socket_type), + }; + if listen_table_guard.get(&port).is_none() { + drop(listen_table_guard); + return Ok(port); + } + remaining -= 1; + } + return Err(SystemError::EADDRINUSE); + } + + #[inline] + pub fn bind_ephemeral_port(&self, socket_type: Types) -> Result { + let port = self.get_ephemeral_port(socket_type)?; + self.bind_port(socket_type, port)?; + return Ok(port); + } + + /// @brief 检测给定端口是否已被占用,如果未被占用则在 TCP/UDP 对应的表中记录 + /// + /// TODO: 增加支持端口复用的逻辑 + pub fn bind_port(&self, socket_type: Types, port: u16) -> Result<(), SystemError> { + if port > 0 { + match socket_type { + Udp => { + let mut guard = self.udp_port_table.lock(); + if guard.get(&port).is_some() { + return Err(SystemError::EADDRINUSE); + } + guard.insert(port, ProcessManager::current_pid()); + }, + Tcp => { + let mut guard = self.tcp_port_table.lock(); + if guard.get(&port).is_some() { + return Err(SystemError::EADDRINUSE); + } + guard.insert(port, ProcessManager::current_pid()); + }, + _ => {}, + }; + } + return Ok(()); + } + + /// @brief 在对应的端口记录表中将端口和 socket 解绑 + /// should call this function when socket is closed or aborted + pub fn unbind_port(&self, socket_type: Types, port: u16) { + match socket_type { + Udp => {self.udp_port_table.lock().remove(&port);}, + Tcp => {self.tcp_port_table.lock().remove(&port);}, + _ => {} + }; + } +} diff --git a/kernel/src/net/socket/inet/datagram/inner.rs b/kernel/src/net/socket/inet/datagram/inner.rs new file mode 100644 index 000000000..ced8e1366 --- /dev/null +++ b/kernel/src/net/socket/inet/datagram/inner.rs @@ -0,0 +1,125 @@ +use system_error::SystemError::{self, *}; +use smoltcp; + +use crate::net::socket::inet::common::{BoundInner, Types as InetTypes}; + +pub type SmolUdpSocket = smoltcp::socket::udp::Socket<'static>; + +pub const DEFAULT_METADATA_BUF_SIZE: usize = 1024; +pub const DEFAULT_RX_BUF_SIZE: usize = 64 * 1024; +pub const DEFAULT_TX_BUF_SIZE: usize = 64 * 1024; + +#[derive(Debug)] +pub struct UnboundUdp { + socket: SmolUdpSocket, +} + +impl UnboundUdp { + pub fn new() -> Self { + let rx_buffer = smoltcp::socket::udp::PacketBuffer::new( + vec![smoltcp::socket::udp::PacketMetadata::EMPTY; DEFAULT_METADATA_BUF_SIZE], + vec![0; DEFAULT_RX_BUF_SIZE], + ); + let tx_buffer = smoltcp::socket::udp::PacketBuffer::new( + vec![smoltcp::socket::udp::PacketMetadata::EMPTY; DEFAULT_METADATA_BUF_SIZE], + vec![0; DEFAULT_TX_BUF_SIZE], + ); + let socket = SmolUdpSocket::new(rx_buffer, tx_buffer); + + return Self { socket }; + } + + pub fn bind(mut self, local_endpoint: smoltcp::wire::IpEndpoint) -> Result { + // let (addr, port) = (local_endpoint.addr, local_endpoint.port); + if self.socket.bind(local_endpoint).is_err() { + return Err(EINVAL); + } + let inner = BoundInner::bind(self.socket, &local_endpoint.addr)?; + inner.port_manager().bind_port(InetTypes::Udp, local_endpoint.port)?; + Ok( BoundUdp { + inner, + remote: None, + }) + } + + pub fn bind_ephemeral(self, remote: smoltcp::wire::IpAddress) -> Result { + // let (addr, port) = (remote.addr, remote.port); + let (inner, address) = BoundInner::bind_ephemeral(self.socket, remote)?; + let bound_port = 
inner.port_manager().bind_ephemeral_port(InetTypes::Udp)?; + let endpoint = smoltcp::wire::IpEndpoint::new(address, bound_port); + Ok( BoundUdp { + inner, + remote: Some(endpoint), + }) + } + + pub fn close(&mut self) { + self.socket.close(); + } +} + +#[derive(Debug)] +pub struct BoundUdp { + inner: BoundInner, + remote: Option, +} + +impl BoundUdp { + pub fn with_mut_socket(&self, f: F) -> T + where + F: FnMut(&mut SmolUdpSocket) -> T, + { + self.inner.with_mut(f) + } + + pub fn with_socket(&self, f: F) -> T + where + F: Fn(&SmolUdpSocket) -> T, + { + self.inner.with(f) + } + + pub fn endpoint(&self) -> smoltcp::wire::IpListenEndpoint { + self.inner.with::(|socket| { + socket.endpoint() + }) + } + + #[inline] + pub fn try_recv(&self, buf: &mut [u8]) -> Result<(usize, smoltcp::wire::IpEndpoint), SystemError> { + self.with_mut_socket(|socket| { + if socket.can_recv() { + if let Ok((size, metadata)) = socket.recv_slice(buf) { + return Ok((size, metadata.endpoint)); + } + } + return Err(EAGAIN_OR_EWOULDBLOCK); + }) + } + + pub fn try_send(&self, buf: &[u8], to: Option) -> Result { + let remote = to.or(self.remote).ok_or(ENOTCONN)?; + + let result = self.with_mut_socket(|socket| { + if socket.can_send() && socket.send_slice(buf, remote).is_ok() { + return Ok(buf.len()); + } + return Err(ENOBUFS); + }); + return result; + } + + pub fn close(&self) { + self.inner.iface().port_manager().unbind_port(InetTypes::Udp, self.endpoint().port); + self.with_mut_socket(|socket|{ + socket.close(); + }); + } +} + +// Udp Inner 负责其内部资源管理 +#[derive(Debug)] +pub enum UdpInner { + Unbound(UnboundUdp), + Bound(BoundUdp), +} \ No newline at end of file diff --git a/kernel/src/net/socket/inet/datagram/mod.rs b/kernel/src/net/socket/inet/datagram/mod.rs new file mode 100644 index 000000000..51be1b0ef --- /dev/null +++ b/kernel/src/net/socket/inet/datagram/mod.rs @@ -0,0 +1,382 @@ +use system_error::SystemError::{self, *}; +use smoltcp; + +use core::sync::atomic::AtomicBool; +use crate::filesystem::vfs::IndexNode; +use crate::net::event_poll::EPollEventType; +use crate::libs::rwlock::RwLock; +use crate::net::net_core::poll_ifaces; +use crate::net::socket::common::poll_unit::{WaitQueue, EPollItems}; +use crate::net::socket::Socket; +use alloc::sync::{Arc, Weak}; + +pub type SmolUdpSocket = smoltcp::socket::udp::Socket<'static>; + +pub mod inner; + +use inner::*; + +type EP = EPollEventType; + +// Udp Socket 负责提供状态切换接口、执行状态切换 +#[derive(Debug)] +pub struct UdpSocket { + inner: RwLock>, + nonblock: AtomicBool, + epoll_items: EPollItems, + wait_queue: WaitQueue, + self_ref: Weak, +} + +impl UdpSocket { + pub fn new(nonblock: bool) -> Arc { + return Arc::new_cyclic(|me| Self { + inner: RwLock::new(Some(UdpInner::Unbound(UnboundUdp::new()))), + nonblock: AtomicBool::new(nonblock), + wait_queue: WaitQueue::default(), + epoll_items: EPollItems::new(), + self_ref: me.clone(), + }); + } + + pub fn is_nonblock(&self) -> bool { + self.nonblock.load(core::sync::atomic::Ordering::Relaxed) + } + + pub fn do_bind(&self, local_endpoint: smoltcp::wire::IpEndpoint) -> Result<(), SystemError> { + let mut inner = self.inner.write(); + if let Some(UdpInner::Unbound(unbound)) = inner.take() { + let bound = unbound.bind(local_endpoint)?; + *inner = Some(UdpInner::Bound(bound)); + return Ok(()); + } + return Err(EINVAL); + } + + pub fn bind_emphemeral(&self, remote: smoltcp::wire::IpAddress) -> Result<(), SystemError> { + let mut inner_guard = self.inner.write(); + if let Some(UdpInner::Unbound(unbound)) = inner_guard.take() { + let bound = 
unbound.bind_ephemeral(remote)?; + inner_guard.replace(UdpInner::Bound(bound)); + return Ok(()); + } + return Err(EINVAL); + } + + pub fn is_bound(&self) -> bool { + let inner = self.inner.read(); + if let Some(UdpInner::Bound(_)) = &*inner { + return true; + } + return false; + } + + pub fn close(&self) { + let mut inner = self.inner.write(); + if let Some(UdpInner::Bound(bound)) = &mut *inner { + bound.close(); + inner.take(); + } + } + + pub fn try_recv(&self, buf: &mut [u8]) + -> Result<(usize, smoltcp::wire::IpEndpoint), SystemError> + { + let received + = match self.inner.read().as_ref().expect("Udp Inner is None") { + UdpInner::Bound(bound) => { + bound.try_recv(buf) + } + _ => Err(ENOTCONN) + }; + + poll_ifaces(); + + return received; + } + + pub fn try_send(&self, buf: &[u8], to: Option) + -> Result + { + { + let mut inner_guard = self.inner.write(); + let inner = match inner_guard.take().expect("Udp Inner is None") { + UdpInner::Bound(bound) => bound, + UdpInner::Unbound(unbound) => { + unbound.bind_ephemeral(to.ok_or(EADDRNOTAVAIL)?.addr)? + } + }; + // size = inner.try_send(buf, to)?; + inner_guard.replace(UdpInner::Bound(inner)); + }; + // Optimize: 拿两次锁的平均效率是否比一次长时间的读锁效率要高? + let result + = match self.inner.read().as_ref().expect("Udp Inner is None") { + UdpInner::Bound(bound) => bound.try_send(buf, to), + _ => Err(ENOTCONN), + }; + poll_ifaces(); + return result; + } + + pub fn read(&self, buf: &mut [u8]) -> Result { + if self.is_nonblock() { + return self.try_recv(buf).map(|(size, _)| size); + } else { + return self.poll_unit().busy_wait(EP::EPOLLIN, + || self.try_recv(buf).map(|(size, _)| size) + ); + } + } + + pub fn on_events(&self) -> EPollEventType { + let mut event = EPollEventType::empty(); + match self.inner.read().as_ref().unwrap() { + UdpInner::Unbound(_) => { + event.insert( + EP::EPOLLOUT | EP::EPOLLWRNORM | EP::EPOLLWRBAND + ); + } + UdpInner::Bound(bound) => { + let (can_recv, can_send) = + bound.with_socket(|socket| { + (socket.can_recv(), socket.can_send()) + } + ); + + if can_recv { + event.insert( + EP::EPOLLIN | EP::EPOLLRDNORM + ); + } + + if can_send { + event.insert( + EP::EPOLLOUT | EP::EPOLLWRNORM | EP::EPOLLWRBAND + ); + } else { + todo!("缓冲区空间不够,需要使用信号处理"); + } + } + } + return event; + } +} + +impl IndexNode for UdpSocket { + fn read_at( + &self, + _offset: usize, + _len: usize, + buf: &mut [u8], + data: crate::libs::spinlock::SpinLockGuard, + ) -> Result { + drop(data); + self.read(buf) + } + + fn write_at( + &self, + _offset: usize, + _len: usize, + buf: &[u8], + _data: crate::libs::spinlock::SpinLockGuard, + ) -> Result { + self.try_send(buf, None) + } + + fn fs(&self) -> alloc::sync::Arc { + todo!() + } + + fn as_any_ref(&self) -> &dyn core::any::Any { + self + } + + fn list(&self) -> Result, SystemError> { + todo!() + } + + fn poll(&self, _private_data: &crate::filesystem::vfs::FilePrivateData) -> Result { + Ok(self.on_events().bits() as usize) + } +} + +impl Socket for UdpSocket { + fn wait_queue(&self) -> &crate::net::socket::common::poll_unit::WaitQueue { + &self.wait_queue + } + + fn epoll_items(&self) -> &crate::net::socket::common::poll_unit::EPollItems { + &self.epoll_items + } + + fn update_io_events(&self) -> Result { + todo!() + } + + fn bind(&self, local_endpoint: crate::net::Endpoint) -> Result<(), SystemError> { + match local_endpoint { + crate::net::Endpoint::Ip(local_endpoint) => { + self.do_bind(local_endpoint) + } + _ => Err(EAFNOSUPPORT), + } + } + + +} + +bitflags! 
{ + pub struct UdpSocketOptions: u32 { + const ZERO = 0; /* No UDP options */ + const UDP_CORK = 1; /* Never send partially complete segments */ + const UDP_ENCAP = 100; /* Set the socket to accept encapsulated packets */ + const UDP_NO_CHECK6_TX = 101; /* Disable sending checksum for UDP6X */ + const UDP_NO_CHECK6_RX = 102; /* Disable accepting checksum for UDP6 */ + const UDP_SEGMENT = 103; /* Set GSO segmentation size */ + const UDP_GRO = 104; /* This socket can receive UDP GRO packets */ + + const UDPLITE_SEND_CSCOV = 10; /* sender partial coverage (as sent) */ + const UDPLITE_RECV_CSCOV = 11; /* receiver partial coverage (threshold ) */ + } +} + +bitflags! { + pub struct UdpEncapTypes: u8 { + const ZERO = 0; + const ESPINUDP_NON_IKE = 1; // draft-ietf-ipsec-nat-t-ike-00/01 + const ESPINUDP = 2; // draft-ietf-ipsec-udp-encaps-06 + const L2TPINUDP = 3; // rfc2661 + const GTP0 = 4; // GSM TS 09.60 + const GTP1U = 5; // 3GPP TS 29.060 + const RXRPC = 6; + const ESPINTCP = 7; // Yikes, this is really xfrm encap types. + } +} + +// fn sock_set_option( +// &self, +// _socket: &mut udp::Socket, +// _level: SocketOptionsLevel, +// optname: PosixSocketOption, +// _optval: &[u8], +// ) -> Result<(), SystemError> { +// use PosixSocketOption::*; +// use SystemError::*; + +// if optname == SO_BINDTODEVICE { +// todo!("SO_BINDTODEVICE"); +// } + +// match optname { +// SO_TYPE => {} +// SO_PROTOCOL => {} +// SO_DOMAIN => {} +// SO_ERROR => { +// return Err(ENOPROTOOPT); +// } +// SO_TIMESTAMP_OLD => {} +// SO_TIMESTAMP_NEW => {} +// SO_TIMESTAMPNS_OLD => {} + +// SO_TIMESTAMPING_OLD => {} + +// SO_RCVTIMEO_OLD => {} + +// SO_SNDTIMEO_OLD => {} + +// // if define CONFIG_NET_RX_BUSY_POLL +// SO_BUSY_POLL | SO_PREFER_BUSY_POLL | SO_BUSY_POLL_BUDGET => { +// debug!("Unsupported socket option: {:?}", optname); +// return Err(ENOPROTOOPT); +// } +// // end if +// optname => { +// debug!("Unsupported socket option: {:?}", optname); +// return Err(ENOPROTOOPT); +// } +// } +// return Ok(()); +// } + +// fn udp_set_option( +// &self, +// level: SocketOptionsLevel, +// optname: usize, +// optval: &[u8], +// ) -> Result<(), SystemError> { +// use PosixSocketOption::*; + +// let so_opt_name = +// PosixSocketOption::try_from(optname as i32) +// .map_err(|_| SystemError::ENOPROTOOPT)?; + +// if level == SocketOptionsLevel::SOL_SOCKET { +// self.with_mut_socket(f) +// self.sock_set_option(self., level, so_opt_name, optval)?; +// if so_opt_name == SO_RCVBUF || so_opt_name == SO_RCVBUFFORCE { +// todo!("SO_RCVBUF"); +// } +// } + +// match UdpSocketOptions::from_bits_truncate(optname as u32) { +// UdpSocketOptions::UDP_CORK => { +// todo!("UDP_CORK"); +// } +// UdpSocketOptions::UDP_ENCAP => { +// match UdpEncapTypes::from_bits_truncate(optval[0]) { +// UdpEncapTypes::ESPINUDP_NON_IKE => { +// todo!("ESPINUDP_NON_IKE"); +// } +// UdpEncapTypes::ESPINUDP => { +// todo!("ESPINUDP"); +// } +// UdpEncapTypes::L2TPINUDP => { +// todo!("L2TPINUDP"); +// } +// UdpEncapTypes::GTP0 => { +// todo!("GTP0"); +// } +// UdpEncapTypes::GTP1U => { +// todo!("GTP1U"); +// } +// UdpEncapTypes::RXRPC => { +// todo!("RXRPC"); +// } +// UdpEncapTypes::ESPINTCP => { +// todo!("ESPINTCP"); +// } +// UdpEncapTypes::ZERO => {} +// _ => { +// return Err(SystemError::ENOPROTOOPT); +// } +// } +// } +// UdpSocketOptions::UDP_NO_CHECK6_TX => { +// todo!("UDP_NO_CHECK6_TX"); +// } +// UdpSocketOptions::UDP_NO_CHECK6_RX => { +// todo!("UDP_NO_CHECK6_RX"); +// } +// UdpSocketOptions::UDP_SEGMENT => { +// todo!("UDP_SEGMENT"); +// } +// 
UdpSocketOptions::UDP_GRO => { +// todo!("UDP_GRO"); +// } + +// UdpSocketOptions::UDPLITE_RECV_CSCOV => { +// todo!("UDPLITE_RECV_CSCOV"); +// } +// UdpSocketOptions::UDPLITE_SEND_CSCOV => { +// todo!("UDPLITE_SEND_CSCOV"); +// } + +// UdpSocketOptions::ZERO => {} +// _ => { +// return Err(SystemError::ENOPROTOOPT); +// } +// } +// return Ok(()); +// } diff --git a/kernel/src/net/socket/ip_def.rs b/kernel/src/net/socket/inet/ip_def.rs similarity index 100% rename from kernel/src/net/socket/ip_def.rs rename to kernel/src/net/socket/inet/ip_def.rs diff --git a/kernel/src/net/socket/inet/mod.rs b/kernel/src/net/socket/inet/mod.rs new file mode 100644 index 000000000..95574c909 --- /dev/null +++ b/kernel/src/net/socket/inet/mod.rs @@ -0,0 +1,141 @@ +use system_error::SystemError::{self, *}; +use smoltcp; +use alloc::sync::Arc; + +// pub mod raw; +// pub mod icmp; +pub mod datagram; +pub mod stream; +pub mod common; +pub mod syscall; + +pub use common::Types; +pub use common::BoundInner; +// pub use raw::RawSocket; +pub use datagram::UdpSocket; +pub use stream::TcpSocket; + +use crate::filesystem::vfs::IndexNode; + +use super::Socket; + +pub trait AnyInetSocket { + fn epoll_items(&self) -> &super::common::poll_unit::EPollItems; + fn wait_queue(&self) -> &super::common::poll_unit::WaitQueue; + /// `on_iface_events` + /// 通知socket发生的事件 + fn on_iface_events(&self); + fn do_bind(&self, endpoint: smoltcp::wire::IpEndpoint) -> Result<(), SystemError>; +} + +#[derive(Debug)] +pub enum InetSocket { + // Raw(RawSocket), + Udp(UdpSocket), + Tcp(TcpSocket), +} + +impl InetSocket { + pub fn on_iface_events(&self) { + todo!() + } +} + +// impl IndexNode for InetSocket { + +// } + +// impl Socket for InetSocket { +// fn epoll_items(&self) -> &super::common::poll_unit::EPollItems { +// match self { +// InetSocket::Udp(udp) => udp.epoll_items(), +// InetSocket::Tcp(tcp) => tcp.epoll_items(), +// } +// } + +// fn bind(&self, endpoint: crate::net::Endpoint) -> Result<(), SystemError> { +// if let crate::net::Endpoint::Ip(ip) = endpoint { +// match self { +// InetSocket::Udp(udp) => { +// udp.do_bind(ip)?; +// }, +// InetSocket::Tcp(tcp) => { +// tcp.do_bind(ip)?; +// }, +// } +// return Ok(()); +// } +// return Err(EINVAL); +// } + +// fn wait_queue(&self) -> &super::common::poll_unit::WaitQueue { +// todo!() +// } + +// fn on_iface_events(&self) { +// todo!() +// } +// } + +// pub trait Socket: FileLike + Send + Sync { +// /// Assign the address specified by socket_addr to the socket +// fn bind(&self, _socket_addr: SocketAddr) -> Result<()> { +// return_errno_with_message!(Errno::EOPNOTSUPP, "bind() is not supported"); +// } + +// /// Build connection for a given address +// fn connect(&self, _socket_addr: SocketAddr) -> Result<()> { +// return_errno_with_message!(Errno::EOPNOTSUPP, "connect() is not supported"); +// } + +// /// Listen for connections on a socket +// fn listen(&self, _backlog: usize) -> Result<()> { +// return_errno_with_message!(Errno::EOPNOTSUPP, "listen() is not supported"); +// } + +// /// Accept a connection on a socket +// fn accept(&self) -> Result<(Arc, SocketAddr)> { +// return_errno_with_message!(Errno::EOPNOTSUPP, "accept() is not supported"); +// } + +// /// Shut down part of a full-duplex connection +// fn shutdown(&self, _cmd: SockShutdownCmd) -> Result<()> { +// return_errno_with_message!(Errno::EOPNOTSUPP, "shutdown() is not supported"); +// } + +// /// Get address of this socket. 
+// fn addr(&self) -> Result { +// return_errno_with_message!(Errno::EOPNOTSUPP, "getsockname() is not supported"); +// } + +// /// Get address of peer socket +// fn peer_addr(&self) -> Result { +// return_errno_with_message!(Errno::EOPNOTSUPP, "getpeername() is not supported"); +// } + +// /// Get options on the socket. The resulted option will put in the `option` parameter, if +// /// this method returns success. +// fn get_option(&self, _option: &mut dyn SocketOption) -> Result<()> { +// return_errno_with_message!(Errno::EOPNOTSUPP, "getsockopt() is not supported"); +// } + +// /// Set options on the socket. +// fn set_option(&self, _option: &dyn SocketOption) -> Result<()> { +// return_errno_with_message!(Errno::EOPNOTSUPP, "setsockopt() is not supported"); +// } + +// /// Sends a message on a socket. +// fn sendmsg( +// &self, +// io_vecs: &[IoVec], +// message_header: MessageHeader, +// flags: SendRecvFlags, +// ) -> Result; + +// /// Receives a message from a socket. +// /// +// /// If successful, the `io_vecs` buffer will be filled with the received content. +// /// This method returns the length of the received message, +// /// and the message header. +// fn recvmsg(&self, io_vecs: &[IoVec], flags: SendRecvFlags) -> Result<(usize, MessageHeader)>; +// } \ No newline at end of file diff --git a/kernel/src/net/socket/inet/stream/inner.rs b/kernel/src/net/socket/inet/stream/inner.rs new file mode 100644 index 000000000..c02462b3c --- /dev/null +++ b/kernel/src/net/socket/inet/stream/inner.rs @@ -0,0 +1,271 @@ +use system_error::SystemError::{self, *}; +use crate::net::socket::{self, inet::Types}; +use crate::libs::rwlock::RwLock; +use alloc::vec::Vec; +use smoltcp; + +pub const DEFAULT_METADATA_BUF_SIZE: usize = 1024; +pub const DEFAULT_RX_BUF_SIZE: usize = 512 * 1024; +pub const DEFAULT_TX_BUF_SIZE: usize = 512 * 1024; + +fn new_smoltcp_socket() -> smoltcp::socket::tcp::Socket<'static> { + let rx_buffer = smoltcp::socket::tcp::SocketBuffer::new( + vec![0; DEFAULT_RX_BUF_SIZE] + ); + let tx_buffer = smoltcp::socket::tcp::SocketBuffer::new( + vec![0; DEFAULT_TX_BUF_SIZE] + ); + smoltcp::socket::tcp::Socket::new(rx_buffer, tx_buffer) +} + +fn new_listen_smoltcp_socket(local_endpoint: smoltcp::wire::IpEndpoint) -> smoltcp::socket::tcp::Socket<'static> { + let mut socket = new_smoltcp_socket(); + socket.listen(local_endpoint).unwrap(); + socket +} + +#[derive(Debug)] +pub enum Init { + Unbound(smoltcp::socket::tcp::Socket<'static>), + Bound((socket::inet::BoundInner, smoltcp::wire::IpEndpoint)), +} + +impl Init { + pub(super) fn new() -> Self { + Init::Unbound(new_smoltcp_socket()) + } + + pub(super) fn new_bound(inner: socket::inet::BoundInner) -> Self { + Init::Bound(inner) + } + + pub(super) fn bind(self, local_endpoint: smoltcp::wire::IpEndpoint) -> Result { + match self { + Init::Unbound(socket) => { + let bound = socket::inet::BoundInner::bind( + socket, + &local_endpoint.addr, + )?; + bound.port_manager().bind_port(Types::Tcp, local_endpoint.port)?; + // bound.iface().common().bind_socket() + Ok( Init::Bound((bound, local_endpoint)) ) + }, + Init::Bound(_) => { + Err(EINVAL) + } + } + } + + pub(super) fn bind_to_ephemeral(self, remote_endpoint: smoltcp::wire::IpEndpoint) + -> Result + { + match self { + Init::Unbound(socket) => { + let (bound, address) = socket::inet::BoundInner::bind_ephemeral( + socket, + remote_endpoint.addr, + )?; + let bound_port = bound.port_manager().bind_ephemeral_port(Types::Tcp)?; + let endpoint = smoltcp::wire::IpEndpoint::new(address, bound_port); + 
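// Added note (not in the diff): in the ephemeral path the interface comes from
// `BoundInner::bind_ephemeral` (an iface whose address or subnet matches the remote,
// falling back to the first registered interface), and the local port comes from
// `PortManager::bind_ephemeral_port`, which probes the dynamic range 49152..=65535
// defined in inet/common/port.rs.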
Ok( Init::Bound((bound, endpoint)) ) + }, + Init::Bound(_) => { + Err(EINVAL) + } + } + } + + pub(super) fn connect(self, remote_endpoint: smoltcp::wire::IpEndpoint) -> Result { + let (inner, local) = match self { + Init::Unbound(_) => { + self.bind_to_ephemeral(remote_endpoint).map_err(|err| (self, err))? + }, + Init::Bound(inner) => inner, + }; + inner.with_mut(|socket| { + socket.connect( + inner.iface().smol_iface().lock().context(), + remote_endpoint, + local + ).map_err(|_| (self, ECONNREFUSED)) + })?; + return Ok( Connecting::new(inner) ); + } +} + +#[derive(Debug, Default)] +enum ConnectResult { + Connected, + #[default] Connecting, + Refused, +} + +#[derive(Debug)] +pub struct Connecting { + inner: socket::inet::BoundInner, + result: RwLock, +} + +impl Connecting { + fn new(inner: socket::inet::BoundInner) -> Self { + Connecting { + inner, + result: RwLock::new(Err(EAGAIN_OR_EWOULDBLOCK)), + } + } + + pub fn with_mut) -> R>(&self, f: F) -> R { + self.inner.with_mut(f) + } + + pub fn into_result(self) -> (Inner, Option) { + use ConnectResult::*; + match self.result.read() { + Connecting => (Inner::Connecting(self), Some(EAGAIN_OR_EWOULDBLOCK)), + Connected => (Inner::Established(Established { inner: self.inner }), None), + Refused => (Inner::Init(Init::Bound(self.inner)), Some(ECONNREFUSED)), + } + } + + /// Returns `true` when `conn_result` becomes ready, which indicates that the caller should + /// invoke the `into_result()` method as soon as possible. + /// + /// Since `into_result()` needs to be called only once, this method will return `true` + /// _exactly_ once. The caller is responsible for not missing this event. + #[must_use] + pub(super) fn update_io_events(&self) -> bool { + if self.result.read().is_some() { + return false; + } + + self.inner.with_mut(|socket: &mut smoltcp::socket::tcp::Socket| { + let mut result = self.result.write(); + if result.is_some() { + return false; + } + + // Connected + if socket.can_send() { + result.replace(Ok(())); + return true; + } + // Connecting + if socket.is_open() { + return false; + } + // Refused + result.replace(Err(ECONNREFUSED)); + true + }) + } +} + +#[derive(Debug)] +pub struct Listening { + inners: Vec, +} + +impl Listening { + pub fn accept(&mut self) -> Result<(Established, smoltcp::wire::IpEndpoint), SystemError> { + let local_endpoint = self.inners[0].with::(|socket| { + socket.local_endpoint() + }).ok_or_else(||{ + log::error!("A Listening Tcp With No Local Endpoint"); + EINVAL + })?; + + let mut new_listen = socket::inet::BoundInner::bind( + new_listen_smoltcp_socket(local_endpoint), + &local_endpoint.addr, + )?; + + let connected: &mut socket::inet::BoundInner = self.inners.iter_mut().find(|inner| { + inner.with::(|socket| { + socket.is_active() + }) + }).ok_or(EAGAIN_OR_EWOULDBLOCK)?; + + // swap the connected socket with the new_listen socket + // TODO is smoltcp socket swappable? 
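// Added note (not in the diff): `new_listen` was bound above as a fresh listening
// socket on the same local endpoint. The swap puts that fresh listener back into
// the `self.inners` backlog slot, while `new_listen` ends up holding the
// already-established connection that is returned as `Established`. After the swap,
// `connected` therefore refers to the fresh listener, so the `remote_endpoint()`
// lookup below appears to read the wrong socket; reading it from `new_listen`
// (or before the swap) seems to be the intent.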
+ core::mem::swap(&mut new_listen, connected); + + let remote_endpoint = connected.with::(|socket| { + // haven't check ECONNABORTED is the right error + socket.remote_endpoint().ok_or(ECONNABORTED) + })?; + + return Ok (( Established { inner: new_listen }, remote_endpoint)); + } +} + +#[derive(Debug)] +pub struct Established { + inner: socket::inet::BoundInner, +} + +impl Established { + pub fn with_mut) -> R>(&self, f: F) -> R { + self.inner.with_mut(f) + } + + pub fn with) -> R>(&self, f: F) -> R { + self.inner.with(f) + } + + pub fn close(self) { + self.inner.with_mut::(|socket| { + socket.close(); + }); + self.inner.release(); + } + + pub fn local_endpoint(&self) -> smoltcp::wire::IpEndpoint { + self.inner.with::(|socket| { + socket.local_endpoint() + }).unwrap() + } + + pub fn remote_endpoint(&self) -> smoltcp::wire::IpEndpoint { + self.inner.with::(|socket| { + socket.remote_endpoint().unwrap() + }) + } + + pub fn recv_slice(&self, buf: &mut [u8]) -> Result { + self.inner.with_mut::(|socket| { + use smoltcp::socket::tcp::RecvError::*; + if socket.can_send() { + match socket.recv_slice(buf) { + Ok(size) => Ok(size), + Err(InvalidState) => { + log::error!("TcpSocket::try_recv: InvalidState"); + Err(ENOTCONN) + }, + Err(Finished) => { + Ok(0) + } + } + } else { + Err(ENOBUFS) + } + }) + } + + pub fn send_slice(&self, buf: &[u8]) -> Result { + self.inner.with_mut::(|socket| { + if socket.can_send() { + socket.send_slice(buf).map_err(|_| ECONNABORTED) + } else { + Err(ENOBUFS) + } + }) + } +} + +#[derive(Debug)] +pub enum Inner { + Init(Init), + Connecting(Connecting), + Listening(Listening), + Established(Established), +} \ No newline at end of file diff --git a/kernel/src/net/socket/inet/stream/mod.rs b/kernel/src/net/socket/inet/stream/mod.rs new file mode 100644 index 000000000..687af0005 --- /dev/null +++ b/kernel/src/net/socket/inet/stream/mod.rs @@ -0,0 +1,499 @@ +use system_error::SystemError::{self, *}; +use core::sync::atomic::AtomicBool; +use alloc::sync::{Arc, Weak}; + +use crate::net::event_poll::EPollEventType; +use crate::net::net_core::poll_ifaces; +use crate::net::socket::common::{poll_unit::{EPollItems, WaitQueue}, Shutdown}; +use crate::libs::rwlock::RwLock; +use smoltcp; + +pub mod inner; +use inner::*; + +type EP = EPollEventType; +#[derive(Debug)] +pub struct TcpSocket { + inner: RwLock>, + shutdown: Shutdown, + nonblock: AtomicBool, + epitems: EPollItems, + wait_queue: WaitQueue, + self_ref: Weak, +} + +impl TcpSocket { + pub fn new(nonblock: bool) -> Arc { + Arc::new_cyclic( + |me| Self { + inner: RwLock::new(Some(Inner::Unbound(Init::new()))), + shutdown: Shutdown::new(), + nonblock: AtomicBool::new(nonblock), + epitems: EPollItems::new(), + wait_queue: WaitQueue::default(), + self_ref: me.clone(), + } + ) + } + + pub fn is_nonblock(&self) -> bool { + self.nonblock.load(core::sync::atomic::Ordering::Relaxed) + } + + #[inline] + fn write_state(&self, mut f: F) -> Result<(), SystemError> + where + F: FnMut(Inner) -> Result + { + let mut inner_guard = self.inner.write(); + let inner = inner_guard.take().expect("Tcp Inner is None"); + let update = f(inner)?; + inner_guard.replace(update); + Ok(()) + } + + pub fn bind(&self, local_endpoint: smoltcp::wire::IpEndpoint) -> Result<(), SystemError> { + let mut writer = self.inner.write(); + match writer.take().expect("Tcp Inner is None") { + Inner::Init(inner) => { + let bound = inner.bind(local_endpoint)?; + if let Init::Bound((bound, _)) = &bound { + bound.0.iface().common().bind_socket(self.self_ref.upgrade().unwrap()); 
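                    // Note: registering with the iface's `IfaceCommon` here is presumably what
                    // lets the iface's poll loop find this socket again and update its IO events.
                    //
                    // A hedged sketch of the client path built only from methods defined in this
                    // file (illustrative; `local`/`remote` are hypothetical `IpEndpoint`s and
                    // error handling is elided):
                    //
                    //     let sock = TcpSocket::new(true);   // nonblocking
                    //     sock.bind(local)?;                 // optional; connect can bind ephemerally
                    //     sock.start_connect(remote)?;       // Inner::Init -> Inner::Connecting
                    //     /* wait for the iface to make progress, e.g. via epoll */
                    //     sock.finish_connect()?;            // Inner::Connecting -> Inner::Established
                    //     let n = sock.try_send(b"hello")?;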
+ } + writer.replace(Inner::Init(bound)); + Ok(()) + } + _ => Err(EINVAL), + } + } + + pub fn do_listen(&self, backlog: usize) -> Result<(), SystemError> { + self.write_state(|inner| { + match inner { + Inner::Connecting(connecting) => { + connecting.listen(backlog).map(|inners| + Inner::Listening(inners) + ) + } + _ => Err(EINVAL), + } + }) + } + + pub fn try_accept(&self) -> Result<(Arc, smoltcp::wire::IpEndpoint), SystemError> { + match self.inner.write().as_mut().expect("Tcp Inner is None") { + Inner::Listening(listening) => { + listening.accept().map(|(stream, remote)| + ( + Arc::new_cyclic( |me| TcpStream { + inner: stream, + shutdown: Shutdown::new(), + nonblock: AtomicBool::new( + self.nonblock.load( + core::sync::atomic::Ordering::Relaxed + ) + ), + epitems: EPollItems::new(), + wait_queue: WaitQueue::default(), + self_ref: me.clone(), + }), + remote + ) + ) + } + _ => Err(EINVAL), + } + } + + pub fn start_connect(&self, remote_endpoint: smoltcp::wire::IpEndpoint) -> Result<(), SystemError> { + let mut writer = self.inner.write(); + let inner = writer.take().expect("Tcp Inner is None"); + let (init, err) = match inner { + Inner::Init(init) => { + let conn_result = init.connect(remote_endpoint); + match conn_result { + Ok(connecting) => { + ( + Inner::Connecting(connecting), + if self.is_nonblock() { + None + } else { + Some(EINPROGRESS) + } + ) + } + Err((init, err)) => { + (Inner::Init(init), Some(err)) + } + } + } + Inner::Connecting(connecting) if self.is_nonblock() => { + (Inner::Connecting(connecting), Some(EALREADY)) + }, + Inner::Connecting(connecting) => { + (Inner::Connecting(connecting), None) + } + Inner::Listening(inner) => { + (Inner::Listening(inner), Some(EISCONN)) + } + Inner::Established(_) => { + (Inner::Established(Established::new()), Some(EISCONN)) + } + }; + writer.replace(init); + + drop(writer); + + poll_ifaces(); + + if let Some(err) = err { + return Err(err); + } + return Ok(()); + } + + pub fn finish_connect(&self) -> Result<(), SystemError> { + let mut writer = self.inner.write(); + let Inner::Connecting(conn) = writer.take().expect("Tcp Inner is None") else { + log::error!("TcpSocket::finish_connect: not Connecting"); + return Err(EINVAL); + }; + + let (inner, err) = conn.into_result(); + writer.replace(inner); + drop(writer); + + if let Some(err) = err { + return Err(err); + } + return Ok(()); + } + + pub fn check_connect(&self) -> Result<(), SystemError> { + match self.inner.read().as_ref().expect("Tcp Inner is None") { + Inner::Connecting(_) => Err(EAGAIN_OR_EWOULDBLOCK), + Inner::Established(_) => Ok(()), // TODO check established + _ => Err(EINVAL), // TODO socket error options + } + } + + pub fn try_recv(&self, buf: &mut [u8]) -> Result { + match self.inner.read().as_ref().expect("Tcp Inner is None") { + Inner::Established(inner) => { + inner.recv_slice(buf) + } + _ => Err(EINVAL), + } + } + + pub fn try_send(&self, buf: &[u8]) -> Result { + match self.inner.read().as_ref().expect("Tcp Inner is None") { + Inner::Established(inner) => { + let sent = inner.send_slice(buf); + poll_ifaces(); + sent + } + _ => Err(EINVAL), + } + } +} + +impl IndexNode for TcpSocket { + fn read_at( + &self, + _offset: usize, + _len: usize, + buf: &mut [u8], + data: crate::libs::spinlock::SpinLockGuard, + ) -> Result { + drop(data); + // self.inner.read().as_ref().expect("Tcp Inner is None").read(buf) + todo!() + } + + fn write_at( + &self, + _offset: usize, + _len: usize, + buf: &[u8], + data: crate::libs::spinlock::SpinLockGuard, + ) -> Result { + drop(data); + // 
self.inner.read().as_ref().expect("Tcp Inner is None").write(buf) + todo!() + } + + fn fs(&self) -> alloc::sync::Arc { + todo!("TcpSocket::fs") + } + + fn as_any_ref(&self) -> &dyn core::any::Any { + self + } + + fn list(&self) -> Result, SystemError> { + todo!("TcpSocket::list") + } + + fn kernel_ioctl( + &self, + arg: alloc::sync::Arc, + _data: &crate::filesystem::vfs::FilePrivateData, + ) -> Result { + let epitem = arg + .arc_any() + .downcast::() + .map_err(|_| SystemError::EFAULT)?; + + self.epoll_items().add(epitem); + + return Ok(0); + } + + fn poll(&self, private_data: &crate::filesystem::vfs::FilePrivateData) -> Result { + drop(private_data); + + } +} + +impl Socket for TcpSocket { + fn epoll_items(&self) -> &EPollItems { + &self.epitems + } + + fn wait_queue(&self) -> &WaitQueue { + &self.wait_queue + } + + fn update_io_events(&self) -> Result { + let mut mask = EP::empty(); + let shutdown = self.shutdown.get(); + match self.inner.read().as_ref().expect("Tcp Inner is None") { + Inner::Established(inner) => { + inner.with_mut(|socket| { + let state = socket.state(); + use smoltcp::socket::tcp::State::*; + type EP = crate::net::event_poll::EPollEventType; + + if shutdown.is_both_shutdown() || + state == Closed || + state == CloseWait + { + mask |= EP::EPOLLHUP; + } + + if shutdown.is_recv_shutdown() { + mask |= EP::EPOLLIN | EP::EPOLLRDNORM | EP::EPOLLRDHUP; + } + + if state != SynSent && state != SynReceived { + if socket.can_recv() { + mask |= EP::EPOLLIN | EP::EPOLLRDNORM; + } + + if !shutdown.is_send_shutdown() { + if socket.can_send() { + mask |= EP::EPOLLOUT | EP::EPOLLWRNORM | EP::EPOLLWRBAND; + } else { + todo!("TcpSocket::poll: buffer space not enough"); + } + } else { + mask |= EP::EPOLLOUT | EP::EPOLLWRNORM; + } + // TODO tcp urg data => EPOLLPRI + } else if state == SynSent /* inet_test_bit */ { + log::warn!("Active TCP fastopen socket with defer_connect"); + mask |= EP::EPOLLOUT | EP::EPOLLWRNORM; + } + + // TODO socket error + return Ok(mask.bits() as usize); + }) + } + _ => Err(EINVAL), + } + } + + fn accept(&self) -> Result<(Arc, crate::net::Endpoint), SystemError> { + self.try_accept().map(|(stream, remote)| + (stream as Arc, crate::net::Endpoint::from(remote)) + ) + } +} + +#[derive(Debug)] +// #[cast_to([sync] IndexNode)] +struct TcpStream { + inner: Established, + shutdown: Shutdown, + nonblock: AtomicBool, + epitems: EPollItems, + wait_queue: WaitQueue, + self_ref: Weak, +} + +impl TcpStream { + pub fn is_nonblock(&self) -> bool { + self.nonblock.load(core::sync::atomic::Ordering::Relaxed) + } + + pub fn read(&self, buf: &mut [u8]) -> Result { + if self.nonblock.load(core::sync::atomic::Ordering::Relaxed) { + return self.recv_slice(buf); + } else { + return self.wait_queue().busy_wait( + EP::EPOLLIN, + || self.recv_slice(buf) + ) + } + } + + pub fn recv_slice(&self, buf: &mut [u8]) -> Result { + use smoltcp::socket::tcp::RecvError::*; + let received = match self.inner.recv_slice(buf) { + Ok(0) => Err(EAGAIN_OR_EWOULDBLOCK), + Ok(size) => Ok(size), + Err(InvalidState) => { + log::error!("TcpStream::recv_slice: InvalidState"); + Err(EINVAL) + }, + Err(Finished) => { + // Remote send is shutdown + self.shutdown.recv_shutdown(); + Err(ENOTCONN) + } + }; + poll_ifaces(); + received + } + + pub fn send_slice(&self, buf: &[u8]) -> Result { + let sent = self.inner.send_slice(buf); + poll_ifaces(); + sent + } +} + +use crate::net::socket::Socket; +use crate::filesystem::vfs::IndexNode; + +impl IndexNode for TcpStream { + fn read_at( + &self, + _offset: usize, + _len: 
usize, + buf: &mut [u8], + data: crate::libs::spinlock::SpinLockGuard, + ) -> Result { + drop(data); + self.read(buf) + } + + fn write_at( + &self, + _offset: usize, + _len: usize, + buf: &[u8], + data: crate::libs::spinlock::SpinLockGuard, + ) -> Result { + drop(data); + self.send_slice(buf) + } + + fn fs(&self) -> alloc::sync::Arc { + todo!("TcpSocket::fs") + } + + fn as_any_ref(&self) -> &dyn core::any::Any { + self + } + + fn list(&self) -> Result, SystemError> { + todo!("TcpSocket::list") + } + + // fn kernel_ioctl( + // &self, + // arg: alloc::sync::Arc, + // _data: &crate::filesystem::vfs::FilePrivateData, + // ) -> Result { + // let epitem = arg + // .arc_any() + // .downcast::() + // .map_err(|_| SystemError::EFAULT)?; + + // self.epoll_items().add(epitem); + + // return Ok(0); + // } + +} + +impl Socket for TcpStream { + fn epoll_items(&self) -> &EPollItems { + &self.epitems + } + + fn wait_queue(&self) -> &WaitQueue { + &self.wait_queue + } + + fn update_io_events(&self) -> Result { + self.inner.with(|socket| { + let mut mask = EPollEventType::empty(); + let shutdown = self.shutdown.get(); + let state = socket.state(); + use smoltcp::socket::tcp::State::*; + type EP = crate::net::event_poll::EPollEventType; + + if shutdown.is_both_shutdown() || state == Closed { + mask |= EP::EPOLLHUP; + } + + if shutdown.is_recv_shutdown() { + mask |= EP::EPOLLIN | EP::EPOLLRDNORM | EP::EPOLLRDHUP; + } + + if state != SynSent && state != SynReceived { + if socket.can_recv() { + mask |= EP::EPOLLIN | EP::EPOLLRDNORM; + } + + if !shutdown.is_send_shutdown() { + // __sk_stream_is_writeable,这是一个内联函数,用于判断一个TCP套接字是否可写。 + // + // 以下是函数的逐行解释: + // static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake) + // - 这行定义了函数__sk_stream_is_writeable,它是一个内联函数(static inline), + // 这意味着在调用点直接展开代码,而不是调用函数体。函数接收两个参数: + // 一个指向struct sock对象的指针sk(代表套接字),和一个整型变量wake。 + // + // return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && + // - 这行代码调用了sk_stream_wspace函数,获取套接字sk的可写空间(write space)大小。 + // 随后与sk_stream_min_wspace调用结果进行比较,该函数返回套接字为了保持稳定写入速度所需的 + // 最小可写空间。如果当前可写空间大于或等于最小可写空间,则表达式为真。 + // __sk_stream_memory_free(sk, wake); + // - 这行代码调用了__sk_stream_memory_free函数,它可能用于检查套接字的内存缓冲区是否 + // 有足够的空间可供写入数据。参数wake可能用于通知网络协议栈有数据需要发送,如果设置了相应的标志。 + // 综上所述,__sk_stream_is_writeable函数的目的是判断一个TCP套接字是否可以安全地进行写操作, + // 它基于套接字的当前可写空间和所需的最小空间以及内存缓冲区的可用性。只有当这两个条件都满足时, + // 函数才会返回true,表示套接字是可写的。 + if socket.can_send() { + mask |= EP::EPOLLOUT | EP::EPOLLWRNORM | EP::EPOLLWRBAND; + } else { + todo!("TcpStream::poll: buffer space not enough"); + } + } else { + mask |= EP::EPOLLOUT | EP::EPOLLWRNORM; + } + // TODO tcp urg data => EPOLLPRI + } else if state == SynSent /* inet_test_bit */ { + log::warn!("Active TCP fastopen socket with defer_connect"); + mask |= EP::EPOLLOUT | EP::EPOLLWRNORM; + } + + // TODO socket error + return Ok(mask.bits() as usize); + }) + } +} \ No newline at end of file diff --git a/kernel/src/net/socket/inet/syscall.rs b/kernel/src/net/socket/inet/syscall.rs new file mode 100644 index 000000000..79c0d30e9 --- /dev/null +++ b/kernel/src/net/socket/inet/syscall.rs @@ -0,0 +1,33 @@ +use system_error::SystemError::{self, *}; +use smoltcp; +use alloc::sync::Arc; + +use super::AnyInetSocket; + +pub fn create_inet_socket(sock_type: crate::net::socket::define::Types, protocol: smoltcp::wire::IpProtocol) -> Result, SystemError> { + use crate::net::socket::define::Types as SocketTypes; + use smoltcp::wire::IpProtocol::*; + match protocol { + Udp => { + if sock_type.types() != SocketTypes::DGRAM { + 
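            // Note: this factory mirrors POSIX socket(2): the requested protocol must be
            // consistent with the socket type, so IPPROTO_UDP is only accepted together
            // with SOCK_DGRAM and everything else falls through to EPROTONOSUPPORT.
            // The match arms themselves are still `todo!()` in this patch, i.e. the
            // concrete socket constructors are not wired up here yet.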
return Err(EPROTONOSUPPORT); + } + todo!() + } + Tcp => { + todo!() + } + Icmp => { + todo!() + } + HopByHop => { + if sock_type.types() != SocketTypes::RAW { + return Err(EPROTONOSUPPORT); + } + todo!() + } + _ => { + return Err(EPROTONOSUPPORT); + } + } +} \ No newline at end of file diff --git a/kernel/src/net/socket/mod.rs b/kernel/src/net/socket/mod.rs index d6f6c6a62..00f374037 100644 --- a/kernel/src/net/socket/mod.rs +++ b/kernel/src/net/socket/mod.rs @@ -7,6 +7,7 @@ use alloc::{ sync::{Arc, Weak}, vec::Vec, }; +use common::poll_unit::{EPollItems, WaitQueue}; use hashbrown::HashMap; use log::warn; use smoltcp::{ @@ -31,202 +32,117 @@ use crate::{ }; use self::{ - handle::GlobalSocketHandle, - inet::{RawSocket, TcpSocket, UdpSocket}, unix::{SeqpacketSocket, StreamSocket}, + common::shutdown::Shutdown, }; use super::{ - event_poll::{EPollEventType, EPollItem, EventPoll}, Endpoint, Protocol, ShutdownType, SocketOptionsLevel + event_poll::{EPollEventType, EPollItem, EventPoll}, Endpoint, }; -pub mod handle; pub mod inet; pub mod unix; -pub mod tcp_def; -pub mod ip_def; +pub mod define; +pub mod common; -lazy_static! { - /// 所有socket的集合 - /// TODO: 优化这里,自己实现SocketSet!!!现在这样的话,不管全局有多少个网卡,每个时间点都只会有1个进程能够访问socket - pub static ref SOCKET_SET: SpinLock> = SpinLock::new(SocketSet::new(vec![])); - /// SocketHandle表,每个SocketHandle对应一个SocketHandleItem, - /// 注意!:在网卡中断中需要拿到这张表的🔓,在获取读锁时应该确保关中断避免死锁 - pub static ref HANDLE_MAP: RwLock> = RwLock::new(HashMap::new()); - /// 端口管理器 - pub static ref PORT_MANAGER: PortManager = PortManager::new(); -} +pub use define::{AddressFamily, Options as SocketOptions, OptionsLevel as SocketOptionsLevel, Types as SocketTypes}; /* For setsockopt(2) */ // See: linux-5.19.10/include/uapi/asm-generic/socket.h#9 pub const SOL_SOCKET: u8 = 1; -/// 根据地址族、socket类型和协议创建socket -pub(super) fn new_socket( - address_family: AddressFamily, - socket_type: PosixSocketType, - protocol: Protocol, -) -> Result, SystemError> { - let socket: Box = match address_family { - AddressFamily::Unix => match socket_type { - PosixSocketType::Stream => Box::new(StreamSocket::new(SocketOptions::default())), - PosixSocketType::SeqPacket => Box::new(SeqpacketSocket::new(SocketOptions::default())), - _ => { - return Err(SystemError::EINVAL); - } - }, - AddressFamily::INet => match socket_type { - PosixSocketType::Stream => Box::new(TcpSocket::new(SocketOptions::default())), - PosixSocketType::Datagram => Box::new(UdpSocket::new(SocketOptions::default())), - PosixSocketType::Raw => Box::new(RawSocket::new(protocol, SocketOptions::default())), - _ => { - return Err(SystemError::EINVAL); - } - }, - _ => { - return Err(SystemError::EAFNOSUPPORT); - } - }; - - let handle_item = SocketHandleItem::new(Arc::downgrade(&socket.posix_item())); - HANDLE_MAP - .write_irqsave() - .insert(socket.socket_handle(), handle_item); - Ok(socket) -} - -pub trait Socket: Sync + Send + Debug + Any { - /// @brief 从socket中读取数据,如果socket是阻塞的,那么直到读取到数据才返回 - /// - /// @param buf 读取到的数据存放的缓冲区 - /// - /// @return - 成功:(返回读取的数据的长度,读取数据的端点). - /// - 失败:错误码 - fn read(&self, buf: &mut [u8]) -> (Result, Endpoint); - - /// @brief 向socket中写入数据。如果socket是阻塞的,那么直到写入的数据全部写入socket中才返回 - /// - /// @param buf 要写入的数据 - /// @param to 要写入的目的端点,如果是None,那么写入的数据将会被丢弃 - /// - /// @return 返回写入的数据的长度 - fn write(&self, buf: &[u8], to: Option) -> Result; - - /// @brief 对应于POSIX的connect函数,用于连接到指定的远程服务器端点 - /// - /// It is used to establish a connection to a remote server. 
- /// When a socket is connected to a remote server, - /// the operating system will establish a network connection with the server - /// and allow data to be sent and received between the local socket and the remote server. - /// - /// @param endpoint 要连接的端点 - /// - /// @return 返回连接是否成功 - fn connect(&mut self, _endpoint: Endpoint) -> Result<(), SystemError>; +// /// 根据地址族、socket类型和协议创建socket +// pub(super) fn new_unbound_socket( +// address_family: AddressFamily, +// socket_type: PosixSocketType, +// protocol: Protocol, +// ) -> Result, SystemError> { +// let socket: Box = match address_family { +// AddressFamily::Unix => match socket_type { +// PosixSocketType::Stream => Box::new(StreamSocket::new(Options::default())), +// PosixSocketType::SeqPacket => Box::new(SeqpacketSocket::new(Options::default())), +// _ => { +// return Err(SystemError::EINVAL); +// } +// }, +// AddressFamily::INet => match socket_type { +// PosixSocketType::Stream => Box::new(TcpSocket::new(Options::default())), +// PosixSocketType::Datagram => Box::new(BoundUdp::new(Options::default())), +// PosixSocketType::Raw => Box::new(RawSocket::new(protocol, Options::default())), +// _ => { +// return Err(SystemError::EINVAL); +// } +// }, +// _ => { +// return Err(SystemError::EAFNOSUPPORT); +// } +// }; + +// Ok(socket) +// } + +pub trait Socket: IndexNode { + /// # `epoll_items` + /// socket的epoll事件集 + fn epoll_items(&self) -> &EPollItems; + + /// # `wait_queue` + /// 获取socket的wait queue + fn wait_queue(&self) -> &WaitQueue; + + /// # `connect` + /// 对应于POSIX的connect函数,用于连接到指定的远程服务器端点 + fn connect(&self, _endpoint: Endpoint) -> Result<(), SystemError> { + Err(SystemError::ENOSYS) + } - /// @brief 对应于POSIX的bind函数,用于绑定到本机指定的端点 - /// - /// The bind() function is used to associate a socket with a particular IP address and port number on the local machine. 
- /// - /// @param endpoint 要绑定的端点 - /// - /// @return 返回绑定是否成功 - fn bind(&mut self, _endpoint: Endpoint) -> Result<(), SystemError> { + /// # `bind` + /// 对应于POSIX的bind函数,用于绑定到本机指定的端点 + fn bind(&self, _endpoint: Endpoint) -> Result<(), SystemError> { Err(SystemError::ENOSYS) } - /// @brief 对应于 POSIX 的 shutdown 函数,用于关闭socket。 - /// - /// shutdown() 函数用于启动网络连接的正常关闭。 - /// 当在两个端点之间建立网络连接时,任一端点都可以通过调用其端点对象上的 shutdown() 函数来启动关闭序列。 - /// 此函数向远程端点发送关闭消息以指示本地端点不再接受新数据。 - /// - /// @return 返回是否成功关闭 - fn shutdown(&mut self, _type: ShutdownType) -> Result<(), SystemError> { + /// # `shutdown` + /// 对应于 POSIX 的 shutdown 函数,用于网络连接的可选关闭。 + fn shutdown(&self, _type: Shutdown) -> Result<(), SystemError> { Err(SystemError::ENOSYS) } - /// @brief 对应于POSIX的listen函数,用于监听端点 - /// - /// @param backlog 最大的等待连接数 - /// - /// @return 返回监听是否成功 - fn listen(&mut self, _backlog: usize) -> Result<(), SystemError> { + /// # `listen` + /// 监听socket,仅用于stream socket + fn listen(&self, _backlog: usize) -> Result<(), SystemError> { Err(SystemError::ENOSYS) } - /// @brief 对应于POSIX的accept函数,用于接受连接 - /// - /// @param endpoint 对端的端点 - /// - /// @return 返回接受连接是否成功 - fn accept(&mut self) -> Result<(Box, Endpoint), SystemError> { + /// # `accept` + /// 接受连接,仅用于listening stream socket + /// ## Block + /// 如果没有连接到来,会阻塞 + fn accept(&self) -> Result<(Arc, Endpoint), SystemError> { Err(SystemError::ENOSYS) } - /// @brief 获取socket的端点 - /// - /// @return 返回socket的端点 + /// # `endpoint` + /// 获取绑定的端点 fn endpoint(&self) -> Option { None } - /// @brief 获取socket的对端端点 - /// - /// @return 返回socket的对端端点 + /// # `peer_endpoint` + /// 获取对端的端点 fn peer_endpoint(&self) -> Option { None } - /// @brief - /// The purpose of the poll function is to provide - /// a non-blocking way to check if a socket is ready for reading or writing, - /// so that you can efficiently handle multiple sockets in a single thread or event loop. - /// - /// @return (in, out, err) - /// - /// The first boolean value indicates whether the socket is ready for reading. If it is true, then there is data available to be read from the socket without blocking. - /// The second boolean value indicates whether the socket is ready for writing. If it is true, then data can be written to the socket without blocking. - /// The third boolean value indicates whether the socket has encountered an error condition. 
If it is true, then the socket is in an error state and should be closed or reset - /// - fn poll(&self) -> EPollEventType { - EPollEventType::empty() - } - - /// @brief socket的ioctl函数 - /// - /// @param cmd ioctl命令 - /// @param arg0 ioctl命令的第一个参数 - /// @param arg1 ioctl命令的第二个参数 - /// @param arg2 ioctl命令的第三个参数 - /// - /// @return 返回ioctl命令的返回值 - fn ioctl( - &self, - _cmd: usize, - _arg0: usize, - _arg1: usize, - _arg2: usize, - ) -> Result { - Ok(0) - } - - /// @brief 获取socket的元数据 - fn metadata(&self) -> SocketMetadata; - - fn box_clone(&self) -> Box; - - /// @brief 设置socket的选项 - /// - /// @param level 选项的层次 - /// @param optname 选项的名称 - /// @param optval 选项的值 - /// - /// @return 返回设置是否成功, 如果不支持该选项,返回ENOSYS - /// - /// ## See + /// # `set_option` + /// 对应 Posix `setsockopt` ,设置socket选项 + /// ## Parameters + /// - level 选项的层次 + /// - optname 选项的名称 + /// - optval 选项的值 + /// ## Reference /// https://code.dragonos.org.cn/s?refs=sk_setsockopt&project=linux-6.6.21 - fn setsockopt( + fn set_option( &self, _level: SocketOptionsLevel, _optname: usize, @@ -236,370 +152,24 @@ pub trait Socket: Sync + Send + Debug + Any { Ok(()) } - fn socket_handle(&self) -> GlobalSocketHandle; - fn write_buffer(&self, _buf: &[u8]) -> Result { todo!() } - fn as_any_ref(&self) -> &dyn Any; - - fn as_any_mut(&mut self) -> &mut dyn Any; - - fn add_epoll(&mut self, epitem: Arc) -> Result<(), SystemError> { - let posix_item = self.posix_item(); - posix_item.add_epoll(epitem); - Ok(()) - } - - fn remove_epoll(&mut self, epoll: &Weak>) -> Result<(), SystemError> { - let posix_item = self.posix_item(); - posix_item.remove_epoll(epoll)?; - - Ok(()) - } - - fn clear_epoll(&mut self) -> Result<(), SystemError> { - let posix_item = self.posix_item(); - - for epitem in posix_item.epitems.lock_irqsave().iter() { - let epoll = epitem.epoll(); - - if let Some(epoll) = epoll.upgrade() { - EventPoll::ep_remove(&mut epoll.lock_irqsave(), epitem.fd(), None)?; - } - } - - Ok(()) - } - - fn close(&mut self); - - fn posix_item(&self) -> Arc; -} - -impl Clone for Box { - fn clone(&self) -> Box { - self.box_clone() - } -} - -/// # Socket在文件系统中的inode封装 -#[derive(Debug)] -pub struct SocketInode(SpinLock>, AtomicUsize); - -impl SocketInode { - pub fn new(socket: Box) -> Arc { - Arc::new(Self(SpinLock::new(socket), AtomicUsize::new(0))) - } - - #[inline] - pub fn inner(&self) -> SpinLockGuard> { - self.0.lock() - } - - pub unsafe fn inner_no_preempt(&self) -> SpinLockGuard> { - self.0.lock_no_preempt() - } - - fn do_close(&self) -> Result<(), SystemError> { - let prev_ref_count = self.1.fetch_sub(1, core::sync::atomic::Ordering::SeqCst); - if prev_ref_count == 1 { - // 最后一次关闭,需要释放 - let mut socket = self.0.lock_irqsave(); - - if socket.metadata().socket_type == SocketType::Unix { - return Ok(()); - } - - if let Some(Endpoint::Ip(Some(ip))) = socket.endpoint() { - PORT_MANAGER.unbind_port(socket.metadata().socket_type, ip.port); - } - - socket.clear_epoll()?; - - HANDLE_MAP - .write_irqsave() - .remove(&socket.socket_handle()) - .unwrap(); - socket.close(); - } - - Ok(()) - } -} - -impl Drop for SocketInode { - fn drop(&mut self) { - for _ in 0..self.1.load(core::sync::atomic::Ordering::SeqCst) { - let _ = self.do_close(); - } - } -} - -impl IndexNode for SocketInode { - fn open( - &self, - _data: SpinLockGuard, - _mode: &FileMode, - ) -> Result<(), SystemError> { - self.1.fetch_add(1, core::sync::atomic::Ordering::SeqCst); - Ok(()) - } - - fn close(&self, _data: SpinLockGuard) -> Result<(), SystemError> { - self.do_close() - } - - fn read_at( - 
&self, - _offset: usize, - len: usize, - buf: &mut [u8], - data: SpinLockGuard, - ) -> Result { - drop(data); - self.0.lock_no_preempt().read(&mut buf[0..len]).0 - } - - fn write_at( - &self, - _offset: usize, - len: usize, - buf: &[u8], - data: SpinLockGuard, - ) -> Result { - drop(data); - self.0.lock_no_preempt().write(&buf[0..len], None) - } + /// # `update_io_events` + /// 更新socket的事件。 + /// 原socket::poll + fn update_io_events(&self) -> Result; fn poll(&self, _private_data: &FilePrivateData) -> Result { - let events = self.0.lock_irqsave().poll(); - return Ok(events.bits() as usize); - } - - fn fs(&self) -> Arc { - todo!() - } - - fn as_any_ref(&self) -> &dyn Any { - self - } - - fn list(&self) -> Result, SystemError> { - return Err(SystemError::ENOTDIR); - } - - fn metadata(&self) -> Result { - let meta = Metadata { - mode: ModeType::from_bits_truncate(0o755), - file_type: FileType::Socket, - ..Default::default() - }; - - return Ok(meta); - } - - fn resize(&self, _len: usize) -> Result<(), SystemError> { - return Ok(()); + Ok(self.update_io_events()?.bits() as usize) } } -#[derive(Debug)] -pub struct PosixSocketHandleItem { - /// socket的waitqueue - wait_queue: Arc, - - pub epitems: SpinLock>>, -} - -impl PosixSocketHandleItem { - pub fn new(wait_queue: Option>) -> Self { - Self { - wait_queue: wait_queue.unwrap_or(Arc::new(EventWaitQueue::new())), - epitems: SpinLock::new(LinkedList::new()), - } - } - /// ## 在socket的等待队列上睡眠 - pub fn sleep(&self, events: u64) { - unsafe { - ProcessManager::preempt_disable(); - self.wait_queue.sleep_without_schedule(events); - ProcessManager::preempt_enable(); - } - schedule(SchedMode::SM_NONE); - } - - pub fn add_epoll(&self, epitem: Arc) { - self.epitems.lock_irqsave().push_back(epitem) - } - - pub fn remove_epoll(&self, epoll: &Weak>) -> Result<(), SystemError> { - let is_remove = !self - .epitems - .lock_irqsave() - .extract_if(|x| x.epoll().ptr_eq(epoll)) - .collect::>() - .is_empty(); - - if is_remove { - return Ok(()); - } - - Err(SystemError::ENOENT) - } - - /// ### 唤醒该队列上等待events的进程 - /// - /// ### 参数 - /// - events: 发生的事件 - /// - /// 需要注意的是,只要触发了events中的任意一件事件,进程都会被唤醒 - pub fn wakeup_any(&self, events: u64) { - self.wait_queue.wakeup_any(events); - } -} -#[derive(Debug)] -pub struct SocketHandleItem { - /// 对应的posix socket是否为listen的 - pub is_posix_listen: bool, - /// shutdown状态 - pub shutdown_type: RwLock, - pub posix_item: Weak, -} - -impl SocketHandleItem { - pub fn new(posix_item: Weak) -> Self { - Self { - is_posix_listen: false, - shutdown_type: RwLock::new(ShutdownType::empty()), - posix_item, - } - } - - pub fn shutdown_type(&self) -> ShutdownType { - *self.shutdown_type.read() - } - - pub fn shutdown_type_writer(&mut self) -> RwLockWriteGuard { - self.shutdown_type.write_irqsave() - } - - pub fn reset_shutdown_type(&self) { - *self.shutdown_type.write() = ShutdownType::empty(); - } - - pub fn posix_item(&self) -> Option> { - self.posix_item.upgrade() - } -} - -/// # TCP 和 UDP 的端口管理器。 -/// 如果 TCP/UDP 的 socket 绑定了某个端口,它会在对应的表中记录,以检测端口冲突。 -pub struct PortManager { - // TCP 端口记录表 - tcp_port_table: SpinLock>, - // UDP 端口记录表 - udp_port_table: SpinLock>, -} - -impl PortManager { - pub fn new() -> Self { - return Self { - tcp_port_table: SpinLock::new(HashMap::new()), - udp_port_table: SpinLock::new(HashMap::new()), - }; - } - - /// @brief 自动分配一个相对应协议中未被使用的PORT,如果动态端口均已被占用,返回错误码 EADDRINUSE - pub fn get_ephemeral_port(&self, socket_type: SocketType) -> Result { - // TODO: selects non-conflict high port - - static mut EPHEMERAL_PORT: u16 = 0; - 
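    // Note: the allocator being removed here started from a random point in the IANA
    // dynamic range (49152..=65535) and probed the global TCP/UDP tables linearly,
    // wrapping around at 65535. In the new inet code this responsibility appears to
    // move behind `BoundInner::port_manager().bind_ephemeral_port()` (see
    // stream/inner.rs above), so ports are no longer tracked in these global maps.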
unsafe { - if EPHEMERAL_PORT == 0 { - EPHEMERAL_PORT = (49152 + rand() % (65536 - 49152)) as u16; - } - } - - let mut remaining = 65536 - 49152; // 剩余尝试分配端口次数 - let mut port: u16; - while remaining > 0 { - unsafe { - if EPHEMERAL_PORT == 65535 { - EPHEMERAL_PORT = 49152; - } else { - EPHEMERAL_PORT += 1; - } - port = EPHEMERAL_PORT; - } - - // 使用 ListenTable 检查端口是否被占用 - let listen_table_guard = match socket_type { - SocketType::Udp => self.udp_port_table.lock(), - SocketType::Tcp => self.tcp_port_table.lock(), - _ => panic!("{:?} cann't get a port", socket_type), - }; - if listen_table_guard.get(&port).is_none() { - drop(listen_table_guard); - return Ok(port); - } - remaining -= 1; - } - return Err(SystemError::EADDRINUSE); - } - - /// @brief 检测给定端口是否已被占用,如果未被占用则在 TCP/UDP 对应的表中记录 - /// - /// TODO: 增加支持端口复用的逻辑 - pub fn bind_port(&self, socket_type: SocketType, port: u16) -> Result<(), SystemError> { - if port > 0 { - let mut listen_table_guard = match socket_type { - SocketType::Udp => self.udp_port_table.lock(), - SocketType::Tcp => self.tcp_port_table.lock(), - _ => panic!("{:?} cann't bind a port", socket_type), - }; - match listen_table_guard.get(&port) { - Some(_) => return Err(SystemError::EADDRINUSE), - None => listen_table_guard.insert(port, ProcessManager::current_pid()), - }; - drop(listen_table_guard); - } - return Ok(()); - } - - /// @brief 在对应的端口记录表中将端口和 socket 解绑 - /// should call this function when socket is closed or aborted - pub fn unbind_port(&self, socket_type: SocketType, port: u16) { - let mut listen_table_guard = match socket_type { - SocketType::Udp => self.udp_port_table.lock(), - SocketType::Tcp => self.tcp_port_table.lock(), - _ => { - return; - } - }; - listen_table_guard.remove(&port); - drop(listen_table_guard); - } -} - -/// @brief socket的类型 -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum SocketType { - /// 原始的socket - Raw, - /// 用于Tcp通信的 Socket - Tcp, - /// 用于Udp通信的 Socket - Udp, - /// unix域的 Socket - Unix, -} - bitflags! { /// @brief socket的选项 #[derive(Default)] - pub struct SocketOptions: u32 { + pub struct Options: u32 { /// 是否阻塞 const BLOCK = 1 << 0; /// 是否允许广播 @@ -612,312 +182,3 @@ bitflags! 
{ const REUSEPORT = 1 << 4; } } - -#[derive(Debug, Clone)] -/// @brief 在trait Socket的metadata函数中返回该结构体供外部使用 -pub struct SocketMetadata { - /// socket的类型 - pub socket_type: SocketType, - /// 接收缓冲区的大小 - pub rx_buf_size: usize, - /// 发送缓冲区的大小 - pub tx_buf_size: usize, - /// 元数据的缓冲区的大小 - pub metadata_buf_size: usize, - /// socket的选项 - pub options: SocketOptions, -} - -impl SocketMetadata { - fn new( - socket_type: SocketType, - rx_buf_size: usize, - tx_buf_size: usize, - metadata_buf_size: usize, - options: SocketOptions, - ) -> Self { - Self { - socket_type, - rx_buf_size, - tx_buf_size, - metadata_buf_size, - options, - } - } -} - -/// @brief 地址族的枚举 -/// -/// 参考:https://code.dragonos.org.cn/xref/linux-5.19.10/include/linux/socket.h#180 -#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)] -pub enum AddressFamily { - /// AF_UNSPEC 表示地址族未指定 - Unspecified = 0, - /// AF_UNIX 表示Unix域的socket (与AF_LOCAL相同) - Unix = 1, - /// AF_INET 表示IPv4的socket - INet = 2, - /// AF_AX25 表示AMPR AX.25的socket - AX25 = 3, - /// AF_IPX 表示IPX的socket - IPX = 4, - /// AF_APPLETALK 表示Appletalk的socket - Appletalk = 5, - /// AF_NETROM 表示AMPR NET/ROM的socket - Netrom = 6, - /// AF_BRIDGE 表示多协议桥接的socket - Bridge = 7, - /// AF_ATMPVC 表示ATM PVCs的socket - Atmpvc = 8, - /// AF_X25 表示X.25的socket - X25 = 9, - /// AF_INET6 表示IPv6的socket - INet6 = 10, - /// AF_ROSE 表示AMPR ROSE的socket - Rose = 11, - /// AF_DECnet Reserved for DECnet project - Decnet = 12, - /// AF_NETBEUI Reserved for 802.2LLC project - Netbeui = 13, - /// AF_SECURITY 表示Security callback的伪AF - Security = 14, - /// AF_KEY 表示Key management API - Key = 15, - /// AF_NETLINK 表示Netlink的socket - Netlink = 16, - /// AF_PACKET 表示Low level packet interface - Packet = 17, - /// AF_ASH 表示Ash - Ash = 18, - /// AF_ECONET 表示Acorn Econet - Econet = 19, - /// AF_ATMSVC 表示ATM SVCs - Atmsvc = 20, - /// AF_RDS 表示Reliable Datagram Sockets - Rds = 21, - /// AF_SNA 表示Linux SNA Project - Sna = 22, - /// AF_IRDA 表示IRDA sockets - Irda = 23, - /// AF_PPPOX 表示PPPoX sockets - Pppox = 24, - /// AF_WANPIPE 表示WANPIPE API sockets - WanPipe = 25, - /// AF_LLC 表示Linux LLC - Llc = 26, - /// AF_IB 表示Native InfiniBand address - /// 介绍:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/configuring_infiniband_and_rdma_networks/index#understanding-infiniband-and-rdma_configuring-infiniband-and-rdma-networks - Ib = 27, - /// AF_MPLS 表示MPLS - Mpls = 28, - /// AF_CAN 表示Controller Area Network - Can = 29, - /// AF_TIPC 表示TIPC sockets - Tipc = 30, - /// AF_BLUETOOTH 表示Bluetooth sockets - Bluetooth = 31, - /// AF_IUCV 表示IUCV sockets - Iucv = 32, - /// AF_RXRPC 表示RxRPC sockets - Rxrpc = 33, - /// AF_ISDN 表示mISDN sockets - Isdn = 34, - /// AF_PHONET 表示Phonet sockets - Phonet = 35, - /// AF_IEEE802154 表示IEEE 802.15.4 sockets - Ieee802154 = 36, - /// AF_CAIF 表示CAIF sockets - Caif = 37, - /// AF_ALG 表示Algorithm sockets - Alg = 38, - /// AF_NFC 表示NFC sockets - Nfc = 39, - /// AF_VSOCK 表示vSockets - Vsock = 40, - /// AF_KCM 表示Kernel Connection Multiplexor - Kcm = 41, - /// AF_QIPCRTR 表示Qualcomm IPC Router - Qipcrtr = 42, - /// AF_SMC 表示SMC-R sockets. 
- /// reserve number for PF_SMC protocol family that reuses AF_INET address family - Smc = 43, - /// AF_XDP 表示XDP sockets - Xdp = 44, - /// AF_MCTP 表示Management Component Transport Protocol - Mctp = 45, - /// AF_MAX 表示最大的地址族 - Max = 46, -} - -impl TryFrom for AddressFamily { - type Error = SystemError; - fn try_from(x: u16) -> Result { - use num_traits::FromPrimitive; - return ::from_u16(x).ok_or(SystemError::EINVAL); - } -} - -/// @brief posix套接字类型的枚举(这些值与linux内核中的值一致) -#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)] -pub enum PosixSocketType { - Stream = 1, - Datagram = 2, - Raw = 3, - Rdm = 4, - SeqPacket = 5, - Dccp = 6, - Packet = 10, -} - -impl TryFrom for PosixSocketType { - type Error = SystemError; - fn try_from(x: u8) -> Result { - use num_traits::FromPrimitive; - return ::from_u8(x).ok_or(SystemError::EINVAL); - } -} - -/// ### 为socket提供无锁的poll方法 -/// -/// 因为在网卡中断中,需要轮询socket的状态,如果使用socket文件或者其inode来poll -/// 在当前的设计,会必然死锁,所以引用这一个设计来解决,提供无🔓的poll -pub struct SocketPollMethod; - -impl SocketPollMethod { - pub fn poll(socket: &socket::Socket, handle_item: &SocketHandleItem) -> EPollEventType { - let shutdown = handle_item.shutdown_type(); - match socket { - socket::Socket::Udp(udp) => Self::udp_poll(udp, shutdown), - socket::Socket::Tcp(tcp) => Self::tcp_poll(tcp, shutdown, handle_item.is_posix_listen), - socket::Socket::Raw(raw) => Self::raw_poll(raw, shutdown), - _ => todo!(), - } - } - - pub fn tcp_poll( - socket: &tcp::Socket, - shutdown: ShutdownType, - is_posix_listen: bool, - ) -> EPollEventType { - let mut events = EPollEventType::empty(); - // debug!("enter tcp_poll! is_posix_listen:{}", is_posix_listen); - // 处理listen的socket - if is_posix_listen { - // 如果是listen的socket,那么只有EPOLLIN和EPOLLRDNORM - if socket.is_active() { - events.insert(EPollEventType::EPOLL_LISTEN_CAN_ACCEPT); - } - - // debug!("tcp_poll listen socket! events:{:?}", events); - return events; - } - - let state = socket.state(); - - if shutdown == ShutdownType::SHUTDOWN_MASK || state == tcp::State::Closed { - events.insert(EPollEventType::EPOLLHUP); - } - - if shutdown.contains(ShutdownType::RCV_SHUTDOWN) { - events.insert( - EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM | EPollEventType::EPOLLRDHUP, - ); - } - - // Connected or passive Fast Open socket? 
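    // Note: the mask computation being removed here (shutdown flags + TCP state ->
    // EPOLL bits) is re-implemented per socket as `TcpSocket::update_io_events` /
    // `TcpStream::update_io_events` in net/socket/inet/stream/mod.rs, so readiness
    // can be computed without the global HANDLE_MAP, which this patch deletes.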
- if state != tcp::State::SynSent && state != tcp::State::SynReceived { - // socket有可读数据 - if socket.can_recv() { - events.insert(EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM); - } - - if !(shutdown.contains(ShutdownType::SEND_SHUTDOWN)) { - // 缓冲区可写(这里判断可写的逻辑好像跟linux不太一样) - if socket.send_queue() < socket.send_capacity() { - events.insert(EPollEventType::EPOLLOUT | EPollEventType::EPOLLWRNORM); - } else { - // TODO:触发缓冲区已满的信号SIGIO - todo!("A signal SIGIO that the buffer is full needs to be sent"); - } - } else { - // 如果我们的socket关闭了SEND_SHUTDOWN,epoll事件就是EPOLLOUT - events.insert(EPollEventType::EPOLLOUT | EPollEventType::EPOLLWRNORM); - } - } else if state == tcp::State::SynSent { - events.insert(EPollEventType::EPOLLOUT | EPollEventType::EPOLLWRNORM); - } - - // socket发生错误 - // TODO: 这里的逻辑可能有问题,需要进一步验证是否is_active()==false就代表socket发生错误 - if !socket.is_active() { - events.insert(EPollEventType::EPOLLERR); - } - - events - } - - pub fn udp_poll(socket: &udp::Socket, shutdown: ShutdownType) -> EPollEventType { - let mut event = EPollEventType::empty(); - - if shutdown.contains(ShutdownType::RCV_SHUTDOWN) { - event.insert( - EPollEventType::EPOLLRDHUP | EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM, - ); - } - if shutdown.contains(ShutdownType::SHUTDOWN_MASK) { - event.insert(EPollEventType::EPOLLHUP); - } - - if socket.can_recv() { - event.insert(EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM); - } - - if socket.can_send() { - event.insert( - EPollEventType::EPOLLOUT - | EPollEventType::EPOLLWRNORM - | EPollEventType::EPOLLWRBAND, - ); - } else { - // TODO: 缓冲区空间不够,需要使用信号处理 - todo!() - } - - return event; - } - - pub fn raw_poll(socket: &raw::Socket, shutdown: ShutdownType) -> EPollEventType { - //debug!("enter raw_poll!"); - let mut event = EPollEventType::empty(); - - if shutdown.contains(ShutdownType::RCV_SHUTDOWN) { - event.insert( - EPollEventType::EPOLLRDHUP | EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM, - ); - } - if shutdown.contains(ShutdownType::SHUTDOWN_MASK) { - event.insert(EPollEventType::EPOLLHUP); - } - - if socket.can_recv() { - //debug!("poll can recv!"); - event.insert(EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM); - } else { - //debug!("poll can not recv!"); - } - - if socket.can_send() { - //debug!("poll can send!"); - event.insert( - EPollEventType::EPOLLOUT - | EPollEventType::EPOLLWRNORM - | EPollEventType::EPOLLWRBAND, - ); - } else { - //debug!("poll can not send!"); - // TODO: 缓冲区空间不够,需要使用信号处理 - todo!() - } - return event; - } -} diff --git a/kernel/src/net/socket/handle.rs b/kernel/src/net/socket/old/handle.rs similarity index 70% rename from kernel/src/net/socket/handle.rs rename to kernel/src/net/socket/old/handle.rs index 97701ace0..571e69686 100644 --- a/kernel/src/net/socket/handle.rs +++ b/kernel/src/net/socket/old/handle.rs @@ -1,5 +1,4 @@ use ida::IdAllocator; -use smoltcp::iface::SocketHandle; int_like!(KernelHandle, usize); @@ -8,14 +7,14 @@ int_like!(KernelHandle, usize); /// 比如,在socket被关闭时,自动释放socket的资源,通知系统的其他组件。 #[derive(Debug, Hash, Eq, PartialEq, Clone, Copy)] pub enum GlobalSocketHandle { - Smoltcp(SocketHandle), + Smoltcp(smoltcp::iface::SocketHandle), Kernel(KernelHandle), } static KERNEL_HANDLE_IDA: IdAllocator = IdAllocator::new(0, usize::MAX); impl GlobalSocketHandle { - pub fn new_smoltcp_handle(handle: SocketHandle) -> Self { + pub fn new_smoltcp_handle(handle: smoltcp::iface::SocketHandle) -> Self { return Self::Smoltcp(handle); } @@ -23,7 +22,7 @@ impl GlobalSocketHandle { return 
Self::Kernel(KernelHandle::new(KERNEL_HANDLE_IDA.alloc().unwrap())); } - pub fn smoltcp_handle(&self) -> Option { + pub fn smoltcp_handle(&self) -> Option { if let Self::Smoltcp(sh) = *self { return Some(sh); } @@ -37,3 +36,9 @@ impl GlobalSocketHandle { None } } + +impl From for GlobalSocketHandle { + fn from(handle: smoltcp::iface::SocketHandle) -> Self { + return Self::new_smoltcp_handle(handle); + } +} diff --git a/kernel/src/net/socket/old/icmp.rs b/kernel/src/net/socket/old/icmp.rs new file mode 100644 index 000000000..5bb45a429 --- /dev/null +++ b/kernel/src/net/socket/old/icmp.rs @@ -0,0 +1,93 @@ + + + +use system_error::SystemError::{self, *}; +use smoltcp; +use super::{common::{BoundInner, Types}, raw::{ + DEFAULT_METADATA_BUF_SIZE, DEFAULT_RX_BUF_SIZE, DEFAULT_TX_BUF_SIZE +}}; + +pub type SmolIcmpSocket = smoltcp::socket::icmp::Socket<'static>; + +#[derive(Debug)] +pub struct UnboundIcmp { + socket: SmolIcmpSocket, +} + +impl UnboundIcmp { + pub fn new() -> Self { + let rx_buffer = smoltcp::socket::icmp::PacketBuffer::new( + vec![smoltcp::socket::icmp::PacketMetadata::EMPTY; DEFAULT_METADATA_BUF_SIZE], + vec![0; DEFAULT_RX_BUF_SIZE], + ); + let tx_buffer = smoltcp::socket::icmp::PacketBuffer::new( + vec![smoltcp::socket::icmp::PacketMetadata::EMPTY; DEFAULT_METADATA_BUF_SIZE], + vec![0; DEFAULT_TX_BUF_SIZE], + ); + let socket = SmolIcmpSocket::new(rx_buffer, tx_buffer); + + return Self { socket }; + } + + pub fn ephemeral_bind(self, remote: smoltcp::wire::IpAddress) -> Result { + Ok( BoundIcmp { + inner: BoundInner::bind_ephemeral(self.socket, smoltcp::wire::IpEndpoint::new(remote, 0))?, + }) + } + + pub fn bind(mut self, endpoint: smoltcp::wire::IpEndpoint) -> Result { + if self.socket.bind(smoltcp::socket::icmp::Endpoint::Udp( + smoltcp::wire::IpListenEndpoint::from(endpoint) + )).is_err() { + return Err(EINVAL); + } + Ok( BoundIcmp { + inner: BoundInner::bind(self.socket, endpoint.addr)?, + }) + } +} + +#[derive(Debug)] +pub struct BoundIcmp { + inner: BoundInner, +} + +impl BoundIcmp { + fn with_mut_socket(&self, f: F) -> T + where + F: FnMut(&mut SmolIcmpSocket) -> T, + { + self.inner.with_mut(f) + } + + pub fn send(&self, buf: &[u8], dst: smoltcp::wire::IpEndpoint) -> Result<(), SystemError> { + if buf.len() > DEFAULT_TX_BUF_SIZE { + return Err(EMSGSIZE); + } + use smoltcp::socket::icmp::SendError::*; + self.with_mut_socket(|socket| { + match socket.send_slice(buf, dst.addr) { + Ok(_) => Ok(()), + Err(Unaddressable) => Err(ECONNREFUSED), + Err(BufferFull) => Err(ENOBUFS), + } + }) + } + + pub fn recv(&self, buf: &mut [u8]) -> Result<(usize, smoltcp::wire::IpAddress), SystemError> { + use smoltcp::socket::icmp::RecvError::*; + self.with_mut_socket(|socket| { + match socket.recv_slice(buf) { + Ok((size, metadata)) => Ok((size, metadata)), + Err(Exhausted) => Err(ENOBUFS), + Err(Truncated) => Err(EMSGSIZE), + } + }) + } +} + +#[derive(Debug)] +pub enum IcmpInner { + Unbound(UnboundIcmp), + Bound(BoundIcmp), +} \ No newline at end of file diff --git a/kernel/src/net/socket/old/inode/mod.rs b/kernel/src/net/socket/old/inode/mod.rs new file mode 100644 index 000000000..4fc6e1ef2 --- /dev/null +++ b/kernel/src/net/socket/old/inode/mod.rs @@ -0,0 +1,264 @@ +use core::{any::Any, fmt::Debug, sync::atomic::AtomicUsize}; + +use alloc::{ + boxed::Box, + collections::LinkedList, + string::String, + sync::{Arc, Weak}, + vec::Vec, +}; +use hashbrown::HashMap; +use log::warn; +use smoltcp::{ + iface::SocketSet, + socket::{self, raw, tcp, udp}, +}; +use system_error::SystemError; + +use 
crate::{ + arch::rand::rand, driver::net::Iface, filesystem::vfs::{ + file::FileMode, syscall::ModeType, FilePrivateData, FileSystem, FileType, IndexNode, + Metadata, + }, libs::{ + rwlock::{RwLock, RwLockWriteGuard}, + spinlock::{SpinLock, SpinLockGuard}, + wait_queue::EventWaitQueue, + }, process::{Pid, ProcessManager}, sched::{schedule, SchedMode} +}; + +use super::{ + handle::GlobalSocketHandle, inet::{RawSocket, TcpSocket, BoundUdp}, unix::{SeqpacketSocket, StreamSocket}, Socket, Options, InetSocketType, PORT_MANAGER +}; + +use super::super::{ + event_poll::{EPollEventType, EPollItem, EventPoll}, Endpoint, Protocol, ShutdownType, SocketOptionsLevel +}; + +/// # Socket在文件系统中的inode封装 +#[derive(Debug)] +pub struct SocketInode { + bound_iface: Option>, + /// socket的实现 + socket: SpinLock>, + /// following are socket commons + is_listen: bool, + + shutdown_type: RwLock, + + epoll_item: SpinLock>>, + + wait_queue: Arc, +} + +impl SocketInode { + pub fn new(socket: Box, wait_queue: Option>) -> Arc { + Arc::new(Self { + bound_iface: None, + socket: SpinLock::new(socket), + is_listen: false, + epoll_item: SpinLock::new(LinkedList::new()), + shutdown_type: RwLock::new(ShutdownType::empty()), + wait_queue: wait_queue.unwrap_or(Arc::new(EventWaitQueue::new())), + }) + } + + #[inline] + pub fn inner(&self) -> SpinLockGuard> { + self.socket.lock() + } + + pub unsafe fn inner_no_preempt(&self) -> SpinLockGuard> { + self.socket.lock_no_preempt() + } + + // ==> epoll api + pub fn add_epoll(&self, epitem: Arc) { + self.epoll_item.lock_irqsave().push_back(epitem) + } + + pub fn remove_epoll(&self, epoll: &Weak>) -> Result<(), SystemError> { + let is_remove = !self + .epoll_item + .lock_irqsave() + .extract_if(|x| x.epoll().ptr_eq(epoll)) + .collect::>() + .is_empty(); + + if is_remove { + return Ok(()); + } + + Err(SystemError::ENOENT) + } + + fn clear_epoll(&self) -> Result<(), SystemError> { + for epitem in self.epoll_item.lock_irqsave().iter() { + let epoll = epitem.epoll(); + + if let Some(epoll) = epoll.upgrade() { + EventPoll::ep_remove(&mut epoll.lock_irqsave(), epitem.fd(), None)?; + } + } + + Ok(()) + } + // <== epoll api + + /// # wakeup_any + /// 唤醒该队列上等待events的进程 + /// ## 参数 + /// - events: 发生的事件 + /// ## Notice + /// 只要触发了events中的任意一件事件,进程都会被唤醒 + pub fn wakeup_any(&self, events: u64) { + self.wait_queue.wakeup_any(events); + } + + /// ## 在socket的等待队列上睡眠 + pub fn sleep(&self, events: u64) { + unsafe { + ProcessManager::preempt_disable(); + self.wait_queue.sleep_without_schedule(events); + ProcessManager::preempt_enable(); + } + schedule(SchedMode::SM_NONE); + } + + // ==> shutdown_type api + pub fn shutdown_type(&self) -> ShutdownType { + *self.shutdown_type.read() + } + + pub fn shutdown_type_writer(&mut self) -> RwLockWriteGuard { + self.shutdown_type.write_irqsave() + } + + pub fn reset_shutdown_type(&self) { + *self.shutdown_type.write() = ShutdownType::empty(); + } + // <== shutdown_type api +} + +impl IndexNode for SocketInode { + fn open( + &self, + _data: SpinLockGuard, + _mode: &FileMode, + ) -> Result<(), SystemError> { + Ok(()) + } + + fn close(&self, _data: SpinLockGuard) -> Result<(), SystemError> { + let mut socket = self.socket.lock_irqsave(); + + if socket.metadata().socket_type == InetSocketType::Unix { + return Ok(()); + } + + self.clear_epoll()?; + + socket.close(); + + if let Some(iface) = self.bound_iface.as_ref() { + if let Some(Endpoint::Ip(Some(ip))) = socket.endpoint() { + iface.port_manager().unbind_port(socket.metadata().socket_type, ip.port); + } + iface.poll()?; + 
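            // Note: unbinding the port above frees it for reuse as soon as the socket is
            // closed, and this extra poll gives smoltcp a chance to process the close
            // immediately (for TCP, transmitting the FIN) instead of waiting for the
            // next interrupt or timer tick.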
} + + Ok(()) + } + + fn read_at( + &self, + _offset: usize, + len: usize, + buf: &mut [u8], + data: SpinLockGuard, + ) -> Result { + drop(data); + + let read_result = loop { + if let Some(iface) = self.bound_iface.as_ref() { + iface.poll()?; + } + let read_result + = self.socket.lock_no_preempt().read(&mut buf[0..len]); + if self + .socket + .lock() + .metadata() + .options + .contains(Options::BLOCK) + { + match read_result { + Ok((x, _)) => break Ok(x), + Err(SystemError::EAGAIN_OR_EWOULDBLOCK) => { + self.sleep(EPollEventType::EPOLLIN.bits() as u64); + continue; + } + Err(e) => break Err(e), + } + } + }; + if let Some(iface) = self.bound_iface.as_ref() { + iface.poll()?; + } + return read_result; + } + + fn write_at( + &self, + _offset: usize, + len: usize, + buf: &[u8], + data: SpinLockGuard, + ) -> Result { + drop(data); + let write_len = self.socket.lock_no_preempt().write(&buf[0..len], None)?; + if let Some(iface) = self.bound_iface.as_ref() { + iface.poll()?; + } + return Ok(write_len); + } + + fn poll(&self, _private_data: &FilePrivateData) -> Result { + let mut events = self.socket.lock_irqsave().poll(); + if self.shutdown_type().contains(ShutdownType::RCV_SHUTDOWN) { + events.insert( + EPollEventType::EPOLLRDHUP | EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM, + ); + } + if self.shutdown_type().contains(ShutdownType::SHUTDOWN_MASK) { + events.insert(EPollEventType::EPOLLHUP); + } + + return Ok(events.bits() as usize); + } + + fn fs(&self) -> Arc { + todo!() + } + + fn as_any_ref(&self) -> &dyn Any { + self + } + + fn list(&self) -> Result, SystemError> { + return Err(SystemError::ENOTDIR); + } + + fn metadata(&self) -> Result { + let meta = Metadata { + mode: ModeType::from_bits_truncate(0o755), + file_type: FileType::Socket, + ..Default::default() + }; + + return Ok(meta); + } + + fn resize(&self, _len: usize) -> Result<(), SystemError> { + return Ok(()); + } +} \ No newline at end of file diff --git a/kernel/src/net/socket/old/old.rs b/kernel/src/net/socket/old/old.rs new file mode 100644 index 000000000..0c05da714 --- /dev/null +++ b/kernel/src/net/socket/old/old.rs @@ -0,0 +1,713 @@ +use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use log::{debug, error, warn}; +use smoltcp::{ + socket::{raw, tcp, udp}, + wire, +}; +use system_error::SystemError; + +use crate::{ + driver::net::Iface, + libs::rwlock::RwLock, + net::{ + event_poll::EPollEventType, net_core::poll_ifaces, socket::tcp_def::TcpOptions, syscall::PosixSocketOption, Endpoint, Protocol, ShutdownType, NET_DEVICES, SocketOptionsLevel + }, +}; + +use crate::net::socket::{ + handle::GlobalSocketHandle, PosixSocketHandleItem, Socket, SocketHandleItem, SocketMetadata, + Options, SocketPollMethod, InetSocketType, HANDLE_MAP, PORT_MANAGER, SOCKET_SET, ip_def::IpOptions, +}; + +pub mod init; + + +/// @brief 表示 tcp socket +/// +/// https://man7.org/linux/man-pages/man7/tcp.7.html +#[derive(Debug, Clone)] +pub struct TcpSocket { + handles: Vec, + local_endpoint: Option, // save local endpoint for bind() + is_listening: bool, + metadata: SocketMetadata, + posix_item: Arc, +} + +impl TcpSocket { + /// 元数据的缓冲区的大小 + pub const DEFAULT_METADATA_BUF_SIZE: usize = 1024; + /// 默认的接收缓冲区的大小 receive + pub const DEFAULT_RX_BUF_SIZE: usize = 512 * 1024; + /// 默认的发送缓冲区的大小 transmiss + pub const DEFAULT_TX_BUF_SIZE: usize = 512 * 1024; + + /// TcpSocket的特殊事件,用于在事件等待队列上sleep + pub const CAN_CONNECT: u64 = 1u64 << 63; + pub const CAN_ACCPET: u64 = 1u64 << 62; + + /// @brief 创建一个tcp的socket + /// + /// @param options socket的选项 + /// + /// 
@return 返回创建的tcp的socket + pub fn new(options: Options) -> Self { + // 创建handles数组并把socket添加到socket集合中,并得到socket的句柄 + let handles: Vec = vec![GlobalSocketHandle::new_smoltcp_handle( + SOCKET_SET.lock_irqsave().add(Self::create_new_socket()), + )]; + + let metadata = SocketMetadata::new( + InetSocketType::Tcp, + Self::DEFAULT_RX_BUF_SIZE, + Self::DEFAULT_TX_BUF_SIZE, + Self::DEFAULT_METADATA_BUF_SIZE, + options, + ); + let posix_item = Arc::new(PosixSocketHandleItem::new(None)); + // debug!("when there's a new tcp socket,its'len: {}",handles.len()); + + return Self { + handles, + local_endpoint: None, + is_listening: false, + metadata, + posix_item, + }; + } + + fn do_listen( + &mut self, + socket: &mut tcp::Socket, + local_endpoint: wire::IpEndpoint, + ) -> Result<(), SystemError> { + let listen_result = if local_endpoint.addr.is_unspecified() { + socket.listen(local_endpoint.port) + } else { + socket.listen(local_endpoint) + }; + return match listen_result { + Ok(()) => { + // debug!( + // "Tcp Socket Listen on {local_endpoint}, open?:{}", + // socket.is_open() + // ); + self.is_listening = true; + + Ok(()) + } + Err(_) => Err(SystemError::EINVAL), + }; + } + + /// # create_new_socket - 创建新的TCP套接字 + /// + /// 该函数用于创建一个新的TCP套接字,并返回该套接字的引用。 + fn create_new_socket() -> tcp::Socket<'static> { + // 初始化tcp的buffer + let rx_buffer = tcp::SocketBuffer::new(vec![0; Self::DEFAULT_RX_BUF_SIZE]); + let tx_buffer = tcp::SocketBuffer::new(vec![0; Self::DEFAULT_TX_BUF_SIZE]); + tcp::Socket::new(rx_buffer, tx_buffer) + } + + /// listening状态的posix socket是需要特殊处理的 + fn tcp_poll_listening(&self) -> EPollEventType { + let socketset_guard = SOCKET_SET.lock_irqsave(); + + let can_accept = self.handles.iter().any(|h| { + if let Some(sh) = h.smoltcp_handle() { + let socket = socketset_guard.get::(sh); + socket.is_active() + } else { + false + } + }); + + if can_accept { + return EPollEventType::EPOLL_LISTEN_CAN_ACCEPT; + } else { + return EPollEventType::empty(); + } + } + + fn sk_setsockopt( + &self, + _socket: &mut tcp::Socket, + _level: SocketOptionsLevel, + optname: PosixSocketOption, + _optval: &[u8], + ) -> Result<(), SystemError> { + use PosixSocketOption::*; + use SystemError::*; + + debug!("[SYS] [TCP] [setsockopt: {:?}]", optname); + + if optname == SO_BINDTODEVICE { + todo!("SO_BINDTODEVICE"); + } + + match optname { + SO_REUSEADDR => { + return Ok(()); + } + SO_TYPE => {} + SO_PROTOCOL => {} + SO_DOMAIN => {} + SO_ERROR => { + return Err(ENOPROTOOPT); + } + + SO_TIMESTAMP_OLD => {} + SO_TIMESTAMP_NEW => {} + SO_TIMESTAMPNS_OLD => {} + SO_TIMESTAMPING_OLD => {} + SO_RCVTIMEO_OLD => {} + SO_SNDTIMEO_OLD => {} + + // if define CONFIG_NET_RX_BUSY_POLL + SO_BUSY_POLL | SO_PREFER_BUSY_POLL | SO_BUSY_POLL_BUDGET => { + debug!("[SYS] [TCP] [setsockopt: {:?}] not supported", optname); + return Err(ENOPROTOOPT); + } + // end if + + opt => { + debug!("[SYS] [TCP] [setsockopt: {:?}] not supported", opt); + return Err(ENOPROTOOPT); + } + } + return Ok(()); + } + + fn do_tcp_setsockopt( + &self, + socket: &mut tcp::Socket, + _level: SocketOptionsLevel, + optname: usize, + optval: &[u8], + ) -> Result<(), SystemError> { + + let boolval = optval[0] != 0; + + match TcpOptions::from_bits_truncate(optname as u32) { + TcpOptions::TCP_CONGESTION => { + todo!("TCP_CONGESTION"); + } + TcpOptions::TCP_QUICKACK => { + if boolval { + socket.set_ack_delay(None); + } else { + socket.set_ack_delay(Some(smoltcp::time::Duration::from_millis(10))); + } + } + TcpOptions::TCP_NODELAY => { + socket.set_nagle_enabled(boolval); + } + 
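            // Note: the value below is read as a native-endian u32 number of
            // milliseconds and mapped onto smoltcp's per-socket timeout, which roughly
            // approximates Linux's TCP_USER_TIMEOUT (the maximum time transmitted data
            // may stay unacknowledged before the connection is torn down).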
TcpOptions::TCP_USER_TIMEOUT => { + let duration = u32::from_ne_bytes(optval.try_into().map_err(|_| SystemError::EINVAL)?) as u64; + socket.set_timeout(Some(smoltcp::time::Duration::from_millis(duration))); + } + TcpOptions::TCP_KEEPINTVL => { + let duration = u32::from_ne_bytes(optval.try_into().map_err(|_| SystemError::EINVAL)?) as u64; + socket.set_keep_alive(Some(smoltcp::time::Duration::from_millis(duration))); + } + // TcpOptions::TCP_NL + _ => { + return Err(SystemError::ENOPROTOOPT); + } + } + return Ok(()); + } + + fn do_ip_setsockopt( + &self, + _level: SocketOptionsLevel, + optname: usize, + _optval: &[u8], + ) -> Result<(), SystemError> { + debug!("ip_setsockopt: optname={}", optname); + match IpOptions::from_bits_truncate(optname as u32) { + IpOptions::IP_LOCAL_PORT_RANGE => {} + _ => {} + } + return Ok(()); + } +} + +impl Socket for TcpSocket { + + fn close(&mut self) { + for handle in self.handles.iter() { + { + let mut socket_set_guard = SOCKET_SET.lock_irqsave(); + let smoltcp_handle = handle.smoltcp_handle().unwrap(); + socket_set_guard + .get_mut::(smoltcp_handle) + .close(); + drop(socket_set_guard); + } + poll_ifaces(); + SOCKET_SET + .lock_irqsave() + .remove(handle.smoltcp_handle().unwrap()); + // debug!("[Socket] [TCP] Close: {:?}", handle); + } + } + + fn read(&self, buf: &mut [u8]) -> (Result, Endpoint) { + if HANDLE_MAP + .read_irqsave() + .get(&self.socket_handle()) + .unwrap() + .shutdown_type() + .contains(ShutdownType::RCV_SHUTDOWN) + { + return (Err(SystemError::ENOTCONN), Endpoint::Ip(None)); + } + + let mut socket_set_guard = SOCKET_SET.lock_irqsave(); + + let socket = socket_set_guard + .get_mut::(self.handles.first().unwrap().smoltcp_handle().unwrap()); + + // 如果socket已经关闭,返回错误 + if !socket.is_active() { + // debug!("Tcp Socket Read Error, socket is closed"); + return (Err(SystemError::ENOTCONN), Endpoint::Ip(None)); + } + + if socket.may_recv() { + match socket.recv_slice(buf) { + Ok(0) => { + return (Err(SystemError::EAGAIN_OR_EWOULDBLOCK), Endpoint::Ip(None)); + } + Ok(size) => { // size: usize > 0 + let endpoint = if let Some(p) = socket.remote_endpoint() { + p + } else { + return (Err(SystemError::ENOTCONN), Endpoint::Ip(None)); + }; + + drop(socket_set_guard); + return (Ok(size), Endpoint::Ip(Some(endpoint))); + } + Err(tcp::RecvError::InvalidState) => { + warn!("Tcp Socket Read Error, InvalidState"); + return (Err(SystemError::ENOTCONN), Endpoint::Ip(None)); + } + Err(tcp::RecvError::Finished) => { + // 对端写端已关闭,我们应该关闭读端 + HANDLE_MAP + .write_irqsave() + .get_mut(&self.socket_handle()) + .unwrap() + .shutdown_type_writer() + .insert(ShutdownType::RCV_SHUTDOWN); + return (Err(SystemError::ENOTCONN), Endpoint::Ip(None)); + } + } + } else { + return (Err(SystemError::ENOTCONN), Endpoint::Ip(None)); + } + + } + + fn write(&self, buf: &[u8], _to: Option) -> Result { + if HANDLE_MAP + .read_irqsave() + .get(&self.socket_handle()) + .unwrap() + .shutdown_type() + .contains(ShutdownType::RCV_SHUTDOWN) + { + return Err(SystemError::ENOTCONN); + } + // debug!("tcp socket:write, socket'len={}",self.handle.len()); + + let mut socket_set_guard = SOCKET_SET.lock_irqsave(); + + let socket = socket_set_guard + .get_mut::(self.handles.first().unwrap().smoltcp_handle().unwrap()); + + if socket.is_open() { + if socket.can_send() { + match socket.send_slice(buf) { + Ok(size) => { + drop(socket_set_guard); + poll_ifaces(); + return Ok(size); + } + Err(e) => { + error!("Tcp Socket Write Error {e:?}"); + return Err(SystemError::ENOBUFS); + } + } + } else { + return 
Err(SystemError::ENOBUFS); + } + } + + return Err(SystemError::ENOTCONN); + } + + fn poll(&self) -> EPollEventType { + // 处理listen的快速路径 + if self.is_listening { + return self.tcp_poll_listening(); + } + // 由于上面处理了listening状态,所以这里只处理非listening状态,这种情况下只有一个handle + + assert!(self.handles.len() == 1); + + let mut socket_set_guard = SOCKET_SET.lock_irqsave(); + // debug!("tcp socket:poll, socket'len={}",self.handle.len()); + + let socket = socket_set_guard + .get_mut::(self.handles.first().unwrap().smoltcp_handle().unwrap()); + let handle_map_guard = HANDLE_MAP.read_irqsave(); + let handle_item = handle_map_guard.get(&self.socket_handle()).unwrap(); + let shutdown_type = handle_item.shutdown_type(); + let is_posix_listen = handle_item.is_posix_listen; + drop(handle_map_guard); + + return SocketPollMethod::tcp_poll(socket, shutdown_type, is_posix_listen); + } + + fn connect(&mut self, endpoint: Endpoint) -> Result<(), SystemError> { + let mut sockets = SOCKET_SET.lock_irqsave(); + // debug!("tcp socket:connect, socket'len={}", self.handles.len()); + + let socket = + sockets.get_mut::(self.handles.first().unwrap().smoltcp_handle().unwrap()); + + if let Endpoint::Ip(Some(ip)) = endpoint { + let temp_port = PORT_MANAGER.get_ephemeral_port(self.metadata.socket_type)?; + // 检测端口是否被占用 + PORT_MANAGER.bind_port(self.metadata.socket_type, temp_port)?; + + // debug!("temp_port: {}", temp_port); + let iface: Arc = NET_DEVICES.write_irqsave().get(&0).unwrap().clone(); + let mut inner_iface = iface.smol_iface().lock(); + // debug!("to connect: {ip:?}"); + + match socket.connect(inner_iface.context(), ip, temp_port) { + Ok(()) => { + // avoid deadlock + drop(inner_iface); + drop(iface); + drop(sockets); + loop { + poll_ifaces(); + let mut sockets = SOCKET_SET.lock_irqsave(); + let socket = sockets.get_mut::( + self.handles.first().unwrap().smoltcp_handle().unwrap(), + ); + + match socket.state() { + tcp::State::Established => { + return Ok(()); + } + tcp::State::SynSent => { + drop(sockets); + self.posix_item.sleep(Self::CAN_CONNECT); + } + _ => { + return Err(SystemError::ECONNREFUSED); + } + } + } + } + Err(e) => { + // error!("Tcp Socket Connect Error {e:?}"); + match e { + tcp::ConnectError::InvalidState => return Err(SystemError::EISCONN), + tcp::ConnectError::Unaddressable => return Err(SystemError::EADDRNOTAVAIL), + } + } + } + } else { + return Err(SystemError::EINVAL); + } + } + + /// @brief tcp socket 监听 local_endpoint 端口 + /// + /// @param backlog 未处理的连接队列的最大长度 + fn listen(&mut self, backlog: usize) -> Result<(), SystemError> { + if self.is_listening { + return Ok(()); + } + + // debug!( + // "tcp socket:listen, socket'len={}, backlog = {backlog}", + // self.handles.len() + // ); + + let local_endpoint = self.local_endpoint.ok_or(SystemError::EINVAL)?; + let mut sockets = SOCKET_SET.lock_irqsave(); + // 获取handle的数量 + let handlen = self.handles.len(); + let backlog = handlen.max(backlog); + + // 添加剩余需要构建的socket + // debug!("tcp socket:before listen, socket'len={}", self.handle_list.len()); + let mut handle_guard = HANDLE_MAP.write_irqsave(); + let socket_handle_item_0 = handle_guard.get_mut(&self.socket_handle()).unwrap(); + socket_handle_item_0.is_posix_listen = true; + + self.handles.extend((handlen..backlog).map(|_| { + let socket = Self::create_new_socket(); + let handle = GlobalSocketHandle::new_smoltcp_handle(sockets.add(socket)); + let mut handle_item = SocketHandleItem::new(Arc::downgrade(&self.posix_item)); + handle_item.is_posix_listen = true; + handle_guard.insert(handle, handle_item); + 
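// Each handle produced by this closure is one extra smoltcp TCP socket that
// is put into the listen state further below, which is how the POSIX backlog
// is emulated: one listening socket per pending-connection slot. A std-only
// sketch of the sizing rule applied above ("handlen.max(backlog)"); the
// helper name is illustrative, not an existing kernel API:
fn effective_backlog(existing_handles: usize, requested_backlog: usize) -> usize {
    // The pool never shrinks: keep at least the sockets already created.
    existing_handles.max(requested_backlog)
}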
handle + })); + + // debug!("tcp socket:listen, socket'len={}", self.handles.len()); + // debug!("tcp socket:listen, backlog={backlog}"); + + // 监听所有的socket + for i in 0..backlog { + let handle = self.handles.get(i).unwrap(); + + let socket = sockets.get_mut::(handle.smoltcp_handle().unwrap()); + + if !socket.is_listening() { + // debug!("Tcp Socket is already listening on {local_endpoint}"); + self.do_listen(socket, local_endpoint)?; + } + // debug!("Tcp Socket before listen, open={}", socket.is_open()); + } + + return Ok(()); + } + + fn bind(&mut self, endpoint: Endpoint) -> Result<(), SystemError> { + if let Endpoint::Ip(Some(mut ip)) = endpoint { + if ip.port == 0 { + ip.port = PORT_MANAGER.get_ephemeral_port(self.metadata.socket_type)?; + } + + // 检测端口是否已被占用 + PORT_MANAGER.bind_port(self.metadata.socket_type, ip.port)?; + // debug!("tcp socket:bind, socket'len={}",self.handle.len()); + + self.local_endpoint = Some(ip); + self.is_listening = false; + + return Ok(()); + } + return Err(SystemError::EINVAL); + } + + fn shutdown(&mut self, shutdown_type: crate::net::ShutdownType) -> Result<(), SystemError> { + // TODO:目前只是在表层判断,对端不知晓,后续需使用tcp实现 + HANDLE_MAP + .write_irqsave() + .get_mut(&self.socket_handle()) + .unwrap() + .shutdown_type = RwLock::new(shutdown_type); + return Ok(()); + } + + fn accept(&mut self) -> Result<(Box, Endpoint), SystemError> { + if !self.is_listening { + return Err(SystemError::EINVAL); + } + let endpoint = self.local_endpoint.ok_or(SystemError::EINVAL)?; + loop { + // debug!("tcp accept: poll_ifaces()"); + poll_ifaces(); + // debug!("tcp socket:accept, socket'len={}", self.handle_list.len()); + + let mut sockset = SOCKET_SET.lock_irqsave(); + // Get the corresponding activated handler + let global_handle_index = self.handles.iter().position(|handle| { + let con_smol_sock = sockset.get::(handle.smoltcp_handle().unwrap()); + con_smol_sock.is_active() + }); + + if let Some(handle_index) = global_handle_index { + let con_smol_sock = sockset + .get::(self.handles[handle_index].smoltcp_handle().unwrap()); + + // debug!("[Socket] [TCP] Accept: {:?}", handle); + // handle is connected socket's handle + let remote_ep = con_smol_sock + .remote_endpoint() + .ok_or(SystemError::ENOTCONN)?; + + let tcp_socket = Self::create_new_socket(); + + let new_handle = GlobalSocketHandle::new_smoltcp_handle(sockset.add(tcp_socket)); + + // let handle in TcpSock be the new empty handle, and return the old connected handle + let old_handle = core::mem::replace(&mut self.handles[handle_index], new_handle); + + let metadata = SocketMetadata::new( + InetSocketType::Tcp, + Self::DEFAULT_TX_BUF_SIZE, + Self::DEFAULT_RX_BUF_SIZE, + Self::DEFAULT_METADATA_BUF_SIZE, + self.metadata.options, + ); + + let sock_ret = Box::new(TcpSocket { + handles: vec![old_handle], + local_endpoint: self.local_endpoint, + is_listening: false, + metadata, + posix_item: Arc::new(PosixSocketHandleItem::new(None)), + }); + + { + let mut handle_guard = HANDLE_MAP.write_irqsave(); + // 先删除原来的 + let item = handle_guard.remove(&old_handle).unwrap(); + item.reset_shutdown_type(); + assert!(item.is_posix_listen); + + // 按照smoltcp行为,将新的handle绑定到原来的item + let new_item = SocketHandleItem::new(Arc::downgrade(&sock_ret.posix_item)); + handle_guard.insert(old_handle, new_item); + // 插入新的item + handle_guard.insert(new_handle, item); + + let socket = sockset.get_mut::( + self.handles[handle_index].smoltcp_handle().unwrap(), + ); + + if !socket.is_listening() { + self.do_listen(socket, endpoint)?; + } + + drop(handle_guard); + } + + 
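// At this point the connected smoltcp socket leaves under old_handle inside
// sock_ret, while the freshly created new_handle has taken its slot in
// self.handles and has been re-listened above, so the listen pool keeps its
// size. A std-only sketch of that swap (hypothetical helper, not kernel API):
fn swap_out_ready<T>(pool: &mut [T], index: usize, fresh: T) -> T {
    // Move the ready element out and leave the fresh replacement in its slot.
    core::mem::replace(&mut pool[index], fresh)
}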
return Ok((sock_ret, Endpoint::Ip(Some(remote_ep)))); + } + + drop(sockset); + + // debug!("[TCP] [Accept] sleeping socket with handle: {:?}", self.handles.first().unwrap().smoltcp_handle().unwrap()); + self.posix_item.sleep(Self::CAN_ACCPET); + // debug!("tcp socket:after sleep, handle_guard'len={}",HANDLE_MAP.write_irqsave().len()); + } + } + + fn endpoint(&self) -> Option { + let mut result: Option = self.local_endpoint.map(|x| Endpoint::Ip(Some(x))); + + if result.is_none() { + let sockets = SOCKET_SET.lock_irqsave(); + // debug!("tcp socket:endpoint, socket'len={}",self.handle.len()); + + let socket = + sockets.get::(self.handles.first().unwrap().smoltcp_handle().unwrap()); + if let Some(ep) = socket.local_endpoint() { + result = Some(Endpoint::Ip(Some(ep))); + } + } + return result; + } + + fn peer_endpoint(&self) -> Option { + let sockets = SOCKET_SET.lock_irqsave(); + // debug!("tcp socket:peer_endpoint, socket'len={}",self.handle.len()); + + let socket = + sockets.get::(self.handles.first().unwrap().smoltcp_handle().unwrap()); + return socket.remote_endpoint().map(|x| Endpoint::Ip(Some(x))); + } + + fn metadata(&self) -> SocketMetadata { + self.metadata.clone() + } + + fn box_clone(&self) -> Box { + Box::new(self.clone()) + } + + fn set_option( + &self, + level: SocketOptionsLevel, + optname: usize, + optval: &[u8], + ) -> Result<(), SystemError> { + + let mut socket_set_guard = SOCKET_SET.lock_irqsave(); + let socket = socket_set_guard.get_mut::(self.handles[0].smoltcp_handle().unwrap()); + + if level == SocketOptionsLevel::SOL_SOCKET { + return self.sk_setsockopt(socket, level, PosixSocketOption::try_from(optname as i32)?, optval); + } + + if level != SocketOptionsLevel::SOL_TCP { + return self.do_ip_setsockopt(level, optname, optval); + } + return self.do_tcp_setsockopt(socket, level, optname, optval); + } + + fn socket_handle(&self) -> GlobalSocketHandle { + // debug!("tcp socket:socket_handle, socket'len={}",self.handle.len()); + + *self.handles.first().unwrap() + } + + fn as_any_ref(&self) -> &dyn core::any::Any { + self + } + + fn as_any_mut(&mut self) -> &mut dyn core::any::Any { + self + } +} + + +bitflags! 
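// The option numbers below are the TCP_* socket-option values from the Linux
// uapi (linux/tcp.h); they are sequential identifiers rather than independent
// bit flags, and do_tcp_setsockopt() matches the optname passed from
// userspace directly against them.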
{ + pub struct TcpOptions: u32 { + const TCP_NODELAY = 1; + const TCP_MAXSEG = 2; + const TCP_CORK = 3; + const TCP_KEEPIDLE = 4; + const TCP_KEEPINTVL = 5; + const TCP_KEEPCNT = 6; + const TCP_SYNCNT = 7; + const TCP_LINGER2 = 8; + const TCP_DEFER_ACCEPT = 9; + const TCP_WINDOW_CLAMP = 10; + const TCP_INFO = 11; + const TCP_QUICKACK = 12; + const TCP_CONGESTION = 13; + const TCP_MD5SIG = 14; + const TCP_THIN_LINEAR_TIMEOUTS = 16; + const TCP_THIN_DUPACK = 17; + const TCP_USER_TIMEOUT = 18; + const TCP_REPAIR = 19; + const TCP_REPAIR_QUEUE = 20; + const TCP_QUEUE_SEQ = 21; + const TCP_REPAIR_OPTIONS = 22; + const TCP_FASTOPEN = 23; + const TCP_TIMESTAMP = 24; + const TCP_NOTSENT_LOWAT = 25; + const TCP_CC_INFO = 26; + const TCP_SAVE_SYN = 27; + const TCP_SAVED_SYN = 28; + const TCP_REPAIR_WINDOW = 29; + const TCP_FASTOPEN_CONNECT = 30; + const TCP_ULP = 31; + const TCP_MD5SIG_EXT = 32; + const TCP_FASTOPEN_KEY = 33; + const TCP_FASTOPEN_NO_COOKIE = 34; + const TCP_ZEROCOPY_RECEIVE = 35; + const TCP_INQ = 36; + const TCP_CM_INQ = Self::TCP_INQ.bits(); + const TCP_TX_DELAY = 37; + const TCP_AO_ADD_KEY = 38; + const TCP_AO_DEL_KEY = 39; + const TCP_AO_INFO = 40; + const TCP_AO_GET_KEYS = 41; + const TCP_AO_REPAIR = 42; + } +} \ No newline at end of file diff --git a/kernel/src/net/socket/old/poll_method.rs b/kernel/src/net/socket/old/poll_method.rs new file mode 100644 index 000000000..b2d58c709 --- /dev/null +++ b/kernel/src/net/socket/old/poll_method.rs @@ -0,0 +1,145 @@ + +/// ### 为socket提供无锁的poll方法 +/// +/// 因为在网卡中断中,需要轮询socket的状态,如果使用socket文件或者其inode来poll +/// 在当前的设计,会必然死锁,所以引用这一个设计来解决,提供无🔓的poll +pub struct SocketPollMethod; + +impl SocketPollMethod { + pub fn poll(socket: &socket::Socket, handle_item: &SocketHandleItem) -> EPollEventType { + let shutdown = handle_item.shutdown_type(); + match socket { + socket::Socket::Udp(udp) => Self::udp_poll(udp, shutdown), + socket::Socket::Tcp(tcp) => Self::tcp_poll(tcp, shutdown, handle_item.is_posix_listen), + socket::Socket::Raw(raw) => Self::raw_poll(raw, shutdown), + _ => todo!(), + } + } + + pub fn tcp_poll( + socket: &tcp::Socket, + shutdown: ShutdownType, + is_posix_listen: bool, + ) -> EPollEventType { + let mut events = EPollEventType::empty(); + // debug!("enter tcp_poll! is_posix_listen:{}", is_posix_listen); + // 处理listen的socket + if is_posix_listen { + // 如果是listen的socket,那么只有EPOLLIN和EPOLLRDNORM + if socket.is_active() { + events.insert(EPollEventType::EPOLL_LISTEN_CAN_ACCEPT); + } + + // debug!("tcp_poll listen socket! events:{:?}", events); + return events; + } + + let state = socket.state(); + + if shutdown == ShutdownType::SHUTDOWN_MASK || state == tcp::State::Closed { + events.insert(EPollEventType::EPOLLHUP); + } + + if shutdown.contains(ShutdownType::RCV_SHUTDOWN) { + events.insert( + EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM | EPollEventType::EPOLLRDHUP, + ); + } + + // Connected or passive Fast Open socket? 
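// Once the handshake is past SYN-SENT/SYN-RECEIVED, the branch below reports
// readability (EPOLLIN | EPOLLRDNORM) when receive data is queued and
// writability (EPOLLOUT | EPOLLWRNORM) while the send queue is below its
// capacity (a completely full send buffer currently hits a todo!()); a socket
// still in SYN-SENT only reports writability. Callers test the returned mask
// with, e.g., events.contains(EPollEventType::EPOLLIN).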
+ if state != tcp::State::SynSent && state != tcp::State::SynReceived { + // socket有可读数据 + if socket.can_recv() { + events.insert(EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM); + } + + if !(shutdown.contains(ShutdownType::SEND_SHUTDOWN)) { + // 缓冲区可写(这里判断可写的逻辑好像跟linux不太一样) + if socket.send_queue() < socket.send_capacity() { + events.insert(EPollEventType::EPOLLOUT | EPollEventType::EPOLLWRNORM); + } else { + // TODO:触发缓冲区已满的信号SIGIO + todo!("A signal SIGIO that the buffer is full needs to be sent"); + } + } else { + // 如果我们的socket关闭了SEND_SHUTDOWN,epoll事件就是EPOLLOUT + events.insert(EPollEventType::EPOLLOUT | EPollEventType::EPOLLWRNORM); + } + } else if state == tcp::State::SynSent { + events.insert(EPollEventType::EPOLLOUT | EPollEventType::EPOLLWRNORM); + } + + // socket发生错误 + // TODO: 这里的逻辑可能有问题,需要进一步验证是否is_active()==false就代表socket发生错误 + if !socket.is_active() { + events.insert(EPollEventType::EPOLLERR); + } + + events + } + + pub fn udp_poll(socket: &udp::Socket, shutdown: ShutdownType) -> EPollEventType { + let mut event = EPollEventType::empty(); + + if shutdown.contains(ShutdownType::RCV_SHUTDOWN) { + event.insert( + EPollEventType::EPOLLRDHUP | EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM, + ); + } + if shutdown.contains(ShutdownType::SHUTDOWN_MASK) { + event.insert(EPollEventType::EPOLLHUP); + } + + if socket.can_recv() { + event.insert(EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM); + } + + if socket.can_send() { + event.insert( + EPollEventType::EPOLLOUT + | EPollEventType::EPOLLWRNORM + | EPollEventType::EPOLLWRBAND, + ); + } else { + // TODO: 缓冲区空间不够,需要使用信号处理 + todo!() + } + + return event; + } + + pub fn raw_poll(socket: &raw::Socket, shutdown: ShutdownType) -> EPollEventType { + //debug!("enter raw_poll!"); + let mut event = EPollEventType::empty(); + + if shutdown.contains(ShutdownType::RCV_SHUTDOWN) { + event.insert( + EPollEventType::EPOLLRDHUP | EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM, + ); + } + if shutdown.contains(ShutdownType::SHUTDOWN_MASK) { + event.insert(EPollEventType::EPOLLHUP); + } + + if socket.can_recv() { + //debug!("poll can recv!"); + event.insert(EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM); + } else { + //debug!("poll can not recv!"); + } + + if socket.can_send() { + //debug!("poll can send!"); + event.insert( + EPollEventType::EPOLLOUT + | EPollEventType::EPOLLWRNORM + | EPollEventType::EPOLLWRBAND, + ); + } else { + //debug!("poll can not send!"); + // TODO: 缓冲区空间不够,需要使用信号处理 + todo!() + } + return event; + } +} diff --git a/kernel/src/net/socket/old/raw/mod.rs b/kernel/src/net/socket/old/raw/mod.rs new file mode 100644 index 000000000..1ff58da09 --- /dev/null +++ b/kernel/src/net/socket/old/raw/mod.rs @@ -0,0 +1,375 @@ +use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use log::{debug, error, warn}; +use smoltcp::{ + socket::{raw, tcp, udp}, + wire, +}; +use system_error::SystemError::{self, *}; + +use crate::{ + driver::net::Iface, + libs::rwlock::RwLock, + net::{ + event_poll::EPollEventType, net_core::poll_ifaces, syscall::PosixSocketOption, Endpoint, Protocol, NET_DEVICES, SocketOptionsLevel + }, +}; + +use crate::net::socket::{ + handle::GlobalSocketHandle, Socket, SocketMetadata, + Options, SocketPollMethod, ip_def::IpOptions, +}; + +use super::common::{get_iface_to_bind, BoundInner, Types}; + +pub const DEFAULT_METADATA_BUF_SIZE: usize = 1024; +pub const DEFAULT_RX_BUF_SIZE: usize = 64 * 1024; +pub const DEFAULT_TX_BUF_SIZE: usize = 64 * 1024; + +pub const ICMP_FILTER: usize = 1; + +pub type 
SmolRawSocket = smoltcp::socket::raw::Socket<'static>; + +// #[derive(Debug)] +// pub struct NewRaw { +// version: Option, +// protocol: smoltcp::wire::IpProtocol, +// } + +// impl NewRaw { +// pub fn new(protocol: smoltcp::wire::IpProtocol) -> Self { +// return Self { +// version: None, +// protocol, +// }; +// } +// } + +// #[derive(Debug)] +// pub struct UnboundRaw { +// socket: SmolRawSocket, +// } + +// impl UnboundRaw { +// pub fn new(protocol: smoltcp::wire::IpProtocol) -> Self { +// let rx_buffer = smoltcp::socket::raw::PacketBuffer::new( +// vec![smoltcp::socket::raw::PacketMetadata::EMPTY; DEFAULT_METADATA_BUF_SIZE], +// vec![0; DEFAULT_RX_BUF_SIZE], +// ); + +// let tx_buffer = smoltcp::socket::raw::PacketBuffer::new( +// vec![smoltcp::socket::raw::PacketMetadata::EMPTY; DEFAULT_METADATA_BUF_SIZE], +// vec![0; DEFAULT_TX_BUF_SIZE], +// ); + +// let socket = SmolRawSocket::new( +// smoltcp::wire::IpVersion::Ipv4, +// protocol, +// rx_buffer, +// tx_buffer +// ); + +// return Self { socket }; +// } + +// pub fn bind(self, local_endpoint: smoltcp::wire::IpEndpoint) -> Result { +// Ok( BoundRaw { +// inner: BoundInetInner::bind(self.socket, SocketType::Udp, local_endpoint)?, +// }) +// } +// } + +// #[derive(Debug)] +// pub struct BoundRaw { +// inner: BoundInetInner, +// } + +// impl BoundRaw { +// fn with_mut_socket(&mut self, f: F) -> T +// where +// F: FnMut(&mut SmolRawSocket) -> T, +// { +// self.inner.with_mut(f) +// } + +// #[inline] +// fn try_recv(&mut self, buf: &mut [u8]) -> Result<(usize, smoltcp::wire::IpEndpoint), SystemError> { +// self.with_mut_socket(|socket| { +// if socket.can_recv() { +// if let Ok((size, metadata)) = socket.recv_slice(buf) { +// return Ok((size, metadata.endpoint)); +// } +// } +// return Err(ENOTCONN); +// }) +// } + +// fn try_send(&mut self, buf: &[u8], to: Option) -> Result { +// let remote = to.or(self.inner.remote).ok_or(ENOTCONN)?; + +// let result = self.with_mut_socket(|socket| { +// if socket.can_send() && socket.send_slice(buf, remote).is_ok() { +// return Ok(buf.len()); +// } +// return Err(ENOBUFS); +// }); +// return result; +// } + +// fn close(&mut self) { +// self.with_mut_socket(|socket|{ +// socket.close(); +// }); +// self.inner.iface().port_manager().unbind_port(SocketType::Udp, self.inner.endpoint().port); +// } +// } + +// // Udp Inner 负责其内部资源管理 +// #[derive(Debug)] +// pub enum UdpInner { +// Unbound(UnboundUdp), +// Bound(BoundUdp), +// } + +// // Udp Socket 负责提供状态切换接口、执行状态切换 +// #[derive(Debug)] +// pub struct UdpSocket { +// inner: RwLock>, +// metadata: SocketMetadata, + +// } + +// impl UdpSocket { +// pub fn new(options: SocketOptions) -> Self { +// let metadata = SocketMetadata::new( +// // SocketType::Udp, +// DEFAULT_RX_BUF_SIZE, +// DEFAULT_TX_BUF_SIZE, +// DEFAULT_METADATA_BUF_SIZE, +// options, +// ); +// return Self { +// inner: RwLock::new(None), +// metadata, +// }; +// } +// } + +// /// @brief 表示原始的socket。原始套接字绕过传输层协议(如 TCP 或 UDP)并提供对网络层协议(如 IP)的直接访问。 +// /// +// /// ref: https://man7.org/linux/man-pages/man7/raw.7.html +// #[derive(Debug, Clone)] +// pub struct RawSocket { +// handle: GlobalSocketHandle, +// /// 用户发送的数据包是否包含了IP头. 
+// /// 如果是true,用户发送的数据包,必须包含IP头。(即用户要自行设置IP头+数据) +// /// 如果是false,用户发送的数据包,不包含IP头。(即用户只要设置数据) +// header_included: bool, +// /// socket的metadata +// metadata: SocketMetadata, +// } + + +// impl RawSocket { + +// /// @brief 创建一个原始的socket +// /// +// /// @param protocol 协议号 +// /// @param options socket的选项 +// /// +// /// @return 返回创建的原始的socket +// pub fn new(protocol: Protocol, options: SocketOptions) -> Self { +// let rx_buffer = raw::PacketBuffer::new( +// vec![raw::PacketMetadata::EMPTY; DEFAULT_METADATA_BUF_SIZE], +// vec![0; DEFAULT_RX_BUF_SIZE], +// ); +// let tx_buffer = raw::PacketBuffer::new( +// vec![raw::PacketMetadata::EMPTY; DEFAULT_METADATA_BUF_SIZE], +// vec![0; DEFAULT_TX_BUF_SIZE], +// ); +// let protocol: u8 = protocol.into(); +// let socket = raw::Socket::new( +// wire::IpVersion::Ipv4, +// wire::IpProtocol::from(protocol), +// rx_buffer, +// tx_buffer, +// ); + +// // 把socket添加到socket集合中,并得到socket的句柄 +// let handle = GlobalSocketHandle::new_smoltcp_handle(SOCKET_SET.lock_irqsave().add(socket)); + +// let metadata = SocketMetadata::new( +// InetSocketType::Raw, +// DEFAULT_RX_BUF_SIZE, +// DEFAULT_TX_BUF_SIZE, +// DEFAULT_METADATA_BUF_SIZE, +// options, +// ); + +// let posix_item = Arc::new(PosixSocketHandleItem::new(None)); + +// return Self { +// handle, +// header_included: false, +// metadata, +// }; +// } +// } + +// impl Socket for RawSocket { + +// fn close(&mut self) { +// let mut socket_set_guard = SOCKET_SET.lock_irqsave(); +// if let smoltcp::socket::Socket::Udp(mut sock) = +// socket_set_guard.remove(self.handle.smoltcp_handle().unwrap()) +// { +// sock.close(); +// } +// drop(socket_set_guard); +// poll_ifaces(); +// } + +// fn read(&self, buf: &mut [u8]) -> (Result, Endpoint) { + +// let mut socket_set_guard = SOCKET_SET.lock_irqsave(); +// let socket = +// socket_set_guard.get_mut::(self.handle.smoltcp_handle().unwrap()); + +// match socket.recv_slice(buf) { +// Ok(len) => { +// let packet = wire::Ipv4Packet::new_unchecked(buf); +// return ( +// Ok(len), +// Endpoint::Ip(Some(wire::IpEndpoint { +// addr: wire::IpAddress::Ipv4(packet.src_addr()), +// port: 0, +// })), +// ); +// } +// Err(_) => { +// return (Err(SystemError::EAGAIN_OR_EWOULDBLOCK), Endpoint::Ip(None)) +// } +// } +// } + +// fn write(&self, buf: &[u8], to: Option) -> Result { +// // 如果用户发送的数据包,包含IP头,则直接发送 +// if self.header_included { +// let mut socket_set_guard = SOCKET_SET.lock_irqsave(); +// let socket = +// socket_set_guard.get_mut::(self.handle.smoltcp_handle().unwrap()); +// match socket.send_slice(buf) { +// Ok(_) => { +// return Ok(buf.len()); +// } +// Err(raw::SendError::BufferFull) => { +// return Err(SystemError::ENOBUFS); +// } +// } +// } else { +// // 如果用户发送的数据包,不包含IP头,则需要自己构造IP头 + +// if let Some(Endpoint::Ip(Some(endpoint))) = to { +// let mut socket_set_guard = SOCKET_SET.lock_irqsave(); +// let socket: &mut raw::Socket = +// socket_set_guard.get_mut::(self.handle.smoltcp_handle().unwrap()); + +// // 暴力解决方案:只考虑0号网卡。 TODO:考虑多网卡的情况!!! 
+// let iface = NET_DEVICES.read_irqsave().get(&0).unwrap().clone(); + +// // 构造IP头 +// let ipv4_src_addr: Option = +// iface.inner_iface().lock().ipv4_addr(); +// if ipv4_src_addr.is_none() { +// return Err(SystemError::ENETUNREACH); +// } +// let ipv4_src_addr = ipv4_src_addr.unwrap(); + +// if let wire::IpAddress::Ipv4(ipv4_dst) = endpoint.addr { +// let len = buf.len(); + +// // 创建20字节的IPv4头部 +// let mut buffer: Vec = vec![0u8; len + 20]; +// let mut packet: wire::Ipv4Packet<&mut Vec> = +// wire::Ipv4Packet::new_unchecked(&mut buffer); + +// // 封装ipv4 header +// packet.set_version(4); +// packet.set_header_len(20); +// packet.set_total_len((20 + len) as u16); +// packet.set_src_addr(ipv4_src_addr); +// packet.set_dst_addr(ipv4_dst); + +// // 设置ipv4 header的protocol字段 +// packet.set_next_header(socket.ip_protocol()); + +// // 获取IP数据包的负载字段 +// let payload: &mut [u8] = packet.payload_mut(); +// payload.copy_from_slice(buf); + +// // 填充checksum字段 +// packet.fill_checksum(); + +// // 发送数据包 +// socket.send_slice(&buffer).unwrap(); + +// iface.poll().ok(); + +// drop(socket_set_guard); +// return Ok(len); +// } else { +// warn!("Unsupport Ip protocol type!"); +// return Err(SystemError::EINVAL); +// } +// } else { +// // 如果没有指定目的地址,则返回错误 +// return Err(SystemError::ENOTCONN); +// } +// } +// } + +// fn connect(&mut self, _endpoint: Endpoint) -> Result<(), SystemError> { +// Ok(()) +// } + +// fn metadata(&self) -> SocketMetadata { +// self.metadata.clone() +// } + +// fn box_clone(&self) -> Box { +// Box::new(self.clone()) +// } + +// /// @brief 设置socket的选项 +// /// +// /// @param level 选项的层次 +// /// @param optname 选项的名称 +// /// @param optval 选项的值 +// /// +// /// @return 返回设置是否成功, 如果不支持该选项,返回ENOSYS +// /// +// /// ## See +// /// https://code.dragonos.org.cn/s?refs=sk_setsockopt&project=linux-6.6.21 +// fn set_option( +// &self, +// _level: SocketOptionsLevel, +// optname: usize, +// _optval: &[u8], +// ) -> Result<(), SystemError> { +// if optname == ICMP_FILTER { +// todo!("setsockopt ICMP_FILTER"); +// } +// return Err(SystemError::ENOPROTOOPT); +// } + +// fn socket_handle(&self) -> GlobalSocketHandle { +// self.handle +// } + +// fn as_any_ref(&self) -> &dyn core::any::Any { +// self +// } + +// fn as_any_mut(&mut self) -> &mut dyn core::any::Any { +// self +// } +// } \ No newline at end of file diff --git a/kernel/src/net/socket/tcp_def.rs b/kernel/src/net/socket/tcp_def.rs deleted file mode 100644 index 59dacd3cc..000000000 --- a/kernel/src/net/socket/tcp_def.rs +++ /dev/null @@ -1,53 +0,0 @@ - - -bitflags! 
{ - pub struct TcpOptions: u32 { - const TCP_NODELAY = 1; - const TCP_MAXSEG = 2; - const TCP_CORK = 3; - const TCP_KEEPIDLE = 4; - const TCP_KEEPINTVL = 5; - const TCP_KEEPCNT = 6; - const TCP_SYNCNT = 7; - const TCP_LINGER2 = 8; - const TCP_DEFER_ACCEPT = 9; - const TCP_WINDOW_CLAMP = 10; - const TCP_INFO = 11; - const TCP_QUICKACK = 12; - const TCP_CONGESTION = 13; - const TCP_MD5SIG = 14; - const TCP_THIN_LINEAR_TIMEOUTS = 16; - const TCP_THIN_DUPACK = 17; - const TCP_USER_TIMEOUT = 18; - const TCP_REPAIR = 19; - const TCP_REPAIR_QUEUE = 20; - const TCP_QUEUE_SEQ = 21; - const TCP_REPAIR_OPTIONS = 22; - const TCP_FASTOPEN = 23; - const TCP_TIMESTAMP = 24; - const TCP_NOTSENT_LOWAT = 25; - const TCP_CC_INFO = 26; - const TCP_SAVE_SYN = 27; - const TCP_SAVED_SYN = 28; - const TCP_REPAIR_WINDOW = 29; - const TCP_FASTOPEN_CONNECT = 30; - const TCP_ULP = 31; - const TCP_MD5SIG_EXT = 32; - const TCP_FASTOPEN_KEY = 33; - const TCP_FASTOPEN_NO_COOKIE = 34; - const TCP_ZEROCOPY_RECEIVE = 35; - const TCP_INQ = 36; - const TCP_CM_INQ = Self::TCP_INQ.bits(); - const TCP_TX_DELAY = 37; - const TCP_AO_ADD_KEY = 38; - const TCP_AO_DEL_KEY = 39; - const TCP_AO_INFO = 40; - const TCP_AO_GET_KEYS = 41; - const TCP_AO_REPAIR = 42; - } -} - -// // You can then define values with exact meanings like this: -// const TCP_REPAIR_ON: TcpOptions = TcpOptions::from_bits_truncate(1); -// const TCP_REPAIR_OFF: TcpOptions = TcpOptions::from_bits_truncate(0); -// const TCP_REPAIR_OFF_NO_WP: TcpOptions = TcpOptions::from_bits_truncate(-1); \ No newline at end of file diff --git a/kernel/src/net/socket/unix.rs b/kernel/src/net/socket/unix/mod.rs similarity index 92% rename from kernel/src/net/socket/unix.rs rename to kernel/src/net/socket/unix/mod.rs index f15037775..0ec17c72f 100644 --- a/kernel/src/net/socket/unix.rs +++ b/kernel/src/net/socket/unix/mod.rs @@ -4,8 +4,8 @@ use system_error::SystemError; use crate::{libs::spinlock::SpinLock, net::Endpoint}; use super::{ - handle::GlobalSocketHandle, PosixSocketHandleItem, Socket, SocketInode, SocketMetadata, - SocketOptions, SocketType, + handle::GlobalSocketHandle, PosixSocketHandleItem, Socket, inode::SocketInode, SocketMetadata, + Options, InetSocketType, }; #[derive(Debug, Clone)] @@ -27,11 +27,11 @@ impl StreamSocket { /// /// ## 参数 /// - `options`: socket选项 - pub fn new(options: SocketOptions) -> Self { + pub fn new(options: Options) -> Self { let buffer = Arc::new(SpinLock::new(Vec::with_capacity(Self::DEFAULT_BUF_SIZE))); let metadata = SocketMetadata::new( - SocketType::Unix, + InetSocketType::Unix, Self::DEFAULT_BUF_SIZE, Self::DEFAULT_BUF_SIZE, Self::DEFAULT_METADATA_BUF_SIZE, @@ -51,9 +51,7 @@ impl StreamSocket { } impl Socket for StreamSocket { - fn posix_item(&self) -> Arc { - self.posix_item.clone() - } + fn socket_handle(&self) -> GlobalSocketHandle { self.handle } @@ -142,11 +140,11 @@ impl SeqpacketSocket { /// /// ## 参数 /// - `options`: socket选项 - pub fn new(options: SocketOptions) -> Self { + pub fn new(options: Options) -> Self { let buffer = Arc::new(SpinLock::new(Vec::with_capacity(Self::DEFAULT_BUF_SIZE))); let metadata = SocketMetadata::new( - SocketType::Unix, + InetSocketType::Unix, Self::DEFAULT_BUF_SIZE, Self::DEFAULT_BUF_SIZE, Self::DEFAULT_METADATA_BUF_SIZE, @@ -166,9 +164,7 @@ impl SeqpacketSocket { } impl Socket for SeqpacketSocket { - fn posix_item(&self) -> Arc { - self.posix_item.clone() - } + fn close(&mut self) {} fn read(&self, buf: &mut [u8]) -> (Result, Endpoint) { diff --git a/kernel/src/net/syscall.rs 
b/kernel/src/net/syscall.rs index 44d2971f1..a3f0a6220 100644 --- a/kernel/src/net/syscall.rs +++ b/kernel/src/net/syscall.rs @@ -20,7 +20,7 @@ use crate::{ }; use super::{ - socket::{new_socket, PosixSocketType, Socket, SocketInode}, + socket::{new_unbound_socket, PosixSocketType, Socket, SocketInode}, Endpoint, Protocol, ShutdownType, }; @@ -43,9 +43,9 @@ impl Syscall { let socket_type = PosixSocketType::try_from((socket_type & 0xf) as u8)?; let protocol = Protocol::from(protocol as u8); - let socket = new_socket(address_family, socket_type, protocol)?; + let socket = new_unbound_socket(address_family, socket_type, protocol)?; - let socketinode: Arc = SocketInode::new(socket); + let socketinode: Arc = SocketInode::new(socket, None); let f = File::new(socketinode, FileMode::O_RDWR)?; // 把socket添加到当前进程的文件描述符表中 let binding = ProcessManager::current_pcb().fd_table(); @@ -76,8 +76,8 @@ impl Syscall { let mut fd_table_guard = binding.write(); // 创建一对socket - let inode0 = SocketInode::new(new_socket(address_family, socket_type, protocol)?); - let inode1 = SocketInode::new(new_socket(address_family, socket_type, protocol)?); + let inode0 = SocketInode::new(new_unbound_socket(address_family, socket_type, protocol)?, None); + let inode1 = SocketInode::new(new_unbound_socket(address_family, socket_type, protocol)?, None); // 进行pair unsafe { @@ -116,7 +116,7 @@ impl Syscall { // 获取内层的socket(真正的数据) let socket: SpinLockGuard> = socket_inode.inner(); debug!("setsockopt: level={:?}", level); - return socket.setsockopt(sol, optname, optval).map(|_| 0); + return socket.set_option(sol, optname, optval).map(|_| 0); } /// @brief sys_getsockopt系统调用的实际执行函数 @@ -348,7 +348,7 @@ impl Syscall { .get_socket(fd as i32) .ok_or(SystemError::EBADF)?; let mut socket = unsafe { socket.inner_no_preempt() }; - socket.shutdown(ShutdownType::from_bits_truncate(how as u8))?; + socket.shutdown(ShutdownType::from_bits_truncate((how + 1) as u8))?; return Ok(0); } @@ -415,7 +415,7 @@ impl Syscall { // debug!("accept: new_socket={:?}", new_socket); // Insert the new socket into the file descriptor vector - let new_socket: Arc = SocketInode::new(new_socket); + let new_socket: Arc = SocketInode::new(new_socket, None); let mut file_mode = FileMode::O_RDWR; if flags & SOCK_NONBLOCK.bits() != 0 { @@ -459,12 +459,12 @@ impl Syscall { if addr.is_null() { return Err(SystemError::EINVAL); } - let socket: Arc = ProcessManager::current_pcb() + let endpoint = ProcessManager::current_pcb() .get_socket(fd as i32) - .ok_or(SystemError::EBADF)?; - let socket = socket.inner(); - let endpoint: Endpoint = socket.endpoint().ok_or(SystemError::EINVAL)?; - drop(socket); + .ok_or(SystemError::EBADF)? 
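// Folding the lookup into a single expression means the guard returned by
// inner() only lives as a temporary of this statement; it is released before
// the endpoint is converted into a SockAddr for the copy back to userspace,
// matching the explicit drop(socket) in the removed lines above.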
+ .inner() + .endpoint() + .ok_or(SystemError::EINVAL)?; let sockaddr_in = SockAddr::from(endpoint); unsafe { diff --git a/kernel/src/process/mod.rs b/kernel/src/process/mod.rs index c56702a81..1f34c2879 100644 --- a/kernel/src/process/mod.rs +++ b/kernel/src/process/mod.rs @@ -24,15 +24,10 @@ use crate::{ ipc::signal::{AtomicSignal, SigSet, Signal}, process::ArchPCBInfo, CurrentIrqArch, - }, - driver::tty::tty_core::TtyCore, - exception::InterruptArch, - filesystem::{ + }, driver::tty::tty_core::TtyCore, exception::InterruptArch, filesystem::{ procfs::procfs_unregister_pid, vfs::{file::FileDescriptorVec, FileType}, - }, - ipc::signal_types::{SigInfo, SigPending, SignalStruct}, - libs::{ + }, ipc::signal_types::{SigInfo, SigPending, SignalStruct}, libs::{ align::AlignedBox, casting::DowncastArc, futex::{ @@ -43,25 +38,16 @@ use crate::{ rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}, spinlock::{SpinLock, SpinLockGuard}, wait_queue::WaitQueue, - }, - mm::{ + }, mm::{ percpu::{PerCpu, PerCpuVar}, set_IDLE_PROCESS_ADDRESS_SPACE, ucontext::AddressSpace, VirtAddr, - }, - net::socket::SocketInode, - sched::completion::Completion, - sched::{ - cpu_rq, fair::FairSchedEntity, prio::MAX_PRIO, DequeueFlag, EnqueueFlag, OnRq, SchedMode, - WakeupFlags, __schedule, - }, - smp::{ + }, net::socket::Socket, sched::{completion::Completion, cpu_rq, fair::FairSchedEntity, prio::MAX_PRIO, DequeueFlag, EnqueueFlag, OnRq, SchedMode, WakeupFlags, __schedule}, smp::{ core::smp_get_processor_id, cpu::{AtomicProcessorId, ProcessorId}, kick_cpu, - }, - syscall::{user_access::clear_user, Syscall}, + }, syscall::{user_access::clear_user, Syscall} }; use timer::AlarmTimer; @@ -903,7 +889,7 @@ impl ProcessControlBlock { /// ## 返回值 /// /// Option(&mut Box) socket对象的可变引用. 如果文件描述符不是socket,那么返回None - pub fn get_socket(&self, fd: i32) -> Option> { + pub fn get_socket(&self, fd: i32) -> Option> { let binding = ProcessManager::current_pcb().fd_table(); let fd_table_guard = binding.read(); @@ -913,9 +899,9 @@ impl ProcessControlBlock { if f.file_type() != FileType::Socket { return None; } - let socket: Arc = f + let socket: Arc = f .inode() - .downcast_arc::() + .downcast_arc::() .expect("Not a socket inode"); return Some(socket); } diff --git a/user/apps/test_lo/src/main.rs b/user/apps/test_lo/src/main.rs index 5efc85615..6dd6ed1a1 100644 --- a/user/apps/test_lo/src/main.rs +++ b/user/apps/test_lo/src/main.rs @@ -2,7 +2,13 @@ use std::net::UdpSocket; use std::str; fn main() -> std::io::Result<()> { - let listener = UdpSocket::bind("0.0.0.0:12580")?; + let socket = UdpSocket::bind("10.0.2.15:34254")?; + socket.connect("127.0.0.2:34255")?; + + let listener = UdpSocket::bind("127.0.0.2:34255")?; + + let msg = "Hello, loopback!"; + socket.send(msg.as_bytes())?; let mut buf = [0; 1024]; let (amt, src) = listener.recv_from(&mut buf)?;
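// If loopback delivery works, buf[..amt] now holds the "Hello, loopback!"
// datagram sent above and src reports the sending socket bound to
// 10.0.2.15:34254.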