From 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 22 Oct 2024 10:36:11 +0000
Subject: [PATCH] Switch 4G dial-up to QMI; quectel-CM must run in the
 background on the system

---
 kernel/net/iucv/af_iucv.c | 361 +++++++++++++-------------------------------------
 1 files changed, 97 insertions(+), 264 deletions(-)

diff --git a/kernel/net/iucv/af_iucv.c b/kernel/net/iucv/af_iucv.c
index d59f234..e14368c 100644
--- a/kernel/net/iucv/af_iucv.c
+++ b/kernel/net/iucv/af_iucv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * IUCV protocol stack for Linux on zSeries
  *
@@ -15,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/types.h>
+#include <linux/limits.h>
 #include <linux/list.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
@@ -35,8 +37,6 @@
 
 static char iucv_userid[80];
 
-static const struct proto_ops iucv_sock_ops;
-
 static struct proto iucv_proto = {
 	.name		= "AF_IUCV",
 	.owner		= THIS_MODULE,
@@ -49,7 +49,7 @@
 static const u8 iprm_shutdown[8] =
 	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
 
-#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))
+#define TRGCLS_SIZE	sizeof_field(struct iucv_message, class)
 
 #define __iucv_sock_wait(sk, condition, timeo, ret)			\
 do {									\
@@ -84,14 +84,11 @@
 	__ret;								\
 })
 
+static struct sock *iucv_accept_dequeue(struct sock *parent,
+					struct socket *newsock);
 static void iucv_sock_kill(struct sock *sk);
 static void iucv_sock_close(struct sock *sk);
-static void iucv_sever_path(struct sock *, int);
-static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
-			 struct packet_type *pt, struct net_device *orig_dev);
-static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
-			  struct sk_buff *skb, u8 flags);
 static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
 
 /* Call Back functions */
@@ -125,110 +122,6 @@
 {
 	memcpy(&dst[8], src, 8);
 }
-
-static int afiucv_pm_prepare(struct device *dev)
-{
-#ifdef CONFIG_PM_DEBUG
-	printk(KERN_WARNING "afiucv_pm_prepare\n");
-#endif
-	return 0;
-}
-
-static void afiucv_pm_complete(struct device *dev)
-{
-#ifdef CONFIG_PM_DEBUG
-	printk(KERN_WARNING "afiucv_pm_complete\n");
-#endif
-}
-
-/**
- * afiucv_pm_freeze() - Freeze PM callback
- * @dev: AFIUCV dummy device
- *
- * Sever all established IUCV communication pathes
- */
-static int afiucv_pm_freeze(struct device *dev)
-{
-	struct iucv_sock *iucv;
-	struct sock *sk;
-
-#ifdef CONFIG_PM_DEBUG
-	printk(KERN_WARNING "afiucv_pm_freeze\n");
-#endif
-	read_lock(&iucv_sk_list.lock);
-	sk_for_each(sk, &iucv_sk_list.head) {
-		iucv = iucv_sk(sk);
-		switch (sk->sk_state) {
-		case IUCV_DISCONN:
-		case IUCV_CLOSING:
-		case IUCV_CONNECTED:
-			iucv_sever_path(sk, 0);
-			break;
-		case IUCV_OPEN:
-		case IUCV_BOUND:
-		case IUCV_LISTEN:
-		case IUCV_CLOSED:
-		default:
-			break;
-		}
-		skb_queue_purge(&iucv->send_skb_q);
-		skb_queue_purge(&iucv->backlog_skb_q);
-	}
-	read_unlock(&iucv_sk_list.lock);
-	return 0;
-}
-
-/**
- * afiucv_pm_restore_thaw() - Thaw and restore PM callback
- * @dev: AFIUCV dummy device
- *
- * socket clean up after freeze
- */
-static int afiucv_pm_restore_thaw(struct device *dev)
-{
-	struct sock *sk;
-
-#ifdef CONFIG_PM_DEBUG
-	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
-#endif
-	read_lock(&iucv_sk_list.lock);
-	sk_for_each(sk, &iucv_sk_list.head) {
-		switch (sk->sk_state) {
-		case IUCV_CONNECTED:
-			sk->sk_err = EPIPE;
-			sk->sk_state = IUCV_DISCONN;
-			sk->sk_state_change(sk);
-			break;
-		case IUCV_DISCONN:
-		case IUCV_CLOSING:
-		case IUCV_LISTEN:
-		case IUCV_BOUND:
-		case IUCV_OPEN:
-		default:
-			break;
-		}
-	}
-	read_unlock(&iucv_sk_list.lock);
-	return 0;
-}
-
-static const struct dev_pm_ops afiucv_pm_ops = {
-	.prepare = afiucv_pm_prepare,
-	.complete = afiucv_pm_complete,
-	.freeze = afiucv_pm_freeze,
-	.thaw = afiucv_pm_restore_thaw,
-	.restore = afiucv_pm_restore_thaw,
-};
-
-static struct device_driver af_iucv_driver = {
-	.owner = THIS_MODULE,
-	.name = "afiucv",
-	.bus  = NULL,
-	.pm   = &afiucv_pm_ops,
-};
-
-/* dummy device used as trigger for PM functions */
-static struct device *af_iucv_dev;
 
 /**
  * iucv_msg_length() - Returns the length of an iucv message.
@@ -321,13 +214,9 @@
 	struct sk_buff *nskb;
 	int err, confirm_recv = 0;
 
-	memset(skb->head, 0, ETH_HLEN);
-	phs_hdr = skb_push(skb, sizeof(struct af_iucv_trans_hdr));
-	skb_reset_mac_header(skb);
+	phs_hdr = skb_push(skb, sizeof(*phs_hdr));
+	memset(phs_hdr, 0, sizeof(*phs_hdr));
 	skb_reset_network_header(skb);
-	skb_push(skb, ETH_HLEN);
-	skb_reset_mac_header(skb);
-	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
 
 	phs_hdr->magic = ETH_P_AF_IUCV;
 	phs_hdr->version = 1;
@@ -438,6 +327,20 @@
 	parent->sk_state = IUCV_CLOSED;
 }
 
+static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
+{
+	write_lock_bh(&l->lock);
+	sk_add_node(sk, &l->head);
+	write_unlock_bh(&l->lock);
+}
+
+static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
+{
+	write_lock_bh(&l->lock);
+	sk_del_node_init(sk);
+	write_unlock_bh(&l->lock);
+}
+
 /* Kill socket (only if zapped and orphaned) */
 static void iucv_sock_kill(struct sock *sk)
 {
@@ -515,7 +418,9 @@
 			sk->sk_state = IUCV_DISCONN;
 			sk->sk_state_change(sk);
 		}
-	case IUCV_DISCONN:	/* fall through */
+		fallthrough;
+
+	case IUCV_DISCONN:
 		sk->sk_state = IUCV_CLOSING;
 		sk->sk_state_change(sk);
 
@@ -528,8 +433,9 @@
 				iucv_sock_in_state(sk, IUCV_CLOSED, 0),
 				timeo);
 		}
+		fallthrough;
 
-	case IUCV_CLOSING:	/* fall through */
+	case IUCV_CLOSING:
 		sk->sk_state = IUCV_CLOSED;
 		sk->sk_state_change(sk);
 
@@ -538,8 +444,9 @@
 
 		skb_queue_purge(&iucv->send_skb_q);
 		skb_queue_purge(&iucv->backlog_skb_q);
+		fallthrough;
 
-	default:	/* fall through */
+	default:
 		iucv_sever_path(sk, 1);
 	}
 
@@ -596,7 +503,6 @@
 
 	sk->sk_destruct = iucv_sock_destruct;
 	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
-	sk->sk_allocation = GFP_DMA;
 
 	sock_reset_flag(sk, SOCK_ZAPPED);
 
@@ -607,53 +513,7 @@
 	return sk;
 }
 
-/* Create an IUCV socket */
-static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
-			    int kern)
-{
-	struct sock *sk;
-
-	if (protocol && protocol != PF_IUCV)
-		return -EPROTONOSUPPORT;
-
-	sock->state = SS_UNCONNECTED;
-
-	switch (sock->type) {
-	case SOCK_STREAM:
-		sock->ops = &iucv_sock_ops;
-		break;
-	case SOCK_SEQPACKET:
-		/* currently, proto ops can handle both sk types */
-		sock->ops = &iucv_sock_ops;
-		break;
-	default:
-		return -ESOCKTNOSUPPORT;
-	}
-
-	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
-	if (!sk)
-		return -ENOMEM;
-
-	iucv_sock_init(sk, NULL);
-
-	return 0;
-}
-
-void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
-{
-	write_lock_bh(&l->lock);
-	sk_add_node(sk, &l->head);
-	write_unlock_bh(&l->lock);
-}
-
-void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
-{
-	write_lock_bh(&l->lock);
-	sk_del_node_init(sk);
-	write_unlock_bh(&l->lock);
-}
-
-void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
+static void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
 {
 	unsigned long flags;
 	struct iucv_sock *par = iucv_sk(parent);
@@ -666,7 +526,7 @@
 	sk_acceptq_added(parent);
 }
 
-void iucv_accept_unlink(struct sock *sk)
+static void iucv_accept_unlink(struct sock *sk)
 {
 	unsigned long flags;
 	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
@@ -679,7 +539,8 @@
 	sock_put(sk);
 }
 
-struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
+static struct sock *iucv_accept_dequeue(struct sock *parent,
+					struct socket *newsock)
 {
 	struct iucv_sock *isk, *n;
 	struct sock *sk;
@@ -727,11 +588,11 @@
 			  int addr_len)
 {
 	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+	char uid[sizeof(sa->siucv_user_id)];
 	struct sock *sk = sock->sk;
 	struct iucv_sock *iucv;
 	int err = 0;
 	struct net_device *dev;
-	char uid[9];
 
 	/* Verify the input sockaddr */
 	if (addr_len < sizeof(struct sockaddr_iucv) ||
@@ -790,6 +651,7 @@
 		memcpy(iucv->src_user_id, iucv_userid, 8);
 		sk->sk_state = IUCV_BOUND;
 		iucv->transport = AF_IUCV_TRANS_IUCV;
+		sk->sk_allocation |= GFP_DMA;
 		if (!iucv->msglimit)
 			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
 		goto done_unlock;
@@ -814,6 +676,8 @@
 		return -EPROTO;
 
 	memcpy(iucv->src_user_id, iucv_userid, 8);
+	iucv->transport = AF_IUCV_TRANS_IUCV;
+	sk->sk_allocation |= GFP_DMA;
 
 	write_lock_bh(&iucv_sk_list.lock);
 	__iucv_auto_name(iucv);
@@ -1097,7 +961,6 @@
 
 	/* initialize defaults */
 	cmsg_done   = 0;	/* check for duplicate headers */
-	txmsg.class = 0;
 
 	/* iterate over control messages */
 	for_each_cmsghdr(cmsg, msg) {
@@ -1508,8 +1371,8 @@
 	return 0;
 }
 
-__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
-			poll_table *wait)
+static __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
+			       poll_table *wait)
 {
 	struct sock *sk = sock->sk;
 	__poll_t mask = 0;
@@ -1632,7 +1495,7 @@
 
 /* getsockopt and setsockopt */
 static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
-				char __user *optval, unsigned int optlen)
+				sockptr_t optval, unsigned int optlen)
 {
 	struct sock *sk = sock->sk;
 	struct iucv_sock *iucv = iucv_sk(sk);
@@ -1645,7 +1508,7 @@
 	if (optlen < sizeof(int))
 		return -EINVAL;
 
-	if (get_user(val, (int __user *) optval))
+	if (copy_from_sockptr(&val, optval, sizeof(int)))
 		return -EFAULT;
 
 	rc = 0;
@@ -1662,7 +1525,7 @@
 		switch (sk->sk_state) {
 		case IUCV_OPEN:
 		case IUCV_BOUND:
-			if (val < 1 || val > (u16)(~0))
+			if (val < 1 || val > U16_MAX)
 				rc = -EINVAL;
 			else
 				iucv->msglimit = val;
@@ -1791,6 +1654,8 @@
 
 	niucv = iucv_sk(nsk);
 	iucv_sock_init(nsk, sk);
+	niucv->transport = AF_IUCV_TRANS_IUCV;
+	nsk->sk_allocation |= GFP_DMA;
 
 	/* Set the new iucv_sock */
 	memcpy(niucv->dst_name, ipuser + 8, 8);
@@ -1884,30 +1749,26 @@
 	struct sock *sk = path->private;
 	struct sk_buff *this = NULL;
 	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
-	struct sk_buff *list_skb = list->next;
+	struct sk_buff *list_skb;
 	unsigned long flags;
 
 	bh_lock_sock(sk);
-	if (!skb_queue_empty(list)) {
-		spin_lock_irqsave(&list->lock, flags);
-		while (list_skb != (struct sk_buff *)list) {
-			if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
-				this = list_skb;
-				break;
-			}
-			list_skb = list_skb->next;
+	spin_lock_irqsave(&list->lock, flags);
+	skb_queue_walk(list, list_skb) {
+		if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
+			this = list_skb;
+			break;
 		}
-		if (this)
-			__skb_unlink(this, list);
+	}
+	if (this)
+		__skb_unlink(this, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 
-		spin_unlock_irqrestore(&list->lock, flags);
-
-		if (this) {
-			kfree_skb(this);
-			/* wake up any process waiting for sending */
-			iucv_sock_wake_msglim(sk);
-		}
+	if (this) {
+		kfree_skb(this);
+		/* wake up any process waiting for sending */
+		iucv_sock_wake_msglim(sk);
 	}
 
 	if (sk->sk_state == IUCV_CLOSING) {
@@ -1953,8 +1814,7 @@
 /***************** HiperSockets transport callbacks ********************/
 static void afiucv_swap_src_dest(struct sk_buff *skb)
 {
-	struct af_iucv_trans_hdr *trans_hdr =
-		(struct af_iucv_trans_hdr *)skb->data;
+	struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
 	char tmpID[8];
 	char tmpName[8];
 
@@ -1977,13 +1837,12 @@
  **/
 static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
 {
+	struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
 	struct sock *nsk;
 	struct iucv_sock *iucv, *niucv;
-	struct af_iucv_trans_hdr *trans_hdr;
 	int err;
 
 	iucv = iucv_sk(sk);
-	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
 	if (!iucv) {
 		/* no sock - connection refused */
 		afiucv_swap_src_dest(skb);
@@ -2044,15 +1903,13 @@
 static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
 {
 	struct iucv_sock *iucv = iucv_sk(sk);
-	struct af_iucv_trans_hdr *trans_hdr =
-		(struct af_iucv_trans_hdr *)skb->data;
 
 	if (!iucv)
 		goto out;
 	if (sk->sk_state != IUCV_BOUND)
 		goto out;
 	bh_lock_sock(sk);
-	iucv->msglimit_peer = trans_hdr->window;
+	iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
 	sk->sk_state = IUCV_CONNECTED;
 	sk->sk_state_change(sk);
 	bh_unlock_sock(sk);
@@ -2108,8 +1965,6 @@
 static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
 {
 	struct iucv_sock *iucv = iucv_sk(sk);
-	struct af_iucv_trans_hdr *trans_hdr =
-		(struct af_iucv_trans_hdr *)skb->data;
 
 	if (!iucv)
 		return NET_RX_SUCCESS;
@@ -2117,7 +1972,7 @@
 	if (sk->sk_state != IUCV_CONNECTED)
 		return NET_RX_SUCCESS;
 
-	atomic_sub(trans_hdr->window, &iucv->msg_sent);
+	atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
 	iucv_sock_wake_msglim(sk);
 	return NET_RX_SUCCESS;
 }
@@ -2180,22 +2035,12 @@
 	int err = NET_RX_SUCCESS;
 	char nullstring[8];
 
-	if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
-		WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
-			  (int)skb->len,
-			  (int)(ETH_HLEN + sizeof(struct af_iucv_trans_hdr)));
+	if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
 		kfree_skb(skb);
 		return NET_RX_SUCCESS;
 	}
-	if (skb_headlen(skb) < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr)))
-		if (skb_linearize(skb)) {
-			WARN_ONCE(1, "AF_IUCV skb_linearize failed, len=%d",
-				  (int)skb->len);
-			kfree_skb(skb);
-			return NET_RX_SUCCESS;
-		}
-	skb_pull(skb, ETH_HLEN);
-	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
+
+	trans_hdr = iucv_trans_hdr(skb);
 	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
 	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
 	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
@@ -2266,10 +2111,10 @@
 			kfree_skb(skb);
 			break;
 		}
-		/* fall through and receive non-zero length data */
+		fallthrough;	/* and receive non-zero length data */
 	case (AF_IUCV_FLAG_SHT):
 		/* shutdown request */
-		/* fall through and receive zero length data */
+		fallthrough;	/* and receive zero length data */
 	case 0:
 		/* plain data frame */
 		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
@@ -2310,11 +2155,7 @@
 
 	list = &iucv->send_skb_q;
 	spin_lock_irqsave(&list->lock, flags);
-	if (skb_queue_empty(list))
-		goto out_unlock;
-	list_skb = list->next;
-	nskb = list_skb->next;
-	while (list_skb != (struct sk_buff *)list) {
+	skb_queue_walk_safe(list, list_skb, nskb) {
 		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
 			switch (n) {
 			case TX_NOTIFY_OK:
@@ -2347,10 +2188,7 @@
 			}
 			break;
 		}
-		list_skb = nskb;
-		nskb = nskb->next;
 	}
-out_unlock:
 	spin_unlock_irqrestore(&list->lock, flags);
 
 	if (sk->sk_state == IUCV_CLOSING) {
@@ -2418,6 +2256,35 @@
 	.getsockopt	= iucv_sock_getsockopt,
 };
 
+static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
+			    int kern)
+{
+	struct sock *sk;
+
+	if (protocol && protocol != PF_IUCV)
+		return -EPROTONOSUPPORT;
+
+	sock->state = SS_UNCONNECTED;
+
+	switch (sock->type) {
+	case SOCK_STREAM:
+	case SOCK_SEQPACKET:
+		/* currently, proto ops can handle both sk types */
+		sock->ops = &iucv_sock_ops;
+		break;
+	default:
+		return -ESOCKTNOSUPPORT;
+	}
+
+	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
+	if (!sk)
+		return -ENOMEM;
+
+	iucv_sock_init(sk, NULL);
+
+	return 0;
+}
+
 static const struct net_proto_family iucv_sock_family_ops = {
 	.family	= AF_IUCV,
 	.owner	= THIS_MODULE,
@@ -2431,45 +2298,11 @@
 
 static int afiucv_iucv_init(void)
 {
-	int err;
-
-	err = pr_iucv->iucv_register(&af_iucv_handler, 0);
-	if (err)
-		goto out;
-	/* establish dummy device */
-	af_iucv_driver.bus = pr_iucv->bus;
-	err = driver_register(&af_iucv_driver);
-	if (err)
-		goto out_iucv;
-	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
-	if (!af_iucv_dev) {
-		err = -ENOMEM;
-		goto out_driver;
-	}
-	dev_set_name(af_iucv_dev, "af_iucv");
-	af_iucv_dev->bus = pr_iucv->bus;
-	af_iucv_dev->parent = pr_iucv->root;
-	af_iucv_dev->release = (void (*)(struct device *))kfree;
-	af_iucv_dev->driver = &af_iucv_driver;
-	err = device_register(af_iucv_dev);
-	if (err)
-		goto out_iucv_dev;
-	return 0;
-
-out_iucv_dev:
-	put_device(af_iucv_dev);
-out_driver:
-	driver_unregister(&af_iucv_driver);
-out_iucv:
-	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
-out:
-	return err;
+	return pr_iucv->iucv_register(&af_iucv_handler, 0);
 }
 
 static void afiucv_iucv_exit(void)
 {
-	device_unregister(af_iucv_dev);
-	driver_unregister(&af_iucv_driver);
 	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
 }
--
Gitblit v1.6.2
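The subject line notes that, with the modem switched to QMI mode, Quectel's quectel-CM connect manager has to keep running in the background to set up and hold the data session. A minimal sketch of one way to start it at boot is shown below; it assumes the quectel-CM binary is installed in the PATH and uses a placeholder APN ("internet"), so adjust both for the target system. This snippet is illustrative only and is not part of the patch above:

    # Illustrative only: start the QMI connect manager at boot, e.g. from /etc/rc.local.
    # -s selects the APN; the trailing '&' keeps quectel-CM running in the background.
    quectel-CM -s internet &

quectel-CM then brings up the data call on the qmi_wwan network interface (typically wwan0) and configures its address; if the process exits, the data session drops, so supervising it (for example with a systemd unit using Restart=always) is usually more robust than a bare rc.local entry.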