commit 53af53ae83

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	include/linux/netdevice.h
	net/core/sock.c

Trivial merge issues: the "extern" keyword was removed from function
declarations in netdevice.h at the same time that "const" was added to
an argument, and there were two parallel line additions in
net/core/sock.c.

Signed-off-by: David S. Miller <davem@davemloft.net>

62 changed files with 474 additions and 206 deletions
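The netdevice.h conflict described above is worth a quick illustration:
one branch dropped the redundant "extern" from a prototype while the
other constified an argument, so the resolution has to keep both edits.
A sketch with a made-up function name (not the actual conflicting
declaration):

	/* net-next side: "extern" removed from the declaration */
	int dev_example_op(struct net_device *dev);

	/* net side: argument constified, "extern" still present */
	extern int dev_example_op(const struct net_device *dev);

	/* merge resolution: keep both changes */
	int dev_example_op(const struct net_device *dev);

The hunks below are the changes brought in by this merge.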
@@ -71,7 +71,7 @@ static int netlink_send(int s, struct cn_msg *msg)
 	nlh->nlmsg_seq = seq++;
 	nlh->nlmsg_pid = getpid();
 	nlh->nlmsg_type = NLMSG_DONE;
-	nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh));
+	nlh->nlmsg_len = size;
 	nlh->nlmsg_flags = 0;

 	m = NLMSG_DATA(nlh);
@@ -1785,6 +1785,7 @@ F:	include/net/bluetooth/

 BONDING DRIVER
 M:	Jay Vosburgh <fubar@us.ibm.com>
+M:	Veaceslav Falico <vfalico@redhat.com>
 M:	Andy Gospodarek <andy@greyhouse.net>
 L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/bonding/
@@ -930,4 +930,5 @@ void bpf_jit_free(struct sk_filter *fp)
 {
 	if (fp->bpf_func != sk_run_filter)
 		module_free(NULL, fp->bpf_func);
+	kfree(fp);
 }
@@ -691,4 +691,5 @@ void bpf_jit_free(struct sk_filter *fp)
 {
 	if (fp->bpf_func != sk_run_filter)
 		module_free(NULL, fp->bpf_func);
+	kfree(fp);
 }
@@ -881,7 +881,9 @@ void bpf_jit_free(struct sk_filter *fp)
 	struct bpf_binary_header *header = (void *)addr;

 	if (fp->bpf_func == sk_run_filter)
-		return;
+		goto free_filter;
 	set_memory_rw(addr, header->pages);
 	module_free(NULL, header);
+free_filter:
+	kfree(fp);
 }
@@ -808,4 +808,5 @@ void bpf_jit_free(struct sk_filter *fp)
 {
 	if (fp->bpf_func != sk_run_filter)
 		module_free(NULL, fp->bpf_func);
+	kfree(fp);
 }
@@ -772,13 +772,21 @@ cond_branch:			f_offset = addrs[i + filter[i].jf] - addrs[i];
 		return;
 	}

+static void bpf_jit_free_deferred(struct work_struct *work)
+{
+	struct sk_filter *fp = container_of(work, struct sk_filter, work);
+	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+	struct bpf_binary_header *header = (void *)addr;
+
+	set_memory_rw(addr, header->pages);
+	module_free(NULL, header);
+	kfree(fp);
+}
+
 void bpf_jit_free(struct sk_filter *fp)
 {
 	if (fp->bpf_func != sk_run_filter) {
-		unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
-		struct bpf_binary_header *header = (void *)addr;
-
-		set_memory_rw(addr, header->pages);
-		module_free(NULL, header);
+		INIT_WORK(&fp->work, bpf_jit_free_deferred);
+		schedule_work(&fp->work);
 	}
 }
@@ -65,6 +65,7 @@ void proc_fork_connector(struct task_struct *task)

 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -80,6 +81,7 @@ void proc_fork_connector(struct task_struct *task)
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	/* If cn_netlink_send() failed, the data is not sent */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
@@ -96,6 +98,7 @@ void proc_exec_connector(struct task_struct *task)

 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -106,6 +109,7 @@ void proc_exec_connector(struct task_struct *task)
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -122,6 +126,7 @@ void proc_id_connector(struct task_struct *task, int which_id)

 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	ev->what = which_id;
 	ev->event_data.id.process_pid = task->pid;
 	ev->event_data.id.process_tgid = task->tgid;
@@ -145,6 +150,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -160,6 +166,7 @@ void proc_sid_connector(struct task_struct *task)

 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -170,6 +177,7 @@ void proc_sid_connector(struct task_struct *task)
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -185,6 +193,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)

 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -203,6 +212,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -218,6 +228,7 @@ void proc_comm_connector(struct task_struct *task)

 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -229,6 +240,7 @@ void proc_comm_connector(struct task_struct *task)
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -244,6 +256,7 @@ void proc_coredump_connector(struct task_struct *task)

 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -254,6 +267,7 @@ void proc_coredump_connector(struct task_struct *task)
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -269,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)

 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -281,6 +296,7 @@ void proc_exit_connector(struct task_struct *task)
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -304,6 +320,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)

 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	msg->seq = rcvd_seq;
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -313,6 +330,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = rcvd_ack + 1;
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
@@ -109,7 +109,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)

 	data = nlmsg_data(nlh);

-	memcpy(data, msg, sizeof(*data) + msg->len);
+	memcpy(data, msg, size);

 	NETLINK_CB(skb).dst_group = group;

@@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb)
 static void cn_rx_skb(struct sk_buff *__skb)
 {
 	struct nlmsghdr *nlh;
-	int err;
 	struct sk_buff *skb;
+	int len, err;

 	skb = skb_get(__skb);

 	if (skb->len >= NLMSG_HDRLEN) {
 		nlh = nlmsg_hdr(skb);
+		len = nlmsg_len(nlh);

-		if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
+		if (len < (int)sizeof(struct cn_msg) ||
 		    skb->len < nlh->nlmsg_len ||
-		    nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
+		    len > CONNECTOR_MAX_MSG_SIZE) {
 			kfree_skb(skb);
 			return;
 		}
@@ -705,14 +705,14 @@ static size_t can_get_size(const struct net_device *dev)
 	size_t size;

 	size = nla_total_size(sizeof(u32));   /* IFLA_CAN_STATE */
-	size += sizeof(struct can_ctrlmode);  /* IFLA_CAN_CTRLMODE */
+	size += nla_total_size(sizeof(struct can_ctrlmode));  /* IFLA_CAN_CTRLMODE */
 	size += nla_total_size(sizeof(u32));  /* IFLA_CAN_RESTART_MS */
-	size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
-	size += sizeof(struct can_clock);     /* IFLA_CAN_CLOCK */
+	size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
+	size += nla_total_size(sizeof(struct can_clock));     /* IFLA_CAN_CLOCK */
 	if (priv->do_get_berr_counter)        /* IFLA_CAN_BERR_COUNTER */
-		size += sizeof(struct can_berr_counter);
+		size += nla_total_size(sizeof(struct can_berr_counter));
 	if (priv->bittiming_const)            /* IFLA_CAN_BITTIMING_CONST */
-		size += sizeof(struct can_bittiming_const);
+		size += nla_total_size(sizeof(struct can_bittiming_const));

 	return size;
 }
@@ -106,7 +106,6 @@
 #define XGMAC_DMA_HW_FEATURE	0x00000f58	/* Enabled Hardware Features */

 #define XGMAC_ADDR_AE		0x80000000
-#define XGMAC_MAX_FILTER_ADDR	31

 /* PMT Control and Status */
 #define XGMAC_PMT_POINTER_RESET	0x80000000
@@ -384,6 +383,7 @@ struct xgmac_priv {
 	struct device *device;
 	struct napi_struct napi;

+	int max_macs;
 	struct xgmac_extra_stats xstats;

 	spinlock_t stats_lock;
@@ -1291,14 +1291,12 @@ static void xgmac_set_rx_mode(struct net_device *dev)
 	netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
 		   netdev_mc_count(dev), netdev_uc_count(dev));

-	if (dev->flags & IFF_PROMISC) {
-		writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
-		return;
-	}
+	if (dev->flags & IFF_PROMISC)
+		value |= XGMAC_FRAME_FILTER_PR;

 	memset(hash_filter, 0, sizeof(hash_filter));

-	if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
+	if (netdev_uc_count(dev) > priv->max_macs) {
 		use_hash = true;
 		value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
 	}
@@ -1321,7 +1319,7 @@ static void xgmac_set_rx_mode(struct net_device *dev)
 			goto out;
 	}

-	if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
+	if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
 		use_hash = true;
 		value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
 	} else {
@@ -1342,8 +1340,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
 	}

 out:
-	for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
-		xgmac_set_mac_addr(ioaddr, NULL, reg);
+	for (i = reg; i <= priv->max_macs; i++)
+		xgmac_set_mac_addr(ioaddr, NULL, i);
 	for (i = 0; i < XGMAC_NUM_HASH; i++)
 		writel(hash_filter[i], ioaddr + XGMAC_HASH(i));

@@ -1761,6 +1759,13 @@ static int xgmac_probe(struct platform_device *pdev)
 	uid = readl(priv->base + XGMAC_VERSION);
 	netdev_info(ndev, "h/w version is 0x%x\n", uid);

+	/* Figure out how many valid mac address filter registers we have */
+	writel(1, priv->base + XGMAC_ADDR_HIGH(31));
+	if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1)
+		priv->max_macs = 31;
+	else
+		priv->max_macs = 7;
+
 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
 	ndev->irq = platform_get_irq(pdev, 0);
 	if (ndev->irq == -ENXIO) {
@@ -2657,6 +2657,8 @@ static int igb_set_eee(struct net_device *netdev,
 	    (hw->phy.media_type != e1000_media_type_copper))
 		return -EOPNOTSUPP;

+	memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+
 	ret_val = igb_get_eee(netdev, &eee_curr);
 	if (ret_val)
 		return ret_val;
@@ -1131,15 +1131,13 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
 	p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
 	p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
 	spin_unlock_bh(&mp->mib_counters_lock);
-
-	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }

 static void mib_counters_timer_wrapper(unsigned long _mp)
 {
 	struct mv643xx_eth_private *mp = (void *)_mp;

 	mib_counters_update(mp);
+	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }

-
@@ -2237,6 +2235,7 @@ static int mv643xx_eth_open(struct net_device *dev)
 		mp->int_mask |= INT_TX_END_0 << i;
 	}

+	add_timer(&mp->mib_counters_timer);
 	port_start(mp);

 	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
@@ -2534,6 +2533,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
 	if (!ppdev)
 		return -ENOMEM;
 	ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	ppdev->dev.of_node = pnp;

 	ret = platform_device_add_resources(ppdev, &res, 1);
 	if (ret)
@@ -2916,7 +2916,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	mp->mib_counters_timer.data = (unsigned long)mp;
 	mp->mib_counters_timer.function = mib_counters_timer_wrapper;
 	mp->mib_counters_timer.expires = jiffies + 30 * HZ;
-	add_timer(&mp->mib_counters_timer);

 	spin_lock_init(&mp->mib_counters_lock);

@@ -70,14 +70,15 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
 		put_page(page);
 		return -ENOMEM;
 	}
-	page_alloc->size = PAGE_SIZE << order;
+	page_alloc->page_size = PAGE_SIZE << order;
 	page_alloc->page = page;
 	page_alloc->dma = dma;
-	page_alloc->offset = frag_info->frag_align;
+	page_alloc->page_offset = frag_info->frag_align;
 	/* Not doing get_page() for each frag is a big win
 	 * on asymetric workloads.
 	 */
-	atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride);
+	atomic_set(&page->_count,
+		   page_alloc->page_size / frag_info->frag_stride);
 	return 0;
 }

@@ -96,16 +97,19 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
 	for (i = 0; i < priv->num_frags; i++) {
 		frag_info = &priv->frag_info[i];
 		page_alloc[i] = ring_alloc[i];
-		page_alloc[i].offset += frag_info->frag_stride;
-		if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size)
+		page_alloc[i].page_offset += frag_info->frag_stride;
+
+		if (page_alloc[i].page_offset + frag_info->frag_stride <=
+		    ring_alloc[i].page_size)
 			continue;
+
 		if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
 			goto out;
 	}

 	for (i = 0; i < priv->num_frags; i++) {
 		frags[i] = ring_alloc[i];
-		dma = ring_alloc[i].dma + ring_alloc[i].offset;
+		dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
 		ring_alloc[i] = page_alloc[i];
 		rx_desc->data[i].addr = cpu_to_be64(dma);
 	}
@@ -117,7 +121,7 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
 		frag_info = &priv->frag_info[i];
 		if (page_alloc[i].page != ring_alloc[i].page) {
 			dma_unmap_page(priv->ddev, page_alloc[i].dma,
-				       page_alloc[i].size, PCI_DMA_FROMDEVICE);
+				       page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
 			page = page_alloc[i].page;
 			atomic_set(&page->_count, 1);
 			put_page(page);
@@ -131,10 +135,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
 			      int i)
 {
 	const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+	u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
+

-	if (frags[i].offset + frag_info->frag_stride > frags[i].size)
-		dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size,
-			       PCI_DMA_FROMDEVICE);
+	if (next_frag_end > frags[i].page_size)
+		dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
+			       PCI_DMA_FROMDEVICE);

 	if (frags[i].page)
 		put_page(frags[i].page);
@@ -161,7 +167,7 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,

 			page_alloc = &ring->page_alloc[i];
 			dma_unmap_page(priv->ddev, page_alloc->dma,
-				       page_alloc->size, PCI_DMA_FROMDEVICE);
+				       page_alloc->page_size, PCI_DMA_FROMDEVICE);
 			page = page_alloc->page;
 			atomic_set(&page->_count, 1);
 			put_page(page);
@@ -184,10 +190,11 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
 		       i, page_count(page_alloc->page));

 		dma_unmap_page(priv->ddev, page_alloc->dma,
-				page_alloc->size, PCI_DMA_FROMDEVICE);
-		while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) {
+				page_alloc->page_size, PCI_DMA_FROMDEVICE);
+		while (page_alloc->page_offset + frag_info->frag_stride <
+		       page_alloc->page_size) {
 			put_page(page_alloc->page);
-			page_alloc->offset += frag_info->frag_stride;
+			page_alloc->page_offset += frag_info->frag_stride;
 		}
 		page_alloc->page = NULL;
 	}
@@ -478,7 +485,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 		/* Save page reference in skb */
 		__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
 		skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
-		skb_frags_rx[nr].page_offset = frags[nr].offset;
+		skb_frags_rx[nr].page_offset = frags[nr].page_offset;
 		skb->truesize += frag_info->frag_stride;
 		frags[nr].page = NULL;
 	}
@@ -517,7 +524,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,

 	/* Get pointer to first fragment so we could copy the headers into the
 	 * (linear part of the) skb */
-	va = page_address(frags[0].page) + frags[0].offset;
+	va = page_address(frags[0].page) + frags[0].page_offset;

 	if (length <= SMALL_PACKET_SIZE) {
 		/* We are copying all relevant data to the skb - temporarily
@@ -645,7 +652,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
 						DMA_FROM_DEVICE);
 			ethh = (struct ethhdr *)(page_address(frags[0].page) +
-						 frags[0].offset);
+						 frags[0].page_offset);

 			if (is_multicast_ether_addr(ethh->h_dest)) {
 				struct mlx4_mac_entry *entry;
@@ -237,8 +237,8 @@ struct mlx4_en_tx_desc {
 struct mlx4_en_rx_alloc {
 	struct page	*page;
 	dma_addr_t	dma;
-	u32		offset;
-	u32		size;
+	u32		page_offset;
+	u32		page_size;
 };

 struct mlx4_en_tx_ring {
@@ -448,7 +448,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
 	irq = irq_of_parse_and_map(node, 0);
 	if (irq <= 0) {
 		netdev_err(ndev, "irq_of_parse_and_map failed\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto irq_map_fail;
 	}

 	priv = netdev_priv(ndev);
@@ -472,24 +473,32 @@ static int moxart_mac_probe(struct platform_device *pdev)
 	priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
 						TX_DESC_NUM, &priv->tx_base,
 						GFP_DMA | GFP_KERNEL);
-	if (priv->tx_desc_base == NULL)
+	if (priv->tx_desc_base == NULL) {
+		ret = -ENOMEM;
 		goto init_fail;
+	}

 	priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
 						RX_DESC_NUM, &priv->rx_base,
 						GFP_DMA | GFP_KERNEL);
-	if (priv->rx_desc_base == NULL)
+	if (priv->rx_desc_base == NULL) {
+		ret = -ENOMEM;
 		goto init_fail;
+	}

 	priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
 				    GFP_ATOMIC);
-	if (!priv->tx_buf_base)
+	if (!priv->tx_buf_base) {
+		ret = -ENOMEM;
 		goto init_fail;
+	}

 	priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
 				    GFP_ATOMIC);
-	if (!priv->rx_buf_base)
+	if (!priv->rx_buf_base) {
+		ret = -ENOMEM;
 		goto init_fail;
+	}

 	platform_set_drvdata(pdev, ndev);

@@ -522,7 +531,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
 init_fail:
 	netdev_err(ndev, "init failed\n");
 	moxart_mac_free_memory(ndev);
-
+irq_map_fail:
+	free_netdev(ndev);
 	return ret;
 }

@@ -2257,7 +2257,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

 	err = qlcnic_alloc_adapter_resources(adapter);
 	if (err)
-		goto err_out_free_netdev;
+		goto err_out_free_wq;

 	adapter->dev_rst_time = jiffies;
 	adapter->ahw->revision_id = pdev->revision;
@@ -2396,6 +2396,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 err_out_free_hw:
 	qlcnic_free_adapter_resources(adapter);

+err_out_free_wq:
+	destroy_workqueue(adapter->qlcnic_wq);
+
 err_out_free_netdev:
 	free_netdev(netdev);

@@ -620,12 +620,16 @@ static struct sh_eth_cpu_data sh7734_data = {
 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
 			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
 			  EESR_TDE | EESR_ECI,
+	.fdr_value	= 0x0000070f,
+	.rmcr_value	= 0x00000001,

 	.apr		= 1,
 	.mpr		= 1,
 	.tpauser	= 1,
 	.bculr		= 1,
 	.hw_swap	= 1,
+	.rpadir		= 1,
+	.rpadir_value	= 2 << 16,
 	.no_trimd	= 1,
 	.no_ade		= 1,
 	.tsu		= 1,
@@ -754,6 +754,18 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
 	EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
 	EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
 	EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+	EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
+	EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
+	EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
+	EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
+	EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
+	EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
+	EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
+	EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
+	EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
+	EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
+	EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
+	EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
 };

 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) |		\
@@ -808,44 +820,72 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
 #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) |	\
 				  (1ULL << EF10_STAT_rx_length_error))

-#if BITS_PER_LONG == 64
-#define STAT_MASK_BITMAP(bits) (bits)
-#else
-#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
-#endif
+/* These statistics are only provided if the firmware supports the
+ * capability PM_AND_RXDP_COUNTERS.
+ */
+#define HUNT_PM_AND_RXDP_STAT_MASK (					\
+	(1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) |			\
+	(1ULL << EF10_STAT_rx_pm_discard_bb_overflow) |			\
+	(1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) |			\
+	(1ULL << EF10_STAT_rx_pm_discard_vfifo_full) |			\
+	(1ULL << EF10_STAT_rx_pm_trunc_qbb) |				\
+	(1ULL << EF10_STAT_rx_pm_discard_qbb) |				\
+	(1ULL << EF10_STAT_rx_pm_discard_mapping) |			\
+	(1ULL << EF10_STAT_rx_dp_q_disabled_packets) |			\
+	(1ULL << EF10_STAT_rx_dp_di_dropped_packets) |			\
+	(1ULL << EF10_STAT_rx_dp_streaming_packets) |			\
+	(1ULL << EF10_STAT_rx_dp_emerg_fetch) |				\
+	(1ULL << EF10_STAT_rx_dp_emerg_wait))

-static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx)
+static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
 {
-	static const unsigned long hunt_40g_stat_mask[] = {
-		STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
-				 HUNT_40G_EXTRA_STAT_MASK)
-	};
-	static const unsigned long hunt_10g_only_stat_mask[] = {
-		STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
-				 HUNT_10G_ONLY_STAT_MASK)
-	};
+	u64 raw_mask = HUNT_COMMON_STAT_MASK;
 	u32 port_caps = efx_mcdi_phy_get_caps(efx);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;

 	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
-		return hunt_40g_stat_mask;
+		raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
 	else
-		return hunt_10g_only_stat_mask;
+		raw_mask |= HUNT_10G_ONLY_STAT_MASK;
+
+	if (nic_data->datapath_caps &
+	    (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
+		raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
+
+	return raw_mask;
+}
+
+static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
+{
+	u64 raw_mask = efx_ef10_raw_stat_mask(efx);
+
+#if BITS_PER_LONG == 64
+	mask[0] = raw_mask;
+#else
+	mask[0] = raw_mask & 0xffffffff;
+	mask[1] = raw_mask >> 32;
+#endif
 }

 static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
 {
+	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+
+	efx_ef10_get_stat_mask(efx, mask);
 	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
-				      efx_ef10_stat_mask(efx), names);
+				      mask, names);
 }

 static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
-	const unsigned long *stats_mask = efx_ef10_stat_mask(efx);
+	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
 	__le64 generation_start, generation_end;
 	u64 *stats = nic_data->stats;
 	__le64 *dma_stats;

+	efx_ef10_get_stat_mask(efx, mask);
+
 	dma_stats = efx->stats_buffer.addr;
 	nic_data = efx->nic_data;

@@ -853,8 +893,9 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
 	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
 		return 0;
 	rmb();
-	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask,
+	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
 			     stats, efx->stats_buffer.addr, false);
+	rmb();
 	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
 	if (generation_end != generation_start)
 		return -EAGAIN;
@@ -873,12 +914,14 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
 static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
 				    struct rtnl_link_stats64 *core_stats)
 {
-	const unsigned long *mask = efx_ef10_stat_mask(efx);
+	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	u64 *stats = nic_data->stats;
 	size_t stats_count = 0, index;
 	int retry;

+	efx_ef10_get_stat_mask(efx, mask);
+
 	/* If we're unlucky enough to read statistics during the DMA, wait
 	 * up to 10ms for it to finish (typically takes <500us)
 	 */
@@ -963,7 +963,7 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
 				bool *was_attached)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
-	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
 	size_t outlen;
 	int rc;

@@ -981,6 +981,22 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
 		goto fail;
 	}

+	/* We currently assume we have control of the external link
+	 * and are completely trusted by firmware.  Abort probing
+	 * if that's not true for this function.
+	 */
+	if (driver_operating &&
+	    outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
+	    (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
+	     (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+	      1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
+	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+	     1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
+		netif_err(efx, probe, efx->net_dev,
+			  "This driver version only supports one function per port\n");
+		return -ENODEV;
+	}
+
 	if (was_attached != NULL)
 		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
 	return 0;
@@ -2574,8 +2574,58 @@
 #define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
 #define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
 #define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
-#define MC_CMD_GMAC_DMABUF_START 0x40 /* enum */
-#define MC_CMD_GMAC_DMABUF_END 0x5f /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
+/* enum: RXDP counter: Number of times an emergency descriptor fetch was
+ * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_START 0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_END 0x5f
 #define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
 #define MC_CMD_MAC_NSTATS 0x61 /* enum */

@@ -5065,6 +5115,8 @@
 #define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
 #define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
 #define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
 /* RxDPCPU firmware id. */
 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
@@ -480,8 +480,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
  * @count: Length of the @desc array
  * @mask: Bitmask of which elements of @desc are enabled
  * @stats: Buffer to update with the converted statistics.  The length
- *	of this array must be at least the number of set bits in the
- *	first @count bits of @mask.
+ *	of this array must be at least @count.
  * @dma_buf: DMA buffer containing hardware statistics
  * @accumulate: If set, the converted values will be added rather than
  *	directly stored to the corresponding elements of @stats
@@ -514,11 +513,9 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
 			}

 			if (accumulate)
-				*stats += val;
+				stats[index] += val;
 			else
-				*stats = val;
+				stats[index] = val;
 		}
-
-		++stats;
 	}
 }
@@ -402,6 +402,18 @@ enum {
 	EF10_STAT_rx_align_error,
 	EF10_STAT_rx_length_error,
 	EF10_STAT_rx_nodesc_drops,
+	EF10_STAT_rx_pm_trunc_bb_overflow,
+	EF10_STAT_rx_pm_discard_bb_overflow,
+	EF10_STAT_rx_pm_trunc_vfifo_full,
+	EF10_STAT_rx_pm_discard_vfifo_full,
+	EF10_STAT_rx_pm_trunc_qbb,
+	EF10_STAT_rx_pm_discard_qbb,
+	EF10_STAT_rx_pm_discard_mapping,
+	EF10_STAT_rx_dp_q_disabled_packets,
+	EF10_STAT_rx_dp_di_dropped_packets,
+	EF10_STAT_rx_dp_streaming_packets,
+	EF10_STAT_rx_dp_emerg_fetch,
+	EF10_STAT_rx_dp_emerg_wait,
 	EF10_STAT_COUNT
 };

@@ -1766,8 +1766,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 	}
 	data->mac_control = prop;

-	if (!of_property_read_u32(node, "dual_emac", &prop))
-		data->dual_emac = prop;
+	if (of_property_read_bool(node, "dual_emac"))
+		data->dual_emac = 1;

 	/*
 	 * Populate all the child nodes here...
@@ -1777,7 +1777,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 	if (ret)
 		pr_warn("Doesn't have any child node\n");

-	for_each_node_by_name(slave_node, "slave") {
+	for_each_child_of_node(node, slave_node) {
 		struct cpsw_slave_data *slave_data = data->slave_data + i;
 		const void *mac_addr = NULL;
 		u32 phyid;
@@ -1786,6 +1786,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 		struct device_node *mdio_node;
 		struct platform_device *mdio;

+		/* This is no slave child node, continue */
+		if (strcmp(slave_node->name, "slave"))
+			continue;
+
 		parp = of_get_property(slave_node, "phy_id", &lenp);
 		if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
 			pr_err("Missing slave[%d] phy_id property\n", i);
@ -82,7 +82,6 @@ struct mrf24j40 {
|
||||||
|
|
||||||
struct mutex buffer_mutex; /* only used to protect buf */
|
struct mutex buffer_mutex; /* only used to protect buf */
|
||||||
struct completion tx_complete;
|
struct completion tx_complete;
|
||||||
struct work_struct irqwork;
|
|
||||||
u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
|
 	u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
 };

@@ -344,6 +343,8 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
 	if (ret)
 		goto err;
 
+	INIT_COMPLETION(devrec->tx_complete);
+
 	/* Set TXNTRIG bit of TXNCON to send packet */
 	ret = read_short_reg(devrec, REG_TXNCON, &val);
 	if (ret)

@@ -354,8 +355,6 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
 	val |= 0x4;
 	write_short_reg(devrec, REG_TXNCON, val);
 
-	INIT_COMPLETION(devrec->tx_complete);
-
 	/* Wait for the device to send the TX complete interrupt. */
 	ret = wait_for_completion_interruptible_timeout(
 			&devrec->tx_complete,

@@ -590,17 +589,6 @@ static struct ieee802154_ops mrf24j40_ops = {
 static irqreturn_t mrf24j40_isr(int irq, void *data)
 {
 	struct mrf24j40 *devrec = data;
-
-	disable_irq_nosync(irq);
-
-	schedule_work(&devrec->irqwork);
-
-	return IRQ_HANDLED;
-}
-
-static void mrf24j40_isrwork(struct work_struct *work)
-{
-	struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork);
 	u8 intstat;
 	int ret;

@@ -618,7 +606,7 @@ static void mrf24j40_isrwork(struct work_struct *work)
 	mrf24j40_handle_rx(devrec);
 
 out:
-	enable_irq(devrec->spi->irq);
+	return IRQ_HANDLED;
 }
 
 static int mrf24j40_probe(struct spi_device *spi)

@@ -642,7 +630,6 @@ static int mrf24j40_probe(struct spi_device *spi)
 	mutex_init(&devrec->buffer_mutex);
 	init_completion(&devrec->tx_complete);
-	INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
 	devrec->spi = spi;
 	spi_set_drvdata(spi, devrec);
 

@@ -688,11 +675,12 @@ static int mrf24j40_probe(struct spi_device *spi)
 	val &= ~0x3; /* Clear RX mode (normal) */
 	write_short_reg(devrec, REG_RXMCR, val);
 
-	ret = request_irq(spi->irq,
-			  mrf24j40_isr,
-			  IRQF_TRIGGER_FALLING,
-			  dev_name(&spi->dev),
-			  devrec);
+	ret = request_threaded_irq(spi->irq,
+				   NULL,
+				   mrf24j40_isr,
+				   IRQF_TRIGGER_LOW|IRQF_ONESHOT,
+				   dev_name(&spi->dev),
+				   devrec);
 
 	if (ret) {
 		dev_err(printdev(devrec), "Unable to get IRQ");

@@ -721,7 +709,6 @@ static int mrf24j40_remove(struct spi_device *spi)
 	dev_dbg(printdev(devrec), "remove\n");
 
 	free_irq(spi->irq, devrec);
-	flush_work(&devrec->irqwork); /* TODO: Is this the right call? */
 	ieee802154_unregister_device(devrec->dev);
 	ieee802154_free_device(devrec->dev);
 	/* TODO: Will ieee802154_free_device() wait until ->xmit() is
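
The hunks above convert the driver from a hard interrupt handler that punted to a workqueue into a single threaded handler. A minimal sketch of the request_threaded_irq() pattern being adopted, using hypothetical demo_* names rather than the driver's own:

    #include <linux/interrupt.h>

    /* Thread function: runs in process context, so bus transfers that
     * may sleep (e.g. SPI register reads) are safe here, which is why
     * the workqueue indirection can be dropped. */
    static irqreturn_t demo_thread_fn(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int demo_setup_irq(unsigned int irq, void *dev)
    {
            /* A NULL hard handler plus IRQF_ONESHOT keeps the line masked
             * until the thread finishes - the usual arrangement for a
             * level-triggered (IRQF_TRIGGER_LOW) interrupt. */
            return request_threaded_irq(irq, NULL, demo_thread_fn,
                                        IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                        "demo", dev);
    }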
@@ -1293,7 +1293,8 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
 	if (unlikely(!noblock))
 		add_wait_queue(&tfile->wq.wait, &wait);
 	while (len) {
-		current->state = TASK_INTERRUPTIBLE;
+		if (unlikely(!noblock))
+			current->state = TASK_INTERRUPTIBLE;
 
 		/* Read frames from the queue */
 		if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {

@@ -1320,9 +1321,10 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
 			break;
 		}
 
-		current->state = TASK_RUNNING;
-		if (unlikely(!noblock))
+		if (unlikely(!noblock)) {
+			current->state = TASK_RUNNING;
 			remove_wait_queue(&tfile->wq.wait, &wait);
+		}
 
 	return ret;
 }
@@ -1969,15 +1969,18 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
 			       struct ath_atx_tid *tid, struct sk_buff *skb)
 {
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	struct ath_frame_info *fi = get_frame_info(skb);
 	struct list_head bf_head;
-	struct ath_buf *bf;
-
-	bf = fi->bf;
+	struct ath_buf *bf = fi->bf;
 
 	INIT_LIST_HEAD(&bf_head);
 	list_add_tail(&bf->list, &bf_head);
 	bf->bf_state.bf_type = 0;
+	if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
+		bf->bf_state.bf_type = BUF_AMPDU;
+		ath_tx_addto_baw(sc, tid, bf);
+	}
 
 	bf->bf_next = NULL;
 	bf->bf_lastbf = bf;
@@ -358,10 +358,12 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
 		}
 	} while (true);
 
-	if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter))
-		goto process_start;
-
 	spin_lock_irqsave(&adapter->main_proc_lock, flags);
+	if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) {
+		spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+		goto process_start;
+	}
+
 	adapter->mwifiex_processing = false;
 	spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
 
@@ -105,13 +105,11 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
 		goto exit_release_regions;
 	}
 
-	pci_enable_msi(pci_dev);
-
 	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
 	if (!hw) {
 		rt2x00_probe_err("Failed to allocate hardware\n");
 		retval = -ENOMEM;
-		goto exit_disable_msi;
+		goto exit_release_regions;
 	}
 
 	pci_set_drvdata(pci_dev, hw);

@@ -152,9 +150,6 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
 exit_free_device:
 	ieee80211_free_hw(hw);
 
-exit_disable_msi:
-	pci_disable_msi(pci_dev);
-
 exit_release_regions:
 	pci_release_regions(pci_dev);

@@ -179,8 +174,6 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
 	rt2x00pci_free_reg(rt2x00dev);
 	ieee80211_free_hw(hw);
 
-	pci_disable_msi(pci_dev);
-
 	/*
	 * Free the PCI device data.
	 */
@@ -39,11 +39,15 @@ static int connect_rings(struct backend_info *);
 static void connect(struct backend_info *);
 static void backend_create_xenvif(struct backend_info *be);
 static void unregister_hotplug_status_watch(struct backend_info *be);
+static void set_backend_state(struct backend_info *be,
+			      enum xenbus_state state);
 
 static int netback_remove(struct xenbus_device *dev)
 {
 	struct backend_info *be = dev_get_drvdata(&dev->dev);
 
+	set_backend_state(be, XenbusStateClosed);
+
 	unregister_hotplug_status_watch(be);
 	if (be->vif) {
 		kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
@@ -6,6 +6,7 @@
 
 #include <linux/atomic.h>
 #include <linux/compat.h>
+#include <linux/workqueue.h>
 #include <uapi/linux/filter.h>
 
 #ifdef CONFIG_COMPAT

@@ -25,15 +26,19 @@ struct sk_filter
 {
 	atomic_t		refcnt;
 	unsigned int		len;	/* Number of filter blocks */
+	struct rcu_head		rcu;
 	unsigned int		(*bpf_func)(const struct sk_buff *skb,
 					    const struct sock_filter *filter);
-	struct rcu_head		rcu;
-	struct sock_filter	insns[0];
+	union {
+		struct sock_filter	insns[0];
+		struct work_struct	work;
+	};
 };
 
-static inline unsigned int sk_filter_len(const struct sk_filter *fp)
+static inline unsigned int sk_filter_size(unsigned int proglen)
 {
-	return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
+	return max(sizeof(struct sk_filter),
+		   offsetof(struct sk_filter, insns[proglen]));
 }
 
 extern int sk_filter(struct sock *sk, struct sk_buff *skb);

@@ -67,11 +72,13 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
 }
 #define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
 #else
+#include <linux/slab.h>
 static inline void bpf_jit_compile(struct sk_filter *fp)
 {
 }
 static inline void bpf_jit_free(struct sk_filter *fp)
 {
+	kfree(fp);
 }
 #define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
 #endif
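
The max() in the new sk_filter_size() is what keeps the union safe: for a program so short that insns[] ends before the work_struct would, the allocation still has to cover the full struct so the deferred-free work item can live in place. A standalone userspace sketch with stand-in types (not the kernel header) that makes the arithmetic visible:

    #include <stdio.h>
    #include <stddef.h>

    struct sock_filter { unsigned short code; unsigned char jt, jf; unsigned int k; };
    struct work_struct_stub { void *entry[4]; };   /* stand-in for struct work_struct */

    struct sk_filter_stub {
            int refcnt;
            unsigned int len;
            void *rcu[2];                          /* stand-in for struct rcu_head */
            void *bpf_func;
            union {
                    struct sock_filter insns[0];   /* zero-length array, GNU extension as in the kernel */
                    struct work_struct_stub work;
            };
    };

    static size_t sk_filter_size(unsigned int proglen)
    {
            size_t whole = sizeof(struct sk_filter_stub);
            size_t prog  = offsetof(struct sk_filter_stub, insns) +
                           proglen * sizeof(struct sock_filter);
            return whole > prog ? whole : prog;
    }

    int main(void)
    {
            /* Tiny programs are rounded up to the full struct so the
             * work member fits; large ones are dominated by insns[]. */
            printf("proglen=0  -> %zu\n", sk_filter_size(0));
            printf("proglen=1  -> %zu\n", sk_filter_size(1));
            printf("proglen=64 -> %zu\n", sk_filter_size(64));
            return 0;
    }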
@@ -2273,11 +2273,11 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 }
 
 #ifdef CONFIG_XPS
-int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
+int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 			u16 index);
 #else
 static inline int netif_set_xps_queue(struct net_device *dev,
-				      struct cpumask *mask,
+				      const struct cpumask *mask,
 				      u16 index)
 {
 	return 0;
@@ -1612,16 +1612,14 @@ static inline void sk_filter_release(struct sk_filter *fp)
 
 static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
 {
-	unsigned int size = sk_filter_len(fp);
-
-	atomic_sub(size, &sk->sk_omem_alloc);
+	atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
 	sk_filter_release(fp);
 }
 
 static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
 	atomic_inc(&fp->refcnt);
-	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
+	atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
 }
 
 /*
@@ -1,5 +1,6 @@
 # UAPI Header export list
 header-y += tc_csum.h
+header-y += tc_defact.h
 header-y += tc_gact.h
 header-y += tc_ipt.h
 header-y += tc_mirred.h
@@ -6,7 +6,7 @@
 struct tc_defact {
 	tc_gen;
 };
 
 enum {
 	TCA_DEF_UNSPEC,
 	TCA_DEF_TM,
@@ -171,7 +171,7 @@ static size_t vlan_get_size(const struct net_device *dev)
 
 	return nla_total_size(2) +	/* IFLA_VLAN_PROTOCOL */
	       nla_total_size(2) +	/* IFLA_VLAN_ID */
-	       sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
+	       nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
	       vlan_qos_map_size(vlan->nr_ingress_mappings) +
	       vlan_qos_map_size(vlan->nr_egress_mappings);
 }
@@ -65,6 +65,7 @@ static int __init batadv_init(void)
 	batadv_recv_handler_init();
 
 	batadv_iv_init();
+	batadv_nc_init();
 
 	batadv_event_workqueue = create_singlethread_workqueue("bat_events");

@@ -142,7 +143,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
 	if (ret < 0)
 		goto err;
 
-	ret = batadv_nc_init(bat_priv);
+	ret = batadv_nc_mesh_init(bat_priv);
 	if (ret < 0)
 		goto err;

@@ -167,7 +168,7 @@ void batadv_mesh_free(struct net_device *soft_iface)
 	batadv_vis_quit(bat_priv);
 
 	batadv_gw_node_purge(bat_priv);
-	batadv_nc_free(bat_priv);
+	batadv_nc_mesh_free(bat_priv);
 	batadv_dat_free(bat_priv);
 	batadv_bla_free(bat_priv);
@@ -34,6 +34,20 @@ static void batadv_nc_worker(struct work_struct *work);
 static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
 				       struct batadv_hard_iface *recv_if);
 
+/**
+ * batadv_nc_init - one-time initialization for network coding
+ */
+int __init batadv_nc_init(void)
+{
+	int ret;
+
+	/* Register our packet type */
+	ret = batadv_recv_handler_register(BATADV_CODED,
+					   batadv_nc_recv_coded_packet);
+
+	return ret;
+}
+
 /**
  * batadv_nc_start_timer - initialise the nc periodic worker
  * @bat_priv: the bat priv with all the soft interface information

@@ -45,10 +59,10 @@ static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_init - initialise coding hash table and start house keeping
+ * batadv_nc_mesh_init - initialise coding hash table and start house keeping
  * @bat_priv: the bat priv with all the soft interface information
  */
-int batadv_nc_init(struct batadv_priv *bat_priv)
+int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
 {
 	bat_priv->nc.timestamp_fwd_flush = jiffies;
 	bat_priv->nc.timestamp_sniffed_purge = jiffies;

@@ -70,11 +84,6 @@ int batadv_nc_init(struct batadv_priv *bat_priv)
 	batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
 				   &batadv_nc_decoding_hash_lock_class_key);
 
-	/* Register our packet type */
-	if (batadv_recv_handler_register(BATADV_CODED,
-					 batadv_nc_recv_coded_packet) < 0)
-		goto err;
-
 	INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
 	batadv_nc_start_timer(bat_priv);

@@ -1721,12 +1730,11 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
 }
 
 /**
- * batadv_nc_free - clean up network coding memory
+ * batadv_nc_mesh_free - clean up network coding memory
  * @bat_priv: the bat priv with all the soft interface information
  */
-void batadv_nc_free(struct batadv_priv *bat_priv)
+void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 {
-	batadv_recv_handler_unregister(BATADV_CODED);
 	cancel_delayed_work_sync(&bat_priv->nc.work);
 
 	batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
@@ -22,8 +22,9 @@
 
 #ifdef CONFIG_BATMAN_ADV_NC
 
-int batadv_nc_init(struct batadv_priv *bat_priv);
-void batadv_nc_free(struct batadv_priv *bat_priv);
+int batadv_nc_init(void);
+int batadv_nc_mesh_init(struct batadv_priv *bat_priv);
+void batadv_nc_mesh_free(struct batadv_priv *bat_priv);
 void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
			      struct batadv_orig_node *orig_node,
			      struct batadv_orig_node *orig_neigh_node,

@@ -46,12 +47,17 @@ int batadv_nc_init_debugfs(struct batadv_priv *bat_priv);
 
 #else /* ifdef CONFIG_BATMAN_ADV_NC */
 
-static inline int batadv_nc_init(struct batadv_priv *bat_priv)
+static inline int batadv_nc_init(void)
 {
 	return 0;
 }
 
-static inline void batadv_nc_free(struct batadv_priv *bat_priv)
+static inline int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
+{
+	return 0;
+}
+
+static inline void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 {
 	return;
 }
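
The #else branch above keeps callers free of #ifdefs by providing inline no-op stubs when CONFIG_BATMAN_ADV_NC is disabled. A minimal sketch of the idiom, with a hypothetical CONFIG_DEMO_FEATURE:

    #ifdef CONFIG_DEMO_FEATURE
    int demo_feature_init(void);        /* real implementation elsewhere */
    #else
    static inline int demo_feature_init(void)
    {
            return 0;   /* report success; the feature is compiled out */
    }
    #endif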
@@ -71,6 +71,8 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
	    __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
	    __get_user(kmsg->msg_flags, &umsg->msg_flags))
 		return -EFAULT;
+	if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+		return -EINVAL;
 	kmsg->msg_name = compat_ptr(tmp1);
 	kmsg->msg_iov = compat_ptr(tmp2);
 	kmsg->msg_control = compat_ptr(tmp3);
@@ -1916,7 +1916,8 @@ static struct xps_map *expand_xps_map(struct xps_map *map,
 	return new_map;
 }
 
-int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
+int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
+			u16 index)
 {
 	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
 	struct xps_map *map, *new_map;
@@ -644,7 +644,6 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
 	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
 	bpf_jit_free(fp);
-	kfree(fp);
 }
 EXPORT_SYMBOL(sk_filter_release_rcu);

@@ -683,7 +682,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
 	if (fprog->filter == NULL)
 		return -EINVAL;
 
-	fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
+	fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
 	if (!fp)
 		return -ENOMEM;
 	memcpy(fp->insns, fprog->filter, fsize);

@@ -723,6 +722,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
 	struct sk_filter *fp, *old_fp;
 	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
+	unsigned int sk_fsize = sk_filter_size(fprog->len);
 	int err;
 
 	if (sock_flag(sk, SOCK_FILTER_LOCKED))

@@ -732,11 +732,11 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	if (fprog->filter == NULL)
 		return -EINVAL;
 
-	fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
+	fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
 	if (!fp)
 		return -ENOMEM;
 	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
-		sock_kfree_s(sk, fp, fsize+sizeof(*fp));
+		sock_kfree_s(sk, fp, sk_fsize);
 		return -EFAULT;
 	}
@@ -2331,6 +2331,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 #endif
 
 	sk->sk_max_pacing_rate = ~0U;
+	sk->sk_pacing_rate = ~0U;
 	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
@@ -1372,6 +1372,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
 	real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
 	if (!real_dev)
 		return -ENODEV;
+	if (real_dev->type != ARPHRD_IEEE802154)
+		return -EINVAL;
 
 	lowpan_dev_info(dev)->real_dev = real_dev;
 	lowpan_dev_info(dev)->fragment_tag = 0;

@@ -1386,6 +1388,9 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
 
 	entry->ldev = dev;
 
+	/* Set the lowpan harware address to the wpan hardware address. */
+	memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
+
 	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
 	INIT_LIST_HEAD(&entry->list);
 	list_add_tail(&entry->list, &lowpan_devices);
@@ -287,7 +287,7 @@ struct sock *__inet_lookup_established(struct net *net,
 			if (unlikely(!INET_TW_MATCH(sk, net, acookie,
						    saddr, daddr, ports,
						    dif))) {
-				sock_put(sk);
+				inet_twsk_put(inet_twsk(sk));
 				goto begintw;
 			}
 			goto out;
@@ -2072,7 +2072,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
					      RT_SCOPE_LINK);
 			goto make_route;
 		}
-		if (fl4->saddr) {
+		if (!fl4->saddr) {
 			if (ipv4_is_multicast(fl4->daddr))
 				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
@@ -1326,7 +1326,10 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 		tp->lost_cnt_hint -= tcp_skb_pcount(prev);
 	}
 
-	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
+	TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+		TCP_SKB_CB(prev)->end_seq++;
+
 	if (skb == tcp_highest_sack(sk))
 		tcp_advance_highest_sack(sk, skb);
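
The two added lines account for the FIN bit: a FIN consumes one unit of sequence space beyond the payload, so when the SACK-shifting code merges a FIN-carrying skb into prev it must also extend prev's end_seq by one. A toy userspace illustration of that arithmetic (simplified fields, not the kernel's skb control block):

    #include <stdio.h>
    #include <stdint.h>

    /* end_seq covers the payload plus one extra unit when FIN is set */
    static uint32_t tcp_end_seq(uint32_t seq, uint32_t len, int fin)
    {
            return seq + len + (fin ? 1 : 0);
    }

    int main(void)
    {
            uint32_t prev_end = tcp_end_seq(1000, 1000, 0);  /* 2000 */
            uint32_t skb_end  = tcp_end_seq(2000, 1, 1);     /* 2002: 1 byte + FIN */

            /* After merging the FIN-carrying segment, prev must claim the
             * same span FIN included - hence the end_seq++ in the patch. */
            printf("merged end_seq = %u (payload alone would give %u)\n",
                   skb_end, prev_end + 1);
            return 0;
    }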
@@ -637,6 +637,8 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
 	unsigned int size = 0;
 	unsigned int eff_sacks;
 
+	opts->options = 0;
+
 #ifdef CONFIG_TCP_MD5SIG
 	*md5 = tp->af_specific->md5_lookup(sk, sk);
 	if (unlikely(*md5)) {
@@ -116,7 +116,7 @@ struct sock *__inet6_lookup_established(struct net *net,
 			}
 			if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr,
						     ports, dif))) {
-				sock_put(sk);
+				inet_twsk_put(inet_twsk(sk));
 				goto begintw;
 			}
 			goto out;
@@ -1173,9 +1173,8 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
 
 static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
 {
-	struct ip6_tnl *tunnel = netdev_priv(dev);
 	if (new_mtu < 68 ||
-	    new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
+	    new_mtu > 0xFFF8 - dev->hard_header_len)
 		return -EINVAL;
 	dev->mtu = new_mtu;
 	return 0;
@@ -1430,9 +1430,17 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 static int
 ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 {
-	if (new_mtu < IPV6_MIN_MTU) {
-		return -EINVAL;
+	struct ip6_tnl *tnl = netdev_priv(dev);
+
+	if (tnl->parms.proto == IPPROTO_IPIP) {
+		if (new_mtu < 68)
+			return -EINVAL;
+	} else {
+		if (new_mtu < IPV6_MIN_MTU)
+			return -EINVAL;
 	}
+	if (new_mtu > 0xFFF8 - dev->hard_header_len)
+		return -EINVAL;
 	dev->mtu = new_mtu;
 	return 0;
 }
@@ -115,6 +115,11 @@ struct l2tp_net {
 static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
 
+static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
+{
+	return sk->sk_user_data;
+}
+
 static inline struct l2tp_net *l2tp_pernet(struct net *net)
 {
 	BUG_ON(!net);

@@ -504,7 +509,7 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk,
 		return 0;
 
 #if IS_ENABLED(CONFIG_IPV6)
-	if (sk->sk_family == PF_INET6) {
+	if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) {
 		if (!uh->check) {
 			LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
 			return 1;

@@ -1128,7 +1133,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
 	/* Queue the packet to IP for output */
 	skb->local_df = 1;
 #if IS_ENABLED(CONFIG_IPV6)
-	if (skb->sk->sk_family == PF_INET6)
+	if (skb->sk->sk_family == PF_INET6 && !tunnel->v4mapped)
 		error = inet6_csk_xmit(skb, NULL);
 	else
 #endif

@@ -1255,7 +1260,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 
 	/* Calculate UDP checksum if configured to do so */
 #if IS_ENABLED(CONFIG_IPV6)
-	if (sk->sk_family == PF_INET6)
+	if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
 		l2tp_xmit_ipv6_csum(sk, skb, udp_len);
 	else
 #endif

@@ -1304,10 +1309,9 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
  */
 static void l2tp_tunnel_destruct(struct sock *sk)
 {
-	struct l2tp_tunnel *tunnel;
+	struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
 	struct l2tp_net *pn;
 
-	tunnel = sk->sk_user_data;
 	if (tunnel == NULL)
 		goto end;

@@ -1675,7 +1679,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
 	}
 
 	/* Check if this socket has already been prepped */
-	tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
+	tunnel = l2tp_tunnel(sk);
 	if (tunnel != NULL) {
 		/* This socket has already been prepped */
 		err = -EBUSY;

@@ -1704,6 +1708,24 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
 	if (cfg != NULL)
 		tunnel->debug = cfg->debug;
 
+#if IS_ENABLED(CONFIG_IPV6)
+	if (sk->sk_family == PF_INET6) {
+		struct ipv6_pinfo *np = inet6_sk(sk);
+
+		if (ipv6_addr_v4mapped(&np->saddr) &&
+		    ipv6_addr_v4mapped(&np->daddr)) {
+			struct inet_sock *inet = inet_sk(sk);
+
+			tunnel->v4mapped = true;
+			inet->inet_saddr = np->saddr.s6_addr32[3];
+			inet->inet_rcv_saddr = np->rcv_saddr.s6_addr32[3];
+			inet->inet_daddr = np->daddr.s6_addr32[3];
+		} else {
+			tunnel->v4mapped = false;
+		}
+	}
+#endif
+
 	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
 	tunnel->encap = encap;
 	if (encap == L2TP_ENCAPTYPE_UDP) {

@@ -1712,7 +1734,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
 		udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
 		udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
 #if IS_ENABLED(CONFIG_IPV6)
-		if (sk->sk_family == PF_INET6)
+		if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
 			udpv6_encap_enable();
 		else
 #endif
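
The v4mapped logic keys off IPv4-mapped IPv6 addresses (::ffff:a.b.c.d), which carry the IPv4 address in their last 32 bits; that is exactly what the patch reads out of np->saddr.s6_addr32[3]. A small userspace sketch of the same test and extraction:

    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>

    int main(void)
    {
            struct in6_addr a6;
            struct in_addr a4;

            inet_pton(AF_INET6, "::ffff:192.0.2.1", &a6);

            if (IN6_IS_ADDR_V4MAPPED(&a6)) {
                    /* bytes 12..15 hold the embedded IPv4 address */
                    memcpy(&a4, &a6.s6_addr[12], 4);
                    printf("mapped IPv4: %s\n", inet_ntoa(a4));
            }
            return 0;
    }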
@@ -194,6 +194,9 @@ struct l2tp_tunnel {
 	struct sock		*sock;		/* Parent socket */
 	int			fd;		/* Parent fd, if tunnel socket
						 * was created by userspace */
+#if IS_ENABLED(CONFIG_IPV6)
+	bool			v4mapped;
+#endif
 
 	struct work_struct	del_work;
@@ -3056,6 +3056,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
 	case NL80211_IFTYPE_ADHOC:
 		if (!bssid)
 			return 0;
+		if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
+		    ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
+			return 0;
 		if (ieee80211_is_beacon(hdr->frame_control)) {
 			return 1;
 		} else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
@@ -2103,7 +2103,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
-	int rate, skip, shift;
+	int rate, shift;
 	u8 i, exrates, *pos;
 	u32 basic_rates = sdata->vif.bss_conf.basic_rates;
 	u32 rate_flags;

@@ -2131,14 +2131,11 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
 	pos = skb_put(skb, exrates + 2);
 	*pos++ = WLAN_EID_EXT_SUPP_RATES;
 	*pos++ = exrates;
-	skip = 0;
 	for (i = 8; i < sband->n_bitrates; i++) {
 		u8 basic = 0;
 		if ((rate_flags & sband->bitrates[i].flags)
 		    != rate_flags)
 			continue;
-		if (skip++ < 8)
-			continue;
 		if (need_basic && basic_rates & BIT(i))
 			basic = 0x80;
 		rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
@@ -472,20 +472,16 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 	if (f->credit > 0 || !q->rate_enable)
 		goto out;
 
-	if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
-		rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
+	rate = q->flow_max_rate;
+	if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
+		rate = min(skb->sk->sk_pacing_rate, rate);
 
-		rate = min(rate, q->flow_max_rate);
-	} else {
-		rate = q->flow_max_rate;
-		if (rate == ~0U)
-			goto out;
-	}
-	if (rate) {
+	if (rate != ~0U) {
 		u32 plen = max(qdisc_pkt_len(skb), q->quantum);
 		u64 len = (u64)plen * NSEC_PER_SEC;
 
-		do_div(len, rate);
+		if (likely(rate))
+			do_div(len, rate);
 		/* Since socket rate can change later,
		 * clamp the delay to 125 ms.
		 * TODO: maybe segment the too big skb, as in commit

@@ -656,7 +652,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
 		q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
 
 	if (tb[TCA_FQ_INITIAL_QUANTUM])
-		q->quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
+		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
 
 	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
 		q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]);

@@ -735,12 +731,14 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (opts == NULL)
 		goto nla_put_failure;
 
+	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore,
	 * do not bother giving its value
	 */
 	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
-	    nla_put_u32(skb, TCA_FQ_FLOW_DEFAULT_RATE, q->flow_default_rate) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
 		goto nla_put_failure;
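
The reworked fq_dequeue() logic reduces to: start from flow_max_rate, lower it to the socket's pacing rate when one exists, and if a finite rate remains, space packets by packet-length/rate, clamped to 125 ms as the comment in the hunk says. A userspace sketch of the arithmetic (plain division standing in for do_div()):

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    static uint64_t pacing_delay_ns(uint32_t plen, uint32_t rate_Bps)
    {
            uint64_t len = (uint64_t)plen * NSEC_PER_SEC;

            if (rate_Bps)                   /* mirrors the likely(rate) guard */
                    len /= rate_Bps;
            if (len > 125000000ULL)         /* the 125 ms clamp from the hunk */
                    len = 125000000ULL;
            return len;
    }

    int main(void)
    {
            /* 1500-byte packet at 12.5 MB/s (~100 Mbit/s): 120 us gap */
            printf("%llu ns\n",
                   (unsigned long long)pacing_delay_ns(1500, 12500000));
            return 0;
    }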
net/socket.c

@@ -1964,6 +1964,16 @@ struct used_address {
 	unsigned int name_len;
 };
 
+static int copy_msghdr_from_user(struct msghdr *kmsg,
+				 struct msghdr __user *umsg)
+{
+	if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+		return -EFAULT;
+	if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+		return -EINVAL;
+	return 0;
+}
+
 static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
			  struct msghdr *msg_sys, unsigned int flags,
			  struct used_address *used_address)

@@ -1982,8 +1992,11 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
 	if (MSG_CMSG_COMPAT & flags) {
 		if (get_compat_msghdr(msg_sys, msg_compat))
 			return -EFAULT;
-	} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
-		return -EFAULT;
+	} else {
+		err = copy_msghdr_from_user(msg_sys, msg);
+		if (err)
+			return err;
+	}
 
 	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
 		err = -EMSGSIZE;

@@ -2191,8 +2204,11 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
 	if (MSG_CMSG_COMPAT & flags) {
 		if (get_compat_msghdr(msg_sys, msg_compat))
 			return -EFAULT;
-	} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
-		return -EFAULT;
+	} else {
+		err = copy_msghdr_from_user(msg_sys, msg);
+		if (err)
+			return err;
+	}
 
 	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
 		err = -EMSGSIZE;
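
copy_msghdr_from_user() centralizes a bounds check on msg_namelen: the kernel later copies msg_name into a buffer sized like struct sockaddr_storage, so a larger value would overflow it. A toy userspace stand-in for the check (the real function also performs the copy_from_user()):

    #include <stdio.h>
    #include <sys/socket.h>

    static int check_namelen(size_t msg_namelen)
    {
            if (msg_namelen > sizeof(struct sockaddr_storage))
                    return -1;      /* the patch returns -EINVAL here */
            return 0;
    }

    int main(void)
    {
            printf("128 ok?  %d\n", check_namelen(sizeof(struct sockaddr_storage)));
            printf("4096 ok? %d\n", check_namelen(4096));
            return 0;
    }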
@@ -47,12 +47,12 @@ static int net_ctl_permissions(struct ctl_table_header *head,
 
 	/* Allow network administrator to have same access as root. */
 	if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
-	    uid_eq(root_uid, current_uid())) {
+	    uid_eq(root_uid, current_euid())) {
 		int mode = (table->mode >> 6) & 7;
 		return (mode << 6) | (mode << 3) | mode;
 	}
 	/* Allow netns root group to have the same access as the root group */
-	if (gid_eq(root_gid, current_gid())) {
+	if (in_egroup_p(root_gid)) {
 		int mode = (table->mode >> 3) & 7;
 		return (mode << 3) | mode;
 	}
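
Beyond switching to the effective IDs, the permission math in net_ctl_permissions() is simple bit surgery: pull out one rwx triplet of table->mode and replicate it into the other positions. A quick userspace sketch of the owner-bits case from the hunk:

    #include <stdio.h>

    static int owner_as_all(int table_mode)
    {
            int mode = (table_mode >> 6) & 7;   /* owner rwx bits */
            /* grant the same triplet as owner, group and other */
            return (mode << 6) | (mode << 3) | mode;
    }

    int main(void)
    {
            printf("0%o -> 0%o\n", 0644, owner_as_all(0644));   /* prints 0644 -> 0666 */
            return 0;
    }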
@@ -124,6 +124,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
 	rep->udiag_family = AF_UNIX;
 	rep->udiag_type = sk->sk_type;
 	rep->udiag_state = sk->sk_state;
+	rep->pad = 0;
 	rep->udiag_ino = sk_ino;
 	sock_diag_save_cookie(sk, rep->udiag_cookie);
@@ -566,18 +566,13 @@ int wiphy_register(struct wiphy *wiphy)
 	/* check and set up bitrates */
 	ieee80211_set_bitrate_flags(wiphy);
 
+	rtnl_lock();
 	res = device_add(&rdev->wiphy.dev);
-	if (res)
-		return res;
-
-	res = rfkill_register(rdev->rfkill);
 	if (res) {
-		device_del(&rdev->wiphy.dev);
+		rtnl_unlock();
 		return res;
 	}
 
-	rtnl_lock();
 	/* set up regulatory info */
 	wiphy_regulatory_register(wiphy);
 

@@ -606,6 +601,15 @@ int wiphy_register(struct wiphy *wiphy)
 
 	rdev->wiphy.registered = true;
 	rtnl_unlock();
+
+	res = rfkill_register(rdev->rfkill);
+	if (res) {
+		rfkill_destroy(rdev->rfkill);
+		rdev->rfkill = NULL;
+		wiphy_unregister(&rdev->wiphy);
+		return res;
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(wiphy_register);

@@ -640,7 +644,8 @@ void wiphy_unregister(struct wiphy *wiphy)
 		rtnl_unlock();
		__count == 0; }));
 
-	rfkill_unregister(rdev->rfkill);
+	if (rdev->rfkill)
+		rfkill_unregister(rdev->rfkill);
 
 	rtnl_lock();
 	rdev->wiphy.registered = false;
@@ -263,6 +263,8 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
 			if (chan->flags & IEEE80211_CHAN_DISABLED)
 				continue;
 			wdev->wext.ibss.chandef.chan = chan;
+			wdev->wext.ibss.chandef.center_freq1 =
+				chan->center_freq;
 			break;
 		}

@@ -347,6 +349,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
 	if (chan) {
 		wdev->wext.ibss.chandef.chan = chan;
 		wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
+		wdev->wext.ibss.chandef.center_freq1 = freq;
 		wdev->wext.ibss.channel_fixed = true;
 	} else {
 		/* cfg80211_ibss_wext_join will pick one if needed */
@@ -2421,7 +2421,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
 		change = true;
 	}
 
-	if (flags && (*flags & NL80211_MNTR_FLAG_ACTIVE) &&
+	if (flags && (*flags & MONITOR_FLAG_ACTIVE) &&
	    !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
 		return -EOPNOTSUPP;

@@ -2483,7 +2483,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
				  info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
				  &flags);
 
-	if (!err && (flags & NL80211_MNTR_FLAG_ACTIVE) &&
+	if (!err && (flags & MONITOR_FLAG_ACTIVE) &&
	    !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
 		return -EOPNOTSUPP;