Diffstat (limited to 'drivers')
52 files changed, 1527 insertions, 637 deletions
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index fa7d701933ba..93eaf8d94492 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -2614,27 +2614,7 @@ static struct pci_driver lanai_driver = { .probe = lanai_init_one, }; -static int __init lanai_module_init(void) -{ - int x; - - x = pci_register_driver(&lanai_driver); - if (x != 0) - printk(KERN_ERR DEV_LABEL ": no adapter found\n"); - return x; -} - -static void __exit lanai_module_exit(void) -{ - /* We'll only get called when all the interfaces are already - * gone, so there isn't much to do - */ - DPRINTK("cleanup_module()\n"); - pci_unregister_driver(&lanai_driver); -} - -module_init(lanai_module_init); -module_exit(lanai_module_exit); +module_pci_driver(lanai_driver); MODULE_AUTHOR("Mitchell Blank Jr <mitch@sfgoth.com>"); MODULE_DESCRIPTION("Efficient Networks Speedstream 3010 driver"); diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c index fd6d28f3fc36..1cc6ca8bfbda 100644 --- a/drivers/isdn/capi/capidrv.c +++ b/drivers/isdn/capi/capidrv.c @@ -506,7 +506,10 @@ static void send_message(capidrv_contr *card, _cmsg *cmsg) struct sk_buff *skb; size_t len; - capi_cmsg2message(cmsg, cmsg->buf); + if (capi_cmsg2message(cmsg, cmsg->buf)) { + printk(KERN_ERR "capidrv::send_message: parser failure\n"); + return; + } len = CAPIMSG_LEN(cmsg->buf); skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { @@ -1578,7 +1581,12 @@ static _cmsg s_cmsg; static void capidrv_recv_message(struct capi20_appl *ap, struct sk_buff *skb) { - capi_message2cmsg(&s_cmsg, skb->data); + if (capi_message2cmsg(&s_cmsg, skb->data)) { + printk(KERN_ERR "capidrv: applid=%d: received invalid message\n", + ap->applid); + kfree_skb(skb); + return; + } if (debugmode > 3) { _cdebbuf *cdb = capi_cmsg2str(&s_cmsg); @@ -1903,7 +1911,11 @@ static int capidrv_command(isdn_ctrl *c, capidrv_contr *card) NULL, /* Useruserdata */ NULL /* Facilitydataarray */ ); - capi_cmsg2message(&cmdcmsg, cmdcmsg.buf); + if (capi_cmsg2message(&cmdcmsg, cmdcmsg.buf)) { + printk(KERN_ERR "capidrv-%d: capidrv_command: parser failure\n", + card->contrnr); + return -EINVAL; + } plci_change_state(card, bchan->plcip, EV_PLCI_CONNECT_RESP); send_message(card, &cmdcmsg); return 0; @@ -2090,7 +2102,11 @@ static int if_sendbuf(int id, int channel, int doack, struct sk_buff *skb) if (capidrv_add_ack(nccip, datahandle, doack ? 
(int)skb->len : -1) < 0) return 0; - capi_cmsg2message(&sendcmsg, sendcmsg.buf); + if (capi_cmsg2message(&sendcmsg, sendcmsg.buf)) { + printk(KERN_ERR "capidrv-%d: if_sendbuf: parser failure\n", + card->contrnr); + return -EINVAL; + } msglen = CAPIMSG_LEN(sendcmsg.buf); if (skb_headroom(skb) < msglen) { struct sk_buff *nskb = skb_realloc_headroom(skb, msglen); diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c index 4073d1684d07..36c1b37cea0a 100644 --- a/drivers/isdn/capi/capiutil.c +++ b/drivers/isdn/capi/capiutil.c @@ -207,9 +207,24 @@ static unsigned command_2_index(unsigned c, unsigned sc) c = 0x9 + (c & 0x0f); else if (c == 0x41) c = 0x9 + 0x1; + if (c > 0x18) + c = 0x00; return (sc & 3) * (0x9 + 0x9) + c; } +/** + * capi_cmd2par() - find parameter string for CAPI 2.0 command/subcommand + * @cmd: command number + * @subcmd: subcommand number + * + * Return value: static string, NULL if command/subcommand unknown + */ + +static unsigned char *capi_cmd2par(u8 cmd, u8 subcmd) +{ + return cpars[command_2_index(cmd, subcmd)]; +} + /*-------------------------------------------------------*/ #define TYP (cdef[cmsg->par[cmsg->p]].typ) #define OFF (((u8 *)cmsg) + cdef[cmsg->par[cmsg->p]].off) @@ -302,7 +317,9 @@ unsigned capi_cmsg2message(_cmsg *cmsg, u8 *msg) cmsg->m = msg; cmsg->l = 8; cmsg->p = 0; - cmsg->par = cpars[command_2_index(cmsg->Command, cmsg->Subcommand)]; + cmsg->par = capi_cmd2par(cmsg->Command, cmsg->Subcommand); + if (!cmsg->par) + return 1; /* invalid command/subcommand */ pars_2_message(cmsg); @@ -375,7 +392,9 @@ unsigned capi_message2cmsg(_cmsg *cmsg, u8 *msg) cmsg->p = 0; byteTRcpy(cmsg->m + 4, &cmsg->Command); byteTRcpy(cmsg->m + 5, &cmsg->Subcommand); - cmsg->par = cpars[command_2_index(cmsg->Command, cmsg->Subcommand)]; + cmsg->par = capi_cmd2par(cmsg->Command, cmsg->Subcommand); + if (!cmsg->par) + return 1; /* invalid command/subcommand */ message_2_pars(cmsg); @@ -470,12 +489,17 @@ static char *mnames[] = * @cmd: command number * @subcmd: subcommand number * - * Return value: static string, NULL if command/subcommand unknown + * Return value: static string */ char *capi_cmd2str(u8 cmd, u8 subcmd) { - return mnames[command_2_index(cmd, subcmd)]; + char *result; + + result = mnames[command_2_index(cmd, subcmd)]; + if (result == NULL) + result = "INVALID_COMMAND"; + return result; } @@ -625,6 +649,9 @@ static _cdebbuf *printstruct(_cdebbuf *cdb, u8 *m) static _cdebbuf *protocol_message_2_pars(_cdebbuf *cdb, _cmsg *cmsg, int level) { + if (!cmsg->par) + return NULL; /* invalid command/subcommand */ + for (; TYP != _CEND; cmsg->p++) { int slen = 29 + 3 - level; int i; @@ -759,10 +786,10 @@ _cdebbuf *capi_message2str(u8 *msg) cmsg->p = 0; byteTRcpy(cmsg->m + 4, &cmsg->Command); byteTRcpy(cmsg->m + 5, &cmsg->Subcommand); - cmsg->par = cpars[command_2_index(cmsg->Command, cmsg->Subcommand)]; + cmsg->par = capi_cmd2par(cmsg->Command, cmsg->Subcommand); cdb = bufprint(cdb, "%-26s ID=%03d #0x%04x LEN=%04d\n", - mnames[command_2_index(cmsg->Command, cmsg->Subcommand)], + capi_cmd2str(cmsg->Command, cmsg->Subcommand), ((unsigned short *) msg)[1], ((unsigned short *) msg)[3], ((unsigned short *) msg)[0]); @@ -796,7 +823,7 @@ _cdebbuf *capi_cmsg2str(_cmsg *cmsg) cmsg->l = 8; cmsg->p = 0; cdb = bufprint(cdb, "%s ID=%03d #0x%04x LEN=%04d\n", - mnames[command_2_index(cmsg->Command, cmsg->Subcommand)], + capi_cmd2str(cmsg->Command, cmsg->Subcommand), ((u16 *) cmsg->m)[1], ((u16 *) cmsg->m)[3], ((u16 *) cmsg->m)[0]); diff --git a/drivers/isdn/capi/kcapi.c 
b/drivers/isdn/capi/kcapi.c index c123709acf82..823f6985b260 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -1184,7 +1184,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data) * Return value: CAPI result code */ -int capi20_manufacturer(unsigned int cmd, void __user *data) +int capi20_manufacturer(unsigned long cmd, void __user *data) { struct capi_ctr *ctr; int retval; @@ -1259,7 +1259,7 @@ int capi20_manufacturer(unsigned int cmd, void __user *data) } default: - printk(KERN_ERR "kcapi: manufacturer command %d unknown.\n", + printk(KERN_ERR "kcapi: manufacturer command %lu unknown.\n", cmd); break; diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c index 3286903a95d2..ccec7778cad2 100644 --- a/drivers/isdn/gigaset/capi.c +++ b/drivers/isdn/gigaset/capi.c @@ -250,6 +250,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag, l -= 12; if (l <= 0) return; + if (l > 64) + l = 64; /* arbitrary limit */ dbgline = kmalloc(3 * l, GFP_ATOMIC); if (!dbgline) return; @@ -645,7 +647,13 @@ int gigaset_isdn_icall(struct at_state_t *at_state) __func__); break; } - capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize)); + if (capi_cmsg2message(&iif->hcmsg, + __skb_put(skb, msgsize))) { + dev_err(cs->dev, "%s: message parser failure\n", + __func__); + dev_kfree_skb_any(skb); + break; + } dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg); /* add to listeners on this B channel, update state */ @@ -691,7 +699,12 @@ static void send_disconnect_ind(struct bc_state *bcs, dev_err(cs->dev, "%s: out of memory\n", __func__); return; } - capi_cmsg2message(&iif->hcmsg, __skb_put(skb, CAPI_DISCONNECT_IND_LEN)); + if (capi_cmsg2message(&iif->hcmsg, + __skb_put(skb, CAPI_DISCONNECT_IND_LEN))) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg); capi_ctr_handle_message(&iif->ctr, ap->id, skb); } @@ -721,8 +734,12 @@ static void send_disconnect_b3_ind(struct bc_state *bcs, dev_err(cs->dev, "%s: out of memory\n", __func__); return; } - capi_cmsg2message(&iif->hcmsg, - __skb_put(skb, CAPI_DISCONNECT_B3_IND_BASELEN)); + if (capi_cmsg2message(&iif->hcmsg, + __skb_put(skb, CAPI_DISCONNECT_B3_IND_BASELEN))) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg); capi_ctr_handle_message(&iif->ctr, ap->id, skb); } @@ -787,7 +804,11 @@ void gigaset_isdn_connD(struct bc_state *bcs) dev_err(cs->dev, "%s: out of memory\n", __func__); return; } - capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize)); + if (capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize))) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg); capi_ctr_handle_message(&iif->ctr, ap->id, skb); } @@ -887,7 +908,11 @@ void gigaset_isdn_connB(struct bc_state *bcs) dev_err(cs->dev, "%s: out of memory\n", __func__); return; } - capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize)); + if (capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize))) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg); capi_ctr_handle_message(&iif->ctr, ap->id, skb); } @@ -1094,13 +1119,19 @@ static void send_conf(struct gigaset_capi_ctr *iif, struct sk_buff *skb, u16 info) { + struct cardstate *cs = iif->ctr.driverdata; + /* * _CONF 
replies always only have NCCI and Info parameters * so they'll fit into the _REQ message skb */ capi_cmsg_answer(&iif->acmsg); iif->acmsg.Info = info; - capi_cmsg2message(&iif->acmsg, skb->data); + if (capi_cmsg2message(&iif->acmsg, skb->data)) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } __skb_trim(skb, CAPI_STDCONF_LEN); dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); capi_ctr_handle_message(&iif->ctr, ap->id, skb); @@ -1122,7 +1153,11 @@ static void do_facility_req(struct gigaset_capi_ctr *iif, static u8 confparam[10]; /* max. 9 octets + length byte */ /* decode message */ - capi_message2cmsg(cmsg, skb->data); + if (capi_message2cmsg(cmsg, skb->data)) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, cmsg); /* @@ -1180,6 +1215,7 @@ static void do_facility_req(struct gigaset_capi_ctr *iif, confparam[3] = 2; /* length */ capimsg_setu16(confparam, 4, CapiSupplementaryServiceNotSupported); + break; } info = CapiSuccess; confparam[3] = 2; /* length */ @@ -1220,6 +1256,7 @@ static void do_facility_req(struct gigaset_capi_ctr *iif, } /* send FACILITY_CONF with given Info and confirmation parameter */ + dev_kfree_skb_any(skb); capi_cmsg_answer(cmsg); cmsg->Info = info; cmsg->FacilityConfirmationParameter = confparam; @@ -1229,7 +1266,11 @@ static void do_facility_req(struct gigaset_capi_ctr *iif, dev_err(cs->dev, "%s: out of memory\n", __func__); return; } - capi_cmsg2message(cmsg, __skb_put(cskb, msgsize)); + if (capi_cmsg2message(cmsg, __skb_put(cskb, msgsize))) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(cskb); + return; + } dump_cmsg(DEBUG_CMD, __func__, cmsg); capi_ctr_handle_message(&iif->ctr, ap->id, cskb); } @@ -1243,8 +1284,14 @@ static void do_listen_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { + struct cardstate *cs = iif->ctr.driverdata; + /* decode message */ - capi_message2cmsg(&iif->acmsg, skb->data); + if (capi_message2cmsg(&iif->acmsg, skb->data)) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); /* store listening parameters */ @@ -1261,8 +1308,14 @@ static void do_alert_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { + struct cardstate *cs = iif->ctr.driverdata; + /* decode message */ - capi_message2cmsg(&iif->acmsg, skb->data); + if (capi_message2cmsg(&iif->acmsg, skb->data)) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); send_conf(iif, ap, skb, CapiAlertAlreadySent); } @@ -1287,7 +1340,11 @@ static void do_connect_req(struct gigaset_capi_ctr *iif, u16 info; /* decode message */ - capi_message2cmsg(cmsg, skb->data); + if (capi_message2cmsg(cmsg, skb->data)) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, cmsg); /* get free B channel & construct PLCI */ @@ -1574,7 +1631,11 @@ static void do_connect_resp(struct gigaset_capi_ctr *iif, int channel; /* decode message */ - capi_message2cmsg(cmsg, skb->data); + if (capi_message2cmsg(cmsg, skb->data)) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, cmsg); dev_kfree_skb_any(skb); @@ -1740,7 +1801,11 @@ static 
void do_connect_b3_req(struct gigaset_capi_ctr *iif, int channel; /* decode message */ - capi_message2cmsg(cmsg, skb->data); + if (capi_message2cmsg(cmsg, skb->data)) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, cmsg); /* extract and check channel number from PLCI */ @@ -1785,7 +1850,11 @@ static void do_connect_b3_resp(struct gigaset_capi_ctr *iif, u8 command; /* decode message */ - capi_message2cmsg(cmsg, skb->data); + if (capi_message2cmsg(cmsg, skb->data)) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, cmsg); /* extract and check channel number and NCCI */ @@ -1825,7 +1894,11 @@ static void do_connect_b3_resp(struct gigaset_capi_ctr *iif, capi_cmsg_header(cmsg, ap->id, command, CAPI_IND, ap->nextMessageNumber++, cmsg->adr.adrNCCI); __skb_trim(skb, msgsize); - capi_cmsg2message(cmsg, skb->data); + if (capi_cmsg2message(cmsg, skb->data)) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, cmsg); capi_ctr_handle_message(&iif->ctr, ap->id, skb); } @@ -1847,7 +1920,11 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif, int channel; /* decode message */ - capi_message2cmsg(cmsg, skb->data); + if (capi_message2cmsg(cmsg, skb->data)) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, cmsg); /* extract and check channel number from PLCI */ @@ -1903,8 +1980,14 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif, kfree(b3cmsg); return; } - capi_cmsg2message(b3cmsg, - __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN)); + if (capi_cmsg2message(b3cmsg, + __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN))) { + dev_err(cs->dev, "%s: message parser failure\n", + __func__); + kfree(b3cmsg); + dev_kfree_skb_any(b3skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, b3cmsg); kfree(b3cmsg); capi_ctr_handle_message(&iif->ctr, ap->id, b3skb); @@ -1935,7 +2018,11 @@ static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif, int channel; /* decode message */ - capi_message2cmsg(cmsg, skb->data); + if (capi_message2cmsg(cmsg, skb->data)) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, cmsg); /* extract and check channel number and NCCI */ @@ -2052,8 +2139,14 @@ static void do_reset_b3_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { + struct cardstate *cs = iif->ctr.driverdata; + /* decode message */ - capi_message2cmsg(&iif->acmsg, skb->data); + if (capi_message2cmsg(&iif->acmsg, skb->data)) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); send_conf(iif, ap, skb, CapiResetProcedureNotSupportedByCurrentProtocol); @@ -2066,8 +2159,14 @@ static void do_unsupported(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { + struct cardstate *cs = iif->ctr.driverdata; + /* decode message */ - capi_message2cmsg(&iif->acmsg, skb->data); + if (capi_message2cmsg(&iif->acmsg, skb->data)) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState); } @@ -2079,8 
+2178,14 @@ static void do_nothing(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { + struct cardstate *cs = iif->ctr.driverdata; + /* decode message */ - capi_message2cmsg(&iif->acmsg, skb->data); + if (capi_message2cmsg(&iif->acmsg, skb->data)) { + dev_err(cs->dev, "%s: message parser failure\n", __func__); + dev_kfree_skb_any(skb); + return; + } dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); dev_kfree_skb_any(skb); } @@ -2357,7 +2462,7 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) struct gigaset_capi_ctr *iif; int rc; - iif = kmalloc(sizeof(*iif), GFP_KERNEL); + iif = kzalloc(sizeof(*iif), GFP_KERNEL); if (!iif) { pr_err("%s: out of memory\n", __func__); return -ENOMEM; @@ -2366,7 +2471,7 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) /* prepare controller structure */ iif->ctr.owner = THIS_MODULE; iif->ctr.driverdata = cs; - strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name)); + strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name) - 1); iif->ctr.driver_name = "gigaset"; iif->ctr.load_firmware = NULL; iif->ctr.reset_ctr = NULL; diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c index dcae14aef376..c8ced12fa452 100644 --- a/drivers/isdn/gigaset/ev-layer.c +++ b/drivers/isdn/gigaset/ev-layer.c @@ -604,14 +604,14 @@ void gigaset_handle_modem_response(struct cardstate *cs) } EXPORT_SYMBOL_GPL(gigaset_handle_modem_response); -/* disconnect +/* disconnect_nobc * process closing of connection associated with given AT state structure + * without B channel */ -static void disconnect(struct at_state_t **at_state_p) +static void disconnect_nobc(struct at_state_t **at_state_p, + struct cardstate *cs) { unsigned long flags; - struct bc_state *bcs = (*at_state_p)->bcs; - struct cardstate *cs = (*at_state_p)->cs; spin_lock_irqsave(&cs->lock, flags); ++(*at_state_p)->seq_index; @@ -622,23 +622,44 @@ static void disconnect(struct at_state_t **at_state_p) gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE"); cs->commands_pending = 1; } - spin_unlock_irqrestore(&cs->lock, flags); - if (bcs) { - /* B channel assigned: invoke hardware specific handler */ - cs->ops->close_bchannel(bcs); - /* notify LL */ - if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) { - bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL); - gigaset_isdn_hupD(bcs); - } - } else { - /* no B channel assigned: just deallocate */ - spin_lock_irqsave(&cs->lock, flags); + /* check for and deallocate temporary AT state */ + if (!list_empty(&(*at_state_p)->list)) { list_del(&(*at_state_p)->list); kfree(*at_state_p); *at_state_p = NULL; - spin_unlock_irqrestore(&cs->lock, flags); + } + + spin_unlock_irqrestore(&cs->lock, flags); +} + +/* disconnect_bc + * process closing of connection associated with given AT state structure + * and B channel + */ +static void disconnect_bc(struct at_state_t *at_state, + struct cardstate *cs, struct bc_state *bcs) +{ + unsigned long flags; + + spin_lock_irqsave(&cs->lock, flags); + ++at_state->seq_index; + + /* revert to selected idle mode */ + if (!cs->cidmode) { + cs->at_state.pending_commands |= PC_UMMODE; + gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE"); + cs->commands_pending = 1; + } + spin_unlock_irqrestore(&cs->lock, flags); + + /* invoke hardware specific handler */ + cs->ops->close_bchannel(bcs); + + /* notify LL */ + if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) { + bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL); + gigaset_isdn_hupD(bcs); } } @@ -646,7 +667,7 @@ static void disconnect(struct at_state_t **at_state_p) * 
get a free AT state structure: either one of those associated with the * B channels of the Gigaset device, or if none of those is available, * a newly allocated one with bcs=NULL - * The structure should be freed by calling disconnect() after use. + * The structure should be freed by calling disconnect_nobc() after use. */ static inline struct at_state_t *get_free_channel(struct cardstate *cs, int cid) @@ -1057,7 +1078,7 @@ static void do_action(int action, struct cardstate *cs, struct event_t *ev) { struct at_state_t *at_state = *p_at_state; - struct at_state_t *at_state2; + struct bc_state *bcs2; unsigned long flags; int channel; @@ -1156,8 +1177,8 @@ static void do_action(int action, struct cardstate *cs, break; case ACT_RING: /* get fresh AT state structure for new CID */ - at_state2 = get_free_channel(cs, ev->parameter); - if (!at_state2) { + at_state = get_free_channel(cs, ev->parameter); + if (!at_state) { dev_warn(cs->dev, "RING ignored: could not allocate channel structure\n"); break; @@ -1166,16 +1187,16 @@ static void do_action(int action, struct cardstate *cs, /* initialize AT state structure * note that bcs may be NULL if no B channel is free */ - at_state2->ConState = 700; + at_state->ConState = 700; for (i = 0; i < STR_NUM; ++i) { - kfree(at_state2->str_var[i]); - at_state2->str_var[i] = NULL; + kfree(at_state->str_var[i]); + at_state->str_var[i] = NULL; } - at_state2->int_var[VAR_ZCTP] = -1; + at_state->int_var[VAR_ZCTP] = -1; spin_lock_irqsave(&cs->lock, flags); - at_state2->timer_expires = RING_TIMEOUT; - at_state2->timer_active = 1; + at_state->timer_expires = RING_TIMEOUT; + at_state->timer_active = 1; spin_unlock_irqrestore(&cs->lock, flags); break; case ACT_ICALL: @@ -1213,14 +1234,17 @@ static void do_action(int action, struct cardstate *cs, case ACT_DISCONNECT: cs->cur_at_seq = SEQ_NONE; at_state->cid = -1; - if (bcs && cs->onechannel && cs->dle) { + if (!bcs) { + disconnect_nobc(p_at_state, cs); + } else if (cs->onechannel && cs->dle) { /* Check for other open channels not needed: * DLE only used for M10x with one B channel. */ at_state->pending_commands |= PC_DLE0; cs->commands_pending = 1; - } else - disconnect(p_at_state); + } else { + disconnect_bc(at_state, cs, bcs); + } break; case ACT_FAKEDLE0: at_state->int_var[VAR_ZDLE] = 0; @@ -1228,25 +1252,27 @@ static void do_action(int action, struct cardstate *cs, /* fall through */ case ACT_DLE0: cs->cur_at_seq = SEQ_NONE; - at_state2 = &cs->bcs[cs->curchannel].at_state; - disconnect(&at_state2); + bcs2 = cs->bcs + cs->curchannel; + disconnect_bc(&bcs2->at_state, cs, bcs2); break; case ACT_ABORTHUP: cs->cur_at_seq = SEQ_NONE; dev_warn(cs->dev, "Could not hang up.\n"); at_state->cid = -1; - if (bcs && cs->onechannel) + if (!bcs) + disconnect_nobc(p_at_state, cs); + else if (cs->onechannel) at_state->pending_commands |= PC_DLE0; else - disconnect(p_at_state); + disconnect_bc(at_state, cs, bcs); schedule_init(cs, MS_RECOVER); break; case ACT_FAILDLE0: cs->cur_at_seq = SEQ_NONE; dev_warn(cs->dev, "Error leaving DLE mode.\n"); cs->dle = 0; - at_state2 = &cs->bcs[cs->curchannel].at_state; - disconnect(&at_state2); + bcs2 = cs->bcs + cs->curchannel; + disconnect_bc(&bcs2->at_state, cs, bcs2); schedule_init(cs, MS_RECOVER); break; case ACT_FAILDLE1: @@ -1275,14 +1301,14 @@ static void do_action(int action, struct cardstate *cs, if (reinit_and_retry(cs, channel) < 0) { dev_warn(cs->dev, "Could not get a call ID. 
Cannot dial.\n"); - at_state2 = &cs->bcs[channel].at_state; - disconnect(&at_state2); + bcs2 = cs->bcs + channel; + disconnect_bc(&bcs2->at_state, cs, bcs2); } break; case ACT_ABORTCID: cs->cur_at_seq = SEQ_NONE; - at_state2 = &cs->bcs[cs->curchannel].at_state; - disconnect(&at_state2); + bcs2 = cs->bcs + cs->curchannel; + disconnect_bc(&bcs2->at_state, cs, bcs2); break; case ACT_DIALING: @@ -1291,7 +1317,10 @@ static void do_action(int action, struct cardstate *cs, break; case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL procssng */ - disconnect(p_at_state); + if (bcs) + disconnect_bc(at_state, cs, bcs); + else + disconnect_nobc(p_at_state, cs); break; case ACT_ABORTDIAL: /* error/timeout during dial preparation */ @@ -1380,6 +1409,11 @@ static void do_action(int action, struct cardstate *cs, /* events from the LL */ case ACT_DIAL: + if (!ev->ptr) { + *p_genresp = 1; + *p_resp_code = RSP_ERROR; + break; + } start_dial(at_state, ev->ptr, ev->parameter); break; case ACT_ACCEPT: diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index 82e91ba1acd3..a8e652dac54d 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c @@ -497,6 +497,7 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb) static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb) { unsigned long flags; + int len; gigaset_dbg_buffer(cs->mstate != MS_LOCKED ? DEBUG_TRANSCMD : DEBUG_LOCKCMD, @@ -515,10 +516,11 @@ static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb) spin_unlock_irqrestore(&cs->cmdlock, flags); spin_lock_irqsave(&cs->lock, flags); + len = cb->len; if (cs->connected) tasklet_schedule(&cs->write_tasklet); spin_unlock_irqrestore(&cs->lock, flags); - return cb->len; + return len; } static int gigaset_write_room(struct cardstate *cs) diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 776e965dc9f4..05b0ca3bf71d 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -21,8 +21,12 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg) { - return mdiobus_read(to_mii_bus(ds->master_dev), - ds->pd->sw_addr + addr, reg); + struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev); + + if (bus == NULL) + return -EINVAL; + + return mdiobus_read(bus, ds->pd->sw_addr + addr, reg); } #define REG_READ(addr, reg) \ @@ -38,8 +42,12 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg) static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) { - return mdiobus_write(to_mii_bus(ds->master_dev), - ds->pd->sw_addr + addr, reg, val); + struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev); + + if (bus == NULL) + return -EINVAL; + + return mdiobus_write(bus, ds->pd->sw_addr + addr, reg, val); } #define REG_WRITE(addr, reg, val) \ diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 6365e30138af..1020a7af67cf 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -206,7 +206,7 @@ static int mv88e6171_setup_port(struct dsa_switch *ds, int p) */ val = 0x0433; if (dsa_is_cpu_port(ds, p)) { - if (ds->dst->tag_protocol == htons(ETH_P_EDSA)) + if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) val |= 0x3300; else val |= 0x0100; diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index d6f6428b27dc..a6c90cf5634d 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -75,11 +75,14 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, 
int reg) int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev); int ret; + if (bus == NULL) + return -EINVAL; + mutex_lock(&ps->smi_mutex); - ret = __mv88e6xxx_reg_read(to_mii_bus(ds->master_dev), - ds->pd->sw_addr, addr, reg); + ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg); mutex_unlock(&ps->smi_mutex); return ret; @@ -119,11 +122,14 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr, int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev); int ret; + if (bus == NULL) + return -EINVAL; + mutex_lock(&ps->smi_mutex); - ret = __mv88e6xxx_reg_write(to_mii_bus(ds->master_dev), - ds->pd->sw_addr, addr, reg, val); + ret = __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val); mutex_unlock(&ps->smi_mutex); return ret; diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile index 589b35247713..68be565548c0 100644 --- a/drivers/net/ethernet/apm/xgene/Makefile +++ b/drivers/net/ethernet/apm/xgene/Makefile @@ -2,6 +2,6 @@ # Makefile for APM X-Gene Ethernet Driver. # -xgene-enet-objs := xgene_enet_hw.o xgene_enet_xgmac.o \ +xgene-enet-objs := xgene_enet_hw.o xgene_enet_sgmac.o xgene_enet_xgmac.o \ xgene_enet_main.o xgene_enet_ethtool.o obj-$(CONFIG_NET_XGENE) += xgene-enet.o diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c index c1c997b92342..416d6ebfc2ce 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c @@ -64,16 +64,25 @@ static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) return -ENODEV; return phy_ethtool_gset(phydev, cmd); + } else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { + cmd->supported = SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_MII; + cmd->advertising = cmd->supported; + ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->duplex = DUPLEX_FULL; + cmd->port = PORT_MII; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = AUTONEG_ENABLE; + } else { + cmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE; + cmd->advertising = cmd->supported; + ethtool_cmd_speed_set(cmd, SPEED_10000); + cmd->duplex = DUPLEX_FULL; + cmd->port = PORT_FIBRE; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = AUTONEG_DISABLE; } - cmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE; - cmd->advertising = cmd->supported; - ethtool_cmd_speed_set(cmd, SPEED_10000); - cmd->duplex = DUPLEX_FULL; - cmd->port = PORT_FIBRE; - cmd->transceiver = XCVR_EXTERNAL; - cmd->autoneg = AUTONEG_DISABLE; - return 0; } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index c8f3824f7606..63ea1941e973 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -410,7 +410,6 @@ static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata) addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | (dev_addr[1] << 8) | dev_addr[0]; addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16); - addr1 |= pdata->phy_addr & 0xFFFF; xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0); xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h 
b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 15ec4267779c..38558584080e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -44,6 +44,7 @@ static inline u32 xgene_get_bits(u32 val, u32 start, u32 end) enum xgene_enet_rm { RM0, + RM1, RM3 = 3 }; @@ -143,6 +144,8 @@ enum xgene_enet_rm { #define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4) #define CFG_MACMODE_SET(dst, val) xgene_set_bits(dst, val, 18, 2) #define CFG_WAITASYNCRD_SET(dst, val) xgene_set_bits(dst, val, 0, 16) +#define CFG_CLE_DSTQID0(val) (val & GENMASK(11, 0)) +#define CFG_CLE_FPSEL0(val) ((val << 16) & GENMASK(19, 16)) #define ICM_CONFIG0_REG_0_ADDR 0x0400 #define ICM_CONFIG2_REG_0_ADDR 0x0410 #define RX_DV_GATE_REG_0_ADDR 0x05fc @@ -179,7 +182,6 @@ enum xgene_enet_rm { #define TUND_ADDR 0x4a #define TSO_IPPROTO_TCP 1 -#define FULL_DUPLEX 2 #define USERINFO_POS 0 #define USERINFO_LEN 32 diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 9b85239ceedf..3c208cc6f6bb 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -21,6 +21,7 @@ #include "xgene_enet_main.h" #include "xgene_enet_hw.h" +#include "xgene_enet_sgmac.h" #include "xgene_enet_xgmac.h" static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool) @@ -813,6 +814,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) return pdata->phy_mode; } if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII && + pdata->phy_mode != PHY_INTERFACE_MODE_SGMII && pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) { dev_err(dev, "Incorrect phy-connection-type specified\n"); return -ENODEV; @@ -830,14 +832,13 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET; pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET; pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET; - if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII || + pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET; pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET; - pdata->rm = RM3; } else { pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET; pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET; - pdata->rm = RM0; } pdata->rx_buff_cnt = NUM_PKT_BUF; @@ -881,10 +882,17 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) case PHY_INTERFACE_MODE_RGMII: pdata->mac_ops = &xgene_gmac_ops; pdata->port_ops = &xgene_gport_ops; + pdata->rm = RM3; + break; + case PHY_INTERFACE_MODE_SGMII: + pdata->mac_ops = &xgene_sgmac_ops; + pdata->port_ops = &xgene_sgport_ops; + pdata->rm = RM1; break; default: pdata->mac_ops = &xgene_xgmac_ops; pdata->port_ops = &xgene_xgport_ops; + pdata->rm = RM0; break; } } @@ -895,6 +903,7 @@ static int xgene_enet_probe(struct platform_device *pdev) struct xgene_enet_pdata *pdata; struct device *dev = &pdev->dev; struct napi_struct *napi; + struct xgene_mac_ops *mac_ops; int ret; ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata)); @@ -937,10 +946,11 @@ static int xgene_enet_probe(struct platform_device *pdev) napi = &pdata->rx_ring->napi; netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT); + mac_ops = pdata->mac_ops; if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) ret = xgene_enet_mdio_config(pdata); else - 
INIT_DELAYED_WORK(&pdata->link_work, xgene_enet_link_state); + INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); return ret; err: diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 86cf68b65584..874e5a01161f 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -39,6 +39,9 @@ #define NUM_PKT_BUF 64 #define NUM_BUFPOOL 32 +#define PHY_POLL_LINK_ON (10 * HZ) +#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5) + /* software context of a descriptor ring */ struct xgene_enet_desc_ring { struct net_device *ndev; @@ -76,6 +79,7 @@ struct xgene_mac_ops { void (*tx_disable)(struct xgene_enet_pdata *pdata); void (*rx_disable)(struct xgene_enet_pdata *pdata); void (*set_mac_addr)(struct xgene_enet_pdata *pdata); + void (*link_state)(struct work_struct *work); }; struct xgene_port_ops { @@ -109,7 +113,6 @@ struct xgene_enet_pdata { void __iomem *base_addr; void __iomem *ring_csr_addr; void __iomem *ring_cmd_addr; - u32 phy_addr; int phy_mode; enum xgene_enet_rm rm; struct rtnl_link_stats64 stats; @@ -118,6 +121,13 @@ struct xgene_enet_pdata { struct delayed_work link_work; }; +struct xgene_indirect_ctl { + void __iomem *addr; + void __iomem *ctl; + void __iomem *cmd; + void __iomem *cmd_done; +}; + /* Set the specified value into a bit-field defined by its starting position * and length within a single u64. */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c new file mode 100644 index 000000000000..e6d24c210198 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c @@ -0,0 +1,389 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian <isubramanian@apm.com> + * Keyur Chudgar <kchudgar@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "xgene_enet_main.h" +#include "xgene_enet_hw.h" +#include "xgene_enet_sgmac.h" + +static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val) +{ + iowrite32(val, p->eth_csr_addr + offset); +} + +static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p, + u32 offset, u32 val) +{ + iowrite32(val, p->eth_ring_if_addr + offset); +} + +static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p, + u32 offset, u32 val) +{ + iowrite32(val, p->eth_diag_csr_addr + offset); +} + +static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl, + u32 wr_addr, u32 wr_data) +{ + int i; + + iowrite32(wr_addr, ctl->addr); + iowrite32(wr_data, ctl->ctl); + iowrite32(XGENE_ENET_WR_CMD, ctl->cmd); + + /* wait for write command to complete */ + for (i = 0; i < 10; i++) { + if (ioread32(ctl->cmd_done)) { + iowrite32(0, ctl->cmd); + return true; + } + udelay(1); + } + + return false; +} + +static void xgene_enet_wr_mac(struct xgene_enet_pdata *p, + u32 wr_addr, u32 wr_data) +{ + struct xgene_indirect_ctl ctl = { + .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET, + .ctl = p->mcx_mac_addr + MAC_WRITE_REG_OFFSET, + .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET, + .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET + }; + + if (!xgene_enet_wr_indirect(&ctl, wr_addr, wr_data)) + netdev_err(p->ndev, "mac write failed, addr: %04x\n", wr_addr); +} + +static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset) +{ + return ioread32(p->eth_csr_addr + offset); +} + +static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset) +{ + return ioread32(p->eth_diag_csr_addr + offset); +} + +static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr) +{ + u32 rd_data; + int i; + + iowrite32(rd_addr, ctl->addr); + iowrite32(XGENE_ENET_RD_CMD, ctl->cmd); + + /* wait for read command to complete */ + for (i = 0; i < 10; i++) { + if (ioread32(ctl->cmd_done)) { + rd_data = ioread32(ctl->ctl); + iowrite32(0, ctl->cmd); + + return rd_data; + } + udelay(1); + } + + pr_err("%s: mac read failed, addr: %04x\n", __func__, rd_addr); + + return 0; +} + +static u32 xgene_enet_rd_mac(struct xgene_enet_pdata *p, u32 rd_addr) +{ + struct xgene_indirect_ctl ctl = { + .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET, + .ctl = p->mcx_mac_addr + MAC_READ_REG_OFFSET, + .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET, + .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET + }; + + return xgene_enet_rd_indirect(&ctl, rd_addr); +} + +static int xgene_enet_ecc_init(struct xgene_enet_pdata *p) +{ + struct net_device *ndev = p->ndev; + u32 data; + int i; + + xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0); + for (i = 0; i < 10 && data != ~0U ; i++) { + usleep_range(100, 110); + data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR); + } + + if (data != ~0U) { + netdev_err(ndev, "Failed to release memory from shutdown\n"); + return -ENODEV; + } + + return 0; +} + +static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p) +{ + u32 val = 0xffffffff; + + xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val); + xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val); +} + +static void xgene_mii_phy_write(struct xgene_enet_pdata *p, u8 phy_id, + u32 reg, u16 data) +{ + u32 addr, wr_data, done; + int i; + + addr = PHY_ADDR(phy_id) | REG_ADDR(reg); + xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr); + + wr_data = PHY_CONTROL(data); + xgene_enet_wr_mac(p, MII_MGMT_CONTROL_ADDR, wr_data); + + for (i = 0; i < 10; i++) { + done = 
xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR); + if (!(done & BUSY_MASK)) + return; + usleep_range(10, 20); + } + + netdev_err(p->ndev, "MII_MGMT write failed\n"); +} + +static u32 xgene_mii_phy_read(struct xgene_enet_pdata *p, u8 phy_id, u32 reg) +{ + u32 addr, data, done; + int i; + + addr = PHY_ADDR(phy_id) | REG_ADDR(reg); + xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr); + xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK); + + for (i = 0; i < 10; i++) { + done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR); + if (!(done & BUSY_MASK)) { + data = xgene_enet_rd_mac(p, MII_MGMT_STATUS_ADDR); + xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, 0); + + return data; + } + usleep_range(10, 20); + } + + netdev_err(p->ndev, "MII_MGMT read failed\n"); + + return 0; +} + +static void xgene_sgmac_reset(struct xgene_enet_pdata *p) +{ + xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, SOFT_RESET1); + xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, 0); +} + +static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p) +{ + u32 addr0, addr1; + u8 *dev_addr = p->ndev->dev_addr; + + addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | + (dev_addr[1] << 8) | dev_addr[0]; + xgene_enet_wr_mac(p, STATION_ADDR0_ADDR, addr0); + + addr1 = xgene_enet_rd_mac(p, STATION_ADDR1_ADDR); + addr1 |= (dev_addr[5] << 24) | (dev_addr[4] << 16); + xgene_enet_wr_mac(p, STATION_ADDR1_ADDR, addr1); +} + +static u32 xgene_enet_link_status(struct xgene_enet_pdata *p) +{ + u32 data; + + data = xgene_mii_phy_read(p, INT_PHY_ADDR, + SGMII_BASE_PAGE_ABILITY_ADDR >> 2); + + return data & LINK_UP; +} + +static void xgene_sgmac_init(struct xgene_enet_pdata *p) +{ + u32 data, loop = 10; + + xgene_sgmac_reset(p); + + /* Enable auto-negotiation */ + xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x1000); + xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0); + + while (loop--) { + data = xgene_mii_phy_read(p, INT_PHY_ADDR, + SGMII_STATUS_ADDR >> 2); + if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS)) + break; + usleep_range(10, 20); + } + if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS)) + netdev_err(p->ndev, "Auto-negotiation failed\n"); + + data = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR); + ENET_INTERFACE_MODE2_SET(&data, 2); + xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2); + xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE); + + data = xgene_enet_rd_csr(p, ENET_SPARE_CFG_REG_ADDR); + data |= MPA_IDLE_WITH_QMI_EMPTY; + xgene_enet_wr_csr(p, ENET_SPARE_CFG_REG_ADDR, data); + + xgene_sgmac_set_mac_addr(p); + + data = xgene_enet_rd_csr(p, DEBUG_REG_ADDR); + data |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX; + xgene_enet_wr_csr(p, DEBUG_REG_ADDR, data); + + /* Adjust MDC clock frequency */ + data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR); + MGMT_CLOCK_SEL_SET(&data, 7); + xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data); + + /* Enable drop if bufpool not available */ + data = xgene_enet_rd_csr(p, RSIF_CONFIG_REG_ADDR); + data |= CFG_RSIF_FPBUFF_TIMEOUT_EN; + xgene_enet_wr_csr(p, RSIF_CONFIG_REG_ADDR, data); + + /* Rtype should be copied from FP */ + xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0); + + /* Bypass traffic gating */ + xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0); + xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX); + xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR, RESUME_RX0); +} + +static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set) +{ + u32 data; + + data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR); + + if (set) + data 
|= bits; + else + data &= ~bits; + + xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data); +} + +static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p) +{ + xgene_sgmac_rxtx(p, RX_EN, true); +} + +static void xgene_sgmac_tx_enable(struct xgene_enet_pdata *p) +{ + xgene_sgmac_rxtx(p, TX_EN, true); +} + +static void xgene_sgmac_rx_disable(struct xgene_enet_pdata *p) +{ + xgene_sgmac_rxtx(p, RX_EN, false); +} + +static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p) +{ + xgene_sgmac_rxtx(p, TX_EN, false); +} + +static void xgene_enet_reset(struct xgene_enet_pdata *p) +{ + clk_prepare_enable(p->clk); + clk_disable_unprepare(p->clk); + clk_prepare_enable(p->clk); + + xgene_enet_ecc_init(p); + xgene_enet_config_ring_if_assoc(p); +} + +static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p, + u32 dst_ring_num, u16 bufpool_id) +{ + u32 data, fpsel; + + data = CFG_CLE_BYPASS_EN0; + xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR, data); + + fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20; + data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel); + xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR, data); +} + +static void xgene_enet_shutdown(struct xgene_enet_pdata *p) +{ + clk_disable_unprepare(p->clk); +} + +static void xgene_enet_link_state(struct work_struct *work) +{ + struct xgene_enet_pdata *p = container_of(to_delayed_work(work), + struct xgene_enet_pdata, link_work); + struct net_device *ndev = p->ndev; + u32 link, poll_interval; + + link = xgene_enet_link_status(p); + if (link) { + if (!netif_carrier_ok(ndev)) { + netif_carrier_on(ndev); + xgene_sgmac_init(p); + xgene_sgmac_rx_enable(p); + xgene_sgmac_tx_enable(p); + netdev_info(ndev, "Link is Up - 1Gbps\n"); + } + poll_interval = PHY_POLL_LINK_ON; + } else { + if (netif_carrier_ok(ndev)) { + xgene_sgmac_rx_disable(p); + xgene_sgmac_tx_disable(p); + netif_carrier_off(ndev); + netdev_info(ndev, "Link is Down\n"); + } + poll_interval = PHY_POLL_LINK_OFF; + } + + schedule_delayed_work(&p->link_work, poll_interval); +} + +struct xgene_mac_ops xgene_sgmac_ops = { + .init = xgene_sgmac_init, + .reset = xgene_sgmac_reset, + .rx_enable = xgene_sgmac_rx_enable, + .tx_enable = xgene_sgmac_tx_enable, + .rx_disable = xgene_sgmac_rx_disable, + .tx_disable = xgene_sgmac_tx_disable, + .set_mac_addr = xgene_sgmac_set_mac_addr, + .link_state = xgene_enet_link_state +}; + +struct xgene_port_ops xgene_sgport_ops = { + .reset = xgene_enet_reset, + .cle_bypass = xgene_enet_cle_bypass, + .shutdown = xgene_enet_shutdown +}; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h new file mode 100644 index 000000000000..de432465009c --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h @@ -0,0 +1,41 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian <isubramanian@apm.com> + * Keyur Chudgar <kchudgar@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __XGENE_ENET_SGMAC_H__ +#define __XGENE_ENET_SGMAC_H__ + +#define PHY_ADDR(src) (((src)<<8) & GENMASK(12, 8)) +#define REG_ADDR(src) ((src) & GENMASK(4, 0)) +#define PHY_CONTROL(src) ((src) & GENMASK(15, 0)) +#define INT_PHY_ADDR 0x1e +#define SGMII_TBI_CONTROL_ADDR 0x44 +#define SGMII_CONTROL_ADDR 0x00 +#define SGMII_STATUS_ADDR 0x04 +#define SGMII_BASE_PAGE_ABILITY_ADDR 0x14 +#define AUTO_NEG_COMPLETE BIT(5) +#define LINK_STATUS BIT(2) +#define LINK_UP BIT(15) +#define MPA_IDLE_WITH_QMI_EMPTY BIT(12) +#define SG_RX_DV_GATE_REG_0_ADDR 0x0dfc + +extern struct xgene_mac_ops xgene_sgmac_ops; +extern struct xgene_port_ops xgene_sgport_ops; + +#endif /* __XGENE_ENET_SGMAC_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index cd64b9f18b58..67d07206b3c7 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -284,7 +284,7 @@ static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata) clk_disable_unprepare(pdata->clk); } -void xgene_enet_link_state(struct work_struct *work) +static void xgene_enet_link_state(struct work_struct *work) { struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work), struct xgene_enet_pdata, link_work); @@ -322,6 +322,7 @@ struct xgene_mac_ops xgene_xgmac_ops = { .rx_disable = xgene_xgmac_rx_disable, .tx_disable = xgene_xgmac_tx_disable, .set_mac_addr = xgene_xgmac_set_mac_addr, + .link_state = xgene_enet_link_state }; struct xgene_port_ops xgene_xgport_ops = { diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h index d2d59e7ed9ab..5a5296a6d1df 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h @@ -47,10 +47,6 @@ #define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410 #define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804 -#define PHY_POLL_LINK_ON (10 * HZ) -#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5) - -void xgene_enet_link_state(struct work_struct *work); extern struct xgene_mac_ops xgene_xgmac_ops; extern struct xgene_port_ops xgene_xgport_ops; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ba499489969a..dbb41c1923e6 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -8099,9 +8099,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Sync BD data before updating mailbox */ wmb(); - /* Packets are ready, update Tx producer idx local and on card. */ - tw32_tx_mbox(tnapi->prodmbox, entry); - tnapi->tx_prod = entry; if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { netif_tx_stop_queue(txq); @@ -8116,7 +8113,12 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_wake_queue(txq); } - mmiowb(); + if (!skb->xmit_more || netif_xmit_stopped(txq)) { + /* Packets are ready, update Tx producer idx on card. 
*/ + tw32_tx_mbox(tnapi->prodmbox, entry); + mmiowb(); + } + return NETDEV_TX_OK; dma_error: diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 153cafac323c..c3861de9dc81 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -552,6 +552,7 @@ bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb, len = (vec == nvecs) ? last_fraglen : unmap->vector.len; + skb->truesize += unmap->vector.len; totlen += len; skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, @@ -563,7 +564,6 @@ bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb, skb->len += totlen; skb->data_len += totlen; - skb->truesize += totlen; } static inline void diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index c3ce9df0041a..ac6473f75eb9 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -68,7 +68,7 @@ config CHELSIO_T3 config CHELSIO_T4 tristate "Chelsio Communications T4/T5 Ethernet support" - depends on PCI + depends on PCI && (IPV6 || IPV6=n) select FW_LOADER select MDIO ---help--- diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 410ed5805a9a..3c481b260745 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -986,6 +986,8 @@ static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, int t4_seeprom_wp(struct adapter *adapter, bool enable); int get_vpd_params(struct adapter *adapter, struct vpd_params *p); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); +int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, + const u8 *fw_data, unsigned int size, int force); unsigned int t4_flash_cfg_addr(struct adapter *adapter); int t4_get_fw_version(struct adapter *adapter, u32 *vers); int t4_get_tp_version(struct adapter *adapter, u32 *vers); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 5b38e955af6e..3f60070f2519 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2929,16 +2929,26 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) int ret; const struct firmware *fw; struct adapter *adap = netdev2adap(netdev); + unsigned int mbox = FW_PCIE_FW_MASTER_MASK + 1; ef->data[sizeof(ef->data) - 1] = '\0'; ret = request_firmware(&fw, ef->data, adap->pdev_dev); if (ret < 0) return ret; - ret = t4_load_fw(adap, fw->data, fw->size); + /* If the adapter has been fully initialized then we'll go ahead and + * try to get the firmware's cooperation in upgrading to the new + * firmware image otherwise we'll try to do the entire job from the + * host ... and we always "force" the operation in this path. + */ + if (adap->flags & FULL_INIT_DONE) + mbox = adap->mbox; + + ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1); release_firmware(fw); if (!ret) - dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data); + dev_info(adap->pdev_dev, "loaded firmware %s," + " reload cxgb4 driver\n", ef->data); return ret; } @@ -4359,6 +4369,7 @@ EXPORT_SYMBOL(cxgb4_unregister_uld); * success (true) if it belongs otherwise failure (false). * Called with rcu_read_lock() held. 
*/ +#if IS_ENABLED(CONFIG_IPV6) static bool cxgb4_netdev(const struct net_device *netdev) { struct adapter *adap; @@ -4480,6 +4491,13 @@ static int update_root_dev_clip(struct net_device *dev) return ret; /* Parse all bond and vlan devices layered on top of the physical dev */ + root_dev = netdev_master_upper_dev_get_rcu(dev); + if (root_dev) { + ret = update_dev_clip(root_dev, dev); + if (ret) + return ret; + } + for (i = 0; i < VLAN_N_VID; i++) { root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i); if (!root_dev) @@ -4512,6 +4530,7 @@ static void update_clip(const struct adapter *adap) } rcu_read_unlock(); } +#endif /* IS_ENABLED(CONFIG_IPV6) */ /** * cxgb_up - enable the adapter @@ -4558,7 +4577,9 @@ static int cxgb_up(struct adapter *adap) t4_intr_enable(adap); adap->flags |= FULL_INIT_DONE; notify_ulds(adap, CXGB4_STATE_UP); +#if IS_ENABLED(CONFIG_IPV6) update_clip(adap); +#endif out: return err; irq_err: @@ -6852,14 +6873,18 @@ static int __init cxgb4_init_module(void) if (ret < 0) debugfs_remove(cxgb4_debugfs_root); +#if IS_ENABLED(CONFIG_IPV6) register_inet6addr_notifier(&cxgb4_inet6addr_notifier); +#endif return ret; } static void __exit cxgb4_cleanup_module(void) { +#if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier); +#endif pci_unregister_driver(&cxgb4_driver); debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 1fff1495fe31..a9d9d74e4f09 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -37,8 +37,6 @@ #include "t4_regs.h" #include "t4fw_api.h" -static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, - const u8 *fw_data, unsigned int size, int force); /** * t4_wait_op_done_val - wait until an operation is completed * @adapter: the adapter performing the operation @@ -3076,8 +3074,8 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) * positive errno indicates that the adapter is ~probably~ intact, a * negative errno indicates that things are looking bad ... 
*/ -static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, - const u8 *fw_data, unsigned int size, int force) +int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, + const u8 *fw_data, unsigned int size, int force) { const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; int reset, ret; diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 1d5e1822bb2c..9af296a1ca99 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -367,6 +367,56 @@ struct bufdesc_ex { #define FEC_VLAN_TAG_LEN 0x04 #define FEC_ETHTYPE_LEN 0x02 +/* Controller is ENET-MAC */ +#define FEC_QUIRK_ENET_MAC (1 << 0) +/* Controller needs driver to swap frame */ +#define FEC_QUIRK_SWAP_FRAME (1 << 1) +/* Controller uses gasket */ +#define FEC_QUIRK_USE_GASKET (1 << 2) +/* Controller has GBIT support */ +#define FEC_QUIRK_HAS_GBIT (1 << 3) +/* Controller has extend desc buffer */ +#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) +/* Controller has hardware checksum support */ +#define FEC_QUIRK_HAS_CSUM (1 << 5) +/* Controller has hardware vlan support */ +#define FEC_QUIRK_HAS_VLAN (1 << 6) +/* ENET IP errata ERR006358 + * + * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously + * detected as not set during a prior frame transmission, then the + * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs + * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in + * frames not being transmitted until there is a 0-to-1 transition on + * ENET_TDAR[TDAR]. + */ +#define FEC_QUIRK_ERR006358 (1 << 7) +/* ENET IP hw AVB + * + * i.MX6SX ENET IP add Audio Video Bridging (AVB) feature support. + * - Two class indicators on receive with configurable priority + * - Two class indicators and line speed timer on transmit allowing + * implementation class credit based shapers externally + * - Additional DMA registers provisioned to allow managing up to 3 + * independent rings + */ +#define FEC_QUIRK_HAS_AVB (1 << 8) +/* There is a TDAR race condition for mutliQ when the software sets TDAR + * and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles). + * This will cause the udma_tx and udma_tx_arbiter state machines to hang. + * The issue exist at i.MX6SX enet IP. + */ +#define FEC_QUIRK_ERR007885 (1 << 9) +/* ENET Block Guide/ Chapter for the iMX6SX (PELE) address one issue: + * After set ENET_ATCR[Capture], there need some time cycles before the counter + * value is capture in the register clock domain. + * The wait-time-cycles is at least 6 clock cycles of the slower clock between + * the register clock and the 1588 clock. The 1588 ts_clk is fixed to 25Mhz, + * register clock is 66Mhz, so the wait-time-cycles must be greater than 240ns + * (40ns * 6). 
+ */ +#define FEC_QUIRK_BUG_CAPTURE (1 << 10) + struct fec_enet_priv_tx_q { int index; unsigned char *tx_bounce[TX_RING_SIZE]; @@ -484,12 +534,22 @@ struct fec_enet_private { unsigned int itr_clk_rate; u32 rx_copybreak; + + /* ptp clock period in ns*/ + unsigned int ptp_inc; + + /* pps */ + int pps_channel; + unsigned int reload_period; + int pps_enable; + unsigned int next_counter; }; void fec_ptp_init(struct platform_device *pdev); void fec_ptp_start_cyclecounter(struct net_device *ndev); int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); +uint fec_ptp_check_pps_event(struct fec_enet_private *fep); /****************************************************************************/ #endif /* FEC_H */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 87975b5dda94..81b96cf87574 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -78,47 +78,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev); #define FEC_ENET_RAFL_V 0x8 #define FEC_ENET_OPD_V 0xFFF0 -/* Controller is ENET-MAC */ -#define FEC_QUIRK_ENET_MAC (1 << 0) -/* Controller needs driver to swap frame */ -#define FEC_QUIRK_SWAP_FRAME (1 << 1) -/* Controller uses gasket */ -#define FEC_QUIRK_USE_GASKET (1 << 2) -/* Controller has GBIT support */ -#define FEC_QUIRK_HAS_GBIT (1 << 3) -/* Controller has extend desc buffer */ -#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) -/* Controller has hardware checksum support */ -#define FEC_QUIRK_HAS_CSUM (1 << 5) -/* Controller has hardware vlan support */ -#define FEC_QUIRK_HAS_VLAN (1 << 6) -/* ENET IP errata ERR006358 - * - * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously - * detected as not set during a prior frame transmission, then the - * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs - * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in - * frames not being transmitted until there is a 0-to-1 transition on - * ENET_TDAR[TDAR]. - */ -#define FEC_QUIRK_ERR006358 (1 << 7) -/* ENET IP hw AVB - * - * i.MX6SX ENET IP add Audio Video Bridging (AVB) feature support. - * - Two class indicators on receive with configurable priority - * - Two class indicators and line speed timer on transmit allowing - * implementation class credit based shapers externally - * - Additional DMA registers provisioned to allow managing up to 3 - * independent rings - */ -#define FEC_QUIRK_HAS_AVB (1 << 8) -/* There is a TDAR race condition for mutliQ when the software sets TDAR - * and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles). - * This will cause the udma_tx and udma_tx_arbiter state machines to hang. - * The issue exist at i.MX6SX enet IP. 
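The FEC_QUIRK_* bits above reach the driver as platform_device_id.driver_data, and fec_ptp_read() later in this diff tests FEC_QUIRK_BUG_CAPTURE before trusting the captured timer value. A condensed sketch of that pattern; the foo_* names are placeholders, while the quirk macros and platform_get_device_id() are the real pieces.

#include <linux/platform_device.h>
#include <linux/delay.h>

/* each SoC variant advertises its errata as bits in driver_data */
static const struct platform_device_id foo_devtype[] = {
        { .name = "imx6sx-foo",
          .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_AVB |
                         FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE, },
        { /* sentinel */ }
};

static bool foo_has_quirk(struct platform_device *pdev, unsigned long quirk)
{
        const struct platform_device_id *id = platform_get_device_id(pdev);

        return id && (id->driver_data & quirk);
}

/* usage, mirroring fec_ptp_read() further down:
 *
 *      if (foo_has_quirk(fep->pdev, FEC_QUIRK_BUG_CAPTURE))
 *              udelay(1);      // >= 240 ns before reading FEC_ATIME
 */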
- */ -#define FEC_QUIRK_ERR007885 (1 << 9) - static struct platform_device_id fec_devtype[] = { { /* keep it for coldfire */ @@ -146,7 +105,7 @@ static struct platform_device_id fec_devtype[] = { .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | - FEC_QUIRK_ERR007885, + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE, }, { /* sentinel */ } @@ -1622,6 +1581,8 @@ fec_enet_interrupt(int irq, void *dev_id) complete(&fep->mdio_done); } + fec_ptp_check_pps_event(fep); + return ret; } @@ -2912,20 +2873,12 @@ static void fec_poll_controller(struct net_device *dev) #endif #define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM - -static int fec_set_features(struct net_device *netdev, +static inline void fec_enet_set_netdev_features(struct net_device *netdev, netdev_features_t features) { struct fec_enet_private *fep = netdev_priv(netdev); netdev_features_t changed = features ^ netdev->features; - /* Quiesce the device if necessary */ - if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) { - napi_disable(&fep->napi); - netif_tx_lock_bh(netdev); - fec_stop(netdev); - } - netdev->features = features; /* Receive checksum has been changed */ @@ -2935,13 +2888,25 @@ static int fec_set_features(struct net_device *netdev, else fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; } +} + +static int fec_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct fec_enet_private *fep = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; - /* Resume the device after updates */ if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) { + napi_disable(&fep->napi); + netif_tx_lock_bh(netdev); + fec_stop(netdev); + fec_enet_set_netdev_features(netdev, features); fec_restart(netdev); netif_tx_wake_all_queues(netdev); netif_tx_unlock_bh(netdev); napi_enable(&fep->napi); + } else { + fec_enet_set_netdev_features(netdev, features); } return 0; diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index cca3617a2321..992c8c3db553 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -61,6 +61,24 @@ #define FEC_T_INC_CORR_MASK 0x00007f00 #define FEC_T_INC_CORR_OFFSET 8 +#define FEC_T_CTRL_PINPER 0x00000080 +#define FEC_T_TF0_MASK 0x00000001 +#define FEC_T_TF0_OFFSET 0 +#define FEC_T_TF1_MASK 0x00000002 +#define FEC_T_TF1_OFFSET 1 +#define FEC_T_TF2_MASK 0x00000004 +#define FEC_T_TF2_OFFSET 2 +#define FEC_T_TF3_MASK 0x00000008 +#define FEC_T_TF3_OFFSET 3 +#define FEC_T_TDRE_MASK 0x00000001 +#define FEC_T_TDRE_OFFSET 0 +#define FEC_T_TMODE_MASK 0x0000003C +#define FEC_T_TMODE_OFFSET 2 +#define FEC_T_TIE_MASK 0x00000040 +#define FEC_T_TIE_OFFSET 6 +#define FEC_T_TF_MASK 0x00000080 +#define FEC_T_TF_OFFSET 7 + #define FEC_ATIME_CTRL 0x400 #define FEC_ATIME 0x404 #define FEC_ATIME_EVT_OFFSET 0x408 @@ -69,7 +87,143 @@ #define FEC_ATIME_INC 0x414 #define FEC_TS_TIMESTAMP 0x418 +#define FEC_TGSR 0x604 +#define FEC_TCSR(n) (0x608 + n * 0x08) +#define FEC_TCCR(n) (0x60C + n * 0x08) +#define MAX_TIMER_CHANNEL 3 +#define FEC_TMODE_TOGGLE 0x05 +#define FEC_HIGH_PULSE 0x0F + #define FEC_CC_MULT (1 << 31) +#define FEC_COUNTER_PERIOD (1 << 31) +#define PPS_OUPUT_RELOAD_PERIOD NSEC_PER_SEC +#define FEC_CHANNLE_0 0 +#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0 + +/** + * fec_ptp_enable_pps + * @fep: the fec_enet_private structure handle + * @enable: enable the channel pps output + * + * This function enble the PPS 
ouput on the timer channel. + */ +static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) +{ + unsigned long flags; + u32 val, tempval; + int inc; + struct timespec ts; + u64 ns; + u32 remainder; + val = 0; + + if (!(fep->hwts_tx_en || fep->hwts_rx_en)) { + dev_err(&fep->pdev->dev, "No ptp stack is running\n"); + return -EINVAL; + } + + if (fep->pps_enable == enable) + return 0; + + fep->pps_channel = DEFAULT_PPS_CHANNEL; + fep->reload_period = PPS_OUPUT_RELOAD_PERIOD; + inc = fep->ptp_inc; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + + if (enable) { + /* clear capture or output compare interrupt status if have. + */ + writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel)); + + /* It is recommended to doulbe check the TMODE field in the + * TCSR register to be cleared before the first compare counter + * is written into TCCR register. Just add a double check. + */ + val = readl(fep->hwp + FEC_TCSR(fep->pps_channel)); + do { + val &= ~(FEC_T_TMODE_MASK); + writel(val, fep->hwp + FEC_TCSR(fep->pps_channel)); + val = readl(fep->hwp + FEC_TCSR(fep->pps_channel)); + } while (val & FEC_T_TMODE_MASK); + + /* Dummy read counter to update the counter */ + timecounter_read(&fep->tc); + /* We want to find the first compare event in the next + * second point. So we need to know what the ptp time + * is now and how many nanoseconds is ahead to get next second. + * The remaining nanosecond ahead before the next second would be + * NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds + * to current timer would be next second. + */ + tempval = readl(fep->hwp + FEC_ATIME_CTRL); + tempval |= FEC_T_CTRL_CAPTURE; + writel(tempval, fep->hwp + FEC_ATIME_CTRL); + + tempval = readl(fep->hwp + FEC_ATIME); + /* Convert the ptp local counter to 1588 timestamp */ + ns = timecounter_cyc2time(&fep->tc, tempval); + ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); + ts.tv_nsec = remainder; + + /* The tempval is less than 3 seconds, and so val is less than + * 4 seconds. No overflow for 32bit calculation. + */ + val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval; + + /* Need to consider the situation that the current time is + * very close to the second point, which means NSEC_PER_SEC + * - ts.tv_nsec is close to be zero(For example 20ns); Since the timer + * is still running when we calculate the first compare event, it is + * possible that the remaining nanoseonds run out before the compare + * counter is calculated and written into TCCR register. To avoid + * this possibility, we will set the compare event to be the next + * of next second. The current setting is 31-bit timer and wrap + * around over 2 seconds. So it is okay to set the next of next + * seond for the timer. + */ + val += NSEC_PER_SEC; + + /* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to current + * ptp counter, which maybe cause 32-bit wrap. Since the + * (NSEC_PER_SEC - (u32)ts.tv_nsec) is less than 2 second. + * We can ensure the wrap will not cause issue. If the offset + * is bigger than fep->cc.mask would be a error. + */ + val &= fep->cc.mask; + writel(val, fep->hwp + FEC_TCCR(fep->pps_channel)); + + /* Calculate the second the compare event timestamp */ + fep->next_counter = (val + fep->reload_period) & fep->cc.mask; + + /* * Enable compare event when overflow */ + val = readl(fep->hwp + FEC_ATIME_CTRL); + val |= FEC_T_CTRL_PINPER; + writel(val, fep->hwp + FEC_ATIME_CTRL); + + /* Compare channel setting. 
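A compressed restatement of the compare-value arithmetic in fec_ptp_enable_pps() above, assuming the 31-bit free-running counter and 1 s reload period set up elsewhere in this patch; it leaves out the tmreg_lock, the FEC_T_CTRL_CAPTURE trigger and the TCSR mode programming that the real function performs.

static void fec_pps_program_first_event(struct fec_enet_private *fep)
{
        u32 now, nsec_into_second, target;
        u64 ns;

        now = readl(fep->hwp + FEC_ATIME);              /* captured counter */
        ns = timecounter_cyc2time(&fep->tc, now);       /* -> 1588 time */
        div_u64_rem(ns, NSEC_PER_SEC, &nsec_into_second);

        /* counter value at the upcoming 1 s boundary ... */
        target = now + (NSEC_PER_SEC - nsec_into_second);
        /* ... pushed out one more second, so a boundary that is only a few
         * nanoseconds away cannot elapse before TCCR is written */
        target += NSEC_PER_SEC;
        target &= fep->cc.mask;                         /* 31-bit counter */

        writel(target, fep->hwp + FEC_TCCR(fep->pps_channel));
        /* pre-compute the following event for the TF interrupt handler */
        fep->next_counter = (target + fep->reload_period) & fep->cc.mask;
}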
*/ + val = readl(fep->hwp + FEC_TCSR(fep->pps_channel)); + val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET); + val &= ~(1 << FEC_T_TDRE_OFFSET); + val &= ~(FEC_T_TMODE_MASK); + val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET); + writel(val, fep->hwp + FEC_TCSR(fep->pps_channel)); + + /* Write the second compare event timestamp and calculate + * the third timestamp. Refer the TCCR register detail in the spec. + */ + writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel)); + fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask; + } else { + writel(0, fep->hwp + FEC_TCSR(fep->pps_channel)); + } + + fep->pps_enable = enable; + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + return 0; +} + /** * fec_ptp_read - read raw cycle counter (to be used by time counter) * @cc: the cyclecounter structure @@ -82,12 +236,17 @@ static cycle_t fec_ptp_read(const struct cyclecounter *cc) { struct fec_enet_private *fep = container_of(cc, struct fec_enet_private, cc); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); u32 tempval; tempval = readl(fep->hwp + FEC_ATIME_CTRL); tempval |= FEC_T_CTRL_CAPTURE; writel(tempval, fep->hwp + FEC_ATIME_CTRL); + if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE) + udelay(1); + return readl(fep->hwp + FEC_ATIME); } @@ -113,14 +272,15 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev) /* 1ns counter */ writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC); - /* use free running count */ - writel(0, fep->hwp + FEC_ATIME_EVT_PERIOD); + /* use 31-bit timer counter */ + writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD); - writel(FEC_T_CTRL_ENABLE, fep->hwp + FEC_ATIME_CTRL); + writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST, + fep->hwp + FEC_ATIME_CTRL); memset(&fep->cc, 0, sizeof(fep->cc)); fep->cc.read = fec_ptp_read; - fep->cc.mask = CLOCKSOURCE_MASK(32); + fep->cc.mask = CLOCKSOURCE_MASK(31); fep->cc.shift = 31; fep->cc.mult = FEC_CC_MULT; @@ -143,32 +303,59 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev) */ static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) { - u64 diff; unsigned long flags; int neg_adj = 0; - u32 mult = FEC_CC_MULT; + u32 i, tmp; + u32 corr_inc, corr_period; + u32 corr_ns; + u64 lhs, rhs; struct fec_enet_private *fep = container_of(ptp, struct fec_enet_private, ptp_caps); + if (ppb == 0) + return 0; + if (ppb < 0) { ppb = -ppb; neg_adj = 1; } - diff = mult; - diff *= ppb; - diff = div_u64(diff, 1000000000ULL); + /* In theory, corr_inc/corr_period = ppb/NSEC_PER_SEC; + * Try to find the corr_inc between 1 to fep->ptp_inc to + * meet adjustment requirement. + */ + lhs = NSEC_PER_SEC; + rhs = (u64)ppb * (u64)fep->ptp_inc; + for (i = 1; i <= fep->ptp_inc; i++) { + if (lhs >= rhs) { + corr_inc = i; + corr_period = div_u64(lhs, rhs); + break; + } + lhs += NSEC_PER_SEC; + } + /* Not found? Set it to high value - double speed + * correct in every clock step. + */ + if (i > fep->ptp_inc) { + corr_inc = fep->ptp_inc; + corr_period = 1; + } + + if (neg_adj) + corr_ns = fep->ptp_inc - corr_inc; + else + corr_ns = fep->ptp_inc + corr_inc; spin_lock_irqsave(&fep->tmreg_lock, flags); - /* - * dummy read to set cycle_last in tc to now. - * So use adjusted mult to calculate when next call - * timercounter_read. - */ - timecounter_read(&fep->tc); - fep->cc.mult = neg_adj ? 
mult - diff : mult + diff; + tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK; + tmp |= corr_ns << FEC_T_INC_CORR_OFFSET; + writel(tmp, fep->hwp + FEC_ATIME_INC); + writel(corr_period, fep->hwp + FEC_ATIME_CORR); + /* dummy read to update the timer. */ + timecounter_read(&fep->tc); spin_unlock_irqrestore(&fep->tmreg_lock, flags); @@ -188,12 +375,19 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) container_of(ptp, struct fec_enet_private, ptp_caps); unsigned long flags; u64 now; + u32 counter; spin_lock_irqsave(&fep->tmreg_lock, flags); now = timecounter_read(&fep->tc); now += delta; + /* Get the timer value based on adjusted timestamp. + * Update the counter with the masked value. + */ + counter = now & fep->cc.mask; + writel(counter, fep->hwp + FEC_ATIME); + /* reset the timecounter */ timecounter_init(&fep->tc, &fep->cc, now); @@ -244,6 +438,7 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp, u64 ns; unsigned long flags; + u32 counter; mutex_lock(&fep->ptp_clk_mutex); /* Check the ptp clock */ @@ -254,8 +449,13 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp, ns = ts->tv_sec * 1000000000ULL; ns += ts->tv_nsec; + /* Get the timer value based on timestamp. + * Update the counter with the masked value. + */ + counter = ns & fep->cc.mask; spin_lock_irqsave(&fep->tmreg_lock, flags); + writel(counter, fep->hwp + FEC_ATIME); timecounter_init(&fep->tc, &fep->cc, ns); spin_unlock_irqrestore(&fep->tmreg_lock, flags); mutex_unlock(&fep->ptp_clk_mutex); @@ -272,6 +472,15 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp, static int fec_ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { + struct fec_enet_private *fep = + container_of(ptp, struct fec_enet_private, ptp_caps); + int ret = 0; + + if (rq->type == PTP_CLK_REQ_PPS) { + ret = fec_ptp_enable_pps(fep, on); + + return ret; + } return -EOPNOTSUPP; } @@ -386,7 +595,7 @@ void fec_ptp_init(struct platform_device *pdev) fep->ptp_caps.n_ext_ts = 0; fep->ptp_caps.n_per_out = 0; fep->ptp_caps.n_pins = 0; - fep->ptp_caps.pps = 0; + fep->ptp_caps.pps = 1; fep->ptp_caps.adjfreq = fec_ptp_adjfreq; fep->ptp_caps.adjtime = fec_ptp_adjtime; fep->ptp_caps.gettime = fec_ptp_gettime; @@ -394,6 +603,7 @@ void fec_ptp_init(struct platform_device *pdev) fep->ptp_caps.enable = fec_ptp_enable; fep->cycle_speed = clk_get_rate(fep->clk_ptp); + fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed; spin_lock_init(&fep->tmreg_lock); @@ -409,3 +619,36 @@ void fec_ptp_init(struct platform_device *pdev) schedule_delayed_work(&fep->time_keep, HZ); } + +/** + * fec_ptp_check_pps_event + * @fep: the fec_enet_private structure handle + * + * This function check the pps event and reload the timer compare counter. 
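The reworked fec_ptp_adjfreq() above stops scaling the cyclecounter mult and instead programs the hardware correction logic: add (or drop) corr_inc nanoseconds every corr_period timer increments. A hedged restatement of that search, with the register writes left to the caller as in the hunk above:

/* find the smallest corr_inc in [1, ptp_inc] and matching corr_period so
 * that corr_inc / corr_period ~= ppb * ptp_inc / NSEC_PER_SEC */
static void fec_calc_freq_corr(u32 ppb, u32 ptp_inc,
                               u32 *corr_inc, u32 *corr_period)
{
        u64 lhs = NSEC_PER_SEC;
        u64 rhs = (u64)ppb * ptp_inc;
        u32 i;

        for (i = 1; i <= ptp_inc; i++) {
                if (lhs >= rhs) {
                        *corr_inc = i;
                        *corr_period = div_u64(lhs, rhs);
                        return;
                }
                lhs += NSEC_PER_SEC;
        }
        /* offset too large for the correction field: saturate at one full
         * increment per tick (i.e. run at double/half speed) */
        *corr_inc = ptp_inc;
        *corr_period = 1;
}

For example, with a 40 ns increment (a 25 MHz 1588 clock) and ppb = 100 this yields corr_inc = 1 and corr_period = 250000: one extra nanosecond every 10 ms, i.e. 100 ppb. The driver then writes ptp_inc plus or minus corr_inc into the FEC_ATIME_INC correction field and corr_period into FEC_ATIME_CORR, as the hunk above shows.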
+ */ +uint fec_ptp_check_pps_event(struct fec_enet_private *fep) +{ + u32 val; + u8 channel = fep->pps_channel; + struct ptp_clock_event event; + + val = readl(fep->hwp + FEC_TCSR(channel)); + if (val & FEC_T_TF_MASK) { + /* Write the next next compare(not the next according the spec) + * value to the register + */ + writel(fep->next_counter, fep->hwp + FEC_TCCR(channel)); + do { + writel(val, fep->hwp + FEC_TCSR(channel)); + } while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK); + + /* Update the counter; */ + fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask; + + event.type = PTP_CLOCK_PPS; + ptp_clock_event(fep->ptp_clock, &event); + return 1; + } + + return 0; +} diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 379b1a578d3d..4fdf0aa16978 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -338,7 +338,7 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv) static void gfar_rx_buff_size_config(struct gfar_private *priv) { - int frame_size = priv->ndev->mtu + ETH_HLEN; + int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN; /* set this when rx hw offload (TOE) functions are being used */ priv->uses_rxfcb = 0; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 6919adb66f53..5b8300a32bf5 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -320,4 +320,15 @@ config FM10K To compile this driver as a module, choose M here. The module will be called fm10k. MSI-X interrupt support is required +config FM10K_VXLAN + bool "Virtual eXtensible Local Area Network Support" + default n + depends on FM10K && VXLAN && !(FM10K=y && VXLAN=m) + ---help--- + This allows one to create VXLAN virtual interfaces that provide + Layer 2 Networks over Layer 3 Networks. VXLAN is often used + to tunnel virtual network infrastructure in virtualized environments. + Say Y here if you want to use Virtual eXtensible Local Area Network + (VXLAN) in the driver. + endif # NET_VENDOR_INTEL diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 9d7118a0d67a..e645af412e76 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -929,6 +929,30 @@ static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring, return i == tx_ring->count; } +static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. */ + if (likely(fm10k_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) +{ + if (likely(fm10k_desc_unused(tx_ring) >= size)) + return 0; + return __fm10k_maybe_stop_tx(tx_ring, size); +} + static void fm10k_tx_map(struct fm10k_ring *tx_ring, struct fm10k_tx_buffer *first) { @@ -1022,13 +1046,18 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring, tx_ring->next_to_use = i; + /* Make sure there is space in the ring for the next send. 
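The fm10k_tx_map()/fm10k_xmit_frame_ring() changes that follow in this hunk, and the sfc and virtio_net hunks later in the diff, all apply the same xmit_more idiom: make the stop-queue decision before the doorbell, and only write the tail when the stack has no further packets batched or the queue was just stopped. A generic sketch, with the foo_* ring helpers as placeholders; skb->xmit_more is the field as it exists in this kernel (later kernels replaced it with netdev_xmit_more()).

struct foo_ring {
        void __iomem *tail;     /* doorbell register */
        u16 queue_index;        /* net-core queue backed by this ring */
};

static u16 foo_map_descriptors(struct foo_ring *ring, struct sk_buff *skb);
static void foo_maybe_stop_tx(struct foo_ring *ring, u16 needed);

static netdev_tx_t foo_xmit_frame(struct sk_buff *skb, struct net_device *dev,
                                  struct foo_ring *ring)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, ring->queue_index);
        u16 next = foo_map_descriptors(ring, skb);      /* fill descriptors */

        /* stop the queue *before* deciding whether to ring the doorbell,
         * so a stopped queue always gets its pending work flushed */
        foo_maybe_stop_tx(ring, MAX_SKB_FRAGS + 3);

        if (!skb->xmit_more || netif_xmit_stopped(txq)) {
                writel(next, ring->tail);       /* one MMIO kick per batch */
                mmiowb();                       /* order tail writes on IA64/Altix */
        }
        return NETDEV_TX_OK;
}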
*/ + fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED); + /* notify HW of packet */ - writel(i, tx_ring->tail); + if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + writel(i, tx_ring->tail); - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); + } return; dma_error: @@ -1048,30 +1077,6 @@ dma_error: tx_ring->next_to_use = i; } -static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) -{ - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - - smp_mb(); - - /* We need to check again in a case another CPU has just - * made room available. */ - if (likely(fm10k_desc_unused(tx_ring) < size)) - return -EBUSY; - - /* A reprieve! - use start_queue because it doesn't call schedule */ - netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); - ++tx_ring->tx_stats.restart_queue; - return 0; -} - -static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) -{ - if (likely(fm10k_desc_unused(tx_ring) >= size)) - return 0; - return __fm10k_maybe_stop_tx(tx_ring, size); -} - netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, struct fm10k_ring *tx_ring) { @@ -1116,8 +1121,6 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, fm10k_tx_map(tx_ring, first); - fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED); - return NETDEV_TX_OK; out_drop: diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index bf44a8fe711f..8811364b91cb 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -20,9 +20,9 @@ #include "fm10k.h" #include <linux/vmalloc.h> -#if IS_ENABLED(CONFIG_VXLAN) +#if IS_ENABLED(CONFIG_FM10K_VXLAN) #include <net/vxlan.h> -#endif /* CONFIG_VXLAN */ +#endif /* CONFIG_FM10K_VXLAN */ /** * fm10k_setup_tx_resources - allocate Tx resources (Descriptors) @@ -556,7 +556,7 @@ int fm10k_open(struct net_device *netdev) if (err) goto err_set_queues; -#if IS_ENABLED(CONFIG_VXLAN) +#if IS_ENABLED(CONFIG_FM10K_VXLAN) /* update VXLAN port configuration */ vxlan_get_rx_port(netdev); @@ -785,14 +785,14 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) if (!(netdev->flags & IFF_PROMISC)) { err = hw->mac.ops.update_vlan(hw, vid, 0, set); if (err) - return err; + goto err_out; } /* update our base MAC address */ err = hw->mac.ops.update_uc_addr(hw, interface->glort, hw->mac.addr, vid, set, 0); if (err) - return err; + goto err_out; /* set vid prior to syncing/unsyncing the VLAN */ interface->vid = vid + (set ? 
VLAN_N_VID : 0); @@ -801,9 +801,10 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) __dev_uc_unsync(netdev, fm10k_uc_vlan_unsync); __dev_mc_unsync(netdev, fm10k_mc_vlan_unsync); +err_out: fm10k_mbx_unlock(interface); - return 0; + return err; } static int fm10k_vlan_rx_add_vid(struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index e02036c427b9..a0cb74ab3dc6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1489,6 +1489,7 @@ void fm10k_up(struct fm10k_intfc *interface) netif_tx_start_all_queues(interface->netdev); /* kick off the service timer */ + hw->mac.get_host_state = 1; mod_timer(&interface->service_timer, jiffies); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 706fc69aa0c5..97c85b859536 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1261,6 +1261,9 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) struct ixgbe_hw *hw = &adapter->hw; u32 regval; + if (vf >= adapter->num_vfs) + return -EINVAL; + adapter->vfinfo[vf].spoofchk_enabled = setting; regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index ffbae293cef5..6e6f18fc5d76 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -11,7 +11,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = { {QLCNIC_CMD_CREATE_RX_CTX, 4, 1}, {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1}, {QLCNIC_CMD_CREATE_TX_CTX, 4, 1}, - {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1}, + {QLCNIC_CMD_DESTROY_TX_CTX, 3, 1}, {QLCNIC_CMD_INTRPT_TEST, 4, 1}, {QLCNIC_CMD_SET_MTU, 4, 1}, {QLCNIC_CMD_READ_PHY, 4, 2}, @@ -32,7 +32,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = { {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1}, {QLCNIC_CMD_GET_MAC_STATS, 4, 1}, {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3}, - {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1}, + {QLCNIC_CMD_GET_ESWITCH_STATS, 4, 1}, {QLCNIC_CMD_CONFIG_PORT, 4, 1}, {QLCNIC_CMD_TEMP_SIZE, 4, 4}, {QLCNIC_CMD_GET_TEMP_HDR, 4, 1}, @@ -129,7 +129,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, } QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature); - for (i = 1; i < QLCNIC_CDRP_MAX_ARGS; i++) + for (i = 1; i < cmd->req.num; i++) QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]); QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0])); diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 60f85149fc4c..f77cce034ad4 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -71,9 +71,17 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; } -/* Report whether the NIC considers this TX queue empty, given the - * write_count used for the last doorbell push. May return false - * negative. 
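The fm10k_update_vid() change above converts the early returns into goto err_out so that the mailbox lock taken earlier in the function is released on every path and the error from the failing operation is actually propagated (the final return changes from 0 to err). A minimal sketch of that single-exit locking shape; every foo_* symbol is a placeholder.

struct foo_intfc;
static void foo_mbx_lock(struct foo_intfc *i);
static void foo_mbx_unlock(struct foo_intfc *i);
static int foo_hw_update_vlan(struct foo_intfc *i, u16 vid, bool set);
static int foo_hw_update_uc_addr(struct foo_intfc *i, u16 vid, bool set);
static void foo_sync_addresses(struct foo_intfc *i, u16 vid, bool set);

static int foo_update_vid(struct foo_intfc *interface, u16 vid, bool set)
{
        int err;

        foo_mbx_lock(interface);                /* serializes mailbox access */

        err = foo_hw_update_vlan(interface, vid, set);
        if (err)
                goto err_out;

        err = foo_hw_update_uc_addr(interface, vid, set);
        if (err)
                goto err_out;

        foo_sync_addresses(interface, vid, set);

err_out:
        foo_mbx_unlock(interface);              /* single unlock point */
        return err;                             /* 0 or the first failure */
}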
+/* Get partner of a TX queue, seen as part of the same net core queue */ +static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) +{ + if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) + return tx_queue - EFX_TXQ_TYPE_OFFLOAD; + else + return tx_queue + EFX_TXQ_TYPE_OFFLOAD; +} + +/* Report whether this TX queue would be empty for the given write_count. + * May return false negative. */ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned int write_count) @@ -86,9 +94,18 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; } -static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue) +/* Decide whether we can use TX PIO, ie. write packet data directly into + * a buffer on the device. This can reduce latency at the expense of + * throughput, so we only do this if both hardware and software TX rings + * are empty. This also ensures that only one packet at a time can be + * using the PIO buffer. + */ +static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue) { - return __efx_nic_tx_is_empty(tx_queue, tx_queue->write_count); + struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue); + return tx_queue->piobuf && + __efx_nic_tx_is_empty(tx_queue, tx_queue->insert_count) && + __efx_nic_tx_is_empty(partner, partner->insert_count); } /* Decide whether to push a TX descriptor to the NIC vs merely writing @@ -96,6 +113,8 @@ static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue) * descriptor to an empty queue, but is otherwise pointless. Further, * Falcon and Siena have hardware bugs (SF bug 33851) that may be * triggered if we don't check this. + * We use the write_count used for the last doorbell push, to get the + * NIC's view of the tx queue. 
*/ static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 320609842211..ee84a90e371c 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -132,15 +132,6 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) return max_descs; } -/* Get partner of a TX queue, seen as part of the same net core queue */ -static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) -{ - if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) - return tx_queue - EFX_TXQ_TYPE_OFFLOAD; - else - return tx_queue + EFX_TXQ_TYPE_OFFLOAD; -} - static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) { /* We need to consider both queues that the net core sees as one */ @@ -344,6 +335,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) struct efx_nic *efx = tx_queue->efx; struct device *dma_dev = &efx->pci_dev->dev; struct efx_tx_buffer *buffer; + unsigned int old_insert_count = tx_queue->insert_count; skb_frag_t *fragment; unsigned int len, unmap_len = 0; dma_addr_t dma_addr, unmap_addr = 0; @@ -351,7 +343,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) unsigned short dma_flags; int i = 0; - EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); + EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count); if (skb_shinfo(skb)->gso_size) return efx_enqueue_skb_tso(tx_queue, skb); @@ -369,9 +361,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) /* Consider using PIO for short packets */ #ifdef EFX_USE_PIO - if (skb->len <= efx_piobuf_size && tx_queue->piobuf && - efx_nic_tx_is_empty(tx_queue) && - efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) { + if (skb->len <= efx_piobuf_size && !skb->xmit_more && + efx_nic_may_tx_pio(tx_queue)) { buffer = efx_enqueue_skb_pio(tx_queue, skb); dma_flags = EFX_TX_BUF_OPTION; goto finish_packet; @@ -439,13 +430,14 @@ finish_packet: netdev_tx_sent_queue(tx_queue->core_txq, skb->len); + efx_tx_maybe_stop_queue(tx_queue); + /* Pass off to hardware */ - efx_nic_push_buffers(tx_queue); + if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) + efx_nic_push_buffers(tx_queue); tx_queue->tx_packets++; - efx_tx_maybe_stop_queue(tx_queue); - return NETDEV_TX_OK; dma_err: @@ -458,7 +450,7 @@ finish_packet: dev_kfree_skb_any(skb); /* Work backwards until we hit the original insert pointer value */ - while (tx_queue->insert_count != tx_queue->write_count) { + while (tx_queue->insert_count != old_insert_count) { unsigned int pkts_compl = 0, bytes_compl = 0; --tx_queue->insert_count; buffer = __efx_tx_queue_get_insert_buffer(tx_queue); @@ -989,12 +981,13 @@ static int efx_tso_put_header(struct efx_tx_queue *tx_queue, /* Remove buffers put into a tx_queue. None of the buffers must have * an skb attached. 
*/ -static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) +static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, + unsigned int insert_count) { struct efx_tx_buffer *buffer; /* Work backwards until we hit the original insert pointer value */ - while (tx_queue->insert_count != tx_queue->write_count) { + while (tx_queue->insert_count != insert_count) { --tx_queue->insert_count; buffer = __efx_tx_queue_get_insert_buffer(tx_queue); efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); @@ -1258,13 +1251,14 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { struct efx_nic *efx = tx_queue->efx; + unsigned int old_insert_count = tx_queue->insert_count; int frag_i, rc; struct tso_state state; /* Find the packet protocol and sanity-check it */ state.protocol = efx_tso_check_protocol(skb); - EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); + EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count); rc = tso_start(&state, efx, skb); if (rc) @@ -1308,11 +1302,12 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, netdev_tx_sent_queue(tx_queue->core_txq, skb->len); - /* Pass off to hardware */ - efx_nic_push_buffers(tx_queue); - efx_tx_maybe_stop_queue(tx_queue); + /* Pass off to hardware */ + if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) + efx_nic_push_buffers(tx_queue); + tx_queue->tso_bursts++; return NETDEV_TX_OK; @@ -1336,6 +1331,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr, state.header_unmap_len, DMA_TO_DEVICE); - efx_enqueue_unwind(tx_queue); + efx_enqueue_unwind(tx_queue, old_insert_count); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index 552bbc17863c..ccfe7e510418 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -3,7 +3,7 @@ * * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com> - * + * Contributors: Giuseppe Cavallaro <peppe.cavallaro@st.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -22,45 +22,22 @@ #include <linux/of.h> #include <linux/of_net.h> +#define DWMAC_125MHZ 125000000 +#define DWMAC_50MHZ 50000000 +#define DWMAC_25MHZ 25000000 +#define DWMAC_2_5MHZ 2500000 + +#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \ + iface == PHY_INTERFACE_MODE_RGMII_ID || \ + iface == PHY_INTERFACE_MODE_RGMII_RXID || \ + iface == PHY_INTERFACE_MODE_RGMII_TXID) + +#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \ + iface == PHY_INTERFACE_MODE_GMII) + +/* STiH4xx register definitions (STiH415/STiH416/STiH407/STiH410 families) */ + /** - * STi GMAC glue logic. - * -------------------- - * - * _ - * | \ - * --------|0 \ ETH_SEL_INTERNAL_NOTEXT_PHYCLK - * phyclk | |___________________________________________ - * | | | (phyclk-in) - * --------|1 / | - * int-clk |_ / | - * | _ - * | | \ - * |_______|1 \ ETH_SEL_TX_RETIME_CLK - * | |___________________________ - * | | (tx-retime-clk) - * _______|0 / - * | |_ / - * _ | - * | \ | - * --------|0 \ | - * clk_125 | |__| - * | | ETH_SEL_TXCLK_NOT_CLK125 - * --------|1 / - * txclk |_ / - * - * - * ETH_SEL_INTERNAL_NOTEXT_PHYCLK is valid only for RMII where PHY can - * generate 50MHz clock or MAC can generate it. 
- * This bit is configured by "st,ext-phyclk" property. - * - * ETH_SEL_TXCLK_NOT_CLK125 is only valid for gigabit modes, where the 125Mhz - * clock either comes from clk-125 pin or txclk pin. This configuration is - * totally driven by the board wiring. This bit is configured by - * "st,tx-retime-src" property. - * - * TXCLK configuration is different for different phy interface modes - * and changes according to link speed in modes like RGMII. - * * Below table summarizes the clock requirement and clock sources for * supported phy interface modes with link speeds. * ________________________________________________ @@ -74,44 +51,58 @@ * ------------------------------------------------ *| RGMII | 125Mhz | 25Mhz | *| | clk-125/txclk | clkgen | + *| | clkgen | | * ------------------------------------------------ *| RMII | n/a | 25Mhz | *| | |clkgen/phyclk-in | * ------------------------------------------------ * - * TX lines are always retimed with a clk, which can vary depending - * on the board configuration. Below is the table of these bits - * in eth configuration register depending on source of retime clk. - * - *--------------------------------------------------------------- - * src | tx_rt_clk | int_not_ext_phyclk | txclk_n_clk125| - *--------------------------------------------------------------- - * txclk | 0 | n/a | 1 | - *--------------------------------------------------------------- - * ck_125| 0 | n/a | 0 | - *--------------------------------------------------------------- - * phyclk| 1 | 0 | n/a | - *--------------------------------------------------------------- - * clkgen| 1 | 1 | n/a | - *--------------------------------------------------------------- + * Register Configuration + *------------------------------- + * src |BIT(8)| BIT(7)| BIT(6)| + *------------------------------- + * txclk | 0 | n/a | 1 | + *------------------------------- + * ck_125| 0 | n/a | 0 | + *------------------------------- + * phyclk| 1 | 0 | n/a | + *------------------------------- + * clkgen| 1 | 1 | n/a | + *------------------------------- */ - /* Register definition */ +#define STIH4XX_RETIME_SRC_MASK GENMASK(8, 6) +#define STIH4XX_ETH_SEL_TX_RETIME_CLK BIT(8) +#define STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7) +#define STIH4XX_ETH_SEL_TXCLK_NOT_CLK125 BIT(6) + +/* STiD127 register definitions */ - /* 3 bits [8:6] - * [6:6] ETH_SEL_TXCLK_NOT_CLK125 - * [7:7] ETH_SEL_INTERNAL_NOTEXT_PHYCLK - * [8:8] ETH_SEL_TX_RETIME_CLK - * - */ +/** + *----------------------- + * src |BIT(6)| BIT(7)| + *----------------------- + * MII | 1 | n/a | + *----------------------- + * RMII | n/a | 1 | + * clkgen| | | + *----------------------- + * RMII | n/a | 0 | + * phyclk| | | + *----------------------- + * RGMII | 1 | n/a | + * clkgen| | | + *----------------------- + */ -#define TX_RETIME_SRC_MASK GENMASK(8, 6) -#define ETH_SEL_TX_RETIME_CLK BIT(8) -#define ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7) -#define ETH_SEL_TXCLK_NOT_CLK125 BIT(6) +#define STID127_RETIME_SRC_MASK GENMASK(7, 6) +#define STID127_ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7) +#define STID127_ETH_SEL_INTERNAL_NOTEXT_TXCLK BIT(6) -#define ENMII_MASK GENMASK(5, 5) -#define ENMII BIT(5) +#define ENMII_MASK GENMASK(5, 5) +#define ENMII BIT(5) +#define EN_MASK GENMASK(1, 1) +#define EN BIT(1) /** * 3 bits [4:2] @@ -120,29 +111,23 @@ * 010-SGMII * 100-RMII */ -#define MII_PHY_SEL_MASK GENMASK(4, 2) -#define ETH_PHY_SEL_RMII BIT(4) -#define ETH_PHY_SEL_SGMII BIT(3) -#define ETH_PHY_SEL_RGMII BIT(2) -#define ETH_PHY_SEL_GMII 0x0 -#define ETH_PHY_SEL_MII 0x0 - 
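The stih4xx_fix_retime_src() and stid127_fix_retime_src() callbacks that follow apply the bit tables above. A condensed sketch of the STiH4xx idea, which assumes the clkgen path and ignores the st,ext-phyclk / st,tx-retime-src alternatives the real code also honours; sti_dwmac, the DWMAC_* rates, the TX_RETIME_SRC_* indexes and the STIH4XX_* masks are the definitions from this file.

static void foo_fix_retime_src(struct sti_dwmac *dwmac, u32 spd)
{
        u32 src = TX_RETIME_SRC_CLKGEN;
        u32 freq = 0;

        if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
                /* RGMII retime clock tracks the link speed */
                if (spd == SPEED_1000)
                        freq = DWMAC_125MHZ;
                else if (spd == SPEED_100)
                        freq = DWMAC_25MHZ;
                else
                        freq = DWMAC_2_5MHZ;
        } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
                freq = DWMAC_50MHZ;             /* RMII reference clock */
        } else {
                src = TX_RETIME_SRC_TXCLK;      /* MII/GMII retime from txclk */
        }

        if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk && freq)
                clk_set_rate(dwmac->clk, freq);

        regmap_update_bits(dwmac->regmap, dwmac->ctrl_reg,
                           STIH4XX_RETIME_SRC_MASK,
                           stih4xx_tx_retime_val[src]);
}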
-#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \ - iface == PHY_INTERFACE_MODE_RGMII_ID || \ - iface == PHY_INTERFACE_MODE_RGMII_RXID || \ - iface == PHY_INTERFACE_MODE_RGMII_TXID) - -#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \ - iface == PHY_INTERFACE_MODE_GMII) +#define MII_PHY_SEL_MASK GENMASK(4, 2) +#define ETH_PHY_SEL_RMII BIT(4) +#define ETH_PHY_SEL_SGMII BIT(3) +#define ETH_PHY_SEL_RGMII BIT(2) +#define ETH_PHY_SEL_GMII 0x0 +#define ETH_PHY_SEL_MII 0x0 struct sti_dwmac { - int interface; - bool ext_phyclk; - bool is_tx_retime_src_clk_125; - struct clk *clk; - int reg; + int interface; /* MII interface */ + bool ext_phyclk; /* Clock from external PHY */ + u32 tx_retime_src; /* TXCLK Retiming*/ + struct clk *clk; /* PHY clock */ + int ctrl_reg; /* GMAC glue-logic control register */ + int clk_sel_reg; /* GMAC ext clk selection register */ struct device *dev; struct regmap *regmap; + u32 speed; }; static u32 phy_intf_sels[] = { @@ -162,74 +147,133 @@ enum { TX_RETIME_SRC_CLKGEN, }; -static const char *const tx_retime_srcs[] = { - [TX_RETIME_SRC_NA] = "", - [TX_RETIME_SRC_TXCLK] = "txclk", - [TX_RETIME_SRC_CLK_125] = "clk_125", - [TX_RETIME_SRC_PHYCLK] = "phyclk", - [TX_RETIME_SRC_CLKGEN] = "clkgen", -}; - -static u32 tx_retime_val[] = { - [TX_RETIME_SRC_TXCLK] = ETH_SEL_TXCLK_NOT_CLK125, +static u32 stih4xx_tx_retime_val[] = { + [TX_RETIME_SRC_TXCLK] = STIH4XX_ETH_SEL_TXCLK_NOT_CLK125, [TX_RETIME_SRC_CLK_125] = 0x0, - [TX_RETIME_SRC_PHYCLK] = ETH_SEL_TX_RETIME_CLK, - [TX_RETIME_SRC_CLKGEN] = ETH_SEL_TX_RETIME_CLK | - ETH_SEL_INTERNAL_NOTEXT_PHYCLK, + [TX_RETIME_SRC_PHYCLK] = STIH4XX_ETH_SEL_TX_RETIME_CLK, + [TX_RETIME_SRC_CLKGEN] = STIH4XX_ETH_SEL_TX_RETIME_CLK + | STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK, }; -static void setup_retime_src(struct sti_dwmac *dwmac, u32 spd) +static void stih4xx_fix_retime_src(void *priv, u32 spd) { - u32 src = 0, freq = 0; - - if (spd == SPEED_100) { - if (dwmac->interface == PHY_INTERFACE_MODE_MII || - dwmac->interface == PHY_INTERFACE_MODE_GMII) { - src = TX_RETIME_SRC_TXCLK; - } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) { - if (dwmac->ext_phyclk) { - src = TX_RETIME_SRC_PHYCLK; - } else { - src = TX_RETIME_SRC_CLKGEN; - freq = 50000000; - } - - } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) { + struct sti_dwmac *dwmac = priv; + u32 src = dwmac->tx_retime_src; + u32 reg = dwmac->ctrl_reg; + u32 freq = 0; + + if (dwmac->interface == PHY_INTERFACE_MODE_MII) { + src = TX_RETIME_SRC_TXCLK; + } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) { + if (dwmac->ext_phyclk) { + src = TX_RETIME_SRC_PHYCLK; + } else { src = TX_RETIME_SRC_CLKGEN; - freq = 25000000; + freq = DWMAC_50MHZ; } + } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) { + /* On GiGa clk source can be either ext or from clkgen */ + if (spd == SPEED_1000) { + freq = DWMAC_125MHZ; + } else { + /* Switch to clkgen for these speeds */ + src = TX_RETIME_SRC_CLKGEN; + if (spd == SPEED_100) + freq = DWMAC_25MHZ; + else if (spd == SPEED_10) + freq = DWMAC_2_5MHZ; + } + } - if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk) - clk_set_rate(dwmac->clk, freq); + if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk && freq) + clk_set_rate(dwmac->clk, freq); - } else if (spd == SPEED_1000) { - if (dwmac->is_tx_retime_src_clk_125) - src = TX_RETIME_SRC_CLK_125; - else - src = TX_RETIME_SRC_TXCLK; + regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK, + stih4xx_tx_retime_val[src]); +} + +static void stid127_fix_retime_src(void *priv, u32 
spd) +{ + struct sti_dwmac *dwmac = priv; + u32 reg = dwmac->ctrl_reg; + u32 freq = 0; + u32 val = 0; + + if (dwmac->interface == PHY_INTERFACE_MODE_MII) { + val = STID127_ETH_SEL_INTERNAL_NOTEXT_TXCLK; + } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) { + if (!dwmac->ext_phyclk) { + val = STID127_ETH_SEL_INTERNAL_NOTEXT_PHYCLK; + freq = DWMAC_50MHZ; + } + } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) { + val = STID127_ETH_SEL_INTERNAL_NOTEXT_TXCLK; + if (spd == SPEED_1000) + freq = DWMAC_125MHZ; + else if (spd == SPEED_100) + freq = DWMAC_25MHZ; + else if (spd == SPEED_10) + freq = DWMAC_2_5MHZ; } - regmap_update_bits(dwmac->regmap, dwmac->reg, - TX_RETIME_SRC_MASK, tx_retime_val[src]); + if (dwmac->clk && freq) + clk_set_rate(dwmac->clk, freq); + + regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val); } -static void sti_dwmac_exit(struct platform_device *pdev, void *priv) +static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac) { - struct sti_dwmac *dwmac = priv; + struct regmap *regmap = dwmac->regmap; + int iface = dwmac->interface; + struct device *dev = dwmac->dev; + struct device_node *np = dev->of_node; + u32 reg = dwmac->ctrl_reg; + u32 val; if (dwmac->clk) - clk_disable_unprepare(dwmac->clk); + clk_prepare_enable(dwmac->clk); + + if (of_property_read_bool(np, "st,gmac_en")) + regmap_update_bits(regmap, reg, EN_MASK, EN); + + regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]); + + val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII; + regmap_update_bits(regmap, reg, ENMII_MASK, val); +} + +static int stix4xx_init(struct platform_device *pdev, void *priv) +{ + struct sti_dwmac *dwmac = priv; + u32 spd = dwmac->speed; + + sti_dwmac_ctrl_init(dwmac); + + stih4xx_fix_retime_src(priv, spd); + + return 0; } -static void sti_fix_mac_speed(void *priv, unsigned int spd) +static int stid127_init(struct platform_device *pdev, void *priv) { struct sti_dwmac *dwmac = priv; + u32 spd = dwmac->speed; - setup_retime_src(dwmac, spd); + sti_dwmac_ctrl_init(dwmac); - return; + stid127_fix_retime_src(priv, spd); + + return 0; } +static void sti_dwmac_exit(struct platform_device *pdev, void *priv) +{ + struct sti_dwmac *dwmac = priv; + + if (dwmac->clk) + clk_disable_unprepare(dwmac->clk); +} static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, struct platform_device *pdev) { @@ -245,6 +289,13 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf"); if (!res) return -ENODATA; + dwmac->ctrl_reg = res->start; + + /* clk selection from extra syscfg register */ + dwmac->clk_sel_reg = -ENXIO; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf"); + if (res) + dwmac->clk_sel_reg = res->start; regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon"); if (IS_ERR(regmap)) @@ -253,53 +304,31 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, dwmac->dev = dev; dwmac->interface = of_get_phy_mode(np); dwmac->regmap = regmap; - dwmac->reg = res->start; dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk"); - dwmac->is_tx_retime_src_clk_125 = false; + dwmac->tx_retime_src = TX_RETIME_SRC_NA; + dwmac->speed = SPEED_100; if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { const char *rs; + dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN; err = of_property_read_string(np, "st,tx-retime-src", &rs); - if (err < 0) { - dev_err(dev, "st,tx-retime-src not specified\n"); - return err; - } + if (err < 0) + dev_warn(dev, "Use internal clock source\n"); if 
(!strcasecmp(rs, "clk_125")) - dwmac->is_tx_retime_src_clk_125 = true; + dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125; + else if (!strcasecmp(rs, "txclk")) + dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK; + + dwmac->speed = SPEED_1000; } dwmac->clk = devm_clk_get(dev, "sti-ethclk"); - - if (IS_ERR(dwmac->clk)) + if (IS_ERR(dwmac->clk)) { + dev_warn(dev, "No phy clock provided...\n"); dwmac->clk = NULL; - - return 0; -} - -static int sti_dwmac_init(struct platform_device *pdev, void *priv) -{ - struct sti_dwmac *dwmac = priv; - struct regmap *regmap = dwmac->regmap; - int iface = dwmac->interface; - u32 reg = dwmac->reg; - u32 val, spd; - - if (dwmac->clk) - clk_prepare_enable(dwmac->clk); - - regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]); - - val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII; - regmap_update_bits(regmap, reg, ENMII_MASK, val); - - if (IS_PHY_IF_MODE_GBIT(iface)) - spd = SPEED_1000; - else - spd = SPEED_100; - - setup_retime_src(dwmac, spd); + } return 0; } @@ -322,9 +351,16 @@ static void *sti_dwmac_setup(struct platform_device *pdev) return dwmac; } -const struct stmmac_of_data sti_gmac_data = { - .fix_mac_speed = sti_fix_mac_speed, +const struct stmmac_of_data stih4xx_dwmac_data = { + .fix_mac_speed = stih4xx_fix_retime_src, + .setup = sti_dwmac_setup, + .init = stix4xx_init, + .exit = sti_dwmac_exit, +}; + +const struct stmmac_of_data stid127_dwmac_data = { + .fix_mac_speed = stid127_fix_retime_src, .setup = sti_dwmac_setup, - .init = sti_dwmac_init, + .init = stid127_init, .exit = sti_dwmac_exit, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 44528896355d..c3c40650b309 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -144,7 +144,8 @@ extern const struct stmmac_of_data meson6_dwmac_data; extern const struct stmmac_of_data sun7i_gmac_data; #endif #ifdef CONFIG_DWMAC_STI -extern const struct stmmac_of_data sti_gmac_data; +extern const struct stmmac_of_data stih4xx_dwmac_data; +extern const struct stmmac_of_data stid127_dwmac_data; #endif #ifdef CONFIG_DWMAC_SOCFPGA extern const struct stmmac_of_data socfpga_gmac_data; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 652171706258..db56fa7ce8f9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -37,9 +37,10 @@ static const struct of_device_id stmmac_dt_ids[] = { { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data}, #endif #ifdef CONFIG_DWMAC_STI - { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data}, - { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data}, - { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data}, + { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data}, + { .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data}, + { .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data}, + { .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data}, #endif #ifdef CONFIG_DWMAC_SOCFPGA { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data }, @@ -160,11 +161,16 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); - plat->mdio_bus_data = devm_kzalloc(&pdev->dev, - sizeof(struct 
stmmac_mdio_bus_data), - GFP_KERNEL); + if (plat->phy_bus_name) + plat->mdio_bus_data = NULL; + else + plat->mdio_bus_data = + devm_kzalloc(&pdev->dev, + sizeof(struct stmmac_mdio_bus_data), + GFP_KERNEL); - plat->force_sf_dma_mode = of_property_read_bool(np, "snps,force_sf_dma_mode"); + plat->force_sf_dma_mode = + of_property_read_bool(np, "snps,force_sf_dma_mode"); /* Set the maxmtu to a default of JUMBO_LEN in case the * parameter is not present in the device tree. diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index ab167dc49ce4..952e1e4764b7 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2392,6 +2392,15 @@ clean_ndev_ret: return ret; } +static int cpsw_remove_child_device(struct device *dev, void *c) +{ + struct platform_device *pdev = to_platform_device(dev); + + of_device_unregister(pdev); + + return 0; +} + static int cpsw_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); @@ -2406,6 +2415,7 @@ static int cpsw_remove(struct platform_device *pdev) cpdma_chan_destroy(priv->rxch); cpdma_ctlr_destroy(priv->dma); pm_runtime_disable(&pdev->dev); + device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device); if (priv->data.dual_emac) free_netdev(cpsw_get_slave_ndev(priv, 1)); free_netdev(ndev); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 4a000f6dd6fc..657b65bf5cac 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -193,12 +193,9 @@ fail: static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) { - unsigned long flags; - if (!pool) return; - spin_lock_irqsave(&pool->lock, flags); WARN_ON(pool->used_desc); if (pool->cpumap) { dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, @@ -206,7 +203,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) } else { iounmap(pool->iomap); } - spin_unlock_irqrestore(&pool->lock, flags); } static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, @@ -561,7 +557,6 @@ int cpdma_chan_destroy(struct cpdma_chan *chan) cpdma_chan_stop(chan); ctlr->channels[chan->chan_num] = NULL; spin_unlock_irqrestore(&ctlr->lock, flags); - kfree(chan); return 0; } EXPORT_SYMBOL_GPL(cpdma_chan_destroy); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 0fcb5e7eb073..9e17d1a91e71 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -162,7 +162,7 @@ union sub_key { * data: network byte order * return: host byte order */ -static u32 comp_hash(u8 *key, int klen, u8 *data, int dlen) +static u32 comp_hash(u8 *key, int klen, void *data, int dlen) { union sub_key subk; int k_next = 4; @@ -176,7 +176,7 @@ static u32 comp_hash(u8 *key, int klen, u8 *data, int dlen) for (i = 0; i < dlen; i++) { subk.kb = key[k_next]; k_next = (k_next + 1) % klen; - dt = data[i]; + dt = ((u8 *)data)[i]; for (j = 0; j < 8; j++) { if (dt & 0x80) ret ^= subk.ka; @@ -190,26 +190,20 @@ static u32 comp_hash(u8 *key, int klen, u8 *data, int dlen) static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb) { - struct iphdr *iphdr; + struct flow_keys flow; int data_len; - bool ret = false; - if (eth_hdr(skb)->h_proto != htons(ETH_P_IP)) + if (!skb_flow_dissect(skb, &flow) || flow.n_proto != htons(ETH_P_IP)) return false; - iphdr = ip_hdr(skb); + if (flow.ip_proto == IPPROTO_TCP) + data_len = 12; + else + data_len = 8; - if (iphdr->version == 4) { - if (iphdr->protocol == IPPROTO_TCP) - 
data_len = 12; - else - data_len = 8; - *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, - (u8 *)&iphdr->saddr, data_len); - ret = true; - } + *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len); - return ret; + return true; } static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 0c6adaaf898c..65e2892342bd 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -298,7 +298,7 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb) */ if (q->flags & IFF_VNET_HDR) features |= vlan->tap_features; - if (netif_needs_gso(skb, features)) { + if (netif_needs_gso(dev, skb, features)) { struct sk_buff *segs = __skb_gso_segment(skb, features, false); if (IS_ERR(segs)) diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 492435fce1d4..8c2a29a9bd7f 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -198,8 +198,10 @@ static int ksz8021_config_init(struct phy_device *phydev) if (rc) dev_err(&phydev->dev, "failed to set led mode\n"); - phy_write(phydev, MII_KSZPHY_OMSO, val); rc = ksz_config_flags(phydev); + if (rc < 0) + return rc; + rc = phy_write(phydev, MII_KSZPHY_OMSO, val); return rc < 0 ? rc : 0; } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 864159eb744e..e3d84c322e4e 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3189,31 +3189,39 @@ static void r8153_init(struct r8152 *tp) static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) { struct r8152 *tp = usb_get_intfdata(intf); + struct net_device *netdev = tp->netdev; + int ret = 0; mutex_lock(&tp->control); - if (PMSG_IS_AUTO(message)) + if (PMSG_IS_AUTO(message)) { + if (netif_running(netdev) && work_busy(&tp->schedule.work)) { + ret = -EBUSY; + goto out1; + } + set_bit(SELECTIVE_SUSPEND, &tp->flags); - else - netif_device_detach(tp->netdev); + } else { + netif_device_detach(netdev); + } - if (netif_running(tp->netdev)) { + if (netif_running(netdev)) { clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); - cancel_delayed_work_sync(&tp->schedule); tasklet_disable(&tp->tl); if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { rtl_stop_rx(tp); rtl_runtime_suspend_enable(tp, true); } else { + cancel_delayed_work_sync(&tp->schedule); tp->rtl_ops.down(tp); } tasklet_enable(&tp->tl); } - +out1: mutex_unlock(&tp->control); - return 0; + return ret; } static int rtl8152_resume(struct usb_interface *intf) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 3d0ce4468ce6..13d0a8bc8bf3 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -920,6 +920,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) int qnum = skb_get_queue_mapping(skb); struct send_queue *sq = &vi->sq[qnum]; int err; + struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); + bool kick = !skb->xmit_more; /* Free up any pending old buffers before queueing new ones. 
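In the virtio_net hunk above, note that kick is computed from skb->xmit_more before the packet is queued: once the buffer is visible to the device it can be completed and freed at any time, so the skb must not be dereferenced afterwards (the vxlan_encap_bypass hunk later in this diff caches skb->len and skb->dev before netif_rx() for the same reason). A minimal sketch of the ordering, with foo_add_to_ring() and foo_kick_device() as placeholder enqueue and doorbell helpers:

static int foo_add_to_ring(struct net_device *dev, struct sk_buff *skb);
static void foo_kick_device(struct net_device *dev);

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_queue *txq =
                netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        bool kick = !skb->xmit_more;    /* read while the skb is still ours */

        if (foo_add_to_ring(dev, skb)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;    /* a real driver would count the drop */
        }

        /* no skb dereference past this point */
        if (kick || netif_xmit_stopped(txq))
                foo_kick_device(dev);

        return NETDEV_TX_OK;
}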
*/ free_old_xmit_skbs(sq); @@ -956,7 +958,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) } } - if (__netif_subqueue_stopped(dev, qnum) || !skb->xmit_more) + if (kick || netif_xmit_stopped(txq)) virtqueue_kick(sq->vq); return NETDEV_TX_OK; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 2a51e6e48e1e..ca309820d39e 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1437,9 +1437,6 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) if (!in6_dev) goto out; - if (!pskb_may_pull(skb, skb->len)) - goto out; - iphdr = ipv6_hdr(skb); saddr = &iphdr->saddr; daddr = &iphdr->daddr; @@ -1668,6 +1665,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, struct pcpu_sw_netstats *tx_stats, *rx_stats; union vxlan_addr loopback; union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; + struct net_device *dev = skb->dev; + int len = skb->len; tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats); @@ -1691,16 +1690,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, u64_stats_update_begin(&tx_stats->syncp); tx_stats->tx_packets++; - tx_stats->tx_bytes += skb->len; + tx_stats->tx_bytes += len; u64_stats_update_end(&tx_stats->syncp); if (netif_rx(skb) == NET_RX_SUCCESS) { u64_stats_update_begin(&rx_stats->syncp); rx_stats->rx_packets++; - rx_stats->rx_bytes += skb->len; + rx_stats->rx_bytes += len; u64_stats_update_end(&rx_stats->syncp); } else { - skb->dev->stats.rx_dropped++; + dev->stats.rx_dropped++; } } @@ -1878,7 +1877,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) return arp_reduce(dev, skb); #if IS_ENABLED(CONFIG_IPV6) else if (ntohs(eth->h_proto) == ETH_P_IPV6 && - skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) && + pskb_may_pull(skb, sizeof(struct ipv6hdr) + + sizeof(struct nd_msg)) && ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { struct nd_msg *msg; @@ -1887,6 +1887,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) return neigh_reduce(dev, skb); } + eth = eth_hdr(skb); #endif } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index fa671442f420..cca871346a0f 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -638,7 +638,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(!netif_carrier_ok(dev) || (slots > 1 && !xennet_can_sg(dev)) || - netif_needs_gso(skb, netif_skb_features(skb)))) { + netif_needs_gso(dev, skb, netif_skb_features(skb)))) { spin_unlock_irqrestore(&queue->tx_lock, flags); goto drop; } diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 02e69e7ee4a3..3e0a0d315f72 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -259,6 +259,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); } +#if IS_ENABLED(CONFIG_IPV6) static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, struct l2t_entry *e) { @@ -344,6 +345,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); } +#endif static void send_close_req(struct cxgbi_sock *csk) { @@ -756,7 +758,7 @@ static int act_open_rpl_status_to_errno(int status) static void 
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 02e69e7ee4a3..3e0a0d315f72 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -259,6 +259,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
 static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
 			       struct l2t_entry *e)
 {
@@ -344,6 +345,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
 
 	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
 }
+#endif
 
 static void send_close_req(struct cxgbi_sock *csk)
 {
@@ -756,7 +758,7 @@ static int act_open_rpl_status_to_errno(int status)
 
 static void csk_act_open_retry_timer(unsigned long data)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
 	void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
@@ -781,9 +783,11 @@ static void csk_act_open_retry_timer(unsigned long data)
 	if (csk->csk_family == AF_INET) {
 		send_act_open_func = send_act_open_req;
 		skb = alloc_wr(size, 0, GFP_ATOMIC);
+#if IS_ENABLED(CONFIG_IPV6)
 	} else {
 		send_act_open_func = send_act_open_req6;
 		skb = alloc_wr(size6, 0, GFP_ATOMIC);
+#endif
 	}
 
 	if (!skb)
@@ -1313,11 +1317,6 @@ static int init_act_open(struct cxgbi_sock *csk)
 	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
 	cxgbi_sock_get(csk);
 
-	n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr);
-	if (!n) {
-		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
-		goto rel_resource;
-	}
 	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
 	if (!csk->l2t) {
 		pr_err("%s, cannot alloc l2t.\n", ndev->name);
@@ -1335,8 +1334,10 @@ static int init_act_open(struct cxgbi_sock *csk)
 
 	if (csk->csk_family == AF_INET)
 		skb = alloc_wr(size, 0, GFP_NOIO);
+#if IS_ENABLED(CONFIG_IPV6)
 	else
 		skb = alloc_wr(size6, 0, GFP_NOIO);
+#endif
 
 	if (!skb)
 		goto rel_resource;
@@ -1370,8 +1371,10 @@ static int init_act_open(struct cxgbi_sock *csk)
 	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
 	if (csk->csk_family == AF_INET)
 		send_act_open_req(csk, skb, csk->l2t);
+#if IS_ENABLED(CONFIG_IPV6)
 	else
 		send_act_open_req6(csk, skb, csk->l2t);
+#endif
 	neigh_release(n);
 
 	return 0;
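The cxgb4i hunks above fence the IPv6-only transmit path with #if IS_ENABLED(CONFIG_IPV6) and, in the retry timer, initialize skb to NULL: with IPv6 disabled the else arm disappears at preprocessing time, so an AF_INET6 socket must fall through to the existing !skb check rather than read an uninitialized pointer. A minimal, self-contained sketch of that shape is below; xmit_example and the buffer sizes are invented for illustration, not driver code.

/* Sketch only: compile-time guarded family dispatch with a NULL fallback. */
#include <linux/errno.h>
#include <linux/kconfig.h>
#include <linux/skbuff.h>
#include <linux/socket.h>

int xmit_example(int family, gfp_t gfp)
{
	struct sk_buff *skb = NULL;	/* stays NULL if the else arm is compiled out */

	if (family == AF_INET) {
		skb = alloc_skb(128, gfp);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		skb = alloc_skb(256, gfp);	/* IPv6-only sizing */
#endif
	}

	/* Covers both allocation failure and "IPv6 not built in". */
	if (!skb)
		return -ENOMEM;

	kfree_skb(skb);		/* a real driver would hand the skb to its send path */
	return 0;
}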
@@ -1635,129 +1638,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
 	return 0;
 }
 
-#if IS_ENABLED(CONFIG_IPV6)
-static int cxgbi_inet6addr_handler(struct notifier_block *this,
-				   unsigned long event, void *data)
-{
-	struct inet6_ifaddr *ifa = data;
-	struct net_device *event_dev = ifa->idev->dev;
-	struct cxgbi_device *cdev;
-	int ret = NOTIFY_DONE;
-
-	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
-		event_dev = vlan_dev_real_dev(event_dev);
-
-	cdev = cxgbi_device_find_by_netdev_rcu(event_dev, NULL);
-
-	if (!cdev)
-		return ret;
-
-	switch (event) {
-	case NETDEV_UP:
-		ret = cxgb4_clip_get(event_dev,
-				     (const struct in6_addr *)
-				     ((ifa)->addr.s6_addr));
-		if (ret < 0)
-			return ret;
-
-		ret = NOTIFY_OK;
-		break;
-
-	case NETDEV_DOWN:
-		cxgb4_clip_release(event_dev,
-				   (const struct in6_addr *)
-				   ((ifa)->addr.s6_addr));
-		ret = NOTIFY_OK;
-		break;
-
-	default:
-		break;
-	}
-
-	return ret;
-}
-
-static struct notifier_block cxgbi_inet6addr_notifier = {
-	.notifier_call = cxgbi_inet6addr_handler
-};
-
-/* Retrieve IPv6 addresses from a root device (bond, vlan) associated with
- * a physical device.
- * The physical device reference is needed to send the actual CLIP command.
- */
-static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
-{
-	struct inet6_dev *idev = NULL;
-	struct inet6_ifaddr *ifa;
-	int ret = 0;
-
-	idev = __in6_dev_get(root_dev);
-	if (!idev)
-		return ret;
-
-	read_lock_bh(&idev->lock);
-	list_for_each_entry(ifa, &idev->addr_list, if_list) {
-		pr_info("updating the clip for addr %pI6\n",
-			ifa->addr.s6_addr);
-		ret = cxgb4_clip_get(dev, (const struct in6_addr *)
-				     ifa->addr.s6_addr);
-		if (ret < 0)
-			break;
-	}
-
-	read_unlock_bh(&idev->lock);
-	return ret;
-}
-
-static int update_root_dev_clip(struct net_device *dev)
-{
-	struct net_device *root_dev = NULL;
-	int i, ret = 0;
-
-	/* First populate the real net device's IPv6 address */
-	ret = update_dev_clip(dev, dev);
-	if (ret)
-		return ret;
-
-	/* Parse all bond and vlan devices layered on top of the physical dev */
-	root_dev = netdev_master_upper_dev_get(dev);
-	if (root_dev) {
-		ret = update_dev_clip(root_dev, dev);
-		if (ret)
-			return ret;
-	}
-
-	for (i = 0; i < VLAN_N_VID; i++) {
-		root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
-		if (!root_dev)
-			continue;
-
-		ret = update_dev_clip(root_dev, dev);
-		if (ret)
-			break;
-	}
-	return ret;
-}
-
-static void cxgbi_update_clip(struct cxgbi_device *cdev)
-{
-	int i;
-
-	rcu_read_lock();
-
-	for (i = 0; i < cdev->nports; i++) {
-		struct net_device *dev = cdev->ports[i];
-		int ret = 0;
-
-		if (dev)
-			ret = update_root_dev_clip(dev);
-		if (ret < 0)
-			break;
-	}
-	rcu_read_unlock();
-}
-#endif /* IS_ENABLED(CONFIG_IPV6) */
-
 static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
 {
 	struct cxgbi_device *cdev;
@@ -1876,10 +1756,6 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
 	switch (state) {
 	case CXGB4_STATE_UP:
 		pr_info("cdev 0x%p, UP.\n", cdev);
-#if IS_ENABLED(CONFIG_IPV6)
-		cxgbi_update_clip(cdev);
-#endif
-		/* re-initialize */
 		break;
 	case CXGB4_STATE_START_RECOVERY:
 		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
@@ -1910,17 +1786,11 @@ static int __init cxgb4i_init_module(void)
 		return rc;
 	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
 
-#if IS_ENABLED(CONFIG_IPV6)
-	register_inet6addr_notifier(&cxgbi_inet6addr_notifier);
-#endif
 	return 0;
 }
 
 static void __exit cxgb4i_exit_module(void)
 {
-#if IS_ENABLED(CONFIG_IPV6)
-	unregister_inet6addr_notifier(&cxgbi_inet6addr_notifier);
-#endif
 	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
 	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
 	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 6a2001d6b442..54fa6e0bc1bb 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -275,6 +275,7 @@ struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
 }
 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);
 
+#if IS_ENABLED(CONFIG_IPV6)
 static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
 						     int *port)
 {
@@ -307,6 +308,7 @@ static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
 			  ndev, ndev->name);
 	return NULL;
 }
+#endif
 
 void cxgbi_hbas_remove(struct cxgbi_device *cdev)
 {
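The libcxgbi hunk above fences cxgbi_device_find_by_mac() with the same CONFIG_IPV6 test as the IPv6-only paths that call it, presumably so the build does not warn about an unused static function when IPv6 is disabled. A small sketch of keeping a helper's guard in sync with its only call site; example_find_by_mac and example_lookup are invented names, not libcxgbi APIs.

/* Sketch only: guard the definition together with its guarded caller. */
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/kconfig.h>
#include <linux/netdevice.h>

#if IS_ENABLED(CONFIG_IPV6)
/* Only the IPv6 path below uses this; guarding the definition avoids a
 * -Wunused-function warning when CONFIG_IPV6 is off. */
static bool example_find_by_mac(const struct net_device *ndev,
				const unsigned char *mac)
{
	return ether_addr_equal(ndev->dev_addr, mac);
}
#endif

int example_lookup(struct net_device *ndev, const unsigned char *mac)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (example_find_by_mac(ndev, mac))
		return 0;
#endif
	return -ENODEV;
}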