462 files changed, 7814 insertions, 3090 deletions
diff --git a/Documentation/DMA-mapping.txt b/Documentation/DMA-mapping.txt index 7c717699032c..63392c9132b4 100644 --- a/Documentation/DMA-mapping.txt +++ b/Documentation/DMA-mapping.txt @@ -698,12 +698,12 @@ these interfaces. Remember that, as defined, consistent mappings are always going to be SAC addressable. The first thing your driver needs to do is query the PCI platform -layer with your devices DAC addressing capabilities: +layer if it is capable of handling your devices DAC addressing +capabilities: - int pci_dac_set_dma_mask(struct pci_dev *pdev, u64 mask); + int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask); -This routine behaves identically to pci_set_dma_mask. You may not -use the following interfaces if this routine fails. +You may not use the following interfaces if this routine fails. Next, DMA addresses using this API are kept track of using the dma64_addr_t type. It is guaranteed to be big enough to hold any diff --git a/Documentation/accounting/delay-accounting.txt b/Documentation/accounting/delay-accounting.txt new file mode 100644 index 000000000000..be215e58423b --- /dev/null +++ b/Documentation/accounting/delay-accounting.txt @@ -0,0 +1,110 @@ +Delay accounting +---------------- + +Tasks encounter delays in execution when they wait +for some kernel resource to become available e.g. a +runnable task may wait for a free CPU to run on. + +The per-task delay accounting functionality measures +the delays experienced by a task while + +a) waiting for a CPU (while being runnable) +b) completion of synchronous block I/O initiated by the task +c) swapping in pages + +and makes these statistics available to userspace through +the taskstats interface. + +Such delays provide feedback for setting a task's cpu priority, +io priority and rss limit values appropriately. Long delays for +important tasks could be a trigger for raising its corresponding priority. + +The functionality, through its use of the taskstats interface, also provides +delay statistics aggregated for all tasks (or threads) belonging to a +thread group (corresponding to a traditional Unix process). This is a commonly +needed aggregation that is more efficiently done by the kernel. + +Userspace utilities, particularly resource management applications, can also +aggregate delay statistics into arbitrary groups. To enable this, delay +statistics of a task are available both during its lifetime as well as on its +exit, ensuring continuous and complete monitoring can be done. + + +Interface +--------- + +Delay accounting uses the taskstats interface which is described +in detail in a separate document in this directory. Taskstats returns a +generic data structure to userspace corresponding to per-pid and per-tgid +statistics. The delay accounting functionality populates specific fields of +this structure. See + include/linux/taskstats.h +for a description of the fields pertaining to delay accounting. +It will generally be in the form of counters returning the cumulative +delay seen for cpu, sync block I/O, swapin etc. + +Taking the difference of two successive readings of a given +counter (say cpu_delay_total) for a task will give the delay +experienced by the task waiting for the corresponding resource +in that interval. + +When a task exits, records containing the per-task statistics +are sent to userspace without requiring a command. If it is the last exiting +task of a thread group, the per-tgid statistics are also sent. More details +are given in the taskstats interface description. 
+ +The getdelays.c userspace utility in this directory allows simple commands to +be run and the corresponding delay statistics to be displayed. It also serves +as an example of using the taskstats interface. + +Usage +----- + +Compile the kernel with + CONFIG_TASK_DELAY_ACCT=y + CONFIG_TASKSTATS=y + +Enable the accounting at boot time by adding +the following to the kernel boot options + delayacct + +and after the system has booted up, use a utility +similar to getdelays.c to access the delays +seen by a given task or a task group (tgid). +The utility also allows a given command to be +executed and the corresponding delays to be +seen. + +General format of the getdelays command + +getdelays [-t tgid] [-p pid] [-c cmd...] + + +Get delays, since system boot, for pid 10 +# ./getdelays -p 10 +(output similar to next case) + +Get sum of delays, since system boot, for all pids with tgid 5 +# ./getdelays -t 5 + + +CPU count real total virtual total delay total + 7876 92005750 100000000 24001500 +IO count delay total + 0 0 +MEM count delay total + 0 0 + +Get delays seen in executing a given simple command +# ./getdelays -c ls / + +bin data1 data3 data5 dev home media opt root srv sys usr +boot data2 data4 data6 etc lib mnt proc sbin subdomain tmp var + + +CPU count real total virtual total delay total + 6 4000250 4000000 0 +IO count delay total + 0 0 +MEM count delay total + 0 0 diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c new file mode 100644 index 000000000000..795ca3911cc5 --- /dev/null +++ b/Documentation/accounting/getdelays.c @@ -0,0 +1,396 @@ +/* getdelays.c + * + * Utility to get per-pid and per-tgid delay accounting statistics + * Also illustrates usage of the taskstats interface + * + * Copyright (C) Shailabh Nagar, IBM Corp. 2005 + * Copyright (C) Balbir Singh, IBM Corp. 2006 + * Copyright (c) Jay Lan, SGI. 2006 + * + */ + +#include <stdio.h> +#include <stdlib.h> +#include <errno.h> +#include <unistd.h> +#include <poll.h> +#include <string.h> +#include <fcntl.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <sys/socket.h> +#include <sys/types.h> +#include <signal.h> + +#include <linux/genetlink.h> +#include <linux/taskstats.h> + +/* + * Generic macros for dealing with netlink sockets. Might be duplicated + * elsewhere. It is recommended that commercial grade applications use + * libnl or libnetlink and use the interfaces provided by the library + */ +#define GENLMSG_DATA(glh) ((void *)(NLMSG_DATA(glh) + GENL_HDRLEN)) +#define GENLMSG_PAYLOAD(glh) (NLMSG_PAYLOAD(glh, 0) - GENL_HDRLEN) +#define NLA_DATA(na) ((void *)((char*)(na) + NLA_HDRLEN)) +#define NLA_PAYLOAD(len) (len - NLA_HDRLEN) + +#define err(code, fmt, arg...) do { printf(fmt, ##arg); exit(code); } while (0) +int done = 0; +int rcvbufsz=0; + + char name[100]; +int dbg=0, print_delays=0; +__u64 stime, utime; +#define PRINTF(fmt, arg...) 
{ \ + if (dbg) { \ + printf(fmt, ##arg); \ + } \ + } + +/* Maximum size of response requested or message sent */ +#define MAX_MSG_SIZE 256 +/* Maximum number of cpus expected to be specified in a cpumask */ +#define MAX_CPUS 32 +/* Maximum length of pathname to log file */ +#define MAX_FILENAME 256 + +struct msgtemplate { + struct nlmsghdr n; + struct genlmsghdr g; + char buf[MAX_MSG_SIZE]; +}; + +char cpumask[100+6*MAX_CPUS]; + +/* + * Create a raw netlink socket and bind + */ +static int create_nl_socket(int protocol) +{ + int fd; + struct sockaddr_nl local; + + fd = socket(AF_NETLINK, SOCK_RAW, protocol); + if (fd < 0) + return -1; + + if (rcvbufsz) + if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, + &rcvbufsz, sizeof(rcvbufsz)) < 0) { + printf("Unable to set socket rcv buf size to %d\n", + rcvbufsz); + return -1; + } + + memset(&local, 0, sizeof(local)); + local.nl_family = AF_NETLINK; + + if (bind(fd, (struct sockaddr *) &local, sizeof(local)) < 0) + goto error; + + return fd; +error: + close(fd); + return -1; +} + + +int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid, + __u8 genl_cmd, __u16 nla_type, + void *nla_data, int nla_len) +{ + struct nlattr *na; + struct sockaddr_nl nladdr; + int r, buflen; + char *buf; + + struct msgtemplate msg; + + msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN); + msg.n.nlmsg_type = nlmsg_type; + msg.n.nlmsg_flags = NLM_F_REQUEST; + msg.n.nlmsg_seq = 0; + msg.n.nlmsg_pid = nlmsg_pid; + msg.g.cmd = genl_cmd; + msg.g.version = 0x1; + na = (struct nlattr *) GENLMSG_DATA(&msg); + na->nla_type = nla_type; + na->nla_len = nla_len + 1 + NLA_HDRLEN; + memcpy(NLA_DATA(na), nla_data, nla_len); + msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len); + + buf = (char *) &msg; + buflen = msg.n.nlmsg_len ; + memset(&nladdr, 0, sizeof(nladdr)); + nladdr.nl_family = AF_NETLINK; + while ((r = sendto(sd, buf, buflen, 0, (struct sockaddr *) &nladdr, + sizeof(nladdr))) < buflen) { + if (r > 0) { + buf += r; + buflen -= r; + } else if (errno != EAGAIN) + return -1; + } + return 0; +} + + +/* + * Probe the controller in genetlink to find the family id + * for the TASKSTATS family + */ +int get_family_id(int sd) +{ + struct { + struct nlmsghdr n; + struct genlmsghdr g; + char buf[256]; + } ans; + + int id, rc; + struct nlattr *na; + int rep_len; + + strcpy(name, TASKSTATS_GENL_NAME); + rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY, + CTRL_ATTR_FAMILY_NAME, (void *)name, + strlen(TASKSTATS_GENL_NAME)+1); + + rep_len = recv(sd, &ans, sizeof(ans), 0); + if (ans.n.nlmsg_type == NLMSG_ERROR || + (rep_len < 0) || !NLMSG_OK((&ans.n), rep_len)) + return 0; + + na = (struct nlattr *) GENLMSG_DATA(&ans); + na = (struct nlattr *) ((char *) na + NLA_ALIGN(na->nla_len)); + if (na->nla_type == CTRL_ATTR_FAMILY_ID) { + id = *(__u16 *) NLA_DATA(na); + } + return id; +} + +void print_delayacct(struct taskstats *t) +{ + printf("\n\nCPU %15s%15s%15s%15s\n" + " %15llu%15llu%15llu%15llu\n" + "IO %15s%15s\n" + " %15llu%15llu\n" + "MEM %15s%15s\n" + " %15llu%15llu\n\n", + "count", "real total", "virtual total", "delay total", + t->cpu_count, t->cpu_run_real_total, t->cpu_run_virtual_total, + t->cpu_delay_total, + "count", "delay total", + t->blkio_count, t->blkio_delay_total, + "count", "delay total", t->swapin_count, t->swapin_delay_total); +} + +int main(int argc, char *argv[]) +{ + int c, rc, rep_len, aggr_len, len2, cmd_type; + __u16 id; + __u32 mypid; + + struct nlattr *na; + int nl_sd = -1; + int len = 0; + pid_t tid = 0; + pid_t rtid = 0; + + int fd = 0; + int count = 0; + int write_file = 0; 
+ int maskset = 0; + char logfile[128]; + int loop = 0; + + struct msgtemplate msg; + + while (1) { + c = getopt(argc, argv, "dw:r:m:t:p:v:l"); + if (c < 0) + break; + + switch (c) { + case 'd': + printf("print delayacct stats ON\n"); + print_delays = 1; + break; + case 'w': + strncpy(logfile, optarg, MAX_FILENAME); + printf("write to file %s\n", logfile); + write_file = 1; + break; + case 'r': + rcvbufsz = atoi(optarg); + printf("receive buf size %d\n", rcvbufsz); + if (rcvbufsz < 0) + err(1, "Invalid rcv buf size\n"); + break; + case 'm': + strncpy(cpumask, optarg, sizeof(cpumask)); + maskset = 1; + printf("cpumask %s maskset %d\n", cpumask, maskset); + break; + case 't': + tid = atoi(optarg); + if (!tid) + err(1, "Invalid tgid\n"); + cmd_type = TASKSTATS_CMD_ATTR_TGID; + print_delays = 1; + break; + case 'p': + tid = atoi(optarg); + if (!tid) + err(1, "Invalid pid\n"); + cmd_type = TASKSTATS_CMD_ATTR_PID; + print_delays = 1; + break; + case 'v': + printf("debug on\n"); + dbg = 1; + break; + case 'l': + printf("listen forever\n"); + loop = 1; + break; + default: + printf("Unknown option %d\n", c); + exit(-1); + } + } + + if (write_file) { + fd = open(logfile, O_WRONLY | O_CREAT | O_TRUNC, + S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); + if (fd == -1) { + perror("Cannot open output file\n"); + exit(1); + } + } + + if ((nl_sd = create_nl_socket(NETLINK_GENERIC)) < 0) + err(1, "error creating Netlink socket\n"); + + + mypid = getpid(); + id = get_family_id(nl_sd); + if (!id) { + printf("Error getting family id, errno %d", errno); + goto err; + } + PRINTF("family id %d\n", id); + + if (maskset) { + rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET, + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK, + &cpumask, sizeof(cpumask)); + PRINTF("Sent register cpumask, retval %d\n", rc); + if (rc < 0) { + printf("error sending register cpumask\n"); + goto err; + } + } + + if (tid) { + rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET, + cmd_type, &tid, sizeof(__u32)); + PRINTF("Sent pid/tgid, retval %d\n", rc); + if (rc < 0) { + printf("error sending tid/tgid cmd\n"); + goto done; + } + } + + do { + int i; + + rep_len = recv(nl_sd, &msg, sizeof(msg), 0); + PRINTF("received %d bytes\n", rep_len); + + if (rep_len < 0) { + printf("nonfatal reply error: errno %d\n", errno); + continue; + } + if (msg.n.nlmsg_type == NLMSG_ERROR || + !NLMSG_OK((&msg.n), rep_len)) { + printf("fatal reply error, errno %d\n", errno); + goto done; + } + + PRINTF("nlmsghdr size=%d, nlmsg_len=%d, rep_len=%d\n", + sizeof(struct nlmsghdr), msg.n.nlmsg_len, rep_len); + + + rep_len = GENLMSG_PAYLOAD(&msg.n); + + na = (struct nlattr *) GENLMSG_DATA(&msg); + len = 0; + i = 0; + while (len < rep_len) { + len += NLA_ALIGN(na->nla_len); + switch (na->nla_type) { + case TASKSTATS_TYPE_AGGR_TGID: + /* Fall through */ + case TASKSTATS_TYPE_AGGR_PID: + aggr_len = NLA_PAYLOAD(na->nla_len); + len2 = 0; + /* For nested attributes, na follows */ + na = (struct nlattr *) NLA_DATA(na); + done = 0; + while (len2 < aggr_len) { + switch (na->nla_type) { + case TASKSTATS_TYPE_PID: + rtid = *(int *) NLA_DATA(na); + if (print_delays) + printf("PID\t%d\n", rtid); + break; + case TASKSTATS_TYPE_TGID: + rtid = *(int *) NLA_DATA(na); + if (print_delays) + printf("TGID\t%d\n", rtid); + break; + case TASKSTATS_TYPE_STATS: + count++; + if (print_delays) + print_delayacct((struct taskstats *) NLA_DATA(na)); + if (fd) { + if (write(fd, NLA_DATA(na), na->nla_len) < 0) { + err(1,"write error\n"); + } + } + if (!loop) + goto done; + break; + default: + printf("Unknown nested 
nla_type %d\n", na->nla_type); + break; + } + len2 += NLA_ALIGN(na->nla_len); + na = (struct nlattr *) ((char *) na + len2); + } + break; + + default: + printf("Unknown nla_type %d\n", na->nla_type); + break; + } + na = (struct nlattr *) (GENLMSG_DATA(&msg) + len); + } + } while (loop); +done: + if (maskset) { + rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET, + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK, + &cpumask, sizeof(cpumask)); + printf("Sent deregister mask, retval %d\n", rc); + if (rc < 0) + err(rc, "error sending deregister cpumask\n"); + } +err: + close(nl_sd); + if (fd) + close(fd); + return 0; +} diff --git a/Documentation/accounting/taskstats.txt b/Documentation/accounting/taskstats.txt new file mode 100644 index 000000000000..92ebf29e9041 --- /dev/null +++ b/Documentation/accounting/taskstats.txt @@ -0,0 +1,181 @@ +Per-task statistics interface +----------------------------- + + +Taskstats is a netlink-based interface for sending per-task and +per-process statistics from the kernel to userspace. + +Taskstats was designed for the following benefits: + +- efficiently provide statistics during lifetime of a task and on its exit +- unified interface for multiple accounting subsystems +- extensibility for use by future accounting patches + +Terminology +----------- + +"pid", "tid" and "task" are used interchangeably and refer to the standard +Linux task defined by struct task_struct. per-pid stats are the same as +per-task stats. + +"tgid", "process" and "thread group" are used interchangeably and refer to the +tasks that share an mm_struct i.e. the traditional Unix process. Despite the +use of tgid, there is no special treatment for the task that is thread group +leader - a process is deemed alive as long as it has any task belonging to it. + +Usage +----- + +To get statistics during a task's lifetime, userspace opens a unicast netlink +socket (NETLINK_GENERIC family) and sends commands specifying a pid or a tgid. +The response contains statistics for a task (if pid is specified) or the sum of +statistics for all tasks of the process (if tgid is specified). + +To obtain statistics for tasks which are exiting, the userspace listener +sends a register command and specifies a cpumask. Whenever a task exits on +one of the cpus in the cpumask, its per-pid statistics are sent to the +registered listener. Using cpumasks allows the data received by one listener +to be limited and assists in flow control over the netlink interface and is +explained in more detail below. + +If the exiting task is the last thread exiting its thread group, +an additional record containing the per-tgid stats is also sent to userspace. +The latter contains the sum of per-pid stats for all threads in the thread +group, both past and present. + +getdelays.c is a simple utility demonstrating usage of the taskstats interface +for reporting delay accounting statistics. Users can register cpumasks, +send commands and process responses, listen for per-tid/tgid exit data, +write the data received to a file and do basic flow control by increasing +receive buffer sizes. + +Interface +--------- + +The user-kernel interface is encapsulated in include/linux/taskstats.h + +To avoid this documentation becoming obsolete as the interface evolves, only +an outline of the current version is given. taskstats.h always overrides the +description here. + +struct taskstats is the common accounting structure for both per-pid and +per-tgid data. 
It is versioned and can be extended by each accounting subsystem +that is added to the kernel. The fields and their semantics are defined in the +taskstats.h file. + +The data exchanged between user and kernel space is a netlink message belonging +to the NETLINK_GENERIC family and using the netlink attributes interface. +The messages are in the format + + +----------+- - -+-------------+-------------------+ + | nlmsghdr | Pad | genlmsghdr | taskstats payload | + +----------+- - -+-------------+-------------------+ + + +The taskstats payload is one of the following three kinds: + +1. Commands: Sent from user to kernel. Commands to get data on +a pid/tgid consist of one attribute, of type TASKSTATS_CMD_ATTR_PID/TGID, +containing a u32 pid or tgid in the attribute payload. The pid/tgid denotes +the task/process for which userspace wants statistics. + +Commands to register/deregister interest in exit data from a set of cpus +consist of one attribute, of type +TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK and contain a cpumask in the +attribute payload. The cpumask is specified as an ascii string of +comma-separated cpu ranges e.g. to listen to exit data from cpus 1,2,3,5,7,8 +the cpumask would be "1-3,5,7-8". If userspace forgets to deregister interest +in cpus before closing the listening socket, the kernel cleans up its interest +set over time. However, for the sake of efficiency, an explicit deregistration +is advisable. + +2. Response for a command: sent from the kernel in response to a userspace +command. The payload is a series of three attributes of type: + +a) TASKSTATS_TYPE_AGGR_PID/TGID : attribute containing no payload but indicates +a pid/tgid will be followed by some stats. + +b) TASKSTATS_TYPE_PID/TGID: attribute whose payload is the pid/tgid whose stats +is being returned. + +c) TASKSTATS_TYPE_STATS: attribute with a struct taskstsats as payload. The +same structure is used for both per-pid and per-tgid stats. + +3. New message sent by kernel whenever a task exits. The payload consists of a + series of attributes of the following type: + +a) TASKSTATS_TYPE_AGGR_PID: indicates next two attributes will be pid+stats +b) TASKSTATS_TYPE_PID: contains exiting task's pid +c) TASKSTATS_TYPE_STATS: contains the exiting task's per-pid stats +d) TASKSTATS_TYPE_AGGR_TGID: indicates next two attributes will be tgid+stats +e) TASKSTATS_TYPE_TGID: contains tgid of process to which task belongs +f) TASKSTATS_TYPE_STATS: contains the per-tgid stats for exiting task's process + + +per-tgid stats +-------------- + +Taskstats provides per-process stats, in addition to per-task stats, since +resource management is often done at a process granularity and aggregating task +stats in userspace alone is inefficient and potentially inaccurate (due to lack +of atomicity). + +However, maintaining per-process, in addition to per-task stats, within the +kernel has space and time overheads. To address this, the taskstats code +accumalates each exiting task's statistics into a process-wide data structure. +When the last task of a process exits, the process level data accumalated also +gets sent to userspace (along with the per-task data). + +When a user queries to get per-tgid data, the sum of all other live threads in +the group is added up and added to the accumalated total for previously exited +threads of the same thread group. 
+ +Extending taskstats +------------------- + +There are two ways to extend the taskstats interface to export more +per-task/process stats as patches to collect them get added to the kernel +in future: + +1. Adding more fields to the end of the existing struct taskstats. Backward + compatibility is ensured by the version number within the + structure. Userspace will use only the fields of the struct that correspond + to the version its using. + +2. Defining separate statistic structs and using the netlink attributes + interface to return them. Since userspace processes each netlink attribute + independently, it can always ignore attributes whose type it does not + understand (because it is using an older version of the interface). + + +Choosing between 1. and 2. is a matter of trading off flexibility and +overhead. If only a few fields need to be added, then 1. is the preferable +path since the kernel and userspace don't need to incur the overhead of +processing new netlink attributes. But if the new fields expand the existing +struct too much, requiring disparate userspace accounting utilities to +unnecessarily receive large structures whose fields are of no interest, then +extending the attributes structure would be worthwhile. + +Flow control for taskstats +-------------------------- + +When the rate of task exits becomes large, a listener may not be able to keep +up with the kernel's rate of sending per-tid/tgid exit data leading to data +loss. This possibility gets compounded when the taskstats structure gets +extended and the number of cpus grows large. + +To avoid losing statistics, userspace should do one or more of the following: + +- increase the receive buffer sizes for the netlink sockets opened by +listeners to receive exit data. + +- create more listeners and reduce the number of cpus being listened to by +each listener. In the extreme case, there could be one listener for each cpu. +Users may also consider setting the cpu affinity of the listener to the subset +of cpus to which it listens, especially if they are listening to just one cpu. + +Despite these measures, if the userspace receives ENOBUFS error messages +indicated overflow of receive buffers, it should take measures to handle the +loss of data. + +---- diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 9d3a0775a11d..87851efb0228 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -258,3 +258,19 @@ Why: These drivers never compiled since they were added to the kernel Who: Jean Delvare <khali@linux-fr.org> --------------------------- + +What: Bridge netfilter deferred IPv4/IPv6 output hook calling +When: January 2007 +Why: The deferred output hooks are a layering violation causing unusual + and broken behaviour on bridge devices. Examples of things they + break include QoS classifation using the MARK or CLASSIFY targets, + the IPsec policy match and connection tracking with VLANs on a + bridge. Their only use is to enable bridge output port filtering + within iptables with the physdev match, which can also be done by + combining iptables and ebtables using netfilter marks. Until it + will get removed the hook deferral is disabled by default and is + only enabled when needed. 
+ +Who: Patrick McHardy <kaber@trash.net> + +--------------------------- diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 149f62ba14a5..e11f7728ec6f 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -448,6 +448,8 @@ running once the system is up. Format: <area>[,<node>] See also Documentation/networking/decnet.txt. + delayacct [KNL] Enable per-task delay accounting + dhash_entries= [KNL] Set number of hash buckets for dentry cache. diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt index 28d1bc3edb1c..46b9b389df35 100644 --- a/Documentation/memory-barriers.txt +++ b/Documentation/memory-barriers.txt @@ -1015,10 +1015,9 @@ CPU from reordering them. There are some more advanced barrier functions: (*) set_mb(var, value) - (*) set_wmb(var, value) - These assign the value to the variable and then insert at least a write - barrier after it, depending on the function. They aren't guaranteed to + This assigns the value to the variable and then inserts at least a write + barrier after it, depending on the function. It isn't guaranteed to insert anything more than a compiler barrier in a UP compilation. diff --git a/Documentation/ramdisk.txt b/Documentation/ramdisk.txt index 7c25584e082c..52f75b7d51c2 100644 --- a/Documentation/ramdisk.txt +++ b/Documentation/ramdisk.txt @@ -6,7 +6,7 @@ Contents: 1) Overview 2) Kernel Command Line Parameters 3) Using "rdev -r" - 4) An Example of Creating a Compressed RAM Disk + 4) An Example of Creating a Compressed RAM Disk 1) Overview @@ -34,7 +34,7 @@ make it clearer. The original "ramdisk=<ram_size>" has been kept around for compatibility reasons, but it may be removed in the future. The new RAM disk also has the ability to load compressed RAM disk images, -allowing one to squeeze more programs onto an average installation or +allowing one to squeeze more programs onto an average installation or rescue floppy disk. @@ -51,7 +51,7 @@ default is 4096 (4 MB) (8192 (8 MB) on S390). =================== This parameter tells the RAM disk driver how many bytes to use per block. The -default is 512. +default is 1024 (BLOCK_SIZE). 3) Using "rdev -r" @@ -70,7 +70,7 @@ These numbers are no magical secrets, as seen below: ./arch/i386/kernel/setup.c:#define RAMDISK_PROMPT_FLAG 0x8000 ./arch/i386/kernel/setup.c:#define RAMDISK_LOAD_FLAG 0x4000 -Consider a typical two floppy disk setup, where you will have the +Consider a typical two floppy disk setup, where you will have the kernel on disk one, and have already put a RAM disk image onto disk #2. Hence you want to set bits 0 to 13 as 0, meaning that your RAM disk @@ -97,12 +97,12 @@ Since the default start = 0 and the default prompt = 1, you could use: append = "load_ramdisk=1" -4) An Example of Creating a Compressed RAM Disk +4) An Example of Creating a Compressed RAM Disk ---------------------------------------------- To create a RAM disk image, you will need a spare block device to construct it on. This can be the RAM disk device itself, or an -unused disk partition (such as an unmounted swap partition). For this +unused disk partition (such as an unmounted swap partition). For this example, we will use the RAM disk device, "/dev/ram0". 
Note: This technique should not be done on a machine with less than 8 MB diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt index 6887d44d2661..6da24e7a56cb 100644 --- a/Documentation/x86_64/boot-options.txt +++ b/Documentation/x86_64/boot-options.txt @@ -238,6 +238,13 @@ Debugging pagefaulttrace Dump all page faults. Only useful for extreme debugging and will create a lot of output. + call_trace=[old|both|newfallback|new] + old: use old inexact backtracer + new: use new exact dwarf2 unwinder + both: print entries from both + newfallback: use new unwinder but fall back to old if it gets + stuck (default) + Misc noreplacement Don't replace instructions with more appropriate ones diff --git a/MAINTAINERS b/MAINTAINERS index a3462c3414cd..b2afc7ae965b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -771,6 +771,7 @@ M: aliakc@web.de P: Jamie Lenehan M: lenehan@twibble.org W: http://twibble.org/dist/dc395x/ +L: dc395x@twibble.org L: http://lists.twibble.org/mailman/listinfo/dc395x/ S: Maintained @@ -1501,6 +1502,7 @@ P: Yi Zhu M: yi.zhu@intel.com P: James Ketrenos M: jketreno@linux.intel.com +L: ipw2100-devel@lists.sourceforge.net L: http://lists.sourceforge.net/mailman/listinfo/ipw2100-devel W: http://ipw2100.sourceforge.net S: Supported @@ -1510,6 +1512,7 @@ P: Yi Zhu M: yi.zhu@intel.com P: James Ketrenos M: jketreno@linux.intel.com +L: ipw2100-devel@lists.sourceforge.net L: http://lists.sourceforge.net/mailman/listinfo/ipw2100-devel W: http://ipw2200.sourceforge.net S: Supported @@ -1673,10 +1676,8 @@ L: linux-kernel@vger.kernel.org S: Maintained LAPB module -P: Henner Eisen -M: eis@baty.hanse.de L: linux-x25@vger.kernel.org -S: Maintained +S: Orphan LASI 53c700 driver for PARISC P: James E.J. Bottomley @@ -2226,6 +2227,7 @@ S: Maintained PCMCIA SUBSYSTEM P: Linux PCMCIA Team +L: linux-pcmcia@lists.infradead.org L: http://lists.infradead.org/mailman/listinfo/linux-pcmcia T: git kernel.org:/pub/scm/linux/kernel/git/brodo/pcmcia-2.6.git S: Maintained @@ -2236,6 +2238,12 @@ M: tsbogend@alpha.franken.de L: netdev@vger.kernel.org S: Maintained +PER-TASK DELAY ACCOUNTING +P: Shailabh Nagar +M: nagar@watson.ibm.com +L: linux-kernel@vger.kernel.org +S: Maintained + PERSONALITY HANDLING P: Christoph Hellwig M: hch@infradead.org @@ -2763,6 +2771,12 @@ P: Deepak Saxena M: dsaxena@plexity.net S: Maintained +TASKSTATS STATISTICS INTERFACE +P: Shailabh Nagar +M: nagar@watson.ibm.com +L: linux-kernel@vger.kernel.org +S: Maintained + TI PARALLEL LINK CABLE DRIVER P: Romain Lievin M: roms@lpg.ticalc.org @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 18 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME=Crazed Snow-Weasel # *DOCUMENTATION* diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c index 48f0f62f781c..5b96f038367f 100644 --- a/arch/i386/kernel/crash.c +++ b/arch/i386/kernel/crash.c @@ -90,7 +90,7 @@ static void crash_save_self(struct pt_regs *regs) crash_save_this_cpu(regs, cpu); } -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) static atomic_t waiting_for_crash_ipi; static int crash_nmi_callback(struct pt_regs *regs, int cpu) diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index 923bb292f47f..8657c739656a 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c @@ -690,8 +690,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas /* * Now maybe handle debug registers and/or IO bitmaps */ - if (unlikely((task_thread_info(next_p)->flags & 
_TIF_WORK_CTXSW)) - || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) + if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW) + || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))) __switch_to_xtra(next_p, tss); disable_tsc(prev_p, next_p); diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index 7864395c1441..f1682206d304 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c @@ -1327,7 +1327,10 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat res->start = e820.map[i].addr; res->end = res->start + e820.map[i].size - 1; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; - request_resource(&iomem_resource, res); + if (request_resource(&iomem_resource, res)) { + kfree(res); + continue; + } if (e820.map[i].type == E820_RAM) { /* * We don't know which RAM region contains kernel data, diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c index 8705c0f05788..edd00f6cee37 100644 --- a/arch/i386/kernel/time.c +++ b/arch/i386/kernel/time.c @@ -135,7 +135,7 @@ unsigned long profile_pc(struct pt_regs *regs) { unsigned long pc = instruction_pointer(regs); - if (in_lock_functions(pc)) + if (!user_mode_vm(regs) && in_lock_functions(pc)) return *(unsigned long *)(regs->ebp + 4); return pc; diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index 5cfd4f42eeba..021f8fdc7512 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c @@ -187,10 +187,21 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, if (unwind_init_blocked(&info, task) == 0) unw_ret = show_trace_unwind(&info, log_lvl); } - if (unw_ret > 0) { - if (call_trace > 0) + if (unw_ret > 0 && !arch_unw_user_mode(&info)) { +#ifdef CONFIG_STACK_UNWIND + print_symbol("DWARF2 unwinder stuck at %s\n", + UNW_PC(&info)); + if (call_trace == 1) { + printk("Leftover inexact backtrace:\n"); + if (UNW_SP(&info)) + stack = (void *)UNW_SP(&info); + } else if (call_trace > 1) return; - printk("%sLegacy call trace:\n", log_lvl); + else + printk("Full inexact backtrace again:\n"); +#else + printk("Inexact backtrace:\n"); +#endif } } @@ -324,35 +335,35 @@ void show_registers(struct pt_regs *regs) static void handle_BUG(struct pt_regs *regs) { + unsigned long eip = regs->eip; unsigned short ud2; - unsigned short line; - char *file; - char c; - unsigned long eip; - - eip = regs->eip; if (eip < PAGE_OFFSET) - goto no_bug; + return; if (__get_user(ud2, (unsigned short __user *)eip)) - goto no_bug; + return; if (ud2 != 0x0b0f) - goto no_bug; - if (__get_user(line, (unsigned short __user *)(eip + 2))) - goto bug; - if (__get_user(file, (char * __user *)(eip + 4)) || - (unsigned long)file < PAGE_OFFSET || __get_user(c, file)) - file = "<bad filename>"; + return; printk(KERN_EMERG "------------[ cut here ]------------\n"); - printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line); -no_bug: - return; +#ifdef CONFIG_DEBUG_BUGVERBOSE + do { + unsigned short line; + char *file; + char c; + + if (__get_user(line, (unsigned short __user *)(eip + 2))) + break; + if (__get_user(file, (char * __user *)(eip + 4)) || + (unsigned long)file < PAGE_OFFSET || __get_user(c, file)) + file = "<bad filename>"; - /* Here we know it was a BUG but file-n-line is unavailable */ -bug: - printk(KERN_EMERG "Kernel BUG\n"); + printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line); + return; + } while (0); +#endif + printk(KERN_EMERG "Kernel BUG at [verbose debug info unavailable]\n"); } /* This is gone through when something in the kernel @@ -1238,8 +1249,10 @@ static int 
__init call_trace_setup(char *s) call_trace = -1; else if (strcmp(s, "both") == 0) call_trace = 0; - else if (strcmp(s, "new") == 0) + else if (strcmp(s, "newfallback") == 0) call_trace = 1; + else if (strcmp(s, "new") == 2) + call_trace = 2; return 1; } __setup("call_trace=", call_trace_setup); diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c index 4b75212ab6dd..efc7e7d5f4d0 100644 --- a/arch/i386/lib/usercopy.c +++ b/arch/i386/lib/usercopy.c @@ -843,7 +843,6 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) { - might_sleep(); BUG_ON((long) n < 0); if (access_ok(VERIFY_WRITE, to, n)) n = __copy_to_user(to, from, n); @@ -870,7 +869,6 @@ EXPORT_SYMBOL(copy_to_user); unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { - might_sleep(); BUG_ON((long) n < 0); if (access_ok(VERIFY_READ, from, n)) n = __copy_from_user(to, from, n); diff --git a/arch/s390/defconfig b/arch/s390/defconfig index f4dfc10026d2..f1d4591eddbb 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -1,13 +1,16 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.17-rc1 -# Mon Apr 3 14:34:15 2006 +# Linux kernel version: 2.6.18-rc2 +# Thu Jul 27 13:51:07 2006 # CONFIG_MMU=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_S390=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" # # Code maturity level options @@ -25,6 +28,7 @@ CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y # CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set CONFIG_SYSCTL=y CONFIG_AUDIT=y # CONFIG_AUDITSYSCALL is not set @@ -43,10 +47,12 @@ CONFIG_PRINTK=y CONFIG_BUG=y CONFIG_ELF_CORE=y CONFIG_BASE_FULL=y +CONFIG_RT_MUTEXES=y CONFIG_FUTEX=y CONFIG_EPOLL=y CONFIG_SHMEM=y CONFIG_SLAB=y +CONFIG_VM_EVENT_COUNTERS=y # CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=0 # CONFIG_SLOB is not set @@ -94,7 +100,6 @@ CONFIG_HOTPLUG_CPU=y CONFIG_DEFAULT_MIGRATION_COST=1000000 CONFIG_COMPAT=y CONFIG_SYSVIPC_COMPAT=y -CONFIG_BINFMT_ELF32=y # # Code generation options @@ -115,6 +120,7 @@ CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y # CONFIG_SPARSEMEM_STATIC is not set CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_RESOURCES_64BIT=y # # I/O subsystem configuration @@ -142,6 +148,7 @@ CONFIG_VIRT_CPU_ACCOUNTING=y # CONFIG_APPLDATA_BASE is not set CONFIG_NO_IDLE_HZ=y CONFIG_NO_IDLE_HZ_INIT=y +CONFIG_S390_HYPFS_FS=y CONFIG_KEXEC=y # @@ -174,6 +181,8 @@ CONFIG_IP_FIB_HASH=y # CONFIG_INET_IPCOMP is not set # CONFIG_INET_XFRM_TUNNEL is not set # CONFIG_INET_TUNNEL is not set +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y CONFIG_INET_DIAG=y CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set @@ -186,7 +195,10 @@ CONFIG_IPV6=y # CONFIG_INET6_IPCOMP is not set # CONFIG_INET6_XFRM_TUNNEL is not set # CONFIG_INET6_TUNNEL is not set +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y # CONFIG_IPV6_TUNNEL is not set +# CONFIG_NETWORK_SECMARK is not set # CONFIG_NETFILTER is not set # @@ -263,6 +275,7 @@ CONFIG_NET_ESTIMATOR=y # Network testing # # CONFIG_NET_PKTGEN is not set +# CONFIG_NET_TCPPROBE is not set # CONFIG_HAMRADIO is not set # CONFIG_IRDA is not set # CONFIG_BT is not set @@ -276,6 +289,7 @@ CONFIG_STANDALONE=y CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_FW_LOADER is not set # CONFIG_DEBUG_DRIVER is not set 
+CONFIG_SYS_HYPERVISOR=y # # Connector - unified userspace <-> kernelspace linker @@ -334,6 +348,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 CONFIG_BLK_DEV_INITRD=y # CONFIG_CDROM_PKTCDVD is not set @@ -359,9 +374,7 @@ CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=m CONFIG_MD_RAID1=m # CONFIG_MD_RAID10 is not set -CONFIG_MD_RAID5=m -# CONFIG_MD_RAID5_RESHAPE is not set -# CONFIG_MD_RAID6 is not set +# CONFIG_MD_RAID456 is not set CONFIG_MD_MULTIPATH=m # CONFIG_MD_FAULTY is not set CONFIG_BLK_DEV_DM=y @@ -419,7 +432,8 @@ CONFIG_S390_TAPE_34XX=m # # Cryptographic devices # -CONFIG_Z90CRYPT=m +CONFIG_ZCRYPT=m +# CONFIG_ZCRYPT_MONOLITHIC is not set # # Network device support @@ -509,6 +523,7 @@ CONFIG_FS_MBCACHE=y # CONFIG_MINIX_FS is not set # CONFIG_ROMFS_FS is not set CONFIG_INOTIFY=y +CONFIG_INOTIFY_USER=y # CONFIG_QUOTA is not set CONFIG_DNOTIFY=y # CONFIG_AUTOFS_FS is not set @@ -614,26 +629,36 @@ CONFIG_MSDOS_PARTITION=y # Instrumentation Support # # CONFIG_PROFILING is not set -# CONFIG_STATISTICS is not set +CONFIG_STATISTICS=y +CONFIG_KPROBES=y # # Kernel hacking # +CONFIG_TRACE_IRQFLAGS_SUPPORT=y # CONFIG_PRINTK_TIME is not set CONFIG_MAGIC_SYSRQ=y +# CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_KERNEL=y CONFIG_LOG_BUF_SHIFT=17 # CONFIG_DETECT_SOFTLOCKUP is not set # CONFIG_SCHEDSTATS is not set # CONFIG_DEBUG_SLAB is not set CONFIG_DEBUG_PREEMPT=y -CONFIG_DEBUG_MUTEXES=y +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set CONFIG_DEBUG_SPINLOCK_SLEEP=y +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set # CONFIG_DEBUG_INFO is not set CONFIG_DEBUG_FS=y # CONFIG_DEBUG_VM is not set +# CONFIG_FRAME_POINTER is not set # CONFIG_UNWIND_INFO is not set CONFIG_FORCED_INLINING=y # CONFIG_RCU_TORTURE_TEST is not set @@ -688,3 +713,4 @@ CONFIG_CRYPTO=y # CONFIG_CRC16 is not set CONFIG_CRC32=m # CONFIG_LIBCRC32C is not set +CONFIG_PLIST=y diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index d00de17b3778..a4dc61f3285e 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S @@ -273,7 +273,7 @@ startup_continue: .Lbss_end: .long _end .Lparmaddr: .long PARMAREA .Lsccbaddr: .long .Lsccb - .align 4096 + .org 0x12000 .Lsccb: .hword 0x1000 # length, one page .byte 0x00,0x00,0x00 @@ -290,7 +290,7 @@ startup_continue: .Lscpincr2: .quad 0x00 .fill 3984,1,0 - .align 4096 + .org 0x13000 #ifdef CONFIG_SHARED_KERNEL .org 0x100000 diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 47744fcca930..9d80c5b1ef95 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -268,7 +268,7 @@ startup_continue: .Lparmaddr: .quad PARMAREA - .align 4096 + .org 0x12000 .Lsccb: .hword 0x1000 # length, one page .byte 0x00,0x00,0x00 @@ -285,7 +285,7 @@ startup_continue: .Lscpincr2: .quad 0x00 .fill 3984,1,0 - .align 4096 + .org 0x13000 #ifdef CONFIG_SHARED_KERNEL .org 0x100000 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 1ca34f54ea8a..c902f059c7aa 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -877,31 +877,57 @@ static struct bin_attribute ipl_scp_data_attr = { static decl_subsys(ipl, NULL, NULL); +static int ipl_register_fcp_files(void) +{ + int rc; + + rc = sysfs_create_group(&ipl_subsys.kset.kobj, + 
&ipl_fcp_attr_group); + if (rc) + goto out; + rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj, + &ipl_parameter_attr); + if (rc) + goto out_ipl_parm; + rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj, + &ipl_scp_data_attr); + if (!rc) + goto out; + + sysfs_remove_bin_file(&ipl_subsys.kset.kobj, &ipl_parameter_attr); + +out_ipl_parm: + sysfs_remove_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group); +out: + return rc; +} + static int __init ipl_device_sysfs_register(void) { int rc; rc = firmware_register(&ipl_subsys); if (rc) - return rc; + goto out; switch (get_ipl_type()) { case ipl_type_ccw: - sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_ccw_attr_group); + rc = sysfs_create_group(&ipl_subsys.kset.kobj, + &ipl_ccw_attr_group); break; case ipl_type_fcp: - sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group); - sysfs_create_bin_file(&ipl_subsys.kset.kobj, - &ipl_parameter_attr); - sysfs_create_bin_file(&ipl_subsys.kset.kobj, - &ipl_scp_data_attr); + rc = ipl_register_fcp_files(); break; default: - sysfs_create_group(&ipl_subsys.kset.kobj, - &ipl_unknown_attr_group); + rc = sysfs_create_group(&ipl_subsys.kset.kobj, + &ipl_unknown_attr_group); break; } - return 0; + + if (rc) + firmware_unregister(&ipl_subsys); +out: + return rc; } __initcall(ipl_device_sysfs_register); diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c index adba9dfee35e..af90a5f9ab57 100644 --- a/arch/sparc/kernel/devices.c +++ b/arch/sparc/kernel/devices.c @@ -15,6 +15,7 @@ #include <asm/page.h> #include <asm/oplib.h> +#include <asm/prom.h> #include <asm/smp.h> #include <asm/system.h> #include <asm/cpudata.h> @@ -34,12 +35,6 @@ static int check_cpu_node(int nd, int *cur_inst, int (*compare)(int, int, void *), void *compare_arg, int *prom_node, int *mid) { - char node_str[128]; - - prom_getstring(nd, "device_type", node_str, sizeof(node_str)); - if (strcmp(node_str, "cpu")) - return -ENODEV; - if (!compare(nd, *cur_inst, compare_arg)) { if (prom_node) *prom_node = nd; @@ -59,20 +54,14 @@ static int check_cpu_node(int nd, int *cur_inst, static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg, int *prom_node, int *mid) { - int nd, cur_inst, err; + struct device_node *dp; + int cur_inst; - nd = prom_root_node; cur_inst = 0; - - err = check_cpu_node(nd, &cur_inst, compare, compare_arg, - prom_node, mid); - if (!err) - return 0; - - nd = prom_getchild(nd); - while ((nd = prom_getsibling(nd)) != 0) { - err = check_cpu_node(nd, &cur_inst, compare, compare_arg, - prom_node, mid); + for_each_node_by_type(dp, "cpu") { + int err = check_cpu_node(dp->node, &cur_inst, + compare, compare_arg, + prom_node, mid); if (!err) return 0; } diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c index cde73327ca96..72f0201051a0 100644 --- a/arch/sparc/kernel/irq.c +++ b/arch/sparc/kernel/irq.c @@ -329,7 +329,7 @@ void handler_irq(int irq, struct pt_regs * regs) disable_pil_irq(irq); #ifdef CONFIG_SMP /* Only rotate on lower priority IRQ's (scsi, ethernet, etc.). 
*/ - if(irq < 10) + if((sparc_cpu_model==sun4m) && (irq < 10)) smp4m_irq_rotate(cpu); #endif action = sparc_irq[irq].action; diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c index 5a2faad5d043..97bf87e8cdde 100644 --- a/arch/sparc/kernel/of_device.c +++ b/arch/sparc/kernel/of_device.c @@ -596,14 +596,41 @@ static struct of_device * __init scan_one_device(struct device_node *dp, static int pil_to_sbus[] = { 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0, }; - struct device_node *busp = dp->parent; + struct device_node *io_unit, *sbi = dp->parent; struct linux_prom_registers *regs; - int board = of_getintprop_default(busp, "board#", 0); - int slot; + int board, slot; + + while (sbi) { + if (!strcmp(sbi->name, "sbi")) + break; + + sbi = sbi->parent; + } + if (!sbi) + goto build_resources; regs = of_get_property(dp, "reg", NULL); + if (!regs) + goto build_resources; + slot = regs->which_io; + /* If SBI's parent is not io-unit or the io-unit lacks + * a "board#" property, something is very wrong. + */ + if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) { + printk("%s: Error, parent is not io-unit.\n", + sbi->full_name); + goto build_resources; + } + io_unit = sbi->parent; + board = of_getintprop_default(io_unit, "board#", -1); + if (board == -1) { + printk("%s: Error, lacks board# property.\n", + io_unit->full_name); + goto build_resources; + } + for (i = 0; i < op->num_irqs; i++) { int this_irq = op->irqs[i]; int sbusl = pil_to_sbus[this_irq]; @@ -617,6 +644,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp, } } +build_resources: build_device_resources(op, parent); op->dev.parent = parent; diff --git a/arch/sparc/kernel/prom.c b/arch/sparc/kernel/prom.c index 4b06dcb00ebd..4ca9e5fc97f4 100644 --- a/arch/sparc/kernel/prom.c +++ b/arch/sparc/kernel/prom.c @@ -444,6 +444,7 @@ static struct property * __init build_one_prop(phandle node, char *prev, char *s static struct property *tmp = NULL; struct property *p; int len; + const char *name; if (tmp) { p = tmp; @@ -456,19 +457,21 @@ static struct property * __init build_one_prop(phandle node, char *prev, char *s p->name = (char *) (p + 1); if (special_name) { + strcpy(p->name, special_name); p->length = special_len; p->value = prom_early_alloc(special_len); memcpy(p->value, special_val, special_len); } else { if (prev == NULL) { - prom_firstprop(node, p->name); + name = prom_firstprop(node, NULL); } else { - prom_nextprop(node, prev, p->name); + name = prom_nextprop(node, prev, NULL); } - if (strlen(p->name) == 0) { + if (strlen(name) == 0) { tmp = p; return NULL; } + strcpy(p->name, name); p->length = prom_getproplen(node, p->name); if (p->length <= 0) { p->length = 0; diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c index 6135d4faeeeb..e311ade1b490 100644 --- a/arch/sparc/kernel/smp.c +++ b/arch/sparc/kernel/smp.c @@ -87,6 +87,7 @@ void __cpuinit smp_store_cpu_info(int id) void __init smp_cpus_done(unsigned int max_cpus) { extern void smp4m_smp_done(void); + extern void smp4d_smp_done(void); unsigned long bogosum = 0; int cpu, num; @@ -100,8 +101,34 @@ void __init smp_cpus_done(unsigned int max_cpus) num, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100); - BUG_ON(sparc_cpu_model != sun4m); - smp4m_smp_done(); + switch(sparc_cpu_model) { + case sun4: + printk("SUN4\n"); + BUG(); + break; + case sun4c: + printk("SUN4C\n"); + BUG(); + break; + case sun4m: + smp4m_smp_done(); + break; + case sun4d: + smp4d_smp_done(); + break; + case sun4e: + printk("SUN4E\n"); + BUG(); + break; + 
case sun4u: + printk("SUN4U\n"); + BUG(); + break; + default: + printk("UNKNOWN!\n"); + BUG(); + break; + }; } void cpu_panic(void) @@ -267,9 +294,9 @@ int setup_profiling_timer(unsigned int multiplier) void __init smp_prepare_cpus(unsigned int max_cpus) { extern void smp4m_boot_cpus(void); + extern void smp4d_boot_cpus(void); int i, cpuid, extra; - BUG_ON(sparc_cpu_model != sun4m); printk("Entering SMP Mode...\n"); extra = 0; @@ -283,7 +310,34 @@ void __init smp_prepare_cpus(unsigned int max_cpus) smp_store_cpu_info(boot_cpu_id); - smp4m_boot_cpus(); + switch(sparc_cpu_model) { + case sun4: + printk("SUN4\n"); + BUG(); + break; + case sun4c: + printk("SUN4C\n"); + BUG(); + break; + case sun4m: + smp4m_boot_cpus(); + break; + case sun4d: + smp4d_boot_cpus(); + break; + case sun4e: + printk("SUN4E\n"); + BUG(); + break; + case sun4u: + printk("SUN4U\n"); + BUG(); + break; + default: + printk("UNKNOWN!\n"); + BUG(); + break; + }; } /* Set this up early so that things like the scheduler can init @@ -323,9 +377,37 @@ void __init smp_prepare_boot_cpu(void) int __cpuinit __cpu_up(unsigned int cpu) { extern int smp4m_boot_one_cpu(int); - int ret; - - ret = smp4m_boot_one_cpu(cpu); + extern int smp4d_boot_one_cpu(int); + int ret=0; + + switch(sparc_cpu_model) { + case sun4: + printk("SUN4\n"); + BUG(); + break; + case sun4c: + printk("SUN4C\n"); + BUG(); + break; + case sun4m: + ret = smp4m_boot_one_cpu(cpu); + break; + case sun4d: + ret = smp4d_boot_one_cpu(cpu); + break; + case sun4e: + printk("SUN4E\n"); + BUG(); + break; + case sun4u: + printk("SUN4U\n"); + BUG(); + break; + default: + printk("UNKNOWN!\n"); + BUG(); + break; + }; if (!ret) { cpu_set(cpu, smp_commenced_mask); diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c index 5fb987fc3d63..4d441a554d35 100644 --- a/arch/sparc/kernel/sparc_ksyms.c +++ b/arch/sparc/kernel/sparc_ksyms.c @@ -237,7 +237,6 @@ EXPORT_SYMBOL(prom_node_has_property); EXPORT_SYMBOL(prom_setprop); EXPORT_SYMBOL(saved_command_line); EXPORT_SYMBOL(prom_apply_obio_ranges); -EXPORT_SYMBOL(prom_getname); EXPORT_SYMBOL(prom_feval); EXPORT_SYMBOL(prom_getbool); EXPORT_SYMBOL(prom_getstring); diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index b141b7ee6717..ba843f6a2832 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c @@ -43,15 +43,10 @@ extern ctxd_t *srmmu_ctx_table_phys; extern void calibrate_delay(void); extern volatile int smp_processors_ready; -extern int smp_num_cpus; static int smp_highest_cpu; extern volatile unsigned long cpu_callin_map[NR_CPUS]; extern cpuinfo_sparc cpu_data[NR_CPUS]; extern unsigned char boot_cpu_id; -extern int smp_activated; -extern volatile int __cpu_number_map[NR_CPUS]; -extern volatile int __cpu_logical_map[NR_CPUS]; -extern volatile unsigned long ipi_count; extern volatile int smp_process_available; extern cpumask_t smp_commenced_mask; @@ -144,6 +139,8 @@ void __init smp4d_callin(void) spin_lock_irqsave(&sun4d_imsk_lock, flags); cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */ spin_unlock_irqrestore(&sun4d_imsk_lock, flags); + cpu_set(cpuid, cpu_online_map); + } extern void init_IRQ(void); @@ -160,51 +157,24 @@ extern unsigned long trapbase_cpu3[]; void __init smp4d_boot_cpus(void) { - int cpucount = 0; - int i, mid; - - printk("Entering SMP Mode...\n"); - if (boot_cpu_id) current_set[0] = NULL; - - local_irq_enable(); - cpus_clear(cpu_present_map); - - /* XXX This whole thing has to go. See sparc64. 
*/ - for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++) - cpu_set(mid, cpu_present_map); - SMP_PRINTK(("cpu_present_map %08lx\n", cpus_addr(cpu_present_map)[0])); - for(i=0; i < NR_CPUS; i++) - __cpu_number_map[i] = -1; - for(i=0; i < NR_CPUS; i++) - __cpu_logical_map[i] = -1; - __cpu_number_map[boot_cpu_id] = 0; - __cpu_logical_map[0] = boot_cpu_id; - current_thread_info()->cpu = boot_cpu_id; - smp_store_cpu_info(boot_cpu_id); smp_setup_percpu_timer(); local_flush_cache_all(); - if (cpu_find_by_instance(1, NULL, NULL)) - return; /* Not an MP box. */ - SMP_PRINTK(("Iterating over CPUs\n")); - for(i = 0; i < NR_CPUS; i++) { - if(i == boot_cpu_id) - continue; - - if (cpu_isset(i, cpu_present_map)) { +} + +int smp4d_boot_one_cpu(int i) +{ extern unsigned long sun4d_cpu_startup; unsigned long *entry = &sun4d_cpu_startup; struct task_struct *p; int timeout; - int no; + int cpu_node; + cpu_find_by_instance(i, &cpu_node,NULL); /* Cook up an idler for this guy. */ p = fork_idle(i); - cpucount++; current_set[i] = task_thread_info(p); - for (no = 0; !cpu_find_by_instance(no, NULL, &mid) - && mid != i; no++) ; /* * Initialize the contexts table @@ -216,9 +186,9 @@ void __init smp4d_boot_cpus(void) smp_penguin_ctable.reg_size = 0; /* whirrr, whirrr, whirrrrrrrrr... */ - SMP_PRINTK(("Starting CPU %d at %p task %d node %08x\n", i, entry, cpucount, cpu_data(no).prom_node)); + SMP_PRINTK(("Starting CPU %d at %p \n", i, entry)); local_flush_cache_all(); - prom_startcpu(cpu_data(no).prom_node, + prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry); SMP_PRINTK(("prom_startcpu returned :)\n")); @@ -230,39 +200,30 @@ void __init smp4d_boot_cpus(void) udelay(200); } - if(cpu_callin_map[i]) { - /* Another "Red Snapper". */ - __cpu_number_map[i] = cpucount; - __cpu_logical_map[cpucount] = i; - } else { - cpucount--; - printk("Processor %d is stuck.\n", i); - } - } - if(!(cpu_callin_map[i])) { - cpu_clear(i, cpu_present_map); - __cpu_number_map[i] = -1; - } + if (!(cpu_callin_map[i])) { + printk("Processor %d is stuck.\n", i); + return -ENODEV; + } local_flush_cache_all(); - if(cpucount == 0) { - printk("Error: only one Processor found.\n"); - cpu_present_map = cpumask_of_cpu(hard_smp4d_processor_id()); - } else { - unsigned long bogosum = 0; - - for_each_present_cpu(i) { - bogosum += cpu_data(i).udelay_val; - smp_highest_cpu = i; + return 0; +} + +void __init smp4d_smp_done(void) +{ + int i, first; + int *prev; + + /* setup cpu list for irq rotation */ + first = 0; + prev = &first; + for (i = 0; i < NR_CPUS; i++) + if (cpu_online(i)) { + *prev = i; + prev = &cpu_data(i).next; } - SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100)); - printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", - cpucount + 1, - bogosum/(500000/HZ), - (bogosum/(5000/HZ))%100); - smp_activated = 1; - smp_num_cpus = cpucount + 1; - } + *prev = first; + local_flush_cache_all(); /* Free unneeded trap tables */ ClearPageReserved(virt_to_page(trapbase_cpu1)); @@ -334,7 +295,7 @@ void smp4d_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2, register int i; mask = cpumask_of_cpu(hard_smp4d_processor_id()); - cpus_andnot(mask, cpu_present_map, mask); + cpus_andnot(mask, cpu_online_map, mask); for(i = 0; i <= high; i++) { if (cpu_isset(i, mask)) { ccall_info.processors_in[i] = 0; diff --git a/arch/sparc/kernel/sys_sparc.c b/arch/sparc/kernel/sys_sparc.c index 0cdfc9d294b4..a41c8a5c2007 100644 --- 
a/arch/sparc/kernel/sys_sparc.c +++ b/arch/sparc/kernel/sys_sparc.c @@ -465,21 +465,21 @@ sys_rt_sigaction(int sig, asmlinkage int sys_getdomainname(char __user *name, int len) { - int nlen; - int err = -EFAULT; + int nlen, err; + if (len < 0 || len > __NEW_UTS_LEN) + return -EINVAL; + down_read(&uts_sem); nlen = strlen(system_utsname.domainname) + 1; - if (nlen < len) len = nlen; - if (len > __NEW_UTS_LEN) - goto done; - if (copy_to_user(name, system_utsname.domainname, len)) - goto done; - err = 0; -done: + + err = -EFAULT; + if (!copy_to_user(name, system_utsname.domainname, len)) + err = 0; + up_read(&uts_sem); return err; } diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c index 04eb1eab6e3e..845081b01267 100644 --- a/arch/sparc/kernel/time.c +++ b/arch/sparc/kernel/time.c @@ -225,6 +225,32 @@ static __inline__ int has_low_battery(void) return (data1 == data2); /* Was the write blocked? */ } +static void __init mostek_set_system_time(void) +{ + unsigned int year, mon, day, hour, min, sec; + struct mostek48t02 *mregs; + + mregs = (struct mostek48t02 *)mstk48t02_regs; + if(!mregs) { + prom_printf("Something wrong, clock regs not mapped yet.\n"); + prom_halt(); + } + spin_lock_irq(&mostek_lock); + mregs->creg |= MSTK_CREG_READ; + sec = MSTK_REG_SEC(mregs); + min = MSTK_REG_MIN(mregs); + hour = MSTK_REG_HOUR(mregs); + day = MSTK_REG_DOM(mregs); + mon = MSTK_REG_MONTH(mregs); + year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) ); + xtime.tv_sec = mktime(year, mon, day, hour, min, sec); + xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); + set_normalized_timespec(&wall_to_monotonic, + -xtime.tv_sec, -xtime.tv_nsec); + mregs->creg &= ~MSTK_CREG_READ; + spin_unlock_irq(&mostek_lock); +} + /* Probe for the real time clock chip on Sun4 */ static __inline__ void sun4_clock_probe(void) { @@ -273,6 +299,7 @@ static __inline__ void sun4_clock_probe(void) #endif } +#ifndef CONFIG_SUN4 static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match) { struct device_node *dp = op->node; @@ -307,6 +334,8 @@ static int __devinit clock_probe(struct of_device *op, const struct of_device_id if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP) kick_start_clock(); + mostek_set_system_time(); + return 0; } @@ -325,56 +354,37 @@ static struct of_platform_driver clock_driver = { /* Probe for the mostek real time clock chip. */ -static void clock_init(void) +static int __init clock_init(void) { - of_register_driver(&clock_driver, &of_bus_type); + return of_register_driver(&clock_driver, &of_bus_type); } +/* Must be after subsys_initcall() so that busses are probed. Must + * be before device_initcall() because things like the RTC driver + * need to see the clock registers. 
+ */ +fs_initcall(clock_init); +#endif /* !CONFIG_SUN4 */ + void __init sbus_time_init(void) { - unsigned int year, mon, day, hour, min, sec; - struct mostek48t02 *mregs; - -#ifdef CONFIG_SUN4 - int temp; - struct intersil *iregs; -#endif BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM); btfixup(); if (ARCH_SUN4) sun4_clock_probe(); - else - clock_init(); sparc_init_timers(timer_interrupt); #ifdef CONFIG_SUN4 if(idprom->id_machtype == (SM_SUN4 | SM_4_330)) { -#endif - mregs = (struct mostek48t02 *)mstk48t02_regs; - if(!mregs) { - prom_printf("Something wrong, clock regs not mapped yet.\n"); - prom_halt(); - } - spin_lock_irq(&mostek_lock); - mregs->creg |= MSTK_CREG_READ; - sec = MSTK_REG_SEC(mregs); - min = MSTK_REG_MIN(mregs); - hour = MSTK_REG_HOUR(mregs); - day = MSTK_REG_DOM(mregs); - mon = MSTK_REG_MONTH(mregs); - year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) ); - xtime.tv_sec = mktime(year, mon, day, hour, min, sec); - xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); - set_normalized_timespec(&wall_to_monotonic, - -xtime.tv_sec, -xtime.tv_nsec); - mregs->creg &= ~MSTK_CREG_READ; - spin_unlock_irq(&mostek_lock); -#ifdef CONFIG_SUN4 + mostek_set_system_time(); } else if(idprom->id_machtype == (SM_SUN4 | SM_4_260) ) { /* initialise the intersil on sun4 */ + unsigned int year, mon, day, hour, min, sec; + int temp; + struct intersil *iregs; iregs=intersil_clock; if(!iregs) { diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c index 42c1c700c0a7..2bb1309003dd 100644 --- a/arch/sparc/mm/io-unit.c +++ b/arch/sparc/mm/io-unit.c @@ -64,6 +64,7 @@ iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus) sbus->iommu = (struct iommu_struct *)iounit; iounit->page_table = xpt; + spin_lock_init(&iounit->lock); for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t); xpt < xptend;) diff --git a/arch/sparc/prom/tree.c b/arch/sparc/prom/tree.c index 2bf03ee8cde5..5ec246573a98 100644 --- a/arch/sparc/prom/tree.c +++ b/arch/sparc/prom/tree.c @@ -205,24 +205,6 @@ int prom_searchsiblings(int node_start, char *nodename) return 0; } -/* Gets name in the form prom v2+ uses it (name@x,yyyyy or name (if no reg)) */ -int prom_getname (int node, char *buffer, int len) -{ - int i; - struct linux_prom_registers reg[PROMREG_MAX]; - - i = prom_getproperty (node, "name", buffer, len); - if (i <= 0) return -1; - buffer [i] = 0; - len -= i; - i = prom_getproperty (node, "reg", (char *)reg, sizeof (reg)); - if (i <= 0) return 0; - if (len < 11) return -1; - buffer = strchr (buffer, 0); - sprintf (buffer, "@%x,%x", reg[0].which_io, (uint)reg[0].phys_addr); - return 0; -} - /* Interal version of nextprop that does not alter return values. 
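clock_init() above is now registered with fs_initcall() instead of being called directly from sbus_time_init(). Initcall levels run in a fixed order (subsys before fs before device), which is what the comment relies on: the of_platform bus is populated by the time the clock driver registers, and the clock registers are mapped and the system time set before device-level RTC users look at it. The toy model below just replays that ordering in userspace; the level numbers and function names are illustrative, not kernel interfaces.

#include <stdio.h>

/* Toy model of initcall levels: lower level runs earlier. */
enum { SUBSYS = 4, FS = 5, DEVICE = 6, NLEVELS = 8 };

static void bus_probe(void)    { printf("subsys: of_platform bus populated\n"); }
static void clock_setup(void)  { printf("fs: clock driver registered, time set\n"); }
static void rtc_consumer(void) { printf("device: RTC user sees a valid time\n"); }

struct initcall { int level; void (*fn)(void); };

static struct initcall calls[] = {
        { DEVICE, rtc_consumer },       /* declared first, still runs last */
        { SUBSYS, bus_probe },
        { FS,     clock_setup },
};

int main(void)
{
        int level, i;

        for (level = 0; level < NLEVELS; level++)
                for (i = 0; i < (int)(sizeof(calls) / sizeof(calls[0])); i++)
                        if (calls[i].level == level)
                                calls[i].fn();
        return 0;
}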
*/ char * __prom_nextprop(int node, char * oprop) { diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig index 38353621069e..43d9229fca07 100644 --- a/arch/sparc64/defconfig +++ b/arch/sparc64/defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.18-rc1 -# Wed Jul 12 14:00:58 2006 +# Linux kernel version: 2.6.18-rc2 +# Fri Jul 21 14:19:24 2006 # CONFIG_SPARC=y CONFIG_SPARC64=y @@ -36,6 +36,7 @@ CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y # CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set CONFIG_SYSCTL=y # CONFIG_AUDIT is not set # CONFIG_IKCONFIG is not set @@ -1120,7 +1121,7 @@ CONFIG_USB_HIDDEV=y # CONFIG_USB_LEGOTOWER is not set # CONFIG_USB_LCD is not set # CONFIG_USB_LED is not set -# CONFIG_USB_CY7C63 is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set # CONFIG_USB_CYTHERM is not set # CONFIG_USB_PHIDGETKIT is not set # CONFIG_USB_PHIDGETSERVO is not set @@ -1279,7 +1280,6 @@ CONFIG_RAMFS=y # CONFIG_NFSD is not set # CONFIG_SMB_FS is not set # CONFIG_CIFS is not set -# CONFIG_CIFS_DEBUG2 is not set # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c index f8ef2f2b9b37..ec10f7edcf86 100644 --- a/arch/sparc64/kernel/devices.c +++ b/arch/sparc64/kernel/devices.c @@ -66,9 +66,6 @@ static int check_cpu_node(struct device_node *dp, int *cur_inst, void *compare_arg, struct device_node **dev_node, int *mid) { - if (strcmp(dp->type, "cpu")) - return -ENODEV; - if (!compare(dp, *cur_inst, compare_arg)) { if (dev_node) *dev_node = dp; diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index 75684b56767e..c8e9dc9d68a9 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S @@ -551,9 +551,10 @@ setup_trap_table: save %sp, -192, %sp /* Force interrupts to be disabled. */ - rdpr %pstate, %o1 - andn %o1, PSTATE_IE, %o1 + rdpr %pstate, %l0 + andn %l0, PSTATE_IE, %o1 wrpr %o1, 0x0, %pstate + rdpr %pil, %l1 wrpr %g0, 15, %pil /* Make the firmware call to jump over to the Linux trap table. */ @@ -622,11 +623,9 @@ setup_trap_table: call init_irqwork_curcpu nop - /* Now we can turn interrupts back on. */ - rdpr %pstate, %o1 - or %o1, PSTATE_IE, %o1 - wrpr %o1, 0, %pstate - wrpr %g0, 0x0, %pil + /* Now we can restore interrupt state. */ + wrpr %l0, 0, %pstate + wrpr %l1, 0x0, %pil ret restore diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c index 7064cee290ae..238bbf6de07d 100644 --- a/arch/sparc64/kernel/of_device.c +++ b/arch/sparc64/kernel/of_device.c @@ -542,9 +542,17 @@ static void __init build_device_resources(struct of_device *op, /* Convert to num-cells. */ num_reg /= 4; - /* Conver to num-entries. */ + /* Convert to num-entries. */ num_reg /= na + ns; + /* Prevent overruning the op->resources[] array. */ + if (num_reg > PROMREG_MAX) { + printk(KERN_WARNING "%s: Too many regs (%d), " + "limiting to %d.\n", + op->node->full_name, num_reg, PROMREG_MAX); + num_reg = PROMREG_MAX; + } + for (index = 0; index < num_reg; index++) { struct resource *r = &op->resource[index]; u32 addr[OF_MAX_ADDR_CELLS]; @@ -650,8 +658,22 @@ apply_interrupt_map(struct device_node *dp, struct device_node *pp, next: imap += (na + 3); } - if (i == imlen) + if (i == imlen) { + /* Psycho and Sabre PCI controllers can have 'interrupt-map' + * properties that do not include the on-board device + * interrupts. 
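The setup_trap_table hunk above stops force-enabling interrupts on the way out; it saves the incoming %pstate and %pil into locals and writes exactly those values back. The same save/modify/restore discipline, shown with a plain C flags word rather than the SPARC registers (FLAG_IE and pstate here are illustrative only):

#include <stdio.h>

#define FLAG_IE 0x1                     /* pretend interrupt-enable bit */

static unsigned int pstate = FLAG_IE;   /* caller had interrupts enabled */

static void firmware_call_section(void)
{
        unsigned int saved = pstate;    /* remember the incoming state */

        pstate &= ~FLAG_IE;             /* disable around the critical work */
        printf("inside: IE=%u\n", pstate & FLAG_IE);

        pstate = saved;                 /* restore, do not force-enable */
}

int main(void)
{
        firmware_call_section();
        printf("after: IE=%u\n", pstate & FLAG_IE);
        return 0;
}

Restoring rather than enabling matters when the caller already had interrupts disabled for its own reasons.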
Instead, the device's 'interrupts' property + * is already a fully specified INO value. + * + * Handle this by deciding that, if we didn't get a + * match in the parent's 'interrupt-map', and the + * parent is an IRQ translater, then use the parent as + * our IRQ controller. + */ + if (pp->irq_trans) + return pp; + return NULL; + } *irq_p = irq; cp = of_find_node_by_phandle(handle); @@ -803,6 +825,14 @@ static struct of_device * __init scan_one_device(struct device_node *dp, op->num_irqs = 0; } + /* Prevent overruning the op->irqs[] array. */ + if (op->num_irqs > PROMINTR_MAX) { + printk(KERN_WARNING "%s: Too many irqs (%d), " + "limiting to %d.\n", + dp->full_name, op->num_irqs, PROMINTR_MAX); + op->num_irqs = PROMINTR_MAX; + } + build_device_resources(op, parent); for (i = 0; i < op->num_irqs; i++) op->irqs[i] = build_one_device_irq(op, parent, op->irqs[i]); diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index 197a7ffd57ee..1ec0aab68c08 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c @@ -1099,9 +1099,6 @@ static void pbm_register_toplevel_resources(struct pci_controller_info *p, { char *name = pbm->name; - sprintf(name, "PSYCHO%d PBM%c", - p->index, - (pbm == &p->pbm_A ? 'A' : 'B')); pbm->io_space.name = pbm->mem_space.name = name; request_resource(&ioport_resource, &pbm->io_space); @@ -1203,12 +1200,13 @@ static void psycho_pbm_init(struct pci_controller_info *p, pbm->io_space.flags = IORESOURCE_IO; pbm->mem_space.end = pbm->mem_space.start + PSYCHO_MEMSPACE_SIZE; pbm->mem_space.flags = IORESOURCE_MEM; - pbm_register_toplevel_resources(p, pbm); pbm->parent = p; pbm->prom_node = dp; pbm->name = dp->full_name; + pbm_register_toplevel_resources(p, pbm); + printk("%s: PSYCHO PCI Bus Module ver[%x:%x]\n", pbm->name, pbm->chip_version, pbm->chip_revision); diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c index c86007a2aa3f..5cc5ab63293f 100644 --- a/arch/sparc64/kernel/prom.c +++ b/arch/sparc64/kernel/prom.c @@ -344,10 +344,12 @@ static unsigned long __psycho_onboard_imap_off[] = { /*0x2f*/ PSYCHO_IMAP_CE, /*0x30*/ PSYCHO_IMAP_A_ERR, /*0x31*/ PSYCHO_IMAP_B_ERR, -/*0x32*/ PSYCHO_IMAP_PMGMT +/*0x32*/ PSYCHO_IMAP_PMGMT, +/*0x33*/ PSYCHO_IMAP_GFX, +/*0x34*/ PSYCHO_IMAP_EUPA, }; #define PSYCHO_ONBOARD_IRQ_BASE 0x20 -#define PSYCHO_ONBOARD_IRQ_LAST 0x32 +#define PSYCHO_ONBOARD_IRQ_LAST 0x34 #define psycho_onboard_imap_offset(__ino) \ __psycho_onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE] @@ -529,6 +531,10 @@ static unsigned long __sabre_onboard_imap_off[] = { /*0x2e*/ SABRE_IMAP_UE, /*0x2f*/ SABRE_IMAP_CE, /*0x30*/ SABRE_IMAP_PCIERR, +/*0x31*/ 0 /* reserved */, +/*0x32*/ 0 /* reserved */, +/*0x33*/ SABRE_IMAP_GFX, +/*0x34*/ SABRE_IMAP_EUPA, }; #define SABRE_ONBOARD_IRQ_BASE 0x20 #define SABRE_ONBOARD_IRQ_LAST 0x30 @@ -895,6 +901,8 @@ static unsigned long sysio_irq_offsets[] = { SYSIO_IMAP_CE, SYSIO_IMAP_SBERR, SYSIO_IMAP_PMGMT, + SYSIO_IMAP_GFX, + SYSIO_IMAP_EUPA, }; #undef bogon diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index 237524d87cab..beffc82a1e85 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c @@ -254,7 +254,6 @@ EXPORT_SYMBOL(prom_getproperty); EXPORT_SYMBOL(prom_node_has_property); EXPORT_SYMBOL(prom_setprop); EXPORT_SYMBOL(saved_command_line); -EXPORT_SYMBOL(prom_getname); EXPORT_SYMBOL(prom_finddevice); EXPORT_SYMBOL(prom_feval); EXPORT_SYMBOL(prom_getbool); diff --git a/arch/sparc64/kernel/sys_sparc.c 
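The of_device.c hunks above clamp counts derived from firmware properties (num_reg against PROMREG_MAX, num_irqs against PROMINTR_MAX) before they are used to index fixed-size arrays, logging a warning instead of overrunning. The generic shape of that guard, with an invented limit and array:

#include <stdio.h>

#define RES_MAX 8               /* stand-in for PROMREG_MAX / PROMINTR_MAX */

static int clamp_count(const char *what, int count, int limit)
{
        if (count > limit) {
                fprintf(stderr, "warning: too many %s (%d), limiting to %d\n",
                        what, count, limit);
                return limit;
        }
        return count;
}

int main(void)
{
        int resource[RES_MAX];
        int num = clamp_count("regs", 13, RES_MAX);
        int i;

        for (i = 0; i < num; i++)       /* safe: num <= RES_MAX */
                resource[i] = i;
        printf("kept %d entries, last=%d\n", num, resource[num - 1]);
        return 0;
}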
b/arch/sparc64/kernel/sys_sparc.c index 51c056df528e..054d0abdb7ee 100644 --- a/arch/sparc64/kernel/sys_sparc.c +++ b/arch/sparc64/kernel/sys_sparc.c @@ -701,21 +701,21 @@ extern void check_pending(int signum); asmlinkage long sys_getdomainname(char __user *name, int len) { - int nlen; - int err = -EFAULT; + int nlen, err; + + if (len < 0 || len > __NEW_UTS_LEN) + return -EINVAL; down_read(&uts_sem); nlen = strlen(system_utsname.domainname) + 1; - if (nlen < len) len = nlen; - if (len > __NEW_UTS_LEN) - goto done; - if (copy_to_user(name, system_utsname.domainname, len)) - goto done; - err = 0; -done: + + err = -EFAULT; + if (!copy_to_user(name, system_utsname.domainname, len)) + err = 0; + up_read(&uts_sem); return err; } diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index b43de647ba73..094d3e35be18 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c @@ -928,8 +928,6 @@ static void sparc64_start_timers(void) __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : /* no outputs */ : "r" (pstate)); - - local_irq_enable(); } struct freq_table { diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index 1605967cce91..55ae802dc0ad 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kprobes.h> +#include <linux/kallsyms.h> #include <asm/page.h> #include <asm/pgtable.h> @@ -132,6 +133,8 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", regs->tpc); + printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]); + print_symbol("RPC: <%s>\n", regs->u_regs[15]); printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); __asm__("mov %%sp, %0" : "=r" (ksp)); show_stack(current, ksp); diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c index 49075abd7cbc..500f05e2cfcb 100644 --- a/arch/sparc64/prom/tree.c +++ b/arch/sparc64/prom/tree.c @@ -193,91 +193,6 @@ prom_searchsiblings(int node_start, const char *nodename) return 0; } -/* Gets name in the {name@x,yyyyy|name (if no reg)} form */ -int -prom_getname (int node, char *buffer, int len) -{ - int i, sbus = 0; - int pci = 0, ebus = 0, ide = 0; - struct linux_prom_registers *reg; - struct linux_prom64_registers reg64[PROMREG_MAX]; - - for (sbus = prom_getparent (node); sbus; sbus = prom_getparent (sbus)) { - i = prom_getproperty (sbus, "name", buffer, len); - if (i > 0) { - buffer [i] = 0; - if (!strcmp (buffer, "sbus")) - goto getit; - } - } - if ((pci = prom_getparent (node))) { - i = prom_getproperty (pci, "name", buffer, len); - if (i > 0) { - buffer [i] = 0; - if (!strcmp (buffer, "pci")) - goto getit; - } - pci = 0; - } - if ((ebus = prom_getparent (node))) { - i = prom_getproperty (ebus, "name", buffer, len); - if (i > 0) { - buffer[i] = 0; - if (!strcmp (buffer, "ebus")) - goto getit; - } - ebus = 0; - } - if ((ide = prom_getparent (node))) { - i = prom_getproperty (ide, "name", buffer, len); - if (i > 0) { - buffer [i] = 0; - if (!strcmp (buffer, "ide")) - goto getit; - } - ide = 0; - } -getit: - i = prom_getproperty (node, "name", buffer, len); - if (i <= 0) { - buffer [0] = 0; - return -1; - } - buffer [i] = 0; - len -= i; - i = prom_getproperty (node, "reg", (char *)reg64, sizeof (reg64)); - if (i <= 0) return 0; - if (len < 16) return -1; - buffer = strchr (buffer, 0); - if (sbus) { - reg = (struct linux_prom_registers *)reg64; - sprintf (buffer, "@%x,%x", reg[0].which_io, 
(uint)reg[0].phys_addr); - } else if (pci) { - int dev, fn; - reg = (struct linux_prom_registers *)reg64; - fn = (reg[0].which_io >> 8) & 0x07; - dev = (reg[0].which_io >> 11) & 0x1f; - if (fn) - sprintf (buffer, "@%x,%x", dev, fn); - else - sprintf (buffer, "@%x", dev); - } else if (ebus) { - reg = (struct linux_prom_registers *)reg64; - sprintf (buffer, "@%x,%x", reg[0].which_io, reg[0].phys_addr); - } else if (ide) { - reg = (struct linux_prom_registers *)reg64; - sprintf (buffer, "@%x,%x", reg[0].which_io, reg[0].phys_addr); - } else if (i == 4) { /* Happens on 8042's children on Ultra/PCI. */ - reg = (struct linux_prom_registers *)reg64; - sprintf (buffer, "@%x", reg[0].which_io); - } else { - sprintf (buffer, "@%x,%x", - (unsigned int)(reg64[0].phys_addr >> 36), - (unsigned int)(reg64[0].phys_addr)); - } - return 0; -} - /* Return the first property type for node 'node'. * buffer should be at least 32B in length */ diff --git a/arch/um/Makefile-x86_64 b/arch/um/Makefile-x86_64 index dffd1184c956..9558a7cf34d5 100644 --- a/arch/um/Makefile-x86_64 +++ b/arch/um/Makefile-x86_64 @@ -11,6 +11,7 @@ USER_CFLAGS += -fno-builtin -m64 CHECKFLAGS += -m64 AFLAGS += -m64 LDFLAGS += -m elf_x86_64 +CPPFLAGS += -m64 ELF_ARCH := i386:x86-64 ELF_FORMAT := elf64-x86-64 diff --git a/arch/um/include/longjmp.h b/arch/um/include/longjmp.h index 8e7053013f7b..1b5c0131a12e 100644 --- a/arch/um/include/longjmp.h +++ b/arch/um/include/longjmp.h @@ -8,8 +8,8 @@ longjmp(*buf, val); \ } while(0) -#define UML_SETJMP(buf, enable) ({ \ - int n; \ +#define UML_SETJMP(buf) ({ \ + int n, enable; \ enable = get_signals(); \ n = setjmp(*buf); \ if(n != 0) \ diff --git a/arch/um/include/os.h b/arch/um/include/os.h index b6c52496e15a..5316e8a4a4fd 100644 --- a/arch/um/include/os.h +++ b/arch/um/include/os.h @@ -1,4 +1,4 @@ -/* +/* * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) * Licensed under the GPL */ @@ -15,9 +15,9 @@ #include "irq_user.h" #include "sysdep/tls.h" -#define OS_TYPE_FILE 1 -#define OS_TYPE_DIR 2 -#define OS_TYPE_SYMLINK 3 +#define OS_TYPE_FILE 1 +#define OS_TYPE_DIR 2 +#define OS_TYPE_SYMLINK 3 #define OS_TYPE_CHARDEV 4 #define OS_TYPE_BLOCKDEV 5 #define OS_TYPE_FIFO 6 @@ -61,68 +61,68 @@ struct openflags { }; #define OPENFLAGS() ((struct openflags) { .r = 0, .w = 0, .s = 0, .c = 0, \ - .t = 0, .a = 0, .e = 0, .cl = 0 }) + .t = 0, .a = 0, .e = 0, .cl = 0 }) static inline struct openflags of_read(struct openflags flags) { - flags.r = 1; - return(flags); + flags.r = 1; + return flags; } static inline struct openflags of_write(struct openflags flags) { - flags.w = 1; - return(flags); + flags.w = 1; + return flags; } static inline struct openflags of_rdwr(struct openflags flags) { - return(of_read(of_write(flags))); + return of_read(of_write(flags)); } static inline struct openflags of_set_rw(struct openflags flags, int r, int w) { flags.r = r; flags.w = w; - return(flags); + return flags; } static inline struct openflags of_sync(struct openflags flags) -{ - flags.s = 1; - return(flags); +{ + flags.s = 1; + return flags; } static inline struct openflags of_create(struct openflags flags) -{ - flags.c = 1; - return(flags); +{ + flags.c = 1; + return flags; } - + static inline struct openflags of_trunc(struct openflags flags) -{ - flags.t = 1; - return(flags); +{ + flags.t = 1; + return flags; } - + static inline struct openflags of_append(struct openflags flags) -{ - flags.a = 1; - return(flags); +{ + flags.a = 1; + return flags; } - + static inline struct openflags of_excl(struct openflags flags) -{ - 
flags.e = 1; - return(flags); +{ + flags.e = 1; + return flags; } static inline struct openflags of_cloexec(struct openflags flags) -{ - flags.cl = 1; - return(flags); +{ + flags.cl = 1; + return flags; } - + /* file.c */ extern int os_stat_file(const char *file_name, struct uml_stat *buf); extern int os_stat_fd(const int fd, struct uml_stat *buf); @@ -204,7 +204,7 @@ extern int run_kernel_thread(int (*fn)(void *), void *arg, void **jmp_ptr); extern int os_map_memory(void *virt, int fd, unsigned long long off, unsigned long len, int r, int w, int x); -extern int os_protect_memory(void *addr, unsigned long len, +extern int os_protect_memory(void *addr, unsigned long len, int r, int w, int x); extern int os_unmap_memory(void *addr, int len); extern int os_drop_memory(void *addr, int length); diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c index abf14aaf905f..48cf88dd02d4 100644 --- a/arch/um/kernel/syscall.c +++ b/arch/um/kernel/syscall.c @@ -110,7 +110,7 @@ long sys_uname(struct old_utsname __user * name) if (!name) return -EFAULT; down_read(&uts_sem); - err = copy_to_user(name, utsname(), sizeof (*name)); + err = copy_to_user(name, &system_utsname, sizeof (*name)); up_read(&uts_sem); return err?-EFAULT:0; } @@ -126,21 +126,21 @@ long sys_olduname(struct oldold_utsname __user * name) down_read(&uts_sem); - error = __copy_to_user(&name->sysname, &utsname()->sysname, + error = __copy_to_user(&name->sysname,&system_utsname.sysname, __OLD_UTS_LEN); - error |= __put_user(0, name->sysname + __OLD_UTS_LEN); - error |= __copy_to_user(&name->nodename, &utsname()->nodename, + error |= __put_user(0,name->sysname+__OLD_UTS_LEN); + error |= __copy_to_user(&name->nodename,&system_utsname.nodename, __OLD_UTS_LEN); - error |= __put_user(0, name->nodename + __OLD_UTS_LEN); - error |= __copy_to_user(&name->release, &utsname()->release, + error |= __put_user(0,name->nodename+__OLD_UTS_LEN); + error |= __copy_to_user(&name->release,&system_utsname.release, __OLD_UTS_LEN); - error |= __put_user(0, name->release + __OLD_UTS_LEN); - error |= __copy_to_user(&name->version, &utsname()->version, + error |= __put_user(0,name->release+__OLD_UTS_LEN); + error |= __copy_to_user(&name->version,&system_utsname.version, __OLD_UTS_LEN); - error |= __put_user(0, name->version + __OLD_UTS_LEN); - error |= __copy_to_user(&name->machine, &utsname()->machine, + error |= __put_user(0,name->version+__OLD_UTS_LEN); + error |= __copy_to_user(&name->machine,&system_utsname.machine, __OLD_UTS_LEN); - error |= __put_user(0, name->machine + __OLD_UTS_LEN); + error |= __put_user(0,name->machine+__OLD_UTS_LEN); up_read(&uts_sem); diff --git a/arch/um/kernel/vmlinux.lds.S b/arch/um/kernel/vmlinux.lds.S index 72acdce205e0..f8aeb448aab6 100644 --- a/arch/um/kernel/vmlinux.lds.S +++ b/arch/um/kernel/vmlinux.lds.S @@ -1,5 +1,3 @@ -/* in case the preprocessor is a 32bit one */ -#undef i386 #ifdef CONFIG_LD_SCRIPT_STATIC #include "uml.lds.S" #else diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c index b1cda818f5b5..b98d3ca2cd1b 100644 --- a/arch/um/os-Linux/process.c +++ b/arch/um/os-Linux/process.c @@ -273,12 +273,12 @@ void init_new_thread_signals(void) int run_kernel_thread(int (*fn)(void *), void *arg, void **jmp_ptr) { jmp_buf buf; - int n, enable; + int n; *jmp_ptr = &buf; - n = UML_SETJMP(&buf, enable); + n = UML_SETJMP(&buf); if(n != 0) - return(n); + return n; (*fn)(arg); - return(0); + return 0; } diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index 
bf35572d9cfa..7baf90fda58b 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c @@ -435,7 +435,6 @@ void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr, { unsigned long flags; jmp_buf switch_buf, fork_buf; - int enable; *switch_buf_ptr = &switch_buf; *fork_buf_ptr = &fork_buf; @@ -450,7 +449,7 @@ void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr, */ flags = get_signals(); block_signals(); - if(UML_SETJMP(&fork_buf, enable) == 0) + if(UML_SETJMP(&fork_buf) == 0) new_thread_proc(stack, handler); remove_sigstack(); @@ -467,21 +466,19 @@ void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr, void thread_wait(void *sw, void *fb) { jmp_buf buf, **switch_buf = sw, *fork_buf; - int enable; *switch_buf = &buf; fork_buf = fb; - if(UML_SETJMP(&buf, enable) == 0) + if(UML_SETJMP(&buf) == 0) siglongjmp(*fork_buf, INIT_JMP_REMOVE_SIGSTACK); } void switch_threads(void *me, void *next) { jmp_buf my_buf, **me_ptr = me, *next_buf = next; - int enable; *me_ptr = &my_buf; - if(UML_SETJMP(&my_buf, enable) == 0) + if(UML_SETJMP(&my_buf) == 0) UML_LONGJMP(next_buf, 1); } @@ -495,14 +492,14 @@ static jmp_buf *cb_back; int start_idle_thread(void *stack, void *switch_buf_ptr, void **fork_buf_ptr) { jmp_buf **switch_buf = switch_buf_ptr; - int n, enable; + int n; set_handler(SIGWINCH, (__sighandler_t) sig_handler, SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGALRM, SIGVTALRM, -1); *fork_buf_ptr = &initial_jmpbuf; - n = UML_SETJMP(&initial_jmpbuf, enable); + n = UML_SETJMP(&initial_jmpbuf); switch(n){ case INIT_JMP_NEW_THREAD: new_thread_proc((void *) stack, new_thread_handler); @@ -529,14 +526,13 @@ int start_idle_thread(void *stack, void *switch_buf_ptr, void **fork_buf_ptr) void initial_thread_cb_skas(void (*proc)(void *), void *arg) { jmp_buf here; - int enable; cb_proc = proc; cb_arg = arg; cb_back = &here; block_signals(); - if(UML_SETJMP(&here, enable) == 0) + if(UML_SETJMP(&here) == 0) UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK); unblock_signals(); diff --git a/arch/um/os-Linux/uaccess.c b/arch/um/os-Linux/uaccess.c index e523719330b2..865f6a6a2590 100644 --- a/arch/um/os-Linux/uaccess.c +++ b/arch/um/os-Linux/uaccess.c @@ -14,11 +14,10 @@ unsigned long __do_user_copy(void *to, const void *from, int n, int n), int *faulted_out) { unsigned long *faddrp = (unsigned long *) fault_addr, ret; - int enable; jmp_buf jbuf; *fault_catcher = &jbuf; - if(UML_SETJMP(&jbuf, enable) == 0){ + if(UML_SETJMP(&jbuf) == 0){ (*op)(to, from, n); ret = 0; *faulted_out = 0; diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig index 83d389b8ebd8..840d5d93d5cc 100644 --- a/arch/x86_64/defconfig +++ b/arch/x86_64/defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.17-git22 -# Tue Jul 4 14:24:40 2006 +# Linux kernel version: 2.6.18-rc2 +# Tue Jul 18 17:13:20 2006 # CONFIG_X86_64=y CONFIG_64BIT=y @@ -37,6 +37,7 @@ CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y # CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set CONFIG_SYSCTL=y # CONFIG_AUDIT is not set CONFIG_IKCONFIG=y @@ -413,6 +414,7 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 CONFIG_BLK_DEV_INITRD=y # CONFIG_CDROM_PKTCDVD is not set # CONFIG_ATA_OVER_ETH is not set @@ -1195,7 +1197,7 @@ CONFIG_USB_MON=y # CONFIG_USB_LEGOTOWER is not set # CONFIG_USB_LCD is not set # CONFIG_USB_LED is not set -# CONFIG_USB_CY7C63 is not 
set +# CONFIG_USB_CYPRESS_CY7C63 is not set # CONFIG_USB_CYTHERM is not set # CONFIG_USB_PHIDGETKIT is not set # CONFIG_USB_PHIDGETSERVO is not set @@ -1373,7 +1375,6 @@ CONFIG_SUNRPC=y # CONFIG_RPCSEC_GSS_SPKM3 is not set # CONFIG_SMB_FS is not set # CONFIG_CIFS is not set -# CONFIG_CIFS_DEBUG2 is not set # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S index 9b5bb413a6e9..5d4a7d125ed0 100644 --- a/arch/x86_64/ia32/ia32entry.S +++ b/arch/x86_64/ia32/ia32entry.S @@ -103,7 +103,7 @@ ENTRY(ia32_sysenter_target) pushq %rax CFI_ADJUST_CFA_OFFSET 8 cld - SAVE_ARGS 0,0,1 + SAVE_ARGS 0,0,0 /* no need to do an access_ok check here because rbp has been 32bit zero extended */ 1: movl (%rbp),%r9d diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c index e71ed53b08fb..146924ba5df5 100644 --- a/arch/x86_64/kernel/pci-calgary.c +++ b/arch/x86_64/kernel/pci-calgary.c @@ -85,7 +85,8 @@ #define CSR_AGENT_MASK 0xffe0ffff #define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */ -#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * 2) /* max dev->bus->number */ +#define MAX_NUM_CHASSIS 8 /* max number of chassis */ +#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2) /* max dev->bus->number */ #define PHBS_PER_CALGARY 4 /* register offsets in Calgary's internal register space */ @@ -110,7 +111,8 @@ static const unsigned long phb_offsets[] = { 0xB000 /* PHB3 */ }; -void* tce_table_kva[MAX_NUM_OF_PHBS * MAX_NUMNODES]; +static char bus_to_phb[MAX_PHB_BUS_NUM]; +void* tce_table_kva[MAX_PHB_BUS_NUM]; unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED; static int translate_empty_slots __read_mostly = 0; static int calgary_detected __read_mostly = 0; @@ -119,7 +121,7 @@ static int calgary_detected __read_mostly = 0; * the bitmap of PHBs the user requested that we disable * translation on. 
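The pci-calgary.c changes replace per-node sizing and modulo arithmetic with a bus_to_phb[] table: MAX_PHB_BUS_NUM now covers MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2 bus numbers, detection records which PHB each Calgary bus hangs off, and later lookups become plain array reads. A reduced model of filling such a table (the bus layout below is invented and no hardware is probed):

#include <stdio.h>

#define PHBS_PER_CALGARY 4
#define MAX_PHB_BUS_NUM 16              /* reduced for the example */

static signed char bus_to_phb[MAX_PHB_BUS_NUM];

int main(void)
{
        int bus, phb = -1;

        for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
                bus_to_phb[bus] = -1;           /* default: no PHB on this bus */
                if (bus % 4 != 0)               /* pretend every 4th bus is a Calgary PHB */
                        continue;
                phb = (phb + 1) % PHBS_PER_CALGARY;
                bus_to_phb[bus] = phb;
        }

        for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++)
                if (bus_to_phb[bus] >= 0)
                        printf("bus 0x%02x -> phb %d\n", bus, bus_to_phb[bus]);
        return 0;
}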
*/ -static DECLARE_BITMAP(translation_disabled, MAX_NUMNODES * MAX_PHB_BUS_NUM); +static DECLARE_BITMAP(translation_disabled, MAX_PHB_BUS_NUM); static void tce_cache_blast(struct iommu_table *tbl); @@ -452,7 +454,7 @@ static struct dma_mapping_ops calgary_dma_ops = { static inline int busno_to_phbid(unsigned char num) { - return bus_to_phb(num) % PHBS_PER_CALGARY; + return bus_to_phb[num]; } static inline unsigned long split_queue_offset(unsigned char num) @@ -812,7 +814,7 @@ static int __init calgary_init(void) int i, ret = -ENODEV; struct pci_dev *dev = NULL; - for (i = 0; i <= num_online_nodes() * MAX_NUM_OF_PHBS; i++) { + for (i = 0; i < MAX_PHB_BUS_NUM; i++) { dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CALGARY, dev); @@ -822,7 +824,7 @@ static int __init calgary_init(void) calgary_init_one_nontraslated(dev); continue; } - if (!tce_table_kva[i] && !translate_empty_slots) { + if (!tce_table_kva[dev->bus->number] && !translate_empty_slots) { pci_dev_put(dev); continue; } @@ -842,7 +844,7 @@ error: pci_dev_put(dev); continue; } - if (!tce_table_kva[i] && !translate_empty_slots) + if (!tce_table_kva[dev->bus->number] && !translate_empty_slots) continue; calgary_disable_translation(dev); calgary_free_tar(dev); @@ -876,9 +878,10 @@ static inline int __init determine_tce_table_size(u64 ram) void __init detect_calgary(void) { u32 val; - int bus, table_idx; + int bus; void *tbl; - int detected = 0; + int calgary_found = 0; + int phb = -1; /* * if the user specified iommu=off or iommu=soft or we found @@ -889,38 +892,46 @@ void __init detect_calgary(void) specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE); - for (bus = 0, table_idx = 0; - bus <= num_online_nodes() * MAX_PHB_BUS_NUM; - bus++) { - BUG_ON(bus > MAX_NUMNODES * MAX_PHB_BUS_NUM); + for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) { + int dev; + + tce_table_kva[bus] = NULL; + bus_to_phb[bus] = -1; + if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY) continue; + + /* + * There are 4 PHBs per Calgary chip. Set phb to which phb (0-3) + * it is connected to releative to the clagary chip. + */ + phb = (phb + 1) % PHBS_PER_CALGARY; + if (test_bit(bus, translation_disabled)) { printk(KERN_INFO "Calgary: translation is disabled for " "PHB 0x%x\n", bus); /* skip this phb, don't allocate a tbl for it */ - tce_table_kva[table_idx] = NULL; - table_idx++; continue; } /* - * scan the first slot of the PCI bus to see if there - * are any devices present + * Scan the slots of the PCI bus to see if there is a device present. + * The parent bus will be the zero-ith device, so start at 1. */ - val = read_pci_config(bus, 1, 0, 0); - if (val != 0xffffffff || translate_empty_slots) { - tbl = alloc_tce_table(); - if (!tbl) - goto cleanup; - detected = 1; - } else - tbl = NULL; - - tce_table_kva[table_idx] = tbl; - table_idx++; + for (dev = 1; dev < 8; dev++) { + val = read_pci_config(bus, dev, 0, 0); + if (val != 0xffffffff || translate_empty_slots) { + tbl = alloc_tce_table(); + if (!tbl) + goto cleanup; + tce_table_kva[bus] = tbl; + bus_to_phb[bus] = phb; + calgary_found = 1; + break; + } + } } - if (detected) { + if (calgary_found) { iommu_detected = 1; calgary_detected = 1; printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected. 
" @@ -929,9 +940,9 @@ void __init detect_calgary(void) return; cleanup: - for (--table_idx; table_idx >= 0; --table_idx) - if (tce_table_kva[table_idx]) - free_tce_table(tce_table_kva[table_idx]); + for (--bus; bus >= 0; --bus) + if (tce_table_kva[bus]) + free_tce_table(tce_table_kva[bus]); } int __init calgary_iommu_init(void) @@ -1002,7 +1013,7 @@ static int __init calgary_parse_options(char *p) if (p == endp) break; - if (bridge <= (num_online_nodes() * MAX_PHB_BUS_NUM)) { + if (bridge < MAX_PHB_BUS_NUM) { printk(KERN_INFO "Calgary: disabling " "translation for PHB 0x%x\n", bridge); set_bit(bridge, translation_disabled); diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c index ebdb77fe2057..6a55f87ba97f 100644 --- a/arch/x86_64/kernel/pci-swiotlb.c +++ b/arch/x86_64/kernel/pci-swiotlb.c @@ -31,9 +31,10 @@ struct dma_mapping_ops swiotlb_dma_ops = { void pci_swiotlb_init(void) { /* don't initialize swiotlb if iommu=off (no_iommu=1) */ - if (!iommu_detected && !no_iommu && - (end_pfn > MAX_DMA32_PFN || force_iommu)) + if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN) swiotlb = 1; + if (swiotlb_force) + swiotlb = 1; if (swiotlb) { printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n"); swiotlb_init(); diff --git a/arch/x86_64/kernel/tce.c b/arch/x86_64/kernel/tce.c index d3a9e79e954c..5530dda3f27a 100644 --- a/arch/x86_64/kernel/tce.c +++ b/arch/x86_64/kernel/tce.c @@ -96,7 +96,6 @@ static inline unsigned int table_size_to_number_of_entries(unsigned char size) static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl) { unsigned int bitmapsz; - unsigned int tce_table_index; unsigned long bmppages; int ret; @@ -105,8 +104,7 @@ static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl) /* set the tce table size - measured in entries */ tbl->it_size = table_size_to_number_of_entries(specified_table_size); - tce_table_index = bus_to_phb(tbl->it_busno); - tbl->it_base = (unsigned long)tce_table_kva[tce_table_index]; + tbl->it_base = (unsigned long)tce_table_kva[dev->bus->number]; if (!tbl->it_base) { printk(KERN_ERR "Calgary: iommu_table_setparms: " "no table allocated?!\n"); diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index b9ff75992c16..7a9b18224182 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c @@ -28,6 +28,7 @@ #include <linux/acpi.h> #ifdef CONFIG_ACPI #include <acpi/achware.h> /* for PM timer frequency */ +#include <acpi/acpi_bus.h> #endif #include <asm/8253pit.h> #include <asm/pgtable.h> @@ -193,7 +194,7 @@ unsigned long profile_pc(struct pt_regs *regs) is just accounted to the spinlock function. Better would be to write these functions in assembler again and check exactly. */ - if (in_lock_functions(pc)) { + if (!user_mode(regs) && in_lock_functions(pc)) { char *v = *(char **)regs->rsp; if ((v >= _stext && v <= _etext) || (v >= _sinittext && v <= _einittext) || @@ -953,11 +954,18 @@ __cpuinit int unsynchronized_tsc(void) #ifdef CONFIG_SMP if (apic_is_clustered_box()) return 1; - /* Intel systems are normally all synchronized. Exceptions - are handled in the check above. 
*/ - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) - return 0; #endif + /* Most intel systems have synchronized TSCs except for + multi node systems */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { +#ifdef CONFIG_ACPI + /* But TSC doesn't tick in C3 so don't use it there */ + if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 100) + return 1; +#endif + return 0; + } + /* Assume multi socket systems are not synchronized */ return num_present_cpus() > 1; } diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c index eb39a2775236..f7a9d1421078 100644 --- a/arch/x86_64/kernel/traps.c +++ b/arch/x86_64/kernel/traps.c @@ -254,7 +254,6 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s { const unsigned cpu = safe_smp_processor_id(); unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; - int i = 11; unsigned used = 0; printk("\nCall Trace:\n"); @@ -275,11 +274,20 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s if (unwind_init_blocked(&info, tsk) == 0) unw_ret = show_trace_unwind(&info, NULL); } - if (unw_ret > 0) { - if (call_trace > 0) + if (unw_ret > 0 && !arch_unw_user_mode(&info)) { +#ifdef CONFIG_STACK_UNWIND + unsigned long rip = info.regs.rip; + print_symbol("DWARF2 unwinder stuck at %s\n", rip); + if (call_trace == 1) { + printk("Leftover inexact backtrace:\n"); + stack = (unsigned long *)info.regs.rsp; + } else if (call_trace > 1) return; - printk("Legacy call trace:"); - i = 18; + else + printk("Full inexact backtrace again:\n"); +#else + printk("Inexact backtrace:\n"); +#endif } } @@ -1118,8 +1126,10 @@ static int __init call_trace_setup(char *s) call_trace = -1; else if (strcmp(s, "both") == 0) call_trace = 0; - else if (strcmp(s, "new") == 0) + else if (strcmp(s, "newfallback") == 0) call_trace = 1; + else if (strcmp(s, "new") == 0) + call_trace = 2; return 1; } __setup("call_trace=", call_trace_setup); diff --git a/arch/x86_64/pci/k8-bus.c b/arch/x86_64/pci/k8-bus.c index b50a7c7c47f8..3acf60ded2a0 100644 --- a/arch/x86_64/pci/k8-bus.c +++ b/arch/x86_64/pci/k8-bus.c @@ -2,7 +2,6 @@ #include <linux/pci.h> #include <asm/mpspec.h> #include <linux/cpumask.h> -#include <asm/k8.h> /* * This discovers the pcibus <-> node mapping on AMD K8. 
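The unsynchronized_tsc() hunk above encodes a heuristic: clustered-APIC boxes are always treated as unsynchronized, Intel parts are trusted unless ACPI reports a fast C3 state (where the TSC stops ticking), and anything else is assumed unsynchronized once more than one CPU is present. Restated as a standalone decision function with illustrative inputs:

#include <stdbool.h>
#include <stdio.h>

struct tsc_inputs {
        bool clustered_apic;    /* clustered / multi-chassis box */
        bool vendor_intel;
        bool has_fast_c3;       /* ACPI FADT: C3 latency below 100us */
        int  present_cpus;
};

static bool tsc_unsynchronized(const struct tsc_inputs *in)
{
        if (in->clustered_apic)
                return true;
        if (in->vendor_intel)
                return in->has_fast_c3;         /* TSC stops in C3 */
        return in->present_cpus > 1;            /* assume multi-socket is unsynced */
}

int main(void)
{
        struct tsc_inputs laptop = { false, true,  true,  2 };
        struct tsc_inputs server = { false, false, false, 4 };

        printf("intel laptop with C3: %d\n", tsc_unsynchronized(&laptop));
        printf("non-intel 4-way:      %d\n", tsc_unsynchronized(&server));
        return 0;
}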
@@ -19,6 +18,7 @@ #define NR_LDT_BUS_NUMBER_REGISTERS 3 #define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF) #define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF) +#define PCI_DEVICE_ID_K8HTCONFIG 0x1100 /** * fill_mp_bus_to_cpumask() @@ -28,7 +28,8 @@ __init static int fill_mp_bus_to_cpumask(void) { - int i, j, k; + struct pci_dev *nb_dev = NULL; + int i, j; u32 ldtbus, nid; static int lbnr[3] = { LDT_BUS_NUMBER_REGISTER_0, @@ -36,9 +37,8 @@ fill_mp_bus_to_cpumask(void) LDT_BUS_NUMBER_REGISTER_2 }; - cache_k8_northbridges(); - for (k = 0; k < num_k8_northbridges; k++) { - struct pci_dev *nb_dev = k8_northbridges[k]; + while ((nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, + PCI_DEVICE_ID_K8HTCONFIG, nb_dev))) { pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid); for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) { diff --git a/block/blktrace.c b/block/blktrace.c index b8c0702777ff..265f7a830619 100644 --- a/block/blktrace.c +++ b/block/blktrace.c @@ -80,7 +80,7 @@ static u32 bio_act[5] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_AC #define trace_sync_bit(rw) \ (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1)) #define trace_ahead_bit(rw) \ - (((rw) & (1 << BIO_RW_AHEAD)) << (BIO_RW_AHEAD - 0)) + (((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD)) /* * The worker for the various blk_add_trace*() types. Fills out a diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 102ebc2c5c34..aae3123bf3ee 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -936,7 +936,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) * seeks. so allow a little bit of time for him to submit a new rq */ if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic)) - sl = 2; + sl = min(sl, msecs_to_jiffies(2)); mod_timer(&cfqd->idle_slice_timer, jiffies + sl); return 1; diff --git a/block/ioctl.c b/block/ioctl.c index 9cfa2e1ecb24..309760b7e37f 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -72,7 +72,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user bdevp = bdget_disk(disk, part); if (!bdevp) return -ENOMEM; - mutex_lock(&bdevp->bd_mutex); + mutex_lock_nested(&bdevp->bd_mutex, BD_MUTEX_PARTITION); if (bdevp->bd_openers) { mutex_unlock(&bdevp->bd_mutex); bdput(bdevp); @@ -82,7 +82,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user fsync_bdev(bdevp); invalidate_bdev(bdevp, 0); - mutex_lock(&bdev->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, BD_MUTEX_WHOLE); delete_partition(disk, part); mutex_unlock(&bdev->bd_mutex); mutex_unlock(&bdevp->bd_mutex); diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 93d94749310b..b5382cedf0c0 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -400,6 +400,16 @@ config BLK_DEV_RAM_SIZE what are you doing. If you are using IBM S/390, then set this to 8192. +config BLK_DEV_RAM_BLOCKSIZE + int "Default RAM disk block size (bytes)" + depends on BLK_DEV_RAM + default "1024" + help + The default value is 1024 kilobytes. PAGE_SIZE is a much more + efficient choice however. The default is kept to ensure initrd + setups function - apparently needed by the rd_load_image routine + that supposes the filesystem in the image uses a 1024 blocksize. 
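The new BLK_DEV_RAM_BLOCKSIZE option above keeps 1024 as the default so rd_load_image() still finds the filesystem it expects, while letting builders choose PAGE_SIZE for efficiency. Whatever value is picked still has to be acceptable as a block device soft block size, that is a power of two between 512 bytes and PAGE_SIZE; the small check below illustrates that constraint (valid_blocksize() is a hypothetical helper, not the kernel's validation path):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static bool valid_blocksize(unsigned int bs)
{
        /* power of two, at least a sector, at most a page */
        return bs >= 512 && bs <= PAGE_SIZE && (bs & (bs - 1)) == 0;
}

int main(void)
{
        unsigned int sizes[] = { 1024, 4096, 1000, 8192 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("%u: %s\n", sizes[i],
                       valid_blocksize(sizes[i]) ? "ok" : "rejected");
        return 0;
}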
+ config BLK_DEV_INITRD bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support" depends on BROKEN || !FRV diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 1c4df22dfd2a..7b0eca703a67 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -1233,6 +1233,50 @@ static inline void complete_buffers(struct bio *bio, int status) } } +static void cciss_check_queues(ctlr_info_t *h) +{ + int start_queue = h->next_to_run; + int i; + + /* check to see if we have maxed out the number of commands that can + * be placed on the queue. If so then exit. We do this check here + * in case the interrupt we serviced was from an ioctl and did not + * free any new commands. + */ + if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) + return; + + /* We have room on the queue for more commands. Now we need to queue + * them up. We will also keep track of the next queue to run so + * that every queue gets a chance to be started first. + */ + for (i = 0; i < h->highest_lun + 1; i++) { + int curr_queue = (start_queue + i) % (h->highest_lun + 1); + /* make sure the disk has been added and the drive is real + * because this can be called from the middle of init_one. + */ + if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads)) + continue; + blk_start_queue(h->gendisk[curr_queue]->queue); + + /* check to see if we have maxed out the number of commands + * that can be placed on the queue. + */ + if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) { + if (curr_queue == start_queue) { + h->next_to_run = + (start_queue + 1) % (h->highest_lun + 1); + break; + } else { + h->next_to_run = curr_queue; + break; + } + } else { + curr_queue = (curr_queue + 1) % (h->highest_lun + 1); + } + } +} + static void cciss_softirq_done(struct request *rq) { CommandList_struct *cmd = rq->completion_data; @@ -1264,6 +1308,7 @@ static void cciss_softirq_done(struct request *rq) spin_lock_irqsave(&h->lock, flags); end_that_request_last(rq, rq->errors); cmd_free(h, cmd, 1); + cciss_check_queues(h); spin_unlock_irqrestore(&h->lock, flags); } @@ -2528,8 +2573,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs) CommandList_struct *c; unsigned long flags; __u32 a, a1, a2; - int j; - int start_queue = h->next_to_run; if (interrupt_not_for_us(h)) return IRQ_NONE; @@ -2588,45 +2631,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs) } } - /* check to see if we have maxed out the number of commands that can - * be placed on the queue. If so then exit. We do this check here - * in case the interrupt we serviced was from an ioctl and did not - * free any new commands. - */ - if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) - goto cleanup; - - /* We have room on the queue for more commands. Now we need to queue - * them up. We will also keep track of the next queue to run so - * that every queue gets a chance to be started first. - */ - for (j = 0; j < h->highest_lun + 1; j++) { - int curr_queue = (start_queue + j) % (h->highest_lun + 1); - /* make sure the disk has been added and the drive is real - * because this can be called from the middle of init_one. - */ - if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads)) - continue; - blk_start_queue(h->gendisk[curr_queue]->queue); - - /* check to see if we have maxed out the number of commands - * that can be placed on the queue. 
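cciss_check_queues() above restarts the per-disk request queues in round-robin order beginning at next_to_run, and records where to resume when the controller's command pool runs dry, so every logical drive eventually gets a turn. A reduced model of that rotation (the queue array and the budget parameter are invented; no controller state is involved):

#include <stdio.h>

#define NQUEUES 4

static int next_to_run;
static int started[NQUEUES];

/* Pretend the command pool allows only 'budget' queue starts per pass. */
static void check_queues(int budget)
{
        int start = next_to_run, i;

        for (i = 0; i < NQUEUES; i++) {
                int q = (start + i) % NQUEUES;

                if (budget-- <= 0) {
                        next_to_run = q;        /* resume here next time */
                        return;
                }
                started[q]++;
        }
}

int main(void)
{
        int i;

        check_queues(2);        /* pool exhausted after two starts */
        check_queues(2);        /* resumes where the last pass stopped */

        for (i = 0; i < NQUEUES; i++)
                printf("queue %d started %d time(s)\n", i, started[i]);
        printf("next_to_run=%d\n", next_to_run);
        return 0;
}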
- */ - if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) { - if (curr_queue == start_queue) { - h->next_to_run = - (start_queue + 1) % (h->highest_lun + 1); - goto cleanup; - } else { - h->next_to_run = curr_queue; - goto cleanup; - } - } else { - curr_queue = (curr_queue + 1) % (h->highest_lun + 1); - } - } - - cleanup: spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); return IRQ_HANDLED; } diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index 757f42dd8e86..78082edc14b4 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c @@ -1739,8 +1739,6 @@ static void getgeometry(int ctlr) (log_index < id_ctlr_buf->nr_drvs) && (log_unit < NWD); log_unit++) { - struct gendisk *disk = ida_gendisk[ctlr][log_unit]; - size = sizeof(sense_log_drv_stat_t); /* diff --git a/drivers/block/rd.c b/drivers/block/rd.c index 3cf246abb5ec..a3f64bfe6b58 100644 --- a/drivers/block/rd.c +++ b/drivers/block/rd.c @@ -84,7 +84,7 @@ int rd_size = CONFIG_BLK_DEV_RAM_SIZE; /* Size of the RAM disks */ * behaviour. The default is still BLOCK_SIZE (needed by rd_load_image that * supposes the filesystem in the image uses a BLOCK_SIZE blocksize). */ -static int rd_blocksize = BLOCK_SIZE; /* blocksize of the RAM disks */ +static int rd_blocksize = CONFIG_BLK_DEV_RAM_BLOCKSIZE; /* * Copyright (C) 2000 Linus Torvalds. diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c index 6a0c2230f82f..e2d4beac7420 100644 --- a/drivers/bluetooth/hci_usb.c +++ b/drivers/bluetooth/hci_usb.c @@ -67,6 +67,8 @@ static int ignore = 0; static int ignore_dga = 0; static int ignore_csr = 0; static int ignore_sniffer = 0; +static int disable_scofix = 0; +static int force_scofix = 0; static int reset = 0; #ifdef CONFIG_BT_HCIUSB_SCO @@ -107,9 +109,12 @@ static struct usb_device_id blacklist_ids[] = { { USB_DEVICE(0x0a5c, 0x2033), .driver_info = HCI_IGNORE }, /* Broadcom BCM2035 */ - { USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_BROKEN_ISOC }, + { USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 }, + /* IBM/Lenovo ThinkPad with Broadcom chip */ + { USB_DEVICE(0x0a5c, 0x201e), .driver_info = HCI_WRONG_SCO_MTU }, + /* Microsoft Wireless Transceiver for Bluetooth 2.0 */ { USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET }, @@ -119,11 +124,13 @@ static struct usb_device_id blacklist_ids[] = { /* ISSC Bluetooth Adapter v3.1 */ { USB_DEVICE(0x1131, 0x1001), .driver_info = HCI_RESET }, - /* RTX Telecom based adapter with buggy SCO support */ + /* RTX Telecom based adapters with buggy SCO support */ { USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC }, + { USB_DEVICE(0x0400, 0x080a), .driver_info = HCI_BROKEN_ISOC }, - /* Belkin F8T012 */ + /* Belkin F8T012 and F8T013 devices */ { USB_DEVICE(0x050d, 0x0012), .driver_info = HCI_WRONG_SCO_MTU }, + { USB_DEVICE(0x050d, 0x0013), .driver_info = HCI_WRONG_SCO_MTU }, /* Digianswer devices */ { USB_DEVICE(0x08fd, 0x0001), .driver_info = HCI_DIGIANSWER }, @@ -990,8 +997,10 @@ static int hci_usb_probe(struct usb_interface *intf, const struct usb_device_id if (reset || id->driver_info & HCI_RESET) set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks); - if (id->driver_info & HCI_WRONG_SCO_MTU) - set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks); + if (force_scofix || id->driver_info & HCI_WRONG_SCO_MTU) { + if (!disable_scofix) + set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks); + } if (id->driver_info & HCI_SNIFFER) { if 
(le16_to_cpu(udev->descriptor.bcdDevice) > 0x997) @@ -1161,6 +1170,12 @@ MODULE_PARM_DESC(ignore_csr, "Ignore devices with id 0a12:0001"); module_param(ignore_sniffer, bool, 0644); MODULE_PARM_DESC(ignore_sniffer, "Ignore devices with id 0a12:0002"); +module_param(disable_scofix, bool, 0644); +MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size"); + +module_param(force_scofix, bool, 0644); +MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffers size"); + module_param(reset, bool, 0644); MODULE_PARM_DESC(reset, "Send HCI reset command on initialization"); diff --git a/drivers/char/nsc_gpio.c b/drivers/char/nsc_gpio.c index 5b91e4e25641..7719bd75810b 100644 --- a/drivers/char/nsc_gpio.c +++ b/drivers/char/nsc_gpio.c @@ -68,13 +68,11 @@ ssize_t nsc_gpio_write(struct file *file, const char __user *data, amp->gpio_config(m, ~1, 0); break; case 'T': - dev_dbg(dev, "GPIO%d output is push pull\n", - m); + dev_dbg(dev, "GPIO%d output is push pull\n", m); amp->gpio_config(m, ~2, 2); break; case 't': - dev_dbg(dev, "GPIO%d output is open drain\n", - m); + dev_dbg(dev, "GPIO%d output is open drain\n", m); amp->gpio_config(m, ~2, 0); break; case 'P': diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c index 11bd78c80628..645eb81cb5a9 100644 --- a/drivers/char/pc8736x_gpio.c +++ b/drivers/char/pc8736x_gpio.c @@ -212,22 +212,21 @@ static void pc8736x_gpio_change(unsigned index) pc8736x_gpio_set(index, !pc8736x_gpio_current(index)); } -static struct nsc_gpio_ops pc8736x_access = { +static struct nsc_gpio_ops pc8736x_gpio_ops = { .owner = THIS_MODULE, .gpio_config = pc8736x_gpio_configure, .gpio_dump = nsc_gpio_dump, .gpio_get = pc8736x_gpio_get, .gpio_set = pc8736x_gpio_set, - .gpio_set_high = pc8736x_gpio_set_high, - .gpio_set_low = pc8736x_gpio_set_low, .gpio_change = pc8736x_gpio_change, .gpio_current = pc8736x_gpio_current }; +EXPORT_SYMBOL(pc8736x_gpio_ops); static int pc8736x_gpio_open(struct inode *inode, struct file *file) { unsigned m = iminor(inode); - file->private_data = &pc8736x_access; + file->private_data = &pc8736x_gpio_ops; dev_dbg(&pdev->dev, "open %d\n", m); @@ -236,7 +235,7 @@ static int pc8736x_gpio_open(struct inode *inode, struct file *file) return nonseekable_open(inode, file); } -static const struct file_operations pc8736x_gpio_fops = { +static const struct file_operations pc8736x_gpio_fileops = { .owner = THIS_MODULE, .open = pc8736x_gpio_open, .write = nsc_gpio_write, @@ -278,7 +277,7 @@ static int __init pc8736x_gpio_init(void) dev_err(&pdev->dev, "no device found\n"); goto undo_platform_dev_add; } - pc8736x_access.dev = &pdev->dev; + pc8736x_gpio_ops.dev = &pdev->dev; /* Verify that chip and it's GPIO unit are both enabled. 
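The hci_usb changes above add disable_scofix and force_scofix module parameters on top of the per-device HCI_WRONG_SCO_MTU quirk table: force requests the SCO buffer-size fixup for any adapter, disable vetoes it even for blacklisted ones. The precedence boils down to the small helper below (apply_scofix() and its arguments are illustrative names, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

/* Should the SCO-MTU fixup be applied to this adapter? */
static bool apply_scofix(bool device_quirky, bool force_scofix,
                         bool disable_scofix)
{
        if (!(force_scofix || device_quirky))
                return false;           /* nothing asks for the fixup */
        return !disable_scofix;         /* an explicit disable always wins */
}

int main(void)
{
        printf("blacklisted device:           %d\n", apply_scofix(true,  false, false));
        printf("blacklisted + disable_scofix: %d\n", apply_scofix(true,  false, true));
        printf("clean device + force_scofix:  %d\n", apply_scofix(false, true,  false));
        return 0;
}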
My BIOS does this, so I take minimum action here @@ -328,7 +327,7 @@ static int __init pc8736x_gpio_init(void) pc8736x_init_shadow(); /* ignore minor errs, and succeed */ - cdev_init(&pc8736x_gpio_cdev, &pc8736x_gpio_fops); + cdev_init(&pc8736x_gpio_cdev, &pc8736x_gpio_fileops); cdev_add(&pc8736x_gpio_cdev, devid, PC8736X_GPIO_CT); return 0; @@ -355,7 +354,5 @@ static void __exit pc8736x_gpio_cleanup(void) platform_device_put(pdev); } -EXPORT_SYMBOL(pc8736x_access); - module_init(pc8736x_gpio_init); module_exit(pc8736x_gpio_cleanup); diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index 17bc8abd5df5..00f574cbb0d4 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c @@ -1174,8 +1174,12 @@ static void dcd_change(MGSLPC_INFO *info) else info->input_signal_events.dcd_down++; #ifdef CONFIG_HDLC - if (info->netcount) - hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, info->netdev); + if (info->netcount) { + if (info->serial_signals & SerialSignal_DCD) + netif_carrier_on(info->netdev); + else + netif_carrier_off(info->netdev); + } #endif wake_up_interruptible(&info->status_event_wait_q); wake_up_interruptible(&info->event_wait_q); @@ -4251,8 +4255,10 @@ static int hdlcdev_open(struct net_device *dev) spin_lock_irqsave(&info->lock, flags); get_signals(info); spin_unlock_irqrestore(&info->lock, flags); - hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev); - + if (info->serial_signals & SerialSignal_DCD) + netif_carrier_on(dev); + else + netif_carrier_off(dev); return 0; } diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c index 425c58719db6..b956c7babd18 100644 --- a/drivers/char/scx200_gpio.c +++ b/drivers/char/scx200_gpio.c @@ -5,7 +5,6 @@ Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> */ -#include <linux/config.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/module.h> @@ -22,37 +21,37 @@ #include <linux/scx200_gpio.h> #include <linux/nsc_gpio.h> -#define NAME "scx200_gpio" -#define DEVNAME NAME +#define DRVNAME "scx200_gpio" static struct platform_device *pdev; MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); -MODULE_DESCRIPTION("NatSemi SCx200 GPIO Pin Driver"); +MODULE_DESCRIPTION("NatSemi/AMD SCx200 GPIO Pin Driver"); MODULE_LICENSE("GPL"); static int major = 0; /* default to dynamic major */ module_param(major, int, 0); MODULE_PARM_DESC(major, "Major device number"); -struct nsc_gpio_ops scx200_access = { +#define MAX_PINS 32 /* 64 later, when known ok */ + +struct nsc_gpio_ops scx200_gpio_ops = { .owner = THIS_MODULE, .gpio_config = scx200_gpio_configure, .gpio_dump = nsc_gpio_dump, .gpio_get = scx200_gpio_get, .gpio_set = scx200_gpio_set, - .gpio_set_high = scx200_gpio_set_high, - .gpio_set_low = scx200_gpio_set_low, .gpio_change = scx200_gpio_change, .gpio_current = scx200_gpio_current }; +EXPORT_SYMBOL(scx200_gpio_ops); static int scx200_gpio_open(struct inode *inode, struct file *file) { unsigned m = iminor(inode); - file->private_data = &scx200_access; + file->private_data = &scx200_gpio_ops; - if (m > 63) + if (m >= MAX_PINS) return -EINVAL; return nonseekable_open(inode, file); } @@ -62,8 +61,7 @@ static int scx200_gpio_release(struct inode *inode, struct file *file) return 0; } - -static const struct file_operations scx200_gpio_fops = { +static const struct file_operations scx200_gpio_fileops = { .owner = THIS_MODULE, .write = nsc_gpio_write, .read = nsc_gpio_read, @@ -71,21 +69,20 @@ static const struct file_operations 
scx200_gpio_fops = { .release = scx200_gpio_release, }; -struct cdev *scx200_devices; -static int num_pins = 32; +struct cdev scx200_gpio_cdev; /* use 1 cdev for all pins */ static int __init scx200_gpio_init(void) { - int rc, i; - dev_t dev = MKDEV(major, 0); + int rc; + dev_t devid; if (!scx200_gpio_present()) { - printk(KERN_ERR NAME ": no SCx200 gpio present\n"); + printk(KERN_ERR DRVNAME ": no SCx200 gpio present\n"); return -ENODEV; } /* support dev_dbg() with pdev->dev */ - pdev = platform_device_alloc(DEVNAME, 0); + pdev = platform_device_alloc(DRVNAME, 0); if (!pdev) return -ENOMEM; @@ -94,37 +91,25 @@ static int __init scx200_gpio_init(void) goto undo_malloc; /* nsc_gpio uses dev_dbg(), so needs this */ - scx200_access.dev = &pdev->dev; - - if (major) - rc = register_chrdev_region(dev, num_pins, "scx200_gpio"); - else { - rc = alloc_chrdev_region(&dev, 0, num_pins, "scx200_gpio"); - major = MAJOR(dev); + scx200_gpio_ops.dev = &pdev->dev; + + if (major) { + devid = MKDEV(major, 0); + rc = register_chrdev_region(devid, MAX_PINS, "scx200_gpio"); + } else { + rc = alloc_chrdev_region(&devid, 0, MAX_PINS, "scx200_gpio"); + major = MAJOR(devid); } if (rc < 0) { dev_err(&pdev->dev, "SCx200 chrdev_region err: %d\n", rc); goto undo_platform_device_add; } - scx200_devices = kzalloc(num_pins * sizeof(struct cdev), GFP_KERNEL); - if (!scx200_devices) { - rc = -ENOMEM; - goto undo_chrdev_region; - } - for (i = 0; i < num_pins; i++) { - struct cdev *cdev = &scx200_devices[i]; - cdev_init(cdev, &scx200_gpio_fops); - cdev->owner = THIS_MODULE; - rc = cdev_add(cdev, MKDEV(major, i), 1); - /* tolerate 'minor' errors */ - if (rc) - dev_err(&pdev->dev, "Error %d on minor %d", rc, i); - } + + cdev_init(&scx200_gpio_cdev, &scx200_gpio_fileops); + cdev_add(&scx200_gpio_cdev, devid, MAX_PINS); return 0; /* succeed */ -undo_chrdev_region: - unregister_chrdev_region(dev, num_pins); undo_platform_device_add: platform_device_del(pdev); undo_malloc: @@ -135,10 +120,11 @@ undo_malloc: static void __exit scx200_gpio_cleanup(void) { - kfree(scx200_devices); - unregister_chrdev_region(MKDEV(major, 0), num_pins); + cdev_del(&scx200_gpio_cdev); + /* cdev_put(&scx200_gpio_cdev); */ + + unregister_chrdev_region(MKDEV(major, 0), MAX_PINS); platform_device_unregister(pdev); - /* kfree(pdev); */ } module_init(scx200_gpio_init); diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c index df782dd1098c..78b1b1a2732b 100644 --- a/drivers/char/synclink.c +++ b/drivers/char/synclink.c @@ -1344,8 +1344,12 @@ static void mgsl_isr_io_pin( struct mgsl_struct *info ) } else info->input_signal_events.dcd_down++; #ifdef CONFIG_HDLC - if (info->netcount) - hdlc_set_carrier(status & MISCSTATUS_DCD, info->netdev); + if (info->netcount) { + if (status & MISCSTATUS_DCD) + netif_carrier_on(info->netdev); + else + netif_carrier_off(info->netdev); + } #endif } if (status & MISCSTATUS_CTS_LATCHED) @@ -7844,8 +7848,10 @@ static int hdlcdev_open(struct net_device *dev) spin_lock_irqsave(&info->irq_spinlock, flags); usc_get_serial_signals(info); spin_unlock_irqrestore(&info->irq_spinlock, flags); - hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev); - + if (info->serial_signals & SerialSignal_DCD) + netif_carrier_on(dev); + else + netif_carrier_off(dev); return 0; } diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index e829594195c1..b2dbbdb1bf81 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c @@ -1497,8 +1497,10 @@ static int hdlcdev_open(struct net_device *dev) 
spin_lock_irqsave(&info->lock, flags); get_signals(info); spin_unlock_irqrestore(&info->lock, flags); - hdlc_set_carrier(info->signals & SerialSignal_DCD, dev); - + if (info->signals & SerialSignal_DCD) + netif_carrier_on(dev); + else + netif_carrier_off(dev); return 0; } @@ -1997,8 +1999,12 @@ static void dcd_change(struct slgt_info *info) info->input_signal_events.dcd_down++; } #ifdef CONFIG_HDLC - if (info->netcount) - hdlc_set_carrier(info->signals & SerialSignal_DCD, info->netdev); + if (info->netcount) { + if (info->signals & SerialSignal_DCD) + netif_carrier_on(info->netdev); + else + netif_carrier_off(info->netdev); + } #endif wake_up_interruptible(&info->status_event_wait_q); wake_up_interruptible(&info->event_wait_q); diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c index 1e443a233f51..66f3754fbbdf 100644 --- a/drivers/char/synclinkmp.c +++ b/drivers/char/synclinkmp.c @@ -1752,8 +1752,10 @@ static int hdlcdev_open(struct net_device *dev) spin_lock_irqsave(&info->lock, flags); get_signals(info); spin_unlock_irqrestore(&info->lock, flags); - hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev); - + if (info->serial_signals & SerialSignal_DCD) + netif_carrier_on(dev); + else + netif_carrier_off(dev); return 0; } @@ -2522,8 +2524,12 @@ void isr_io_pin( SLMP_INFO *info, u16 status ) } else info->input_signal_events.dcd_down++; #ifdef CONFIG_HDLC - if (info->netcount) - hdlc_set_carrier(status & SerialSignal_DCD, info->netdev); + if (info->netcount) { + if (status & SerialSignal_DCD) + netif_carrier_on(info->netdev); + else + netif_carrier_off(info->netdev); + } #endif } if (status & MISCSTATUS_CTS_LATCHED) diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 6889e7db3aff..a082a2e34252 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -1141,6 +1141,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend put_device(dev); clear_bit(chip->dev_num, dev_mask); kfree(chip); + kfree(devname); return NULL; } diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 3232b1932597..ee7ac6f43c65 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -424,6 +424,7 @@ static irqreturn_t tis_int_handler(int irq, void *dev_id, struct pt_regs *regs) iowrite32(interrupt, chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)); + ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)); return IRQ_HANDLED; } @@ -431,23 +432,19 @@ static int interrupts = 1; module_param(interrupts, bool, 0444); MODULE_PARM_DESC(interrupts, "Enable interrupts"); -static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev, - const struct pnp_device_id *pnp_id) +static int tpm_tis_init(struct device *dev, resource_size_t start, + resource_size_t len) { u32 vendor, intfcaps, intmask; int rc, i; - unsigned long start, len; struct tpm_chip *chip; - start = pnp_mem_start(pnp_dev, 0); - len = pnp_mem_len(pnp_dev, 0); - if (!start) start = TIS_MEM_BASE; if (!len) len = TIS_MEM_LEN; - if (!(chip = tpm_register_hardware(&pnp_dev->dev, &tpm_tis))) + if (!(chip = tpm_register_hardware(dev, &tpm_tis))) return -ENODEV; chip->vendor.iobase = ioremap(start, len); @@ -464,7 +461,7 @@ static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev, chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); - dev_info(&pnp_dev->dev, + dev_info(dev, "1.2 TPM (device-id 0x%X, rev-id %d)\n", vendor >> 16, ioread8(chip->vendor.iobase + 
TPM_RID(0))); @@ -472,26 +469,26 @@ static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev, intfcaps = ioread32(chip->vendor.iobase + TPM_INTF_CAPS(chip->vendor.locality)); - dev_dbg(&pnp_dev->dev, "TPM interface capabilities (0x%x):\n", + dev_dbg(dev, "TPM interface capabilities (0x%x):\n", intfcaps); if (intfcaps & TPM_INTF_BURST_COUNT_STATIC) - dev_dbg(&pnp_dev->dev, "\tBurst Count Static\n"); + dev_dbg(dev, "\tBurst Count Static\n"); if (intfcaps & TPM_INTF_CMD_READY_INT) - dev_dbg(&pnp_dev->dev, "\tCommand Ready Int Support\n"); + dev_dbg(dev, "\tCommand Ready Int Support\n"); if (intfcaps & TPM_INTF_INT_EDGE_FALLING) - dev_dbg(&pnp_dev->dev, "\tInterrupt Edge Falling\n"); + dev_dbg(dev, "\tInterrupt Edge Falling\n"); if (intfcaps & TPM_INTF_INT_EDGE_RISING) - dev_dbg(&pnp_dev->dev, "\tInterrupt Edge Rising\n"); + dev_dbg(dev, "\tInterrupt Edge Rising\n"); if (intfcaps & TPM_INTF_INT_LEVEL_LOW) - dev_dbg(&pnp_dev->dev, "\tInterrupt Level Low\n"); + dev_dbg(dev, "\tInterrupt Level Low\n"); if (intfcaps & TPM_INTF_INT_LEVEL_HIGH) - dev_dbg(&pnp_dev->dev, "\tInterrupt Level High\n"); + dev_dbg(dev, "\tInterrupt Level High\n"); if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT) - dev_dbg(&pnp_dev->dev, "\tLocality Change Int Support\n"); + dev_dbg(dev, "\tLocality Change Int Support\n"); if (intfcaps & TPM_INTF_STS_VALID_INT) - dev_dbg(&pnp_dev->dev, "\tSts Valid Int Support\n"); + dev_dbg(dev, "\tSts Valid Int Support\n"); if (intfcaps & TPM_INTF_DATA_AVAIL_INT) - dev_dbg(&pnp_dev->dev, "\tData Avail Int Support\n"); + dev_dbg(dev, "\tData Avail Int Support\n"); if (request_locality(chip, 0) != 0) { rc = -ENODEV; @@ -594,6 +591,16 @@ out_err: return rc; } +static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev, + const struct pnp_device_id *pnp_id) +{ + resource_size_t start, len; + start = pnp_mem_start(pnp_dev, 0); + len = pnp_mem_len(pnp_dev, 0); + + return tpm_tis_init(&pnp_dev->dev, start, len); +} + static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg) { return tpm_pm_suspend(&dev->dev, msg); @@ -628,8 +635,36 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id, sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444); MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); +static struct device_driver tis_drv = { + .name = "tpm_tis", + .bus = &platform_bus_type, + .owner = THIS_MODULE, + .suspend = tpm_pm_suspend, + .resume = tpm_pm_resume, +}; + +static struct platform_device *pdev; + +static int force; +module_param(force, bool, 0444); +MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry"); static int __init init_tis(void) { + int rc; + + if (force) { + rc = driver_register(&tis_drv); + if (rc < 0) + return rc; + if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0))) + return PTR_ERR(pdev); + if((rc=tpm_tis_init(&pdev->dev, 0, 0)) != 0) { + platform_device_unregister(pdev); + driver_unregister(&tis_drv); + } + return rc; + } + return pnp_register_driver(&tis_pnp_driver); } @@ -654,7 +689,11 @@ static void __exit cleanup_tis(void) tpm_remove_hardware(chip->dev); } spin_unlock(&tis_lock); - pnp_unregister_driver(&tis_pnp_driver); + if (force) { + platform_device_unregister(pdev); + driver_unregister(&tis_drv); + } else + pnp_unregister_driver(&tis_pnp_driver); } module_init(init_tis); diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 8d328186f774..bc1088d9b379 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -364,10 +364,12 @@ static ssize_t 
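[Editorial aside, sketch only] The init_tis() hunk above adds a "force" module parameter that skips PnP/ACPI enumeration, registers a bare platform device plus a matching driver, and probes the chip at the default address. A simplified version of that fallback path, with hypothetical names:

        #include <linux/platform_device.h>
        #include <linux/err.h>

        static struct platform_device *example_pdev;

        static int __init example_force_init(struct device_driver *drv)
        {
                int rc = driver_register(drv);
                if (rc < 0)
                        return rc;

                example_pdev = platform_device_register_simple("example", -1,
                                                               NULL, 0);
                if (IS_ERR(example_pdev)) {
                        driver_unregister(drv);         /* unwind on failure */
                        return PTR_ERR(example_pdev);
                }
                return 0;
        }

Unlike the hunk itself, the sketch also unregisters the driver when platform_device_register_simple() fails; that extra unwind is an editorial choice, not something the patch does.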
store_##file_name \ if (ret != 1) \ return -EINVAL; \ \ + lock_cpu_hotplug(); \ mutex_lock(&policy->lock); \ ret = __cpufreq_set_policy(policy, &new_policy); \ policy->user_policy.object = policy->object; \ mutex_unlock(&policy->lock); \ + unlock_cpu_hotplug(); \ \ return ret ? ret : count; \ } @@ -1197,20 +1199,18 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier); *********************************************************************/ +/* Must be called with lock_cpu_hotplug held */ int __cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { int retval = -EINVAL; - lock_cpu_hotplug(); dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, target_freq, relation); if (cpu_online(policy->cpu) && cpufreq_driver->target) retval = cpufreq_driver->target(policy, target_freq, relation); - unlock_cpu_hotplug(); - return retval; } EXPORT_SYMBOL_GPL(__cpufreq_driver_target); @@ -1225,17 +1225,23 @@ int cpufreq_driver_target(struct cpufreq_policy *policy, if (!policy) return -EINVAL; + lock_cpu_hotplug(); mutex_lock(&policy->lock); ret = __cpufreq_driver_target(policy, target_freq, relation); mutex_unlock(&policy->lock); + unlock_cpu_hotplug(); cpufreq_cpu_put(policy); return ret; } EXPORT_SYMBOL_GPL(cpufreq_driver_target); +/* + * Locking: Must be called with the lock_cpu_hotplug() lock held + * when "event" is CPUFREQ_GOV_LIMITS + */ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) { @@ -1257,24 +1263,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) } -int cpufreq_governor(unsigned int cpu, unsigned int event) -{ - int ret = 0; - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - - if (!policy) - return -EINVAL; - - mutex_lock(&policy->lock); - ret = __cpufreq_governor(policy, event); - mutex_unlock(&policy->lock); - - cpufreq_cpu_put(policy); - return ret; -} -EXPORT_SYMBOL_GPL(cpufreq_governor); - - int cpufreq_register_governor(struct cpufreq_governor *governor) { struct cpufreq_governor *t; @@ -1342,6 +1330,9 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) EXPORT_SYMBOL(cpufreq_get_policy); +/* + * Locking: Must be called with the lock_cpu_hotplug() lock held + */ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy) { int ret = 0; @@ -1436,6 +1427,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy) if (!data) return -EINVAL; + lock_cpu_hotplug(); + /* lock this CPU */ mutex_lock(&data->lock); @@ -1446,6 +1439,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy) data->user_policy.governor = data->governor; mutex_unlock(&data->lock); + + unlock_cpu_hotplug(); cpufreq_cpu_put(data); return ret; @@ -1469,6 +1464,7 @@ int cpufreq_update_policy(unsigned int cpu) if (!data) return -ENODEV; + lock_cpu_hotplug(); mutex_lock(&data->lock); dprintk("updating policy for CPU %u\n", cpu); @@ -1494,7 +1490,7 @@ int cpufreq_update_policy(unsigned int cpu) ret = __cpufreq_set_policy(data, &policy); mutex_unlock(&data->lock); - + unlock_cpu_hotplug(); cpufreq_cpu_put(data); return ret; } diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index b3ebc8f01975..c4c578defabf 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -525,7 +525,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, break; case CPUFREQ_GOV_LIMITS: - lock_cpu_hotplug(); mutex_lock(&dbs_mutex); if (policy->max < 
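[Editorial aside, hedged sketch] The cpufreq hunks above move lock_cpu_hotplug() out of __cpufreq_driver_target() and into its callers, so the hotplug lock is always taken before the per-policy mutex. The resulting calling convention looks roughly like this:

        #include <linux/cpu.h>
        #include <linux/cpufreq.h>
        #include <linux/mutex.h>

        static int example_set_target(struct cpufreq_policy *policy,
                                      unsigned int target_freq)
        {
                int ret;

                lock_cpu_hotplug();             /* outer lock */
                mutex_lock(&policy->lock);      /* inner lock */
                ret = __cpufreq_driver_target(policy, target_freq,
                                              CPUFREQ_RELATION_L);
                mutex_unlock(&policy->lock);
                unlock_cpu_hotplug();

                return ret;
        }

Keeping the order fixed (hotplug lock first, policy lock second) at every entry point is what lets the governor hunks below drop their own lock_cpu_hotplug() calls around CPUFREQ_GOV_LIMITS.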
this_dbs_info->cur_policy->cur) __cpufreq_driver_target( @@ -536,7 +535,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, this_dbs_info->cur_policy, policy->min, CPUFREQ_RELATION_L); mutex_unlock(&dbs_mutex); - unlock_cpu_hotplug(); break; } return 0; diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 87299924e735..52cf1f021825 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -239,6 +239,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) total_ticks = (unsigned int) cputime64_sub(cur_jiffies, this_dbs_info->prev_cpu_wall); this_dbs_info->prev_cpu_wall = cur_jiffies; + if (!total_ticks) + return; /* * Every sampling_rate, we check, if current idle time is less * than 20% (default), then we try to increase frequency @@ -304,7 +306,12 @@ static void do_dbs_timer(void *data) unsigned int cpu = smp_processor_id(); struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); + if (!dbs_info->enable) + return; + + lock_cpu_hotplug(); dbs_check_cpu(dbs_info); + unlock_cpu_hotplug(); queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); } @@ -319,11 +326,11 @@ static inline void dbs_timer_init(unsigned int cpu) return; } -static inline void dbs_timer_exit(unsigned int cpu) +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) { - struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); - - cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work); + dbs_info->enable = 0; + cancel_delayed_work(&dbs_info->work); + flush_workqueue(kondemand_wq); } static int cpufreq_governor_dbs(struct cpufreq_policy *policy, @@ -396,8 +403,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, case CPUFREQ_GOV_STOP: mutex_lock(&dbs_mutex); - dbs_timer_exit(policy->cpu); - this_dbs_info->enable = 0; + dbs_timer_exit(this_dbs_info); sysfs_remove_group(&policy->kobj, &dbs_attr_group); dbs_enable--; if (dbs_enable == 0) @@ -408,7 +414,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, break; case CPUFREQ_GOV_LIMITS: - lock_cpu_hotplug(); mutex_lock(&dbs_mutex); if (policy->max < this_dbs_info->cur_policy->cur) __cpufreq_driver_target(this_dbs_info->cur_policy, @@ -419,7 +424,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, policy->min, CPUFREQ_RELATION_L); mutex_unlock(&dbs_mutex); - unlock_cpu_hotplug(); break; } return 0; diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index 44ae5e5b94cf..a06c204589cd 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c @@ -18,6 +18,7 @@ #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/cpufreq.h> +#include <linux/cpu.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/sysfs.h> @@ -70,6 +71,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy) dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); + lock_cpu_hotplug(); mutex_lock(&userspace_mutex); if (!cpu_is_managed[policy->cpu]) goto err; @@ -92,6 +94,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy) err: mutex_unlock(&userspace_mutex); + unlock_cpu_hotplug(); return ret; } diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 17ee684144f9..b643d71298a9 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -59,6 +59,9 @@ #define AES_EXTENDED_KEY_SIZE 64 /* in 
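[Editorial aside, illustrative] The ondemand hunks above stop a self-rearming delayed work item in three steps: clear an enable flag that the work function checks on entry, cancel any pending instance, and flush the workqueue so a run that is already executing finishes before teardown continues. In outline, with hypothetical names and the work API of this kernel era:

        #include <linux/workqueue.h>

        struct example_dbs_info {
                int enable;                     /* cleared before teardown */
                struct work_struct work;        /* re-queues itself while enabled */
        };

        static void example_timer_exit(struct example_dbs_info *info,
                                       struct workqueue_struct *wq)
        {
                info->enable = 0;               /* new runs bail out immediately */
                cancel_delayed_work(&info->work);
                flush_workqueue(wq);            /* wait out an in-flight run */
        }

The matching check at the top of the work function (return early when enable is zero) is what guarantees that a run which slipped past the cancel does not queue itself again.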
uint32_t units */ #define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t)) +/* Whenever making any changes to the following + * structure *make sure* you keep E, d_data + * and cword aligned on 16 Bytes boundaries!!! */ struct aes_ctx { struct { struct cword encrypt; @@ -66,8 +69,10 @@ struct aes_ctx { } cword; u32 *D; int key_length; - u32 E[AES_EXTENDED_KEY_SIZE]; - u32 d_data[AES_EXTENDED_KEY_SIZE]; + u32 E[AES_EXTENDED_KEY_SIZE] + __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); + u32 d_data[AES_EXTENDED_KEY_SIZE] + __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); }; /* ====== Key management routines ====== */ diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c index 78bf46d917b7..dbd4d6c3698e 100644 --- a/drivers/dma/ioatdma.c +++ b/drivers/dma/ioatdma.c @@ -828,7 +828,7 @@ static int __init ioat_init_module(void) /* if forced, worst case is that rmmod hangs */ __unsafe(THIS_MODULE); - return pci_module_init(&ioat_pci_drv); + return pci_register_driver(&ioat_pci_drv); } module_init(ioat_init_module); diff --git a/drivers/fc4/fc.c b/drivers/fc4/fc.c index 66d03f242d3c..1a159e8843ca 100644 --- a/drivers/fc4/fc.c +++ b/drivers/fc4/fc.c @@ -429,7 +429,7 @@ static inline void fcp_scsi_receive(fc_channel *fc, int token, int status, fc_hd if (fcmd->data) { if (SCpnt->use_sg) - dma_unmap_sg(fc->dev, (struct scatterlist *)SCpnt->buffer, + dma_unmap_sg(fc->dev, (struct scatterlist *)SCpnt->request_buffer, SCpnt->use_sg, SCpnt->sc_data_direction); else @@ -810,7 +810,7 @@ static int fcp_scsi_queue_it(fc_channel *fc, Scsi_Cmnd *SCpnt, fcp_cmnd *fcmd, i SCpnt->request_bufflen, SCpnt->sc_data_direction); } else { - struct scatterlist *sg = (struct scatterlist *)SCpnt->buffer; + struct scatterlist *sg = (struct scatterlist *)SCpnt->request_buffer; int nents; FCD(("XXX: Use_sg %d %d\n", SCpnt->use_sg, sg->length)) diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index f712e4cfd9dc..7cf3eb023521 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -776,7 +776,7 @@ static void update_ordered(ide_drive_t *drive) * not available so we don't need to recheck that. 
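[Editorial aside, sketch only] The padlock-aes hunk aligns the expanded-key arrays E and d_data on 16-byte boundaries because the PadLock xcrypt instructions expect the key schedule and control word at 16-byte-aligned addresses. The member attribute only fixes the offset within the structure, so the structure itself must also sit at a suitably aligned address for the guarantee to hold. A minimal illustration with hypothetical names:

        #include <linux/types.h>

        #define EXAMPLE_ALIGNMENT 16            /* stands in for PADLOCK_ALIGNMENT */

        struct example_aes_ctx {
                u32 expanded_key[64]
                        __attribute__((__aligned__(EXAMPLE_ALIGNMENT)));
        };

        /* The aligned member raises the alignment of the whole struct, so a
         * static instance is placed on a 16-byte boundary automatically. */
        static struct example_aes_ctx example_ctx;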
*/ capacity = idedisk_capacity(drive); - barrier = ide_id_has_flush_cache(id) && + barrier = ide_id_has_flush_cache(id) && !drive->noflush && (drive->addressing == 0 || capacity <= (1ULL << 28) || ide_id_has_flush_cache_ext(id)); diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index 98918fb6b2ce..7c3a13e1cf64 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c @@ -750,7 +750,7 @@ void ide_dma_verbose(ide_drive_t *drive) goto bug_dma_off; printk(", DMA"); } else if (id->field_valid & 1) { - printk(", BUG"); + goto bug_dma_off; } return; bug_dma_off: diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 05fbd9298db7..defd4b4bd374 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -1539,7 +1539,7 @@ static int __init ide_setup(char *s) const char *hd_words[] = { "none", "noprobe", "nowerr", "cdrom", "serialize", "autotune", "noautotune", "minus8", "swapdata", "bswap", - "minus11", "remap", "remap63", "scsi", NULL }; + "noflush", "remap", "remap63", "scsi", NULL }; unit = s[2] - 'a'; hw = unit / MAX_DRIVES; unit = unit % MAX_DRIVES; @@ -1578,6 +1578,9 @@ static int __init ide_setup(char *s) case -10: /* "bswap" */ drive->bswap = 1; goto done; + case -11: /* noflush */ + drive->noflush = 1; + goto done; case -12: /* "remap" */ drive->remap_0_to_1 = 1; goto done; diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c index 3cb04424d351..e9bad185968a 100644 --- a/drivers/ide/pci/it821x.c +++ b/drivers/ide/pci/it821x.c @@ -498,9 +498,14 @@ static int config_chipset_for_dma (ide_drive_t *drive) { u8 speed = ide_dma_speed(drive, it821x_ratemask(drive)); - config_it821x_chipset_for_pio(drive, !speed); - it821x_tune_chipset(drive, speed); - return ide_dma_enable(drive); + if (speed) { + config_it821x_chipset_for_pio(drive, 0); + it821x_tune_chipset(drive, speed); + + return ide_dma_enable(drive); + } + + return 0; } /** diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 3f6705f3083a..f85c97f7500a 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -701,7 +701,7 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv) } } -void ib_destroy_cm_id(struct ib_cm_id *cm_id) +static void cm_destroy_id(struct ib_cm_id *cm_id, int err) { struct cm_id_private *cm_id_priv; struct cm_work *work; @@ -735,12 +735,22 @@ retest: sizeof cm_id_priv->av.port->cm_dev->ca_guid, NULL, 0); break; + case IB_CM_REQ_RCVD: + if (err == -ENOMEM) { + /* Do not reject to allow future retries. 
*/ + cm_reset_to_idle(cm_id_priv); + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + } else { + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, + NULL, 0, NULL, 0); + } + break; case IB_CM_MRA_REQ_RCVD: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); /* Fall through */ - case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: @@ -775,6 +785,11 @@ retest: kfree(cm_id_priv->private_data); kfree(cm_id_priv); } + +void ib_destroy_cm_id(struct ib_cm_id *cm_id) +{ + cm_destroy_id(cm_id, 0); +} EXPORT_SYMBOL(ib_destroy_cm_id); int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask, @@ -1163,7 +1178,7 @@ static void cm_process_work(struct cm_id_private *cm_id_priv, } cm_deref_id(cm_id_priv); if (ret) - ib_destroy_cm_id(&cm_id_priv->id); + cm_destroy_id(&cm_id_priv->id, ret); } static void cm_format_mra(struct cm_mra_msg *mra_msg, diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 863f64befc7c..d6f99d5720fc 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -262,14 +262,14 @@ static void cma_detach_from_dev(struct rdma_id_private *id_priv) static int cma_acquire_ib_dev(struct rdma_id_private *id_priv) { struct cma_device *cma_dev; - union ib_gid *gid; + union ib_gid gid; int ret = -ENODEV; - gid = ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr); + ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid), mutex_lock(&lock); list_for_each_entry(cma_dev, &dev_list, list) { - ret = ib_find_cached_gid(cma_dev->device, gid, + ret = ib_find_cached_gid(cma_dev->device, &gid, &id_priv->id.port_num, NULL); if (!ret) { cma_attach_to_dev(id_priv, cma_dev); @@ -812,6 +812,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) cma_modify_qp_err(&id_priv->id); status = ib_event->param.rej_rcvd.reason; event = RDMA_CM_EVENT_REJECTED; + private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; break; default: printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d", @@ -1134,8 +1135,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, struct ib_sa_path_rec path_rec; memset(&path_rec, 0, sizeof path_rec); - path_rec.sgid = *ib_addr_get_sgid(addr); - path_rec.dgid = *ib_addr_get_dgid(addr); + ib_addr_get_sgid(addr, &path_rec.sgid); + ib_addr_get_dgid(addr, &path_rec.dgid); path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr)); path_rec.numb_path = 1; @@ -1263,7 +1264,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv) { struct cma_device *cma_dev; struct ib_port_attr port_attr; - union ib_gid *gid; + union ib_gid gid; u16 pkey; int ret; u8 p; @@ -1284,8 +1285,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv) } port_found: - gid = ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr); - ret = ib_get_cached_gid(cma_dev->device, p, 0, gid); + ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid); if (ret) goto out; @@ -1293,6 +1293,7 @@ port_found: if (ret) goto out; + ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); id_priv->id.port_num = p; cma_attach_to_dev(id_priv, cma_dev); @@ -1339,6 +1340,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv) { struct cma_work *work; struct sockaddr_in *src_in, *dst_in; + union ib_gid gid; int ret; work = kzalloc(sizeof *work, GFP_KERNEL); @@ -1351,8 +1353,8 @@ static int 
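[Editorial aside, hedged sketch] The cma.c hunks above replace the accessor that returned a pointer into the device address with one that copies the GID into a caller-provided union ib_gid, so callers work on a private copy instead of aliasing rdma_dev_addr internals. The converted call style, as used by cma_query_ib_route() above, is roughly:

        #include <rdma/ib_addr.h>
        #include <rdma/ib_sa.h>

        static void example_fill_gids(struct rdma_dev_addr *addr,
                                      struct ib_sa_path_rec *path_rec)
        {
                /* Copy out rather than holding a pointer into *addr. */
                ib_addr_get_sgid(addr, &path_rec->sgid);
                ib_addr_get_dgid(addr, &path_rec->dgid);
        }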
cma_resolve_loopback(struct rdma_id_private *id_priv) goto err; } - ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, - ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr)); + ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); + ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) { src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr; diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c index 615fe9cc6c56..86a3b2d401db 100644 --- a/drivers/infiniband/core/fmr_pool.c +++ b/drivers/infiniband/core/fmr_pool.c @@ -426,7 +426,7 @@ EXPORT_SYMBOL(ib_flush_fmr_pool); struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, u64 *page_list, int list_len, - u64 *io_virtual_address) + u64 io_virtual_address) { struct ib_fmr_pool *pool = pool_handle; struct ib_pool_fmr *fmr; @@ -440,7 +440,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, fmr = ib_fmr_cache_lookup(pool, page_list, list_len, - *io_virtual_address); + io_virtual_address); if (fmr) { /* found in cache */ ++fmr->ref_count; @@ -464,7 +464,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, spin_unlock_irqrestore(&pool->pool_lock, flags); result = ib_map_phys_fmr(fmr->fmr, page_list, list_len, - *io_virtual_address); + io_virtual_address); if (result) { spin_lock_irqsave(&pool->pool_lock, flags); @@ -481,7 +481,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, fmr->ref_count = 1; if (pool->cache_bucket) { - fmr->io_virtual_address = *io_virtual_address; + fmr->io_virtual_address = io_virtual_address; fmr->page_list_len = list_len; memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list)); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 5ed4dab52a6f..1c3cfbbe6a97 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -167,6 +167,15 @@ static int is_vendor_method_in_use( return 0; } +int ib_response_mad(struct ib_mad *mad) +{ + return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) || + (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) || + ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) && + (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP))); +} +EXPORT_SYMBOL(ib_response_mad); + /* * ib_register_mad_agent - Register to send/receive MADs */ @@ -570,13 +579,6 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent) } EXPORT_SYMBOL(ib_unregister_mad_agent); -static inline int response_mad(struct ib_mad *mad) -{ - /* Trap represses are responses although response bit is reset */ - return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) || - (mad->mad_hdr.method & IB_MGMT_METHOD_RESP)); -} - static void dequeue_mad(struct ib_mad_list_head *mad_list) { struct ib_mad_queue *mad_queue; @@ -723,7 +725,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, switch (ret) { case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: - if (response_mad(&mad_priv->mad.mad) && + if (ib_response_mad(&mad_priv->mad.mad) && mad_agent_priv->agent.recv_handler) { local->mad_priv = mad_priv; local->recv_mad_agent = mad_agent_priv; @@ -1551,7 +1553,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv, unsigned long flags; spin_lock_irqsave(&port_priv->reg_lock, flags); - if (response_mad(mad)) { + if (ib_response_mad(mad)) { u32 hi_tid; struct ib_mad_agent_private *entry; @@ -1799,7 +1801,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private 
*mad_agent_priv, } /* Complete corresponding request */ - if (response_mad(mad_recv_wc->recv_buf.mad)) { + if (ib_response_mad(mad_recv_wc->recv_buf.mad)) { spin_lock_irqsave(&mad_agent_priv->lock, flags); mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); if (!mad_send_wr) { diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index e911c99ff843..aeda484ffd82 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -488,13 +488,13 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent) spin_unlock_irqrestore(&tid_lock, flags); } -static int send_mad(struct ib_sa_query *query, int timeout_ms) +static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask) { unsigned long flags; int ret, id; retry: - if (!idr_pre_get(&query_idr, GFP_ATOMIC)) + if (!idr_pre_get(&query_idr, gfp_mask)) return -ENOMEM; spin_lock_irqsave(&idr_lock, flags); ret = idr_get_new(&query_idr, query, &id); @@ -630,7 +630,7 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, *sa_query = &query->sa_query; - ret = send_mad(&query->sa_query, timeout_ms); + ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); if (ret < 0) goto err2; @@ -752,7 +752,7 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method, *sa_query = &query->sa_query; - ret = send_mad(&query->sa_query, timeout_ms); + ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); if (ret < 0) goto err2; @@ -844,7 +844,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num, *sa_query = &query->sa_query; - ret = send_mad(&query->sa_query, timeout_ms); + ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); if (ret < 0) goto err2; diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index afe70a549c2f..1273f8807e84 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -112,8 +112,10 @@ struct ib_umad_device { struct ib_umad_file { struct ib_umad_port *port; struct list_head recv_list; + struct list_head send_list; struct list_head port_list; spinlock_t recv_lock; + spinlock_t send_lock; wait_queue_head_t recv_wait; struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS]; int agents_dead; @@ -177,12 +179,21 @@ static int queue_packet(struct ib_umad_file *file, return ret; } +static void dequeue_send(struct ib_umad_file *file, + struct ib_umad_packet *packet) + { + spin_lock_irq(&file->send_lock); + list_del(&packet->list); + spin_unlock_irq(&file->send_lock); + } + static void send_handler(struct ib_mad_agent *agent, struct ib_mad_send_wc *send_wc) { struct ib_umad_file *file = agent->context; struct ib_umad_packet *packet = send_wc->send_buf->context[0]; + dequeue_send(file, packet); ib_destroy_ah(packet->msg->ah); ib_free_send_mad(packet->msg); @@ -370,6 +381,51 @@ static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf) return 0; } +static int same_destination(struct ib_user_mad_hdr *hdr1, + struct ib_user_mad_hdr *hdr2) +{ + if (!hdr1->grh_present && !hdr2->grh_present) + return (hdr1->lid == hdr2->lid); + + if (hdr1->grh_present && hdr2->grh_present) + return !memcmp(hdr1->gid, hdr2->gid, 16); + + return 0; +} + +static int is_duplicate(struct ib_umad_file *file, + struct ib_umad_packet *packet) +{ + struct ib_umad_packet *sent_packet; + struct ib_mad_hdr *sent_hdr, *hdr; + + hdr = (struct ib_mad_hdr *) packet->mad.data; + list_for_each_entry(sent_packet, &file->send_list, list) { + sent_hdr = (struct ib_mad_hdr *) 
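[Editorial aside, illustrative] The sa_query hunks above thread the caller's gfp_mask down into send_mad() so idr_pre_get() no longer hardcodes GFP_ATOMIC; a caller that is allowed to sleep gets the more reliable allocation. A small sketch of the idr_pre_get()/idr_get_new() retry loop with a caller-supplied mask (the helper name is hypothetical, and the idr API shown is the one this patch era uses):

        #include <linux/idr.h>
        #include <linux/spinlock.h>
        #include <linux/errno.h>

        static int example_alloc_id(struct idr *idr, spinlock_t *lock,
                                    void *ptr, gfp_t gfp_mask, int *id)
        {
                int ret;

        retry:
                if (!idr_pre_get(idr, gfp_mask))        /* may sleep if allowed */
                        return -ENOMEM;

                spin_lock_irq(lock);
                ret = idr_get_new(idr, ptr, id);
                spin_unlock_irq(lock);

                if (ret == -EAGAIN)
                        goto retry;
                return ret;
        }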
sent_packet->mad.data; + + if ((hdr->tid != sent_hdr->tid) || + (hdr->mgmt_class != sent_hdr->mgmt_class)) + continue; + + /* + * No need to be overly clever here. If two new operations have + * the same TID, reject the second as a duplicate. This is more + * restrictive than required by the spec. + */ + if (!ib_response_mad((struct ib_mad *) hdr)) { + if (!ib_response_mad((struct ib_mad *) sent_hdr)) + return 1; + continue; + } else if (!ib_response_mad((struct ib_mad *) sent_hdr)) + continue; + + if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr)) + return 1; + } + + return 0; +} + static ssize_t ib_umad_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { @@ -379,7 +435,6 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, struct ib_ah_attr ah_attr; struct ib_ah *ah; struct ib_rmpp_mad *rmpp_mad; - u8 method; __be64 *tid; int ret, data_len, hdr_len, copy_offset, rmpp_active; @@ -473,28 +528,36 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, } /* - * If userspace is generating a request that will generate a - * response, we need to make sure the high-order part of the - * transaction ID matches the agent being used to send the - * MAD. + * Set the high-order part of the transaction ID to make MADs from + * different agents unique, and allow routing responses back to the + * original requestor. */ - method = ((struct ib_mad_hdr *) packet->msg->mad)->method; - - if (!(method & IB_MGMT_METHOD_RESP) && - method != IB_MGMT_METHOD_TRAP_REPRESS && - method != IB_MGMT_METHOD_SEND) { + if (!ib_response_mad(packet->msg->mad)) { tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid; *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | (be64_to_cpup(tid) & 0xffffffff)); + rmpp_mad->mad_hdr.tid = *tid; + } + + spin_lock_irq(&file->send_lock); + ret = is_duplicate(file, packet); + if (!ret) + list_add_tail(&packet->list, &file->send_list); + spin_unlock_irq(&file->send_lock); + if (ret) { + ret = -EINVAL; + goto err_msg; } ret = ib_post_send_mad(packet->msg, NULL); if (ret) - goto err_msg; + goto err_send; up_read(&file->port->mutex); return count; +err_send: + dequeue_send(file, packet); err_msg: ib_free_send_mad(packet->msg); err_ah: @@ -657,7 +720,9 @@ static int ib_umad_open(struct inode *inode, struct file *filp) } spin_lock_init(&file->recv_lock); + spin_lock_init(&file->send_lock); INIT_LIST_HEAD(&file->recv_list); + INIT_LIST_HEAD(&file->send_list); init_waitqueue_head(&file->recv_wait); file->port = port; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index bdf5d5098190..30923eb68ec7 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -42,6 +42,13 @@ #include "uverbs.h" +static struct lock_class_key pd_lock_key; +static struct lock_class_key mr_lock_key; +static struct lock_class_key cq_lock_key; +static struct lock_class_key qp_lock_key; +static struct lock_class_key ah_lock_key; +static struct lock_class_key srq_lock_key; + #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ do { \ (udata)->inbuf = (void __user *) (ibuf); \ @@ -76,12 +83,13 @@ */ static void init_uobj(struct ib_uobject *uobj, u64 user_handle, - struct ib_ucontext *context) + struct ib_ucontext *context, struct lock_class_key *key) { uobj->user_handle = user_handle; uobj->context = context; kref_init(&uobj->ref); init_rwsem(&uobj->mutex); + lockdep_set_class(&uobj->mutex, key); uobj->live = 0; } @@ -470,7 +478,7 @@ ssize_t ib_uverbs_alloc_pd(struct 
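[Editorial aside, sketch only] The uverbs_cmd.c hunk above gives every user-object type its own static struct lock_class_key and applies it with lockdep_set_class() right after the rwsem is initialised, so lockdep treats, say, a PD mutex nested inside a QP mutex as two distinct lock classes instead of reporting false recursion. The idiom in isolation, with hypothetical names:

        #include <linux/lockdep.h>
        #include <linux/rwsem.h>

        static struct lock_class_key example_pd_key;    /* one key per object type */
        static struct lock_class_key example_qp_key;

        struct example_uobj {
                struct rw_semaphore mutex;
        };

        static void example_init_uobj(struct example_uobj *uobj,
                                      struct lock_class_key *key)
        {
                init_rwsem(&uobj->mutex);
                lockdep_set_class(&uobj->mutex, key);   /* reclassify for lockdep */
        }

A PD would then be initialised with &example_pd_key and a QP with &example_qp_key, mirroring the per-type keys declared at the top of the hunk.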
ib_uverbs_file *file, if (!uobj) return -ENOMEM; - init_uobj(uobj, 0, file->ucontext); + init_uobj(uobj, 0, file->ucontext, &pd_lock_key); down_write(&uobj->mutex); pd = file->device->ib_dev->alloc_pd(file->device->ib_dev, @@ -591,7 +599,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, if (!obj) return -ENOMEM; - init_uobj(&obj->uobject, 0, file->ucontext); + init_uobj(&obj->uobject, 0, file->ucontext, &mr_lock_key); down_write(&obj->uobject.mutex); /* @@ -770,7 +778,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, if (!obj) return -ENOMEM; - init_uobj(&obj->uobject, cmd.user_handle, file->ucontext); + init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key); down_write(&obj->uobject.mutex); if (cmd.comp_channel >= 0) { @@ -1051,13 +1059,14 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, if (!obj) return -ENOMEM; - init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext); + init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key); down_write(&obj->uevent.uobject.mutex); + srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL; pd = idr_read_pd(cmd.pd_handle, file->ucontext); scq = idr_read_cq(cmd.send_cq_handle, file->ucontext); - rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext); - srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL; + rcq = cmd.recv_cq_handle == cmd.send_cq_handle ? + scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext); if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) { ret = -EINVAL; @@ -1125,7 +1134,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, put_pd_read(pd); put_cq_read(scq); - put_cq_read(rcq); + if (rcq != scq) + put_cq_read(rcq); if (srq) put_srq_read(srq); @@ -1150,7 +1160,7 @@ err_put: put_pd_read(pd); if (scq) put_cq_read(scq); - if (rcq) + if (rcq && rcq != scq) put_cq_read(rcq); if (srq) put_srq_read(srq); @@ -1751,7 +1761,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, if (!uobj) return -ENOMEM; - init_uobj(uobj, cmd.user_handle, file->ucontext); + init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key); down_write(&uobj->mutex); pd = idr_read_pd(cmd.pd_handle, file->ucontext); @@ -1775,7 +1785,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, ah = ib_create_ah(pd, &attr); if (IS_ERR(ah)) { ret = PTR_ERR(ah); - goto err; + goto err_put; } ah->uobject = uobj; @@ -1811,6 +1821,9 @@ err_copy: err_destroy: ib_destroy_ah(ah); +err_put: + put_pd_read(pd); + err: put_uobj_write(uobj); return ret; @@ -1963,7 +1976,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, if (!obj) return -ENOMEM; - init_uobj(&obj->uobject, cmd.user_handle, file->ucontext); + init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key); down_write(&obj->uobject.mutex); pd = idr_read_pd(cmd.pd_handle, file->ucontext); @@ -1984,7 +1997,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, srq = pd->device->create_srq(pd, &attr, &udata); if (IS_ERR(srq)) { ret = PTR_ERR(srq); - goto err; + goto err_put; } srq->device = pd->device; @@ -2029,6 +2042,9 @@ err_copy: err_destroy: ib_destroy_srq(srq); +err_put: + put_pd_read(pd); + err: put_uobj_write(&obj->uobject); return ret; diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 823131d58b34..f98518d912b5 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c @@ -859,6 +859,38 @@ static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail, 
__ipath_layer_rcv_lid(dd, hdr); } +static void ipath_rcv_hdrerr(struct ipath_devdata *dd, + u32 eflags, + u32 l, + u32 etail, + u64 *rc) +{ + char emsg[128]; + struct ipath_message_header *hdr; + + get_rhf_errstring(eflags, emsg, sizeof emsg); + hdr = (struct ipath_message_header *)&rc[1]; + ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u " + "tlen=%x opcode=%x egridx=%x: %s\n", + eflags, l, + ipath_hdrget_rcv_type((__le32 *) rc), + ipath_hdrget_length_in_bytes((__le32 *) rc), + be32_to_cpu(hdr->bth[0]) >> 24, + etail, emsg); + + /* Count local link integrity errors. */ + if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) { + u8 n = (dd->ipath_ibcctrl >> + INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) & + INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK; + + if (++dd->ipath_lli_counter > n) { + dd->ipath_lli_counter = 0; + dd->ipath_lli_errors++; + } + } +} + /* * ipath_kreceive - receive a packet * @dd: the infinipath device @@ -875,7 +907,6 @@ void ipath_kreceive(struct ipath_devdata *dd) struct ipath_message_header *hdr; u32 eflags, i, etype, tlen, pkttot = 0, updegr=0, reloop=0; static u64 totcalls; /* stats, may eventually remove */ - char emsg[128]; if (!dd->ipath_hdrqtailptr) { ipath_dev_err(dd, @@ -938,26 +969,9 @@ reloop: "%x\n", etype); } - if (eflags & ~(INFINIPATH_RHF_H_TIDERR | - INFINIPATH_RHF_H_IHDRERR)) { - get_rhf_errstring(eflags, emsg, sizeof emsg); - ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u " - "tlen=%x opcode=%x egridx=%x: %s\n", - eflags, l, etype, tlen, bthbytes[0], - ipath_hdrget_index((__le32 *) rc), emsg); - /* Count local link integrity errors. */ - if (eflags & (INFINIPATH_RHF_H_ICRCERR | - INFINIPATH_RHF_H_VCRCERR)) { - u8 n = (dd->ipath_ibcctrl >> - INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) & - INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK; - - if (++dd->ipath_lli_counter > n) { - dd->ipath_lli_counter = 0; - dd->ipath_lli_errors++; - } - } - } else if (etype == RCVHQ_RCV_TYPE_NON_KD) { + if (unlikely(eflags)) + ipath_rcv_hdrerr(dd, eflags, l, etail, rc); + else if (etype == RCVHQ_RCV_TYPE_NON_KD) { int ret = __ipath_verbs_rcv(dd, rc + 1, ebuf, tlen); if (ret == -ENODEV) @@ -981,25 +995,7 @@ reloop: else if (etype == RCVHQ_RCV_TYPE_EXPECTED) ipath_dbg("Bug: Expected TID, opcode %x; ignored\n", be32_to_cpu(hdr->bth[0]) & 0xff); - else if (eflags & (INFINIPATH_RHF_H_TIDERR | - INFINIPATH_RHF_H_IHDRERR)) { - /* - * This is a type 3 packet, only the LRH is in the - * rcvhdrq, the rest of the header is in the eager - * buffer. - */ - u8 opcode; - if (ebuf) { - bthbytes = (u8 *) ebuf; - opcode = *bthbytes; - } - else - opcode = 0; - get_rhf_errstring(eflags, emsg, sizeof emsg); - ipath_dbg("Err %x (%s), opcode %x, egrbuf %x, " - "len %x\n", eflags, emsg, opcode, etail, - tlen); - } else { + else { /* * error packet, type of error unknown. * Probably type 3, but we don't know, so don't diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c index 46773c673a1a..a5ca279370aa 100644 --- a/drivers/infiniband/hw/ipath/ipath_keys.c +++ b/drivers/infiniband/hw/ipath/ipath_keys.c @@ -197,6 +197,21 @@ int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, size_t off; int ret; + /* + * We use RKEY == zero for physical addresses + * (see ipath_get_dma_mr). 
+ */ + if (rkey == 0) { + sge->mr = NULL; + sge->vaddr = phys_to_virt(vaddr); + sge->length = len; + sge->sge_length = len; + ss->sg_list = NULL; + ss->num_sge = 1; + ret = 1; + goto bail; + } + mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))]; if (unlikely(mr == NULL || mr->lkey != rkey)) { ret = 0; diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 56ac336dd1ec..d70a9b6b5239 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -191,10 +191,6 @@ void ipath_skip_sge(struct ipath_sge_state *ss, u32 length) { struct ipath_sge *sge = &ss->sge; - while (length > sge->sge_length) { - length -= sge->sge_length; - ss->sge = *ss->sg_list++; - } while (length) { u32 len = sge->length; @@ -627,6 +623,7 @@ static int ipath_query_device(struct ib_device *ibdev, props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | IB_DEVICE_SYS_IMAGE_GUID; + props->page_size_cap = PAGE_SIZE; props->vendor_id = ipath_layer_get_vendorid(dev->dd); props->vendor_part_id = ipath_layer_get_deviceid(dev->dd); props->hw_ver = ipath_layer_get_pcirev(dev->dd); diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c index b12aa03be251..e215041b2db9 100644 --- a/drivers/infiniband/hw/mthca/mthca_av.c +++ b/drivers/infiniband/hw/mthca/mthca_av.c @@ -303,9 +303,10 @@ int mthca_ah_query(struct ib_ah *ibah, struct ib_ah_attr *attr) memset(attr, 0, sizeof *attr); attr->dlid = be16_to_cpu(ah->av->dlid); attr->sl = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28; - attr->static_rate = ah->av->msg_sr & 0x7; - attr->src_path_bits = ah->av->g_slid & 0x7F; attr->port_num = be32_to_cpu(ah->av->port_pd) >> 24; + attr->static_rate = mthca_rate_to_ib(dev, ah->av->msg_sr & 0x7, + attr->port_num); + attr->src_path_bits = ah->av->g_slid & 0x7F; attr->ah_flags = mthca_ah_grh_present(ah) ? IB_AH_GRH : 0; if (attr->ah_flags) { diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index d0f7731802c9..deabc14b4ea4 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -778,11 +778,12 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status) ((dev->fw_ver & 0xffff0000ull) >> 16) | ((dev->fw_ver & 0x0000ffffull) << 16); + MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); + dev->cmd.max_cmds = 1 << lg; + mthca_dbg(dev, "FW version %012llx, max commands %d\n", (unsigned long long) dev->fw_ver, dev->cmd.max_cmds); - MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); - dev->cmd.max_cmds = 1 << lg; MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET); MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET); diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 490fc783bb0c..cd8b6721ac9c 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -222,9 +222,8 @@ static void *get_send_wqe(struct mthca_qp *qp, int n) (PAGE_SIZE - 1)); } -static void mthca_wq_init(struct mthca_wq *wq) +static void mthca_wq_reset(struct mthca_wq *wq) { - /* mthca_alloc_qp_common() initializes the locks */ wq->next_ind = 0; wq->last_comp = wq->max - 1; wq->head = 0; @@ -845,10 +844,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, qp->ibqp.srq ? 
to_msrq(qp->ibqp.srq) : NULL); - mthca_wq_init(&qp->sq); + mthca_wq_reset(&qp->sq); qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); - mthca_wq_init(&qp->rq); + mthca_wq_reset(&qp->rq); qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); if (mthca_is_memfree(dev)) { @@ -1112,9 +1111,9 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev, qp->atomic_rd_en = 0; qp->resp_depth = 0; qp->sq_policy = send_policy; - mthca_wq_init(&qp->sq); - mthca_wq_init(&qp->rq); - /* these are initialized separately so lockdep can tell them apart */ + mthca_wq_reset(&qp->sq); + mthca_wq_reset(&qp->rq); + spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c index fab417c5cf43..b60a9d79ae54 100644 --- a/drivers/infiniband/hw/mthca/mthca_srq.c +++ b/drivers/infiniband/hw/mthca/mthca_srq.c @@ -370,7 +370,8 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, return -EINVAL; if (attr_mask & IB_SRQ_LIMIT) { - if (attr->srq_limit > srq->max) + u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max; + if (attr->srq_limit > max_wr) return -EINVAL; mutex_lock(&srq->mutex); diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 3f89f5e19036..474aa214ab57 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -212,6 +212,7 @@ struct ipoib_path { struct ipoib_neigh { struct ipoib_ah *ah; + union ib_gid dgid; struct sk_buff_head queue; struct neighbour *neighbour; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 1c6ea1c682a5..cf71d2a5515c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -404,6 +404,8 @@ static void path_rec_completion(int status, list_for_each_entry(neigh, &path->neigh_list, list) { kref_get(&path->ah->ref); neigh->ah = path->ah; + memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw, + sizeof(union ib_gid)); while ((skb = __skb_dequeue(&neigh->queue))) __skb_queue_tail(&skqueue, skb); @@ -510,6 +512,8 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) if (path->ah) { kref_get(&path->ah->ref); neigh->ah = path->ah; + memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw, + sizeof(union ib_gid)); ipoib_send(dev, skb, path->ah, be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); @@ -633,6 +637,25 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) neigh = *to_ipoib_neigh(skb->dst->neighbour); if (likely(neigh->ah)) { + if (unlikely(memcmp(&neigh->dgid.raw, + skb->dst->neighbour->ha + 4, + sizeof(union ib_gid)))) { + spin_lock(&priv->lock); + /* + * It's safe to call ipoib_put_ah() inside + * priv->lock here, because we know that + * path->ah will always hold one more reference, + * so ipoib_put_ah() will never do more than + * decrement the ref count. 
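[Editorial aside, illustrative] The mthca_qp.c hunks above rename mthca_wq_init() to mthca_wq_reset() and keep spin_lock_init() out of it, so the work-queue locks are initialised exactly once in mthca_alloc_qp_common() and only the ring indices are reset when a QP goes back to the RESET state. Re-running spin_lock_init() on a lock that is already in use is at best confusing and can disturb lockdep's per-call-site classing. In outline, with hypothetical names:

        #include <linux/spinlock.h>

        struct example_wq {
                spinlock_t lock;        /* initialised once, at allocation time */
                unsigned   next_ind;
                unsigned   head;
                unsigned   tail;
        };

        static void example_wq_reset(struct example_wq *wq)
        {
                /* Reset only the producer/consumer state, never the lock. */
                wq->next_ind = 0;
                wq->head     = 0;
                wq->tail     = 0;
        }

        static void example_wq_alloc_init(struct example_wq *wq)
        {
                example_wq_reset(wq);
                spin_lock_init(&wq->lock);      /* one-time initialisation */
        }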
+ */ + ipoib_put_ah(neigh->ah); + list_del(&neigh->list); + ipoib_neigh_free(neigh); + spin_unlock(&priv->lock); + ipoib_path_lookup(skb, dev); + goto out; + } + ipoib_send(dev, skb, neigh->ah, be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); goto out; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index ab40488182b3..b5e6a7be603d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -264,6 +264,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, if (!ah) { ipoib_warn(priv, "ib_address_create failed\n"); } else { + spin_lock_irq(&priv->lock); + mcast->ah = ah; + spin_unlock_irq(&priv->lock); + ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT " AV %p, LID 0x%04x, SL %d\n", IPOIB_GID_ARG(mcast->mcmember.mgid), @@ -271,10 +275,6 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, be16_to_cpu(mcast->mcmember.mlid), mcast->mcmember.sl); } - - spin_lock_irq(&priv->lock); - mcast->ah = ah; - spin_unlock_irq(&priv->lock); } /* actually send any queued packets */ diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index ff117bbf81b4..72febf1f8ff8 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -594,7 +594,7 @@ int iser_reg_page_vec(struct iser_conn *ib_conn, mem = ib_fmr_pool_map_phys(ib_conn->fmr_pool, page_list, page_vec->length, - &io_addr); + io_addr); if (IS_ERR(mem)) { status = (int)PTR_ERR(mem); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 4e22afef7206..8f472e7113b4 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -615,9 +615,10 @@ static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat, (sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j; req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool, - dma_pages, page_cnt, &io_addr); + dma_pages, page_cnt, io_addr); if (IS_ERR(req->fmr)) { ret = PTR_ERR(req->fmr); + req->fmr = NULL; goto out; } diff --git a/drivers/leds/leds-net48xx.c b/drivers/leds/leds-net48xx.c index 35ee52f9b79e..713c4a8aa77d 100644 --- a/drivers/leds/leds-net48xx.c +++ b/drivers/leds/leds-net48xx.c @@ -18,6 +18,7 @@ #include <asm/io.h> #include <linux/scx200_gpio.h> +#define DRVNAME "net48xx-led" #define NET48XX_ERROR_LED_GPIO 20 static struct platform_device *pdev; @@ -66,13 +67,13 @@ static int net48xx_led_remove(struct platform_device *pdev) } static struct platform_driver net48xx_led_driver = { - .driver.owner = THIS_MODULE, .probe = net48xx_led_probe, .remove = net48xx_led_remove, .suspend = net48xx_led_suspend, .resume = net48xx_led_resume, .driver = { - .name = "net48xx-led", + .name = DRVNAME, + .owner = THIS_MODULE, }, }; @@ -89,7 +90,7 @@ static int __init net48xx_led_init(void) if (ret < 0) goto out; - pdev = platform_device_register_simple("net48xx-led", -1, NULL, 0); + pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); if (IS_ERR(pdev)) { ret = PTR_ERR(pdev); platform_driver_unregister(&net48xx_led_driver); diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig index bbc229852881..ea31d8470510 100644 --- a/drivers/message/fusion/Kconfig +++ b/drivers/message/fusion/Kconfig @@ -48,10 +48,8 @@ config FUSION_SAS List of supported controllers: LSISAS1064 - LSISAS1066 LSISAS1068 LSISAS1064E - LSISAS1066E LSISAS1068E config FUSION_MAX_SGE diff --git a/drivers/message/fusion/Makefile 
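[Editorial aside, sketch] The ib_srp.c hunk above clears req->fmr back to NULL when ib_fmr_pool_map_phys() returns an ERR_PTR, so later cleanup that tests req->fmr does not mistake an error cookie for a mapped resource. The general IS_ERR()/PTR_ERR() hygiene it follows, with hypothetical names:

        #include <linux/err.h>

        struct example_resource;                /* opaque, hypothetical */

        struct example_req {
                struct example_resource *res;   /* NULL when nothing is mapped */
        };

        static int example_map(struct example_req *req,
                               struct example_resource *(*map_fn)(void))
        {
                req->res = map_fn();
                if (IS_ERR(req->res)) {
                        int ret = PTR_ERR(req->res);

                        req->res = NULL;        /* never leave an ERR_PTR behind */
                        return ret;
                }
                return 0;
        }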
b/drivers/message/fusion/Makefile index b114236f4395..341691390e86 100644 --- a/drivers/message/fusion/Makefile +++ b/drivers/message/fusion/Makefile @@ -9,7 +9,6 @@ #EXTRA_CFLAGS += -DMPT_DEBUG_EXIT #EXTRA_CFLAGS += -DMPT_DEBUG_FAIL - # # driver/module specifics... # diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 43308df64623..29d0635cce1d 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -436,8 +436,6 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply) */ if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) { freereq = 0; - devtverboseprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p does not return Request frame\n", - ioc->name, pEvReply)); } else { devtverboseprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n", ioc->name, pEvReply)); @@ -678,19 +676,19 @@ int mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx) { MPT_ADAPTER *ioc; + const struct pci_device_id *id; - if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS) { + if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS) return -EINVAL; - } MptDeviceDriverHandlers[cb_idx] = dd_cbfunc; /* call per pci device probe entry point */ list_for_each_entry(ioc, &ioc_list, list) { - if(dd_cbfunc->probe) { - dd_cbfunc->probe(ioc->pcidev, - ioc->pcidev->driver->id_table); - } + id = ioc->pcidev->driver ? + ioc->pcidev->driver->id_table : NULL; + if (dd_cbfunc->probe) + dd_cbfunc->probe(ioc->pcidev, id); } return 0; @@ -1056,9 +1054,8 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init) dinitprintk((MYIOC_s_INFO_FMT "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n", - ioc->name, - ioc->HostPageBuffer, - ioc->HostPageBuffer_dma, + ioc->name, ioc->HostPageBuffer, + (u32)ioc->HostPageBuffer_dma, host_page_buffer_sz)); ioc->alloc_total += host_page_buffer_sz; ioc->HostPageBuffer_sz = host_page_buffer_sz; @@ -1380,6 +1377,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) printk(KERN_WARNING MYNAM ": WARNING - %s did not initialize properly! (%d)\n", ioc->name, r); + list_del(&ioc->list); if (ioc->alt_ioc) ioc->alt_ioc->alt_ioc = NULL; @@ -1762,9 +1760,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) * chips (mpt_adapter_disable, * mpt_diag_reset) */ - ioc->cached_fw = NULL; ddlprintk((MYIOC_s_INFO_FMT ": mpt_upload: alt_%s has cached_fw=%p \n", ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw)); + ioc->alt_ioc->cached_fw = NULL; } } else { printk(KERN_WARNING MYNAM ": firmware upload failure!\n"); @@ -1885,7 +1883,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) /* FIXME? Examine results here? 
*/ } -out: + out: if ((ret != 0) && irq_allocated) { free_irq(ioc->pci_irq, ioc); if (mpt_msi_enable) @@ -2670,6 +2668,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag) dinitprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n", ioc->name, count)); + ioc->aen_event_read_flag=0; return r; } @@ -2737,6 +2736,8 @@ mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size) if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) { ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */ ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma; + ioc->alloc_total += size; + ioc->alt_ioc->alloc_total -= size; } else { if ( (ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma) ) ) ioc->alloc_total += size; @@ -3166,6 +3167,7 @@ KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag) static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) { + MPT_ADAPTER *iocp=NULL; u32 diag0val; u32 doorbell; int hard_reset_done = 0; @@ -3301,17 +3303,23 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) /* FIXME? Examine results here? */ } - if (ioc->cached_fw) { + if (ioc->cached_fw) + iocp = ioc; + else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) + iocp = ioc->alt_ioc; + if (iocp) { /* If the DownloadBoot operation fails, the * IOC will be left unusable. This is a fatal error * case. _diag_reset will return < 0 */ for (count = 0; count < 30; count ++) { - diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + diag0val = CHIPREG_READ32(&iocp->chip->Diagnostic); if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) { break; } + dprintk((MYIOC_s_INFO_FMT "cached_fw: diag0val=%x count=%d\n", + iocp->name, diag0val, count)); /* wait 1 sec */ if (sleepFlag == CAN_SLEEP) { msleep (1000); @@ -3320,7 +3328,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) } } if ((count = mpt_downloadboot(ioc, - (MpiFwHeader_t *)ioc->cached_fw, sleepFlag)) < 0) { + (MpiFwHeader_t *)iocp->cached_fw, sleepFlag)) < 0) { printk(KERN_WARNING MYNAM ": firmware downloadboot failure (%d)!\n", count); } @@ -3907,18 +3915,18 @@ WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag) if (sleepFlag == CAN_SLEEP) { while (--cntdn) { + msleep (1); intstat = CHIPREG_READ32(&ioc->chip->IntStatus); if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) break; - msleep (1); count++; } } else { while (--cntdn) { + mdelay (1); intstat = CHIPREG_READ32(&ioc->chip->IntStatus); if (! 
(intstat & MPI_HIS_IOP_DOORBELL_STATUS)) break; - mdelay (1); count++; } } @@ -4883,6 +4891,7 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc) pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma); if (!pIoc4) return; + ioc->alloc_total += iocpage4sz; } else { ioc4_dma = ioc->spi_data.IocPg4_dma; iocpage4sz = ioc->spi_data.IocPg4Sz; @@ -4899,6 +4908,7 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc) } else { pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma); ioc->spi_data.pIocPg4 = NULL; + ioc->alloc_total -= iocpage4sz; } } @@ -5030,19 +5040,18 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp) EventAck_t *pAck; if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { - printk(MYIOC_s_WARN_FMT "Unable to allocate event ACK " - "request frame for Event=%x EventContext=%x EventData=%x!\n", - ioc->name, evnp->Event, le32_to_cpu(evnp->EventContext), - le32_to_cpu(evnp->Data[0])); + dfailprintk((MYIOC_s_WARN_FMT "%s, no msg frames!!\n", + ioc->name,__FUNCTION__)); return -1; } - memset(pAck, 0, sizeof(*pAck)); - dprintk((MYIOC_s_INFO_FMT "Sending EventAck\n", ioc->name)); + devtverboseprintk((MYIOC_s_INFO_FMT "Sending EventAck\n", ioc->name)); pAck->Function = MPI_FUNCTION_EVENT_ACK; pAck->ChainOffset = 0; + pAck->Reserved[0] = pAck->Reserved[1] = 0; pAck->MsgFlags = 0; + pAck->Reserved1[0] = pAck->Reserved1[1] = pAck->Reserved1[2] = 0; pAck->Event = evnp->Event; pAck->EventContext = evnp->EventContext; @@ -5704,9 +5713,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) break; case MPI_EVENT_EVENT_CHANGE: if (evData0) - ds = "Events(ON) Change"; + ds = "Events ON"; else - ds = "Events(OFF) Change"; + ds = "Events OFF"; break; case MPI_EVENT_INTEGRATED_RAID: { @@ -5777,8 +5786,27 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) break; case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED: snprintf(evStr, EVENT_DESCR_STR_SZ, - "SAS Device Status Change: No Persistancy " - "Added: id=%d", id); + "SAS Device Status Change: No Persistancy: id=%d", id); + break; + case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS Device Status Change: Internal Device Reset : id=%d", id); + break; + case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS Device Status Change: Internal Task Abort : id=%d", id); + break; + case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL: + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS Device Status Change: Internal Abort Task Set : id=%d", id); + break; + case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS Device Status Change: Internal Clear Task Set : id=%d", id); + break; + case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL: + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS Device Status Change: Internal Query Task : id=%d", id); break; default: snprintf(evStr, EVENT_DESCR_STR_SZ, @@ -6034,7 +6062,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply * @ioc: Pointer to MPT_ADAPTER structure * @log_info: U32 LogInfo reply word from the IOC * - * Refer to lsi/fc_log.h. + * Refer to lsi/mpi_log_fc.h. 
*/ static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info) @@ -6131,8 +6159,10 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info) "Invalid SAS Address", /* 01h */ NULL, /* 02h */ "Invalid Page", /* 03h */ - NULL, /* 04h */ - "Task Terminated" /* 05h */ + "Diag Message Error", /* 04h */ + "Task Terminated", /* 05h */ + "Enclosure Management", /* 06h */ + "Target Mode" /* 07h */ }; static char *pl_code_str[] = { NULL, /* 00h */ @@ -6158,7 +6188,7 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info) "IO Executed", /* 14h */ "Persistant Reservation Out Not Affiliation Owner", /* 15h */ "Open Transmit DMA Abort", /* 16h */ - NULL, /* 17h */ + "IO Device Missing Delay Retry", /* 17h */ NULL, /* 18h */ NULL, /* 19h */ NULL, /* 1Ah */ @@ -6238,7 +6268,7 @@ static void mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf) { u32 status = ioc_status & MPI_IOCSTATUS_MASK; - char *desc = ""; + char *desc = NULL; switch (status) { case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */ @@ -6348,7 +6378,7 @@ mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf) desc = "Others"; break; } - if (desc != "") + if (desc != NULL) printk(MYIOC_s_INFO_FMT "IOCStatus(0x%04x): %s\n", ioc->name, status, desc); } @@ -6386,7 +6416,6 @@ EXPORT_SYMBOL(mpt_alloc_fw_memory); EXPORT_SYMBOL(mpt_free_fw_memory); EXPORT_SYMBOL(mptbase_sas_persist_operation); - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * fusion_init - Fusion MPT base driver initialization routine. diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index a5ce10b67d02..d4cb144ab402 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -75,8 +75,8 @@ #define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR #endif -#define MPT_LINUX_VERSION_COMMON "3.04.00" -#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.00" +#define MPT_LINUX_VERSION_COMMON "3.04.01" +#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.01" #define WHAT_MAGIC_STRING "@" "(" "#" ")" #define show_mptmod_ver(s,ver) \ @@ -307,8 +307,8 @@ typedef struct _SYSIF_REGS u32 HostIndex; /* 50 Host Index register */ u32 Reserved4[15]; /* 54-8F */ u32 Fubar; /* 90 For Fubar usage */ - u32 Reserved5[1050];/* 94-10F8 */ - u32 Reset_1078; /* 10FC Reset 1078 */ + u32 Reserved5[1050];/* 94-10F8 */ + u32 Reset_1078; /* 10FC Reset 1078 */ } SYSIF_REGS; /* @@ -363,6 +363,7 @@ typedef struct _VirtDevice { #define MPT_TARGET_FLAGS_VALID_56 0x10 #define MPT_TARGET_FLAGS_SAF_TE_ISSUED 0x20 #define MPT_TARGET_FLAGS_RAID_COMPONENT 0x40 +#define MPT_TARGET_FLAGS_LED_ON 0x80 /* * /proc/mpt interface @@ -634,7 +635,6 @@ typedef struct _MPT_ADAPTER u16 handle; int sas_index; /* index refrencing */ MPT_SAS_MGMT sas_mgmt; - int num_ports; struct work_struct sas_persist_task; struct work_struct fc_setup_reset_work; @@ -644,7 +644,6 @@ typedef struct _MPT_ADAPTER struct work_struct fc_rescan_work; char fc_rescan_work_q_name[KOBJ_NAME_LEN]; struct workqueue_struct *fc_rescan_work_q; - u8 port_serial_number; } MPT_ADAPTER; /* @@ -982,7 +981,7 @@ typedef struct _MPT_SCSI_HOST { wait_queue_head_t scandv_waitq; int scandv_wait_done; long last_queue_full; - u8 mpt_pq_filter; + u16 tm_iocstatus; } MPT_SCSI_HOST; /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index b4967bb8a7d6..30975ccd9947 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -2332,7 +2332,7 @@ 
done_free_mem: } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/* Prototype Routine for the HP HOST INFO command. +/* Prototype Routine for the HOST INFO command. * * Outputs: None. * Return: 0 if successful @@ -2568,7 +2568,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/* Prototype Routine for the HP TARGET INFO command. +/* Prototype Routine for the TARGET INFO command. * * Outputs: None. * Return: 0 if successful diff --git a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h index a2f8a97992e6..043941882c6e 100644 --- a/drivers/message/fusion/mptctl.h +++ b/drivers/message/fusion/mptctl.h @@ -354,9 +354,6 @@ struct mpt_ioctl_command32 { /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/* - * HP Specific IOCTL Defines and Structures - */ #define CPQFCTS_IOC_MAGIC 'Z' #define HP_IOC_MAGIC 'Z' @@ -364,8 +361,6 @@ struct mpt_ioctl_command32 { #define HP_GETHOSTINFO1 _IOR(HP_IOC_MAGIC, 20, hp_host_info_rev0_t) #define HP_GETTARGETINFO _IOR(HP_IOC_MAGIC, 21, hp_target_info_t) -/* All HP IOCTLs must include this header - */ typedef struct _hp_header { unsigned int iocnum; unsigned int host; diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index a8f2fa985455..90da7d63b08e 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c @@ -77,10 +77,6 @@ MODULE_DESCRIPTION(my_NAME); MODULE_LICENSE("GPL"); /* Command line args */ -static int mpt_pq_filter = 0; -module_param(mpt_pq_filter, int, 0); -MODULE_PARM_DESC(mpt_pq_filter, " Enable peripheral qualifier filter: enable=1 (default=0)"); - #define MPTFC_DEV_LOSS_TMO (60) static int mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; /* reasonable default */ module_param(mptfc_dev_loss_tmo, int, 0); @@ -513,8 +509,7 @@ mptfc_slave_alloc(struct scsi_device *sdev) if (vtarget->num_luns == 0) { vtarget->ioc_id = hd->ioc->id; - vtarget->tflags = MPT_TARGET_FLAGS_Q_YES | - MPT_TARGET_FLAGS_VALID_INQUIRY; + vtarget->tflags = MPT_TARGET_FLAGS_Q_YES; hd->Targets[sdev->id] = vtarget; } @@ -1129,13 +1124,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) hd->timer.data = (unsigned long) hd; hd->timer.function = mptscsih_timer_expired; - hd->mpt_pq_filter = mpt_pq_filter; - - ddvprintk((MYIOC_s_INFO_FMT - "mpt_pq_filter %x\n", - ioc->name, - mpt_pq_filter)); - init_waitqueue_head(&hd->scandv_waitq); hd->scandv_wait_done = 0; hd->last_queue_full = 0; diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index f7bd8b11ed3b..f66f2203143a 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -67,20 +67,19 @@ #define my_VERSION MPT_LINUX_VERSION_COMMON #define MYNAM "mptsas" +/* + * Reserved channel for integrated raid + */ +#define MPTSAS_RAID_CHANNEL 1 + MODULE_AUTHOR(MODULEAUTHOR); MODULE_DESCRIPTION(my_NAME); MODULE_LICENSE("GPL"); -static int mpt_pq_filter; -module_param(mpt_pq_filter, int, 0); -MODULE_PARM_DESC(mpt_pq_filter, - "Enable peripheral qualifier filter: enable=1 " - "(default=0)"); - static int mpt_pt_clear; module_param(mpt_pt_clear, int, 0); MODULE_PARM_DESC(mpt_pt_clear, - "Clear persistency table: enable=1 " + " Clear persistency table: enable=1 " "(default=MPTSCSIH_PT_CLEAR=0)"); static int mptsasDoneCtx = -1; @@ -144,7 +143,6 @@ struct mptsas_devinfo { * Specific details on ports, wide/narrow */ struct mptsas_portinfo_details{ - u8 port_id; /* 
port number provided to transport */ u16 num_phys; /* number of phys belong to this port */ u64 phy_bitmask; /* TODO, extend support for 255 phys */ struct sas_rphy *rphy; /* transport layer rphy object */ @@ -350,10 +348,10 @@ mptsas_port_delete(struct mptsas_portinfo_details * port_details) port_info = port_details->port_info; phy_info = port_info->phy_info; - dsaswideprintk((KERN_DEBUG "%s: [%p]: port=%02d num_phys=%02d " + dsaswideprintk((KERN_DEBUG "%s: [%p]: num_phys=%02d " "bitmask=0x%016llX\n", - __FUNCTION__, port_details, port_details->port_id, - port_details->num_phys, port_details->phy_bitmask)); + __FUNCTION__, port_details, port_details->num_phys, + port_details->phy_bitmask)); for (i = 0; i < port_info->num_phys; i++, phy_info++) { if(phy_info->port_details != port_details) @@ -462,9 +460,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info) * phy be removed by firmware events. */ dsaswideprintk((KERN_DEBUG - "%s: [%p]: port=%d deleting phy = %d\n", - __FUNCTION__, port_details, - port_details->port_id, i)); + "%s: [%p]: deleting phy = %d\n", + __FUNCTION__, port_details, i)); port_details->num_phys--; port_details->phy_bitmask &= ~ (1 << phy_info->phy_id); memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); @@ -493,7 +490,6 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info) goto out; port_details->num_phys = 1; port_details->port_info = port_info; - port_details->port_id = ioc->port_serial_number++; if (phy_info->phy_id < 64 ) port_details->phy_bitmask |= (1 << phy_info->phy_id); @@ -525,12 +521,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info) mptsas_get_port(phy_info_cmp); port_details->starget = mptsas_get_starget(phy_info_cmp); - port_details->port_id = - phy_info_cmp->port_details->port_id; port_details->num_phys = phy_info_cmp->port_details->num_phys; -// port_info->port_serial_number--; - ioc->port_serial_number--; if (!phy_info_cmp->port_details->num_phys) kfree(phy_info_cmp->port_details); } else @@ -554,11 +546,11 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info) if (!port_details) continue; dsaswideprintk((KERN_DEBUG - "%s: [%p]: phy_id=%02d port_id=%02d num_phys=%02d " + "%s: [%p]: phy_id=%02d num_phys=%02d " "bitmask=0x%016llX\n", __FUNCTION__, - port_details, i, port_details->port_id, - port_details->num_phys, port_details->phy_bitmask)); + port_details, i, port_details->num_phys, + port_details->phy_bitmask)); dsaswideprintk((KERN_DEBUG"\t\tport = %p rphy=%p\n", port_details->port, port_details->rphy)); } @@ -651,16 +643,13 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure, static int mptsas_slave_configure(struct scsi_device *sdev) { - struct Scsi_Host *host = sdev->host; - MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata; - /* - * RAID volumes placed beyond the last expected port. - * Ignore sending sas mode pages in that case.. - */ - if (sdev->channel < hd->ioc->num_ports) - sas_read_port_mode_page(sdev); + if (sdev->channel == MPTSAS_RAID_CHANNEL) + goto out; + + sas_read_port_mode_page(sdev); + out: return mptscsih_slave_configure(sdev); } @@ -689,10 +678,7 @@ mptsas_target_alloc(struct scsi_target *starget) hd->Targets[target_id] = vtarget; - /* - * RAID volumes placed beyond the last expected port. 
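The mptsas hunks in this area drop the per-adapter num_ports and port_serial_number bookkeeping and instead reserve a fixed channel (MPTSAS_RAID_CHANNEL, defined above as 1) for integrated RAID volumes, so the SCSI midlayer callbacks only need a channel compare. A hedged sketch of the resulting slave_configure logic follows; the _sketch suffix marks the copy as illustrative.

/* RAID volumes sit on the reserved channel and have no SAS transport
 * rphy, so the SAS port mode page is only read for real devices.
 * Mirrors the mptsas_slave_configure() hunk above. */
static int mptsas_slave_configure_sketch(struct scsi_device *sdev)
{
	if (sdev->channel != MPTSAS_RAID_CHANNEL)
		sas_read_port_mode_page(sdev);

	return mptscsih_slave_configure(sdev);
}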
- */ - if (starget->channel == hd->ioc->num_ports) + if (starget->channel == MPTSAS_RAID_CHANNEL) goto out; rphy = dev_to_rphy(starget->dev.parent); @@ -743,7 +729,7 @@ mptsas_target_destroy(struct scsi_target *starget) if (!starget->hostdata) return; - if (starget->channel == hd->ioc->num_ports) + if (starget->channel == MPTSAS_RAID_CHANNEL) goto out; rphy = dev_to_rphy(starget->dev.parent); @@ -783,10 +769,7 @@ mptsas_slave_alloc(struct scsi_device *sdev) starget = scsi_target(sdev); vdev->vtarget = starget->hostdata; - /* - * RAID volumes placed beyond the last expected port. - */ - if (sdev->channel == hd->ioc->num_ports) + if (sdev->channel == MPTSAS_RAID_CHANNEL) goto out; rphy = dev_to_rphy(sdev->sdev_target->dev.parent); @@ -1608,11 +1591,7 @@ static int mptsas_probe_one_phy(struct device *dev, if (phy_info->sas_port_add_phy) { if (!port) { - port = sas_port_alloc(dev, - phy_info->port_details->port_id); - dsaswideprintk((KERN_DEBUG - "sas_port_alloc: port=%p dev=%p port_id=%d\n", - port, dev, phy_info->port_details->port_id)); + port = sas_port_alloc_num(dev); if (!port) { error = -ENOMEM; goto out; @@ -1625,6 +1604,9 @@ static int mptsas_probe_one_phy(struct device *dev, goto out; } mptsas_set_port(phy_info, port); + dsaswideprintk((KERN_DEBUG + "sas_port_alloc: port=%p dev=%p port_id=%d\n", + port, dev, port->port_identifier)); } dsaswideprintk((KERN_DEBUG "sas_port_add_phy: phy_id=%d\n", phy_info->phy_id)); @@ -1736,7 +1718,6 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc) hba = NULL; } mutex_unlock(&ioc->sas_topology_mutex); - ioc->num_ports = port_info->num_phys; for (i = 0; i < port_info->num_phys; i++) { mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i], @@ -1939,7 +1920,8 @@ mptsas_delete_expander_phys(MPT_ADAPTER *ioc) expander_sas_address) continue; #ifdef MPT_DEBUG_SAS_WIDE - dev_printk(KERN_DEBUG, &port->dev, "delete\n"); + dev_printk(KERN_DEBUG, &port->dev, + "delete port (%d)\n", port->port_identifier); #endif sas_port_delete(port); mptsas_port_delete(phy_info->port_details); @@ -1984,7 +1966,7 @@ mptsas_scan_sas_topology(MPT_ADAPTER *ioc) if (!ioc->raid_data.pIocPg2->NumActiveVolumes) goto out; for (i=0; i<ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { - scsi_add_device(ioc->sh, ioc->num_ports, + scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0); } out: @@ -2185,7 +2167,8 @@ mptsas_hotplug_work(void *arg) ioc->name, ds, ev->channel, ev->id, phy_info->phy_id); #ifdef MPT_DEBUG_SAS_WIDE - dev_printk(KERN_DEBUG, &port->dev, "delete\n"); + dev_printk(KERN_DEBUG, &port->dev, + "delete port (%d)\n", port->port_identifier); #endif sas_port_delete(port); mptsas_port_delete(phy_info->port_details); @@ -2289,35 +2272,26 @@ mptsas_hotplug_work(void *arg) mptsas_set_rphy(phy_info, rphy); break; case MPTSAS_ADD_RAID: - sdev = scsi_device_lookup( - ioc->sh, - ioc->num_ports, - ev->id, - 0); + sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, + ev->id, 0); if (sdev) { scsi_device_put(sdev); break; } printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, id %d\n", - ioc->name, ioc->num_ports, ev->id); - scsi_add_device(ioc->sh, - ioc->num_ports, - ev->id, - 0); + ioc->name, MPTSAS_RAID_CHANNEL, ev->id); + scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, ev->id, 0); mpt_findImVolumes(ioc); break; case MPTSAS_DEL_RAID: - sdev = scsi_device_lookup( - ioc->sh, - ioc->num_ports, - ev->id, - 0); + sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, + ev->id, 0); if (!sdev) break; printk(MYIOC_s_INFO_FMT "removing raid volume, channel 
%d, id %d\n", - ioc->name, ioc->num_ports, ev->id); + ioc->name, MPTSAS_RAID_CHANNEL, ev->id); vdevice = sdev->hostdata; vdevice->vtarget->deleted = 1; mptsas_target_reset(ioc, vdevice->vtarget); @@ -2723,7 +2697,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) hd->timer.data = (unsigned long) hd; hd->timer.function = mptscsih_timer_expired; - hd->mpt_pq_filter = mpt_pq_filter; ioc->sas_data.ptClear = mpt_pt_clear; if (ioc->sas_data.ptClear==1) { @@ -2731,12 +2704,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT); } - ddvprintk((MYIOC_s_INFO_FMT - "mpt_pq_filter %x mpt_pq_filter %x\n", - ioc->name, - mpt_pq_filter, - mpt_pq_filter)); - init_waitqueue_head(&hd->scandv_waitq); hd->scandv_wait_done = 0; hd->last_queue_full = 0; diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 8242b16e3168..30524dc54b16 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -66,6 +66,7 @@ #include "mptbase.h" #include "mptscsih.h" +#include "lsi/mpi_log_sas.h" /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #define my_NAME "Fusion MPT SCSI Host driver" @@ -127,7 +128,7 @@ static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx); static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply); static int mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd); static int mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout ); -static u32 SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc); +static int SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc); static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout); @@ -497,6 +498,34 @@ nextSGEset: return SUCCESS; } /* mptscsih_AddSGE() */ +static void +mptscsih_issue_sep_command(MPT_ADAPTER *ioc, VirtTarget *vtarget, + U32 SlotStatus) +{ + MPT_FRAME_HDR *mf; + SEPRequest_t *SEPMsg; + + if (ioc->bus_type == FC) + return; + + if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) { + dfailprintk((MYIOC_s_WARN_FMT "%s: no msg frames!!\n", + ioc->name,__FUNCTION__)); + return; + } + + SEPMsg = (SEPRequest_t *)mf; + SEPMsg->Function = MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; + SEPMsg->Bus = vtarget->bus_id; + SEPMsg->TargetID = vtarget->target_id; + SEPMsg->Action = MPI_SEP_REQ_ACTION_WRITE_STATUS; + SEPMsg->SlotStatus = SlotStatus; + devtverboseprintk((MYIOC_s_WARN_FMT + "Sending SEP cmd=%x id=%d bus=%d\n", + ioc->name, SlotStatus, SEPMsg->TargetID, SEPMsg->Bus)); + mpt_put_msg_frame(ioc->DoneCtx, ioc, mf); +} + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * mptscsih_io_done - Main SCSI IO callback routine registered to @@ -520,6 +549,8 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) SCSIIORequest_t *pScsiReq; SCSIIOReply_t *pScsiReply; u16 req_idx, req_idx_MR; + VirtDevice *vdev; + VirtTarget *vtarget; hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; @@ -538,6 +569,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) } sc = hd->ScsiLookup[req_idx]; + hd->ScsiLookup[req_idx] = NULL; if (sc == NULL) { MPIHeader_t *hdr = (MPIHeader_t *)mf; @@ -553,6 +585,12 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) return 1; } + if ((unsigned char *)mf != sc->host_scribble) { + mptscsih_freeChainBuffers(ioc, req_idx); + return 1; + } + + sc->host_scribble = NULL; 
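The mptscsih hunks around here introduce a stale-completion guard: the message frame pointer is stored in scsi_cmnd->host_scribble when the command is queued, the ScsiLookup slot is cleared as soon as the reply arrives, and the command is only completed if the reply really belongs to that frame. A compact sketch of the guard, with a hypothetical helper name, is shown below.

/* Sketch of the guard added in mptscsih_io_done(): claim the lookup
 * slot first, then verify the reply's frame against host_scribble so a
 * completion that races with abort/reset is dropped safely. */
static int mptscsih_claim_cmd_sketch(MPT_ADAPTER *ioc, MPT_SCSI_HOST *hd,
				     MPT_FRAME_HDR *mf, u16 req_idx,
				     struct scsi_cmnd **out)
{
	struct scsi_cmnd *sc = hd->ScsiLookup[req_idx];

	hd->ScsiLookup[req_idx] = NULL;

	if (sc == NULL)
		return 0;			/* nothing to complete */

	if ((unsigned char *)mf != sc->host_scribble) {
		/* command already completed or aborted elsewhere */
		mptscsih_freeChainBuffers(ioc, req_idx);
		return 0;
	}

	sc->host_scribble = NULL;
	*out = sc;
	return 1;				/* safe to finish this command */
}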
sc->result = DID_OK << 16; /* Set default reply as OK */ pScsiReq = (SCSIIORequest_t *) mf; pScsiReply = (SCSIIOReply_t *) mr; @@ -640,10 +678,36 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF) hd->sel_timeout[pScsiReq->TargetID]++; + + vdev = sc->device->hostdata; + if (!vdev) + break; + vtarget = vdev->vtarget; + if (vtarget->tflags & MPT_TARGET_FLAGS_LED_ON) { + mptscsih_issue_sep_command(ioc, vtarget, + MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED); + vtarget->tflags &= ~MPT_TARGET_FLAGS_LED_ON; + } break; - case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */ + if ( ioc->bus_type == SAS ) { + u16 ioc_status = le16_to_cpu(pScsiReply->IOCStatus); + if (ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { + u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo); + log_info &=SAS_LOGINFO_MASK; + if (log_info == SAS_LOGINFO_NEXUS_LOSS) { + sc->result = (DID_BUS_BUSY << 16); + break; + } + } + } + + /* + * Allow non-SAS & non-NEXUS_LOSS to drop into below code + */ + + case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ /* Linux handles an unsolicited DID_RESET better * than an unsolicited DID_ABORT. @@ -658,7 +722,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) sc->result=DID_SOFT_ERROR << 16; else /* Sufficient data transfer occurred */ sc->result = (DID_OK << 16) | scsi_status; - dreplyprintk((KERN_NOTICE + dreplyprintk((KERN_NOTICE "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->device->id)); break; @@ -784,8 +848,6 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) sc->request_bufflen, sc->sc_data_direction); } - hd->ScsiLookup[req_idx] = NULL; - sc->scsi_done(sc); /* Issue the command callback */ /* Free Chain buffers */ @@ -827,9 +889,17 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd) dmfprintk(( "flush: ScsiDone (mf=%p,sc=%p)\n", mf, SCpnt)); + /* Free Chain buffers */ + mptscsih_freeChainBuffers(ioc, ii); + + /* Free Message frames */ + mpt_free_msg_frame(ioc, mf); + + if ((unsigned char *)mf != SCpnt->host_scribble) + continue; + /* Set status, free OS resources (SG DMA buffers) * Do OS callback - * Free driver resources (chain, msg buffers) */ if (SCpnt->use_sg) { pci_unmap_sg(ioc->pcidev, @@ -845,12 +915,6 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd) SCpnt->result = DID_RESET << 16; SCpnt->host_scribble = NULL; - /* Free Chain buffers */ - mptscsih_freeChainBuffers(ioc, ii); - - /* Free Message frames */ - mpt_free_msg_frame(ioc, mf); - SCpnt->scsi_done(SCpnt); /* Issue the command callback */ } } @@ -887,10 +951,10 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice) if ((sc = hd->ScsiLookup[ii]) != NULL) { mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii); - + if (mf == NULL) + continue; dsprintk(( "search_running: found (sc=%p, mf = %p) target %d, lun %d \n", hd->ScsiLookup[ii], mf, mf->TargetID, mf->LUN[1])); - if ((mf->TargetID != ((u8)vdevice->vtarget->target_id)) || (mf->LUN[1] != ((u8) vdevice->lun))) continue; @@ -899,6 +963,8 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice) hd->ScsiLookup[ii] = NULL; mptscsih_freeChainBuffers(hd->ioc, ii); mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf); + if ((unsigned char *)mf != sc->host_scribble) + continue; if (sc->use_sg) { pci_unmap_sg(hd->ioc->pcidev, (struct scatterlist *) sc->request_buffer, @@ -1341,8 +1407,8 @@ mptscsih_qcmd(struct 
scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) goto fail; } + SCpnt->host_scribble = (unsigned char *)mf; hd->ScsiLookup[my_idx] = SCpnt; - SCpnt->host_scribble = NULL; mpt_put_msg_frame(hd->ioc->DoneCtx, hd->ioc, mf); dmfprintk((MYIOC_s_INFO_FMT "Issued SCSI cmd (%p) mf=%p idx=%d\n", @@ -1529,6 +1595,12 @@ mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, in rc = mpt_HardResetHandler(hd->ioc, CAN_SLEEP); } + /* + * Check IOCStatus from TM reply message + */ + if (hd->tm_iocstatus != MPI_IOCSTATUS_SUCCESS) + rc = FAILED; + dtmprintk((MYIOC_s_INFO_FMT "TMHandler rc = %d!\n", hd->ioc->name, rc)); return rc; @@ -1654,6 +1726,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) int scpnt_idx; int retval; VirtDevice *vdev; + ulong sn = SCpnt->serial_number; /* If we can't locate our host adapter structure, return FAILED status. */ @@ -1707,6 +1780,11 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun, ctx2abort, mptscsih_get_tm_timeout(hd->ioc)); + if (SCPNT_TO_LOOKUP_IDX(SCpnt) == scpnt_idx && + SCpnt->serial_number == sn) { + retval = FAILED; + } + printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n", hd->ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); @@ -2023,6 +2101,7 @@ mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *m DBG_DUMP_TM_REPLY_FRAME((u32 *)pScsiTmReply); iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK; + hd->tm_iocstatus = iocstatus; dtmprintk((MYIOC_s_WARN_FMT " SCSI TaskMgmt (%d) IOCStatus=%04x IOCLogInfo=%08x\n", ioc->name, tmType, iocstatus, le32_to_cpu(pScsiTmReply->IOCLogInfo))); /* Error? (anything non-zero?) */ @@ -2401,6 +2480,13 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR ioc->events[idx].data[1] = (sense_data[13] << 8) || sense_data[12]; ioc->eventContext++; + if (hd->ioc->pcidev->vendor == + PCI_VENDOR_ID_IBM) { + mptscsih_issue_sep_command(hd->ioc, + vdev->vtarget, MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); + vdev->vtarget->tflags |= + MPT_TARGET_FLAGS_LED_ON; + } } } } else { @@ -2409,7 +2495,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR } } -static u32 +static int SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc) { MPT_SCSI_HOST *hd; diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index 0a1ff762205f..e4cc3dd5fc9f 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c @@ -83,10 +83,6 @@ static int mpt_saf_te = MPTSCSIH_SAF_TE; module_param(mpt_saf_te, int, 0); MODULE_PARM_DESC(mpt_saf_te, " Force enabling SEP Processor: enable=1 (default=MPTSCSIH_SAF_TE=0)"); -static int mpt_pq_filter = 0; -module_param(mpt_pq_filter, int, 0); -MODULE_PARM_DESC(mpt_pq_filter, " Enable peripheral qualifier filter: enable=1 (default=0)"); - static void mptspi_write_offset(struct scsi_target *, int); static void mptspi_write_width(struct scsi_target *, int); static int mptspi_write_spi_device_pg1(struct scsi_target *, @@ -1047,14 +1043,12 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id) hd->timer.function = mptscsih_timer_expired; ioc->spi_data.Saf_Te = mpt_saf_te; - hd->mpt_pq_filter = mpt_pq_filter; hd->negoNvram = MPT_SCSICFG_USE_NVRAM; ddvprintk((MYIOC_s_INFO_FMT - "saf_te %x mpt_pq_filter %x\n", + "saf_te %x\n", ioc->name, - mpt_saf_te, - mpt_pq_filter)); + mpt_saf_te)); ioc->spi_data.noQas = 0; init_waitqueue_head(&hd->scandv_waitq); diff --git a/drivers/net/dummy.c 
b/drivers/net/dummy.c index 36d511729f71..2146cf74425e 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -132,6 +132,7 @@ static int __init dummy_init_module(void) for (i = 0; i < numdummies && !err; i++) err = dummy_init_one(i); if (err) { + i--; while (--i >= 0) dummy_free_one(i); } diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index f411bbb44f86..d304297c496c 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h @@ -110,6 +110,9 @@ struct e1000_adapter; #define E1000_MIN_RXD 80 #define E1000_MAX_82544_RXD 4096 +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + /* Supported Rx Buffer Sizes */ #define E1000_RXBUFFER_128 128 /* Used for packet split */ #define E1000_RXBUFFER_256 256 /* Used for packet split */ diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 6d3d41934503..da62db897426 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; #else #define DRIVERNAPI "-NAPI" #endif -#define DRV_VERSION "7.1.9-k2"DRIVERNAPI +#define DRV_VERSION "7.1.9-k4"DRIVERNAPI char e1000_driver_version[] = DRV_VERSION; static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; @@ -1068,7 +1068,7 @@ e1000_sw_init(struct e1000_adapter *adapter) pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); - adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE; + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; adapter->rx_ps_bsize0 = E1000_RXBUFFER_128; hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; @@ -3148,7 +3148,6 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) adapter->rx_buffer_len = E1000_RXBUFFER_16384; /* adjust allocation if LPE protects us, and we aren't using SBP */ -#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 if (!adapter->hw.tbi_compatibility_on && ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) || (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) @@ -3387,8 +3386,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) E1000_WRITE_REG(hw, IMC, ~0); E1000_WRITE_FLUSH(hw); } - if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0]))) - __netif_rx_schedule(&adapter->polling_netdev[0]); + if (likely(netif_rx_schedule_prep(netdev))) + __netif_rx_schedule(netdev); else e1000_irq_enable(adapter); #else @@ -3431,34 +3430,26 @@ e1000_clean(struct net_device *poll_dev, int *budget) { struct e1000_adapter *adapter; int work_to_do = min(*budget, poll_dev->quota); - int tx_cleaned = 0, i = 0, work_done = 0; + int tx_cleaned = 0, work_done = 0; /* Must NOT use netdev_priv macro here. */ adapter = poll_dev->priv; /* Keep link state information with original netdev */ - if (!netif_carrier_ok(adapter->netdev)) + if (!netif_carrier_ok(poll_dev)) goto quit_polling; - while (poll_dev != &adapter->polling_netdev[i]) { - i++; - BUG_ON(i == adapter->num_rx_queues); + /* e1000_clean is called per-cpu. This lock protects + * tx_ring[0] from being cleaned by multiple cpus + * simultaneously. A failure obtaining the lock means + * tx_ring[0] is currently being cleaned anyway. */ + if (spin_trylock(&adapter->tx_queue_lock)) { + tx_cleaned = e1000_clean_tx_irq(adapter, + &adapter->tx_ring[0]); + spin_unlock(&adapter->tx_queue_lock); } - if (likely(adapter->num_tx_queues == 1)) { - /* e1000_clean is called per-cpu. 
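The e1000 changes above collapse the NAPI path onto the real netdev and a single ring: the interrupt handler schedules polling on netdev itself, and e1000_clean() always services tx_ring[0]/rx_ring[0], taking tx_queue_lock with a trylock so two CPUs never clean the same tx ring. A readable sketch of the resulting poll routine follows; names are taken from the hunks, the _sketch suffix is illustrative.

static int e1000_clean_sketch(struct net_device *poll_dev, int *budget)
{
	/* must not use netdev_priv() here, per the driver's own comment */
	struct e1000_adapter *adapter = poll_dev->priv;
	int work_to_do = min(*budget, poll_dev->quota);
	int tx_cleaned = 0, work_done = 0;

	if (!netif_carrier_ok(poll_dev))
		goto quit_polling;

	/* a failed trylock means another CPU is cleaning tx_ring[0] */
	if (spin_trylock(&adapter->tx_queue_lock)) {
		tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
		spin_unlock(&adapter->tx_queue_lock);
	}

	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, work_to_do);

	*budget -= work_done;
	poll_dev->quota -= work_done;

	/* if no tx and not enough rx work was done, leave polling mode */
	if ((!tx_cleaned && work_done == 0) || !netif_running(poll_dev)) {
quit_polling:
		netif_rx_complete(poll_dev);
		e1000_irq_enable(adapter);
		return 0;
	}
	return 1;
}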
This lock protects - * tx_ring[0] from being cleaned by multiple cpus - * simultaneously. A failure obtaining the lock means - * tx_ring[0] is currently being cleaned anyway. */ - if (spin_trylock(&adapter->tx_queue_lock)) { - tx_cleaned = e1000_clean_tx_irq(adapter, - &adapter->tx_ring[0]); - spin_unlock(&adapter->tx_queue_lock); - } - } else - tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]); - - adapter->clean_rx(adapter, &adapter->rx_ring[i], + adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, work_to_do); *budget -= work_done; @@ -3466,7 +3457,7 @@ e1000_clean(struct net_device *poll_dev, int *budget) /* If no Tx and not enough Rx work done, exit the polling mode */ if ((!tx_cleaned && (work_done == 0)) || - !netif_running(adapter->netdev)) { + !netif_running(poll_dev)) { quit_polling: netif_rx_complete(poll_dev); e1000_irq_enable(adapter); @@ -3681,6 +3672,9 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, length = le16_to_cpu(rx_desc->length); + /* adjust length to remove Ethernet CRC */ + length -= 4; + if (unlikely(!(status & E1000_RXD_STAT_EOP))) { /* All receives must fit into a single buffer */ E1000_DBG("%s: Receive packet consumed multiple" @@ -3885,8 +3879,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, pci_dma_sync_single_for_device(pdev, ps_page_dma->ps_page_dma[0], PAGE_SIZE, PCI_DMA_FROMDEVICE); + /* remove the CRC */ + l1 -= 4; skb_put(skb, l1); - length += l1; goto copydone; } /* if */ } @@ -3905,6 +3900,10 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, skb->truesize += length; } + /* strip the ethernet crc, problem is we're using pages now so + * this whole operation can get a little cpu intensive */ + pskb_trim(skb, skb->len - 4); + copydone: e1000_rx_checksum(adapter, staterr, le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); @@ -4752,6 +4751,7 @@ static void e1000_netpoll(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); + disable_irq(adapter->pdev->irq); e1000_intr(adapter->pdev->irq, netdev, NULL); e1000_clean_tx_irq(adapter, adapter->tx_ring); diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 3a42afab5036..43e3f33ed5e2 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -271,6 +271,7 @@ static int __init ifb_init_module(void) for (i = 0; i < numifbs && !err; i++) err = ifb_init_one(i); if (err) { + i--; while (--i >= 0) ifb_free_one(i); } diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 07ca9480a6fe..c3e52c806b13 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -620,7 +620,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) return -ENXIO; } dev_info(&mgp->pdev->dev, "handoff confirmed\n"); - myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); + myri10ge_dummy_rdma(mgp, 1); return 0; } diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 82200bfaa8ed..7de9a07b2ac2 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -516,10 +516,7 @@ static int skge_set_pauseparam(struct net_device *dev, /* Chip internal frequency for clock calculations */ static inline u32 hwkhz(const struct skge_hw *hw) { - if (hw->chip_id == CHIP_ID_GENESIS) - return 53215; /* or: 53.125 MHz */ - else - return 78215; /* or: 78.125 MHz */ + return (hw->chip_id == CHIP_ID_GENESIS) ? 
53125 : 78125; } /* Chip HZ to microseconds */ diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index d98f28c34e5c..de91609ca112 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -50,7 +50,7 @@ #include "sky2.h" #define DRV_NAME "sky2" -#define DRV_VERSION "1.4" +#define DRV_VERSION "1.5" #define PFX DRV_NAME " " /* @@ -2204,9 +2204,6 @@ static int sky2_poll(struct net_device *dev0, int *budget) int work_done = 0; u32 status = sky2_read32(hw, B0_Y2_SP_EISR); - if (!~status) - goto out; - if (status & Y2_IS_HW_ERR) sky2_hw_intr(hw); @@ -2243,7 +2240,7 @@ static int sky2_poll(struct net_device *dev0, int *budget) if (sky2_more_work(hw)) return 1; -out: + netif_rx_complete(dev0); sky2_read32(hw, B0_Y2_SP_LISR); diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index fb1d5a8a45cf..647f62e9707d 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c @@ -84,7 +84,7 @@ MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl); * * returns the content of the specified SMMIO register. */ -static u32 +static inline u32 spider_net_read_reg(struct spider_net_card *card, u32 reg) { u32 value; @@ -101,7 +101,7 @@ spider_net_read_reg(struct spider_net_card *card, u32 reg) * @reg: register to write to * @value: value to write into the specified SMMIO register */ -static void +static inline void spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) { value = cpu_to_le32(value); @@ -259,39 +259,10 @@ spider_net_get_mac_address(struct net_device *netdev) * * returns the status as in the dmac_cmd_status field of the descriptor */ -static enum spider_net_descr_status +static inline int spider_net_get_descr_status(struct spider_net_descr *descr) { - u32 cmd_status; - - cmd_status = descr->dmac_cmd_status; - cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT; - /* no need to mask out any bits, as cmd_status is 32 bits wide only - * (and unsigned) */ - return cmd_status; -} - -/** - * spider_net_set_descr_status -- sets the status of a descriptor - * @descr: descriptor to change - * @status: status to set in the descriptor - * - * changes the status to the specified value. 
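With the spider_net rework, descriptor state is kept directly in the top nibble of dmac_cmd_status: reading it is a single mask with SPIDER_NET_DESCR_IND_PROC_MASK, and marking a descriptor free is a plain store of SPIDER_NET_DESCR_NOT_IN_USE (the constants are defined in the spider_net.h hunk near the end of this patch), so the old enum and the read-modify-write setter go away. In sketch form, with a renamed illustrative helper:

/* Sketch, mirroring the new inline helper above: descriptor state is
 * bits 31:28 of dmac_cmd_status. */
static inline int spider_net_descr_state(struct spider_net_descr *descr)
{
	return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
}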
Doesn't change other bits - * in the status - */ -static void -spider_net_set_descr_status(struct spider_net_descr *descr, - enum spider_net_descr_status status) -{ - u32 cmd_status; - /* read the status */ - cmd_status = descr->dmac_cmd_status; - /* clean the upper 4 bits */ - cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO; - /* add the status to it */ - cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT; - /* and write it back */ - descr->dmac_cmd_status = cmd_status; + return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK; } /** @@ -328,24 +299,23 @@ spider_net_free_chain(struct spider_net_card *card, static int spider_net_init_chain(struct spider_net_card *card, struct spider_net_descr_chain *chain, - struct spider_net_descr *start_descr, int no) + struct spider_net_descr *start_descr, + int direction, int no) { int i; struct spider_net_descr *descr; dma_addr_t buf; - atomic_set(&card->rx_chain_refill,0); - descr = start_descr; memset(descr, 0, sizeof(*descr) * no); /* set up the hardware pointers in each descriptor */ for (i=0; i<no; i++, descr++) { - spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); + descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; buf = pci_map_single(card->pdev, descr, SPIDER_NET_DESCR_SIZE, - PCI_DMA_BIDIRECTIONAL); + direction); if (buf == DMA_ERROR_CODE) goto iommu_error; @@ -360,10 +330,11 @@ spider_net_init_chain(struct spider_net_card *card, start_descr->prev = descr-1; descr = start_descr; - for (i=0; i < no; i++, descr++) { - descr->next_descr_addr = descr->next->bus_addr; - } + if (direction == PCI_DMA_FROMDEVICE) + for (i=0; i < no; i++, descr++) + descr->next_descr_addr = descr->next->bus_addr; + spin_lock_init(&chain->lock); chain->head = start_descr; chain->tail = start_descr; @@ -375,7 +346,7 @@ iommu_error: if (descr->bus_addr) pci_unmap_single(card->pdev, descr->bus_addr, SPIDER_NET_DESCR_SIZE, - PCI_DMA_BIDIRECTIONAL); + direction); return -ENOMEM; } @@ -396,7 +367,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card) dev_kfree_skb(descr->skb); pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME, - PCI_DMA_BIDIRECTIONAL); + PCI_DMA_FROMDEVICE); } descr = descr->next; } @@ -446,15 +417,16 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); /* io-mmu-map the skb */ buf = pci_map_single(card->pdev, descr->skb->data, - SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); + SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); descr->buf_addr = buf; if (buf == DMA_ERROR_CODE) { dev_kfree_skb_any(descr->skb); if (netif_msg_rx_err(card) && net_ratelimit()) pr_err("Could not iommu-map rx buffer\n"); - spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); + descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; } else { - descr->dmac_cmd_status = SPIDER_NET_DMAC_RX_CARDOWNED; + descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED | + SPIDER_NET_DMAC_NOINTR_COMPLETE; } return error; @@ -468,7 +440,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, * chip by writing to the appropriate register. DMA is enabled in * spider_net_enable_rxdmac. 
*/ -static void +static inline void spider_net_enable_rxchtails(struct spider_net_card *card) { /* assume chain is aligned correctly */ @@ -483,7 +455,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card) * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN * in the GDADMACCNTR register */ -static void +static inline void spider_net_enable_rxdmac(struct spider_net_card *card) { wmb(); @@ -500,23 +472,24 @@ spider_net_enable_rxdmac(struct spider_net_card *card) static void spider_net_refill_rx_chain(struct spider_net_card *card) { - struct spider_net_descr_chain *chain; - - chain = &card->rx_chain; + struct spider_net_descr_chain *chain = &card->rx_chain; + unsigned long flags; /* one context doing the refill (and a second context seeing that * and omitting it) is ok. If called by NAPI, we'll be called again * as spider_net_decode_one_descr is called several times. If some * interrupt calls us, the NAPI is about to clean up anyway. */ - if (atomic_inc_return(&card->rx_chain_refill) == 1) - while (spider_net_get_descr_status(chain->head) == - SPIDER_NET_DESCR_NOT_IN_USE) { - if (spider_net_prepare_rx_descr(card, chain->head)) - break; - chain->head = chain->head->next; - } + if (!spin_trylock_irqsave(&chain->lock, flags)) + return; + + while (spider_net_get_descr_status(chain->head) == + SPIDER_NET_DESCR_NOT_IN_USE) { + if (spider_net_prepare_rx_descr(card, chain->head)) + break; + chain->head = chain->head->next; + } - atomic_dec(&card->rx_chain_refill); + spin_unlock_irqrestore(&chain->lock, flags); } /** @@ -554,111 +527,6 @@ error: } /** - * spider_net_release_tx_descr - processes a used tx descriptor - * @card: card structure - * @descr: descriptor to release - * - * releases a used tx descriptor (unmapping, freeing of skb) - */ -static void -spider_net_release_tx_descr(struct spider_net_card *card, - struct spider_net_descr *descr) -{ - struct sk_buff *skb; - - /* unmap the skb */ - skb = descr->skb; - pci_unmap_single(card->pdev, descr->buf_addr, skb->len, - PCI_DMA_BIDIRECTIONAL); - - dev_kfree_skb_any(skb); - - /* set status to not used */ - spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); -} - -/** - * spider_net_release_tx_chain - processes sent tx descriptors - * @card: adapter structure - * @brutal: if set, don't care about whether descriptor seems to be in use - * - * returns 0 if the tx ring is empty, otherwise 1. - * - * spider_net_release_tx_chain releases the tx descriptors that spider has - * finished with (if non-brutal) or simply release tx descriptors (if brutal). - * If some other context is calling this function, we return 1 so that we're - * scheduled again (if we were scheduled) and will not loose initiative. 
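The spider_net_refill_rx_chain() hunk above replaces the old atomic refill counter with a spinlock embedded in the descriptor chain; a failed trylock simply means another context is already refilling, which is harmless because the NAPI path will call the function again. A sketch of that pattern is shown below; the _sketch suffix marks the copy as illustrative.

static void spider_net_refill_rx_chain_sketch(struct spider_net_card *card)
{
	struct spider_net_descr_chain *chain = &card->rx_chain;
	unsigned long flags;

	if (!spin_trylock_irqsave(&chain->lock, flags))
		return;			/* someone else is refilling */

	while (spider_net_get_descr_status(chain->head) ==
	       SPIDER_NET_DESCR_NOT_IN_USE) {
		if (spider_net_prepare_rx_descr(card, chain->head))
			break;		/* out of skbs, try again later */
		chain->head = chain->head->next;
	}

	spin_unlock_irqrestore(&chain->lock, flags);
}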
- */ -static int -spider_net_release_tx_chain(struct spider_net_card *card, int brutal) -{ - struct spider_net_descr_chain *tx_chain = &card->tx_chain; - enum spider_net_descr_status status; - - if (atomic_inc_return(&card->tx_chain_release) != 1) { - atomic_dec(&card->tx_chain_release); - return 1; - } - - for (;;) { - status = spider_net_get_descr_status(tx_chain->tail); - switch (status) { - case SPIDER_NET_DESCR_CARDOWNED: - if (!brutal) - goto out; - /* fallthrough, if we release the descriptors - * brutally (then we don't care about - * SPIDER_NET_DESCR_CARDOWNED) */ - case SPIDER_NET_DESCR_RESPONSE_ERROR: - case SPIDER_NET_DESCR_PROTECTION_ERROR: - case SPIDER_NET_DESCR_FORCE_END: - if (netif_msg_tx_err(card)) - pr_err("%s: forcing end of tx descriptor " - "with status x%02x\n", - card->netdev->name, status); - card->netdev_stats.tx_dropped++; - break; - - case SPIDER_NET_DESCR_COMPLETE: - card->netdev_stats.tx_packets++; - card->netdev_stats.tx_bytes += - tx_chain->tail->skb->len; - break; - - default: /* any other value (== SPIDER_NET_DESCR_NOT_IN_USE) */ - goto out; - } - spider_net_release_tx_descr(card, tx_chain->tail); - tx_chain->tail = tx_chain->tail->next; - } -out: - atomic_dec(&card->tx_chain_release); - - netif_wake_queue(card->netdev); - - if (status == SPIDER_NET_DESCR_CARDOWNED) - return 1; - return 0; -} - -/** - * spider_net_cleanup_tx_ring - cleans up the TX ring - * @card: card structure - * - * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use - * interrupts to cleanup our TX ring) and returns sent packets to the stack - * by freeing them - */ -static void -spider_net_cleanup_tx_ring(struct spider_net_card *card) -{ - if ( (spider_net_release_tx_chain(card, 0)) && - (card->netdev->flags & IFF_UP) ) { - mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); - } -} - -/** * spider_net_get_multicast_hash - generates hash for multicast filter table * @addr: multicast address * @@ -761,97 +629,6 @@ spider_net_disable_rxdmac(struct spider_net_card *card) } /** - * spider_net_stop - called upon ifconfig down - * @netdev: interface device structure - * - * always returns 0 - */ -int -spider_net_stop(struct net_device *netdev) -{ - struct spider_net_card *card = netdev_priv(netdev); - - tasklet_kill(&card->rxram_full_tl); - netif_poll_disable(netdev); - netif_carrier_off(netdev); - netif_stop_queue(netdev); - del_timer_sync(&card->tx_timer); - - /* disable/mask all interrupts */ - spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); - spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0); - spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); - - /* free_irq(netdev->irq, netdev);*/ - free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev); - - spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, - SPIDER_NET_DMA_TX_FEND_VALUE); - - /* turn off DMA, force end */ - spider_net_disable_rxdmac(card); - - /* release chains */ - spider_net_release_tx_chain(card, 1); - - spider_net_free_chain(card, &card->tx_chain); - spider_net_free_chain(card, &card->rx_chain); - - return 0; -} - -/** - * spider_net_get_next_tx_descr - returns the next available tx descriptor - * @card: device structure to get descriptor from - * - * returns the address of the next descriptor, or NULL if not available. 
- */ -static struct spider_net_descr * -spider_net_get_next_tx_descr(struct spider_net_card *card) -{ - /* check, if head points to not-in-use descr */ - if ( spider_net_get_descr_status(card->tx_chain.head) == - SPIDER_NET_DESCR_NOT_IN_USE ) { - return card->tx_chain.head; - } else { - return NULL; - } -} - -/** - * spider_net_set_txdescr_cmdstat - sets the tx descriptor command field - * @descr: descriptor structure to fill out - * @skb: packet to consider - * - * fills out the command and status field of the descriptor structure, - * depending on hardware checksum settings. - */ -static void -spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr, - struct sk_buff *skb) -{ - /* make sure the other fields in the descriptor are written */ - wmb(); - - if (skb->ip_summed != CHECKSUM_HW) { - descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; - return; - } - - /* is packet ip? - * if yes: tcp? udp? */ - if (skb->protocol == htons(ETH_P_IP)) { - if (skb->nh.iph->protocol == IPPROTO_TCP) - descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS; - else if (skb->nh.iph->protocol == IPPROTO_UDP) - descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS; - else /* the stack should checksum non-tcp and non-udp - packets on his own: NETIF_F_IP_CSUM */ - descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; - } -} - -/** * spider_net_prepare_tx_descr - fill tx descriptor with skb data * @card: card structure * @descr: descriptor structure to fill out @@ -864,13 +641,12 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr, */ static int spider_net_prepare_tx_descr(struct spider_net_card *card, - struct spider_net_descr *descr, struct sk_buff *skb) { + struct spider_net_descr *descr = card->tx_chain.head; dma_addr_t buf; - buf = pci_map_single(card->pdev, skb->data, - skb->len, PCI_DMA_BIDIRECTIONAL); + buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); if (buf == DMA_ERROR_CODE) { if (netif_msg_tx_err(card) && net_ratelimit()) pr_err("could not iommu-map packet (%p, %i). " @@ -880,10 +656,101 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, descr->buf_addr = buf; descr->buf_size = skb->len; + descr->next_descr_addr = 0; descr->skb = skb; descr->data_status = 0; - spider_net_set_txdescr_cmdstat(descr,skb); + descr->dmac_cmd_status = + SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; + if (skb->protocol == htons(ETH_P_IP)) + switch (skb->nh.iph->protocol) { + case IPPROTO_TCP: + descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; + break; + case IPPROTO_UDP: + descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP; + break; + } + + descr->prev->next_descr_addr = descr->bus_addr; + + return 0; +} + +/** + * spider_net_release_tx_descr - processes a used tx descriptor + * @card: card structure + * @descr: descriptor to release + * + * releases a used tx descriptor (unmapping, freeing of skb) + */ +static inline void +spider_net_release_tx_descr(struct spider_net_card *card) +{ + struct spider_net_descr *descr = card->tx_chain.tail; + struct sk_buff *skb; + + card->tx_chain.tail = card->tx_chain.tail->next; + descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE; + + /* unmap the skb */ + skb = descr->skb; + pci_unmap_single(card->pdev, descr->buf_addr, skb->len, + PCI_DMA_TODEVICE); + dev_kfree_skb_any(skb); +} + +/** + * spider_net_release_tx_chain - processes sent tx descriptors + * @card: adapter structure + * @brutal: if set, don't care about whether descriptor seems to be in use + * + * returns 0 if the tx ring is empty, otherwise 1. 
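In the reworked transmit setup just above, the separate spider_net_set_txdescr_cmdstat() helper disappears: spider_net_prepare_tx_descr() writes the card-owned bit and the checksum-offload command bits into dmac_cmd_status directly, based on the packet's IP protocol, and then links the descriptor behind its predecessor. The checksum selection in isolation, as a hedged sketch with a hypothetical helper name:

static void spider_net_tx_cmdstat_sketch(struct spider_net_descr *descr,
					 struct sk_buff *skb)
{
	/* default: card-owned, no hardware checksum */
	descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
				 SPIDER_NET_DMAC_NOCS;

	if (skb->protocol != htons(ETH_P_IP))
		return;

	switch (skb->nh.iph->protocol) {
	case IPPROTO_TCP:
		descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
		break;
	case IPPROTO_UDP:
		descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
		break;
	}
}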
+ * + * spider_net_release_tx_chain releases the tx descriptors that spider has + * finished with (if non-brutal) or simply release tx descriptors (if brutal). + * If some other context is calling this function, we return 1 so that we're + * scheduled again (if we were scheduled) and will not loose initiative. + */ +static int +spider_net_release_tx_chain(struct spider_net_card *card, int brutal) +{ + struct spider_net_descr_chain *chain = &card->tx_chain; + int status; + + spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR); + + while (chain->tail != chain->head) { + status = spider_net_get_descr_status(chain->tail); + switch (status) { + case SPIDER_NET_DESCR_COMPLETE: + card->netdev_stats.tx_packets++; + card->netdev_stats.tx_bytes += chain->tail->skb->len; + break; + + case SPIDER_NET_DESCR_CARDOWNED: + if (!brutal) + return 1; + /* fallthrough, if we release the descriptors + * brutally (then we don't care about + * SPIDER_NET_DESCR_CARDOWNED) */ + + case SPIDER_NET_DESCR_RESPONSE_ERROR: + case SPIDER_NET_DESCR_PROTECTION_ERROR: + case SPIDER_NET_DESCR_FORCE_END: + if (netif_msg_tx_err(card)) + pr_err("%s: forcing end of tx descriptor " + "with status x%02x\n", + card->netdev->name, status); + card->netdev_stats.tx_errors++; + break; + + default: + card->netdev_stats.tx_dropped++; + return 1; + } + spider_net_release_tx_descr(card); + } return 0; } @@ -896,18 +763,32 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, * spider_net_kick_tx_dma writes the current tx chain head as start address * of the tx descriptor chain and enables the transmission DMA engine */ -static void -spider_net_kick_tx_dma(struct spider_net_card *card, - struct spider_net_descr *descr) +static inline void +spider_net_kick_tx_dma(struct spider_net_card *card) { - /* this is the only descriptor in the output chain. 
- * Enable TX DMA */ + struct spider_net_descr *descr; - spider_net_write_reg(card, SPIDER_NET_GDTDCHA, - descr->bus_addr); + if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) & + SPIDER_NET_TX_DMA_EN) + goto out; - spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, - SPIDER_NET_DMA_TX_VALUE); + descr = card->tx_chain.tail; + for (;;) { + if (spider_net_get_descr_status(descr) == + SPIDER_NET_DESCR_CARDOWNED) { + spider_net_write_reg(card, SPIDER_NET_GDTDCHA, + descr->bus_addr); + spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, + SPIDER_NET_DMA_TX_VALUE); + break; + } + if (descr == card->tx_chain.head) + break; + descr = descr->next; + } + +out: + mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); } /** @@ -915,47 +796,69 @@ spider_net_kick_tx_dma(struct spider_net_card *card, * @skb: packet to send out * @netdev: interface device structure * - * returns 0 on success, <0 on failure + * returns 0 on success, !0 on failure */ static int spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) { struct spider_net_card *card = netdev_priv(netdev); - struct spider_net_descr *descr; + struct spider_net_descr_chain *chain = &card->tx_chain; + struct spider_net_descr *descr = chain->head; + unsigned long flags; int result; + spin_lock_irqsave(&chain->lock, flags); + spider_net_release_tx_chain(card, 0); - descr = spider_net_get_next_tx_descr(card); + if (chain->head->next == chain->tail->prev) { + card->netdev_stats.tx_dropped++; + result = NETDEV_TX_LOCKED; + goto out; + } - if (!descr) - goto error; + if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) { + result = NETDEV_TX_LOCKED; + goto out; + } - result = spider_net_prepare_tx_descr(card, descr, skb); - if (result) - goto error; + if (spider_net_prepare_tx_descr(card, skb) != 0) { + card->netdev_stats.tx_dropped++; + result = NETDEV_TX_BUSY; + goto out; + } + + result = NETDEV_TX_OK; + spider_net_kick_tx_dma(card); card->tx_chain.head = card->tx_chain.head->next; - if (spider_net_get_descr_status(descr->prev) != - SPIDER_NET_DESCR_CARDOWNED) { - /* make sure the current descriptor is in memory. Then - * kicking it on again makes sense, if the previous is not - * card-owned anymore. 
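The spider_net_xmit() hunk in this area serializes transmit against the cleanup timer with the chain lock, reclaims finished tx descriptors on every transmit, and reports ring-full and mapping failures with NETDEV_TX_LOCKED / NETDEV_TX_BUSY so the stack retries (the driver also turns on NETIF_F_LLTX later in the patch). A condensed, hedged sketch of that flow follows; the real hunk keeps the two ring-full checks and their statistics separate.

static int spider_net_xmit_sketch(struct sk_buff *skb, struct net_device *netdev)
{
	struct spider_net_card *card = netdev_priv(netdev);
	struct spider_net_descr_chain *chain = &card->tx_chain;
	unsigned long flags;
	int result = NETDEV_TX_OK;

	spin_lock_irqsave(&chain->lock, flags);

	spider_net_release_tx_chain(card, 0);	/* reclaim finished descrs */

	if (chain->head->next == chain->tail->prev ||
	    spider_net_get_descr_status(chain->head) !=
	    SPIDER_NET_DESCR_NOT_IN_USE) {
		result = NETDEV_TX_LOCKED;	/* ring full, retry later */
	} else if (spider_net_prepare_tx_descr(card, skb) != 0) {
		card->netdev_stats.tx_dropped++;
		result = NETDEV_TX_BUSY;	/* could not map the skb */
	} else {
		spider_net_kick_tx_dma(card);
		chain->head = chain->head->next;
	}

	spin_unlock_irqrestore(&chain->lock, flags);
	netif_wake_queue(netdev);
	return result;
}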
Check the previous descriptor twice - * to omit an mb() in heavy traffic cases */ - mb(); - if (spider_net_get_descr_status(descr->prev) != - SPIDER_NET_DESCR_CARDOWNED) - spider_net_kick_tx_dma(card, descr); - } +out: + spin_unlock_irqrestore(&chain->lock, flags); + netif_wake_queue(netdev); + return result; +} - mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); +/** + * spider_net_cleanup_tx_ring - cleans up the TX ring + * @card: card structure + * + * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use + * interrupts to cleanup our TX ring) and returns sent packets to the stack + * by freeing them + */ +static void +spider_net_cleanup_tx_ring(struct spider_net_card *card) +{ + unsigned long flags; - return NETDEV_TX_OK; + spin_lock_irqsave(&card->tx_chain.lock, flags); -error: - card->netdev_stats.tx_dropped++; - return NETDEV_TX_BUSY; + if ((spider_net_release_tx_chain(card, 0) != 0) && + (card->netdev->flags & IFF_UP)) + spider_net_kick_tx_dma(card); + + spin_unlock_irqrestore(&card->tx_chain.lock, flags); } /** @@ -1002,7 +905,7 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, /* unmap descriptor */ pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME, - PCI_DMA_BIDIRECTIONAL); + PCI_DMA_FROMDEVICE); /* the cases we'll throw away the packet immediately */ if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) { @@ -1067,14 +970,11 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, static int spider_net_decode_one_descr(struct spider_net_card *card, int napi) { - enum spider_net_descr_status status; - struct spider_net_descr *descr; - struct spider_net_descr_chain *chain; + struct spider_net_descr_chain *chain = &card->rx_chain; + struct spider_net_descr *descr = chain->tail; + int status; int result; - chain = &card->rx_chain; - descr = chain->tail; - status = spider_net_get_descr_status(descr); if (status == SPIDER_NET_DESCR_CARDOWNED) { @@ -1103,7 +1003,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi) card->netdev->name, status); card->netdev_stats.rx_dropped++; pci_unmap_single(card->pdev, descr->buf_addr, - SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); + SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); dev_kfree_skb_irq(descr->skb); goto refill; } @@ -1119,7 +1019,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi) /* ok, we've got a packet in descr */ result = spider_net_pass_skb_up(descr, card, napi); refill: - spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); + descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; /* change the descriptor state: */ if (!napi) spider_net_refill_rx_chain(card); @@ -1291,21 +1191,6 @@ spider_net_set_mac(struct net_device *netdev, void *p) } /** - * spider_net_enable_txdmac - enables a TX DMA controller - * @card: card structure - * - * spider_net_enable_txdmac enables the TX DMA controller by setting the - * descriptor chain tail address - */ -static void -spider_net_enable_txdmac(struct spider_net_card *card) -{ - /* assume chain is aligned correctly */ - spider_net_write_reg(card, SPIDER_NET_GDTDCHA, - card->tx_chain.tail->bus_addr); -} - -/** * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt * @card: card structure * @@ -1653,7 +1538,6 @@ spider_net_enable_card(struct spider_net_card *card) { SPIDER_NET_GMRWOLCTRL, 0 }, { SPIDER_NET_GTESTMD, 0x10000000 }, { SPIDER_NET_GTTQMSK, 0x00400040 }, - { SPIDER_NET_GTESTMD, 0 }, { SPIDER_NET_GMACINTEN, 0 }, @@ -1692,9 +1576,6 @@ spider_net_enable_card(struct spider_net_card 
*card) spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE); - /* set chain tail adress for TX chain */ - spider_net_enable_txdmac(card); - spider_net_write_reg(card, SPIDER_NET_GMACLENLMT, SPIDER_NET_LENLMT_VALUE); spider_net_write_reg(card, SPIDER_NET_GMACMODE, @@ -1709,6 +1590,9 @@ spider_net_enable_card(struct spider_net_card *card) SPIDER_NET_INT1_MASK_VALUE); spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, SPIDER_NET_INT2_MASK_VALUE); + + spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, + SPIDER_NET_GDTDCEIDIS); } /** @@ -1728,10 +1612,12 @@ spider_net_open(struct net_device *netdev) result = -ENOMEM; if (spider_net_init_chain(card, &card->tx_chain, - card->descr, tx_descriptors)) + card->descr, + PCI_DMA_TODEVICE, tx_descriptors)) goto alloc_tx_failed; if (spider_net_init_chain(card, &card->rx_chain, - card->descr + tx_descriptors, rx_descriptors)) + card->descr + tx_descriptors, + PCI_DMA_FROMDEVICE, rx_descriptors)) goto alloc_rx_failed; /* allocate rx skbs */ @@ -1938,7 +1824,7 @@ spider_net_workaround_rxramfull(struct spider_net_card *card) /* empty sequencer data */ for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS; sequencer++) { - spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + + spider_net_write_reg(card, SPIDER_NET_GSnPRGADR + sequencer * 8, 0x0); for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) { spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + @@ -1955,6 +1841,49 @@ spider_net_workaround_rxramfull(struct spider_net_card *card) } /** + * spider_net_stop - called upon ifconfig down + * @netdev: interface device structure + * + * always returns 0 + */ +int +spider_net_stop(struct net_device *netdev) +{ + struct spider_net_card *card = netdev_priv(netdev); + + tasklet_kill(&card->rxram_full_tl); + netif_poll_disable(netdev); + netif_carrier_off(netdev); + netif_stop_queue(netdev); + del_timer_sync(&card->tx_timer); + + /* disable/mask all interrupts */ + spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); + spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0); + spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); + + /* free_irq(netdev->irq, netdev);*/ + free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev); + + spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, + SPIDER_NET_DMA_TX_FEND_VALUE); + + /* turn off DMA, force end */ + spider_net_disable_rxdmac(card); + + /* release chains */ + if (spin_trylock(&card->tx_chain.lock)) { + spider_net_release_tx_chain(card, 1); + spin_unlock(&card->tx_chain.lock); + } + + spider_net_free_chain(card, &card->tx_chain); + spider_net_free_chain(card, &card->rx_chain); + + return 0; +} + +/** * spider_net_tx_timeout_task - task scheduled by the watchdog timeout * function (to be called not under interrupt status) * @data: data, is interface device structure @@ -1982,7 +1911,7 @@ spider_net_tx_timeout_task(void *data) goto out; spider_net_open(netdev); - spider_net_kick_tx_dma(card, card->tx_chain.head); + spider_net_kick_tx_dma(card); netif_device_attach(netdev); out: @@ -2065,7 +1994,6 @@ spider_net_setup_netdev(struct spider_net_card *card) pci_set_drvdata(card->pdev, netdev); - atomic_set(&card->tx_chain_release,0); card->rxram_full_tl.data = (unsigned long) card; card->rxram_full_tl.func = (void (*)(unsigned long)) spider_net_handle_rxram_full; @@ -2079,7 +2007,7 @@ spider_net_setup_netdev(struct spider_net_card *card) spider_net_setup_netdev_ops(netdev); - netdev->features = NETIF_F_HW_CSUM; + netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX; /* some time: NETIF_F_HW_VLAN_TX | 
NETIF_F_HW_VLAN_RX | * NETIF_F_HW_VLAN_FILTER */ diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h index 3b8d951cf73c..f6dcf180ae3d 100644 --- a/drivers/net/spider_net.h +++ b/drivers/net/spider_net.h @@ -208,7 +208,10 @@ extern char spider_net_driver_name[]; #define SPIDER_NET_DMA_RX_VALUE 0x80000000 #define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003 /* to set TX_DMA_EN */ -#define SPIDER_NET_DMA_TX_VALUE 0x80000000 +#define SPIDER_NET_TX_DMA_EN 0x80000000 +#define SPIDER_NET_GDTDCEIDIS 0x00000002 +#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \ + SPIDER_NET_GDTDCEIDIS #define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003 /* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */ @@ -329,55 +332,23 @@ enum spider_net_int2_status { (~SPIDER_NET_TXINT) & \ (~SPIDER_NET_RXINT) ) -#define SPIDER_NET_GPREXEC 0x80000000 -#define SPIDER_NET_GPRDAT_MASK 0x0000ffff +#define SPIDER_NET_GPREXEC 0x80000000 +#define SPIDER_NET_GPRDAT_MASK 0x0000ffff -/* descriptor bits - * - * 1010 descriptor ready - * 0 descr in middle of chain - * 000 fixed to 0 - * - * 0 no interrupt on completion - * 000 fixed to 0 - * 1 no ipsec processing - * 1 last descriptor for this frame - * 00 no checksum - * 10 tcp checksum - * 11 udp checksum - * - * 00 fixed to 0 - * 0 fixed to 0 - * 0 no interrupt on response errors - * 0 no interrupt on invalid descr - * 0 no interrupt on dma process termination - * 0 no interrupt on descr chain end - * 0 no interrupt on descr complete - * - * 000 fixed to 0 - * 0 response error interrupt status - * 0 invalid descr status - * 0 dma termination status - * 0 descr chain end status - * 0 descr complete status */ -#define SPIDER_NET_DMAC_CMDSTAT_NOCS 0xa00c0000 -#define SPIDER_NET_DMAC_CMDSTAT_TCPCS 0xa00e0000 -#define SPIDER_NET_DMAC_CMDSTAT_UDPCS 0xa00f0000 -#define SPIDER_NET_DESCR_IND_PROC_SHIFT 28 -#define SPIDER_NET_DESCR_IND_PROC_MASKO 0x0fffffff - -/* descr ready, descr is in middle of chain, get interrupt on completion */ -#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000 - -enum spider_net_descr_status { - SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */ - SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */ - SPIDER_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */ - SPIDER_NET_DESCR_FRAME_END = 0x04, /* used in rx */ - SPIDER_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */ - SPIDER_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */ - SPIDER_NET_DESCR_NOT_IN_USE /* any other value */ -}; +#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000 +#define SPIDER_NET_DMAC_NOCS 0x00040000 +#define SPIDER_NET_DMAC_TCP 0x00020000 +#define SPIDER_NET_DMAC_UDP 0x00030000 +#define SPIDER_NET_TXDCEST 0x08000000 + +#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000 +#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */ +#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */ +#define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */ +#define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */ +#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */ +#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */ +#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000 struct spider_net_descr { /* as defined by the hardware */ @@ -398,7 +369,7 @@ struct spider_net_descr { } __attribute__((aligned(32))); struct spider_net_descr_chain { - /* we walk from tail to head */ + spinlock_t lock; struct spider_net_descr *head; struct spider_net_descr *tail; }; @@ -453,8 +424,6 @@ struct 
spider_net_card { struct spider_net_descr_chain tx_chain; struct spider_net_descr_chain rx_chain; - atomic_t rx_chain_refill; - atomic_t tx_chain_release; struct net_device_stats netdev_stats; diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index 8673fd4c08c7..c6f5bc3c042f 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c @@ -3255,12 +3255,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev) } static struct pci_device_id happymeal_pci_ids[] = { - { - .vendor = PCI_VENDOR_ID_SUN, - .device = PCI_DEVICE_ID_SUN_HAPPYMEAL, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - }, + { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) }, { } /* Terminating entry */ }; @@ -3275,7 +3270,7 @@ static struct pci_driver hme_pci_driver = { static int __init happy_meal_pci_init(void) { - return pci_module_init(&hme_pci_driver); + return pci_register_driver(&hme_pci_driver); } static void happy_meal_pci_exit(void) diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index 1ef9fd39a79a..0e3fdf7c6dd3 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c @@ -1537,7 +1537,7 @@ static int __init sparc_lance_init(void) { if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) || (idprom->id_machtype == (SM_SUN4|SM_4_470))) { - memset(&sun4_sdev, 0, sizeof(sdev)); + memset(&sun4_sdev, 0, sizeof(struct sbus_dev)); sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr; sun4_sdev.irqs[0] = 6; return sparc_lance_probe_one(&sun4_sdev, NULL, NULL); @@ -1547,16 +1547,16 @@ static int __init sparc_lance_init(void) static int __exit sunlance_sun4_remove(void) { - struct lance_private *lp = dev_get_drvdata(&sun4_sdev->dev); + struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev); struct net_device *net_dev = lp->dev; unregister_netdevice(net_dev); - lance_free_hwresources(root_lance_dev); + lance_free_hwresources(lp); free_netdev(net_dev); - dev_set_drvdata(&sun4_sdev->dev, NULL); + dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL); return 0; } diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index ce6f3be86da0..1b8138f641e3 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -68,8 +68,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.62" -#define DRV_MODULE_RELDATE "June 30, 2006" +#define DRV_MODULE_VERSION "3.63" +#define DRV_MODULE_RELDATE "July 25, 2006" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -3590,6 +3590,28 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id, static int tg3_init_hw(struct tg3 *, int); static int tg3_halt(struct tg3 *, int, int); +/* Restart hardware after configuration changes, self-test, etc. + * Invoked with tp->lock held. 
+ */ +static int tg3_restart_hw(struct tg3 *tp, int reset_phy) +{ + int err; + + err = tg3_init_hw(tp, reset_phy); + if (err) { + printk(KERN_ERR PFX "%s: Failed to re-initialize device, " + "aborting.\n", tp->dev->name); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_full_unlock(tp); + del_timer_sync(&tp->timer); + tp->irq_sync = 0; + netif_poll_enable(tp->dev); + dev_close(tp->dev); + tg3_full_lock(tp, 0); + } + return err; +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void tg3_poll_controller(struct net_device *dev) { @@ -3630,13 +3652,15 @@ static void tg3_reset_task(void *_data) } tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); - tg3_init_hw(tp, 1); + if (tg3_init_hw(tp, 1)) + goto out; tg3_netif_start(tp); if (restart_timer) mod_timer(&tp->timer, jiffies + 1); +out: tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK; tg3_full_unlock(tp); @@ -4124,6 +4148,7 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, static int tg3_change_mtu(struct net_device *dev, int new_mtu) { struct tg3 *tp = netdev_priv(dev); + int err; if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) return -EINVAL; @@ -4144,13 +4169,14 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) tg3_set_mtu(dev, tp, new_mtu); - tg3_init_hw(tp, 0); + err = tg3_restart_hw(tp, 0); - tg3_netif_start(tp); + if (!err) + tg3_netif_start(tp); tg3_full_unlock(tp); - return 0; + return err; } /* Free up pending packets in all rx/tx rings. @@ -4232,7 +4258,7 @@ static void tg3_free_rings(struct tg3 *tp) * end up in the driver. tp->{tx,}lock are held and thus * we may not sleep. */ -static void tg3_init_rings(struct tg3 *tp) +static int tg3_init_rings(struct tg3 *tp) { u32 i; @@ -4281,18 +4307,38 @@ static void tg3_init_rings(struct tg3 *tp) /* Now allocate fresh SKBs for each rx ring. */ for (i = 0; i < tp->rx_pending; i++) { - if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, - -1, i) < 0) + if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) { + printk(KERN_WARNING PFX + "%s: Using a smaller RX standard ring, " + "only %d out of %d buffers were allocated " + "successfully.\n", + tp->dev->name, i, tp->rx_pending); + if (i == 0) + return -ENOMEM; + tp->rx_pending = i; break; + } } if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { for (i = 0; i < tp->rx_jumbo_pending; i++) { if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, - -1, i) < 0) + -1, i) < 0) { + printk(KERN_WARNING PFX + "%s: Using a smaller RX jumbo ring, " + "only %d out of %d buffers were " + "allocated successfully.\n", + tp->dev->name, i, tp->rx_jumbo_pending); + if (i == 0) { + tg3_free_rings(tp); + return -ENOMEM; + } + tp->rx_jumbo_pending = i; break; + } } } + return 0; } /* @@ -5815,6 +5861,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) { struct tg3 *tp = netdev_priv(dev); struct sockaddr *addr = p; + int err = 0; if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; @@ -5832,9 +5879,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) tg3_full_lock(tp, 1); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - tg3_init_hw(tp, 0); - - tg3_netif_start(tp); + err = tg3_restart_hw(tp, 0); + if (!err) + tg3_netif_start(tp); tg3_full_unlock(tp); } else { spin_lock_bh(&tp->lock); @@ -5842,7 +5889,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) spin_unlock_bh(&tp->lock); } - return 0; + return err; } /* tp->lock is held. */ @@ -5942,7 +5989,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) * can only do this after the hardware has been * successfully reset. 
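The remaining tg3 hunks all convert callers to the same shape: halt the chip, call the new tg3_restart_hw() under tp->lock, and only restart the queues when re-initialization succeeded, letting the error code propagate otherwise. A condensed sketch of that calling convention, using only helpers visible in this patch (the wrapper name is illustrative):

static int example_reconfigure(struct tg3 *tp)
{
	int err;

	tg3_full_lock(tp, 1);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* tg3_restart_hw() cleans up and closes the device itself on failure */
	err = tg3_restart_hw(tp, 1);
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
	return err;
}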
*/ - tg3_init_rings(tp); + err = tg3_init_rings(tp); + if (err) + return err; /* This value is determined during the probe time DMA * engine test, tg3_test_dma. @@ -7956,7 +8005,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam * static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct tg3 *tp = netdev_priv(dev); - int irq_sync = 0; + int irq_sync = 0, err = 0; if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || @@ -7980,13 +8029,14 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e if (netif_running(dev)) { tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - tg3_init_hw(tp, 1); - tg3_netif_start(tp); + err = tg3_restart_hw(tp, 1); + if (!err) + tg3_netif_start(tp); } tg3_full_unlock(tp); - return 0; + return err; } static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) @@ -8001,7 +8051,7 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) { struct tg3 *tp = netdev_priv(dev); - int irq_sync = 0; + int irq_sync = 0, err = 0; if (netif_running(dev)) { tg3_netif_stop(tp); @@ -8025,13 +8075,14 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam if (netif_running(dev)) { tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - tg3_init_hw(tp, 1); - tg3_netif_start(tp); + err = tg3_restart_hw(tp, 1); + if (!err) + tg3_netif_start(tp); } tg3_full_unlock(tp); - return 0; + return err; } static u32 tg3_get_rx_csum(struct net_device *dev) @@ -8666,7 +8717,9 @@ static int tg3_test_loopback(struct tg3 *tp) if (!netif_running(tp->dev)) return TG3_LOOPBACK_FAILED; - tg3_reset_hw(tp, 1); + err = tg3_reset_hw(tp, 1); + if (err) + return TG3_LOOPBACK_FAILED; if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) err |= TG3_MAC_LOOPBACK_FAILED; @@ -8740,8 +8793,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); if (netif_running(dev)) { tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; - tg3_init_hw(tp, 1); - tg3_netif_start(tp); + if (!tg3_restart_hw(tp, 1)) + tg3_netif_start(tp); } tg3_full_unlock(tp); @@ -11699,7 +11752,8 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) tg3_full_lock(tp, 0); tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; - tg3_init_hw(tp, 1); + if (tg3_restart_hw(tp, 1)) + goto out; tp->timer.expires = jiffies + tp->timer_offset; add_timer(&tp->timer); @@ -11707,6 +11761,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) netif_device_attach(dev); tg3_netif_start(tp); +out: tg3_full_unlock(tp); } @@ -11733,16 +11788,19 @@ static int tg3_resume(struct pci_dev *pdev) tg3_full_lock(tp, 0); tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; - tg3_init_hw(tp, 1); + err = tg3_restart_hw(tp, 1); + if (err) + goto out; tp->timer.expires = jiffies + tp->timer_offset; add_timer(&tp->timer); tg3_netif_start(tp); +out: tg3_full_unlock(tp); - return 0; + return err; } static struct pci_driver tg3_driver = { diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index f5b0078eb4ad..aa9cd92f46b2 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -2742,7 +2742,7 @@ static u32 check_connection_type(struct mac_regs __iomem * regs) if (PHYSR0 & PHYSR0_SPDG) status |= VELOCITY_SPEED_1000; - if (PHYSR0 & PHYSR0_SPD10) + else if (PHYSR0 & PHYSR0_SPD10) status |= VELOCITY_SPEED_10; else 
status |= VELOCITY_SPEED_100; @@ -2851,8 +2851,17 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd u32 status; status = check_connection_type(vptr->mac_regs); - cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; - if (status & VELOCITY_SPEED_100) + cmd->supported = SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full; + if (status & VELOCITY_SPEED_1000) + cmd->speed = SPEED_1000; + else if (status & VELOCITY_SPEED_100) cmd->speed = SPEED_100; else cmd->speed = SPEED_10; @@ -2896,7 +2905,7 @@ static u32 velocity_get_link(struct net_device *dev) { struct velocity_info *vptr = netdev_priv(dev); struct mac_regs __iomem * regs = vptr->mac_regs; - return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, ®s->PHYSR0) ? 0 : 1; + return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, ®s->PHYSR0) ? 1 : 0; } static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c index 2c09ec908a3f..435e91ec4620 100644 --- a/drivers/net/wan/c101.c +++ b/drivers/net/wan/c101.c @@ -197,7 +197,6 @@ static int c101_open(struct net_device *dev) sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port); set_carrier(port); - printk(KERN_DEBUG "0x%X\n", sca_in(MSCI1_OFFSET + ST3, port)); /* enable MSCI1 CDCD interrupt */ sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port); @@ -449,4 +448,5 @@ module_exit(c101_cleanup); MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); MODULE_DESCRIPTION("Moxa C101 serial port driver"); MODULE_LICENSE("GPL v2"); -module_param(hw, charp, 0444); /* hw=irq,ram:irq,... 
*/ +module_param(hw, charp, 0444); +MODULE_PARM_DESC(hw, "irq,ram:irq,..."); diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index b81263eaede0..fbaab5bf71eb 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -107,6 +107,7 @@ int hdlc_ppp_ioctl(struct net_device *dev, struct ifreq *ifr) dev->hard_header = NULL; dev->type = ARPHRD_PPP; dev->addr_len = 0; + netif_dormant_off(dev); return 0; } diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c index 9456d31cb1c1..f15aa6ba77f1 100644 --- a/drivers/net/wan/hdlc_raw.c +++ b/drivers/net/wan/hdlc_raw.c @@ -82,6 +82,7 @@ int hdlc_raw_ioctl(struct net_device *dev, struct ifreq *ifr) dev->type = ARPHRD_RAWHDLC; dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->addr_len = 0; + netif_dormant_off(dev); return 0; } diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c index b1285cc8fee6..d1884987f94e 100644 --- a/drivers/net/wan/hdlc_raw_eth.c +++ b/drivers/net/wan/hdlc_raw_eth.c @@ -100,6 +100,7 @@ int hdlc_raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) dev->tx_queue_len = old_qlen; memcpy(dev->dev_addr, "\x00\x01", 2); get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); + netif_dormant_off(dev); return 0; } diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index 07e5eef1fe0f..a867fb411f89 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -212,6 +212,7 @@ int hdlc_x25_ioctl(struct net_device *dev, struct ifreq *ifr) dev->hard_header = NULL; dev->type = ARPHRD_X25; dev->addr_len = 0; + netif_dormant_off(dev); return 0; } diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c index e013b817cab8..dcf46add3adf 100644 --- a/drivers/net/wan/n2.c +++ b/drivers/net/wan/n2.c @@ -564,4 +564,5 @@ module_exit(n2_cleanup); MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); MODULE_DESCRIPTION("RISCom/N2 serial port driver"); MODULE_LICENSE("GPL v2"); -module_param(hw, charp, 0444); /* hw=io,irq,ram,ports:io,irq,... */ +module_param(hw, charp, 0444); +MODULE_PARM_DESC(hw, "io,irq,ram,ports:io,irq,..."); diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index fa9d2c4edc93..2e8ac995d56f 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -447,6 +447,7 @@ config AIRO_CS tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) select CRYPTO + select CRYPTO_AES ---help--- This is the standard Linux driver to support Cisco/Aironet PCMCIA 802.11 wireless cards. 
This driver is the same as the Aironet diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index 3889f79e7128..df317c1e12a8 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c @@ -3701,7 +3701,7 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev, } if (sec->flags & SEC_AUTH_MODE) { secinfo->auth_mode = sec->auth_mode; - dprintk(", .auth_mode = %d\n", sec->auth_mode); + dprintk(", .auth_mode = %d", sec->auth_mode); } dprintk("\n"); if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED && diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index d6ed5781b93a..317ace7f9aae 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c @@ -2875,7 +2875,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev, if (orinoco_lock(priv, &flags) != 0) return -EBUSY; - if (erq->pointer) { + if (erq->length > 0) { if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) index = priv->tx_key; @@ -2918,7 +2918,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev, if (erq->flags & IW_ENCODE_RESTRICTED) restricted = 1; - if (erq->pointer) { + if (erq->pointer && erq->length > 0) { priv->keys[index].len = cpu_to_le16(xlen); memset(priv->keys[index].data, 0, sizeof(priv->keys[index].data)); diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 662ecc8a33ff..c52e9bcf8d02 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c @@ -1820,6 +1820,8 @@ static int zd1201_probe(struct usb_interface *interface, zd->dev->name); usb_set_intfdata(interface, zd); + zd1201_enable(zd); /* zd1201 likes to startup enabled, */ + zd1201_disable(zd); /* interfering with all the wifis in range */ return 0; err_net: diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index f5b9f187a930..7ff1d88094b6 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -121,6 +121,16 @@ config RTC_DRV_DS1553 This driver can also be built as a module. If so, the module will be called rtc-ds1553. +config RTC_DRV_ISL1208 + tristate "Intersil 1208" + depends on RTC_CLASS && I2C + help + If you say yes here you get support for the + Intersil 1208 RTC chip. + + This driver can also be built as a module. If so, the module + will be called rtc-isl1208. + config RTC_DRV_DS1672 tristate "Dallas/Maxim DS1672" depends on RTC_CLASS && I2C diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 54220714ff49..bbcfb09d81d9 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_RTC_INTF_PROC) += rtc-proc.o obj-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o +obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c new file mode 100644 index 000000000000..f324d0a635d4 --- /dev/null +++ b/drivers/rtc/rtc-isl1208.c @@ -0,0 +1,591 @@ +/* + * Intersil ISL1208 rtc class driver + * + * Copyright 2005,2006 Hebert Valerio Riedel <hvr@gnu.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/bcd.h> +#include <linux/rtc.h> + +#define DRV_NAME "isl1208" +#define DRV_VERSION "0.2" + +/* Register map */ +/* rtc section */ +#define ISL1208_REG_SC 0x00 +#define ISL1208_REG_MN 0x01 +#define ISL1208_REG_HR 0x02 +#define ISL1208_REG_HR_MIL (1<<7) /* 24h/12h mode */ +#define ISL1208_REG_HR_PM (1<<5) /* PM/AM bit in 12h mode */ +#define ISL1208_REG_DT 0x03 +#define ISL1208_REG_MO 0x04 +#define ISL1208_REG_YR 0x05 +#define ISL1208_REG_DW 0x06 +#define ISL1208_RTC_SECTION_LEN 7 + +/* control/status section */ +#define ISL1208_REG_SR 0x07 +#define ISL1208_REG_SR_ARST (1<<7) /* auto reset */ +#define ISL1208_REG_SR_XTOSCB (1<<6) /* crystal oscillator */ +#define ISL1208_REG_SR_WRTC (1<<4) /* write rtc */ +#define ISL1208_REG_SR_ALM (1<<2) /* alarm */ +#define ISL1208_REG_SR_BAT (1<<1) /* battery */ +#define ISL1208_REG_SR_RTCF (1<<0) /* rtc fail */ +#define ISL1208_REG_INT 0x08 +#define ISL1208_REG_09 0x09 /* reserved */ +#define ISL1208_REG_ATR 0x0a +#define ISL1208_REG_DTR 0x0b + +/* alarm section */ +#define ISL1208_REG_SCA 0x0c +#define ISL1208_REG_MNA 0x0d +#define ISL1208_REG_HRA 0x0e +#define ISL1208_REG_DTA 0x0f +#define ISL1208_REG_MOA 0x10 +#define ISL1208_REG_DWA 0x11 +#define ISL1208_ALARM_SECTION_LEN 6 + +/* user section */ +#define ISL1208_REG_USR1 0x12 +#define ISL1208_REG_USR2 0x13 +#define ISL1208_USR_SECTION_LEN 2 + +/* i2c configuration */ +#define ISL1208_I2C_ADDR 0xde + +static unsigned short normal_i2c[] = { + ISL1208_I2C_ADDR>>1, I2C_CLIENT_END +}; +I2C_CLIENT_INSMOD; /* defines addr_data */ + +static int isl1208_attach_adapter(struct i2c_adapter *adapter); +static int isl1208_detach_client(struct i2c_client *client); + +static struct i2c_driver isl1208_driver = { + .driver = { + .name = DRV_NAME, + }, + .id = I2C_DRIVERID_ISL1208, + .attach_adapter = &isl1208_attach_adapter, + .detach_client = &isl1208_detach_client, +}; + +/* block read */ +static int +isl1208_i2c_read_regs(struct i2c_client *client, u8 reg, u8 buf[], + unsigned len) +{ + u8 reg_addr[1] = { reg }; + struct i2c_msg msgs[2] = { + { client->addr, client->flags, sizeof(reg_addr), reg_addr }, + { client->addr, client->flags | I2C_M_RD, len, buf } + }; + int ret; + + BUG_ON(len == 0); + BUG_ON(reg > ISL1208_REG_USR2); + BUG_ON(reg + len > ISL1208_REG_USR2 + 1); + + ret = i2c_transfer(client->adapter, msgs, 2); + if (ret > 0) + ret = 0; + return ret; +} + +/* block write */ +static int +isl1208_i2c_set_regs(struct i2c_client *client, u8 reg, u8 const buf[], + unsigned len) +{ + u8 i2c_buf[ISL1208_REG_USR2 + 2]; + struct i2c_msg msgs[1] = { + { client->addr, client->flags, len + 1, i2c_buf } + }; + int ret; + + BUG_ON(len == 0); + BUG_ON(reg > ISL1208_REG_USR2); + BUG_ON(reg + len > ISL1208_REG_USR2 + 1); + + i2c_buf[0] = reg; + memcpy(&i2c_buf[1], &buf[0], len); + + ret = i2c_transfer(client->adapter, msgs, 1); + if (ret > 0) + ret = 0; + return ret; +} + +/* simple check to see wether we have a isl1208 */ +static int isl1208_i2c_validate_client(struct i2c_client *client) +{ + u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, }; + u8 zero_mask[ISL1208_RTC_SECTION_LEN] = { + 0x80, 0x80, 0x40, 0xc0, 0xe0, 0x00, 0xf8 + }; + int i; + int ret; + + ret = isl1208_i2c_read_regs(client, 0, regs, ISL1208_RTC_SECTION_LEN); + if (ret < 0) + return ret; + + for (i = 0; i < ISL1208_RTC_SECTION_LEN; ++i) { + if (regs[i] & zero_mask[i]) /* check if bits are cleared */ + return -ENODEV; + } + + return 0; +} + +static int isl1208_i2c_get_sr(struct 
i2c_client *client) +{ + return i2c_smbus_read_byte_data(client, ISL1208_REG_SR) == -1 ? -EIO:0; +} + +static int isl1208_i2c_get_atr(struct i2c_client *client) +{ + int atr = i2c_smbus_read_byte_data(client, ISL1208_REG_ATR); + + if (atr < 0) + return -EIO; + + /* The 6bit value in the ATR register controls the load + * capacitance C_load * in steps of 0.25pF + * + * bit (1<<5) of the ATR register is inverted + * + * C_load(ATR=0x20) = 4.50pF + * C_load(ATR=0x00) = 12.50pF + * C_load(ATR=0x1f) = 20.25pF + * + */ + + atr &= 0x3f; /* mask out lsb */ + atr ^= 1<<5; /* invert 6th bit */ + atr += 2*9; /* add offset of 4.5pF; unit[atr] = 0.25pF */ + + return atr; +} + +static int isl1208_i2c_get_dtr(struct i2c_client *client) +{ + int dtr = i2c_smbus_read_byte_data(client, ISL1208_REG_DTR); + + if (dtr < 0) + return -EIO; + + /* dtr encodes adjustments of {-60,-40,-20,0,20,40,60} ppm */ + dtr = ((dtr & 0x3) * 20) * (dtr & (1<<2) ? -1 : 1); + + return dtr; +} + +static int isl1208_i2c_get_usr(struct i2c_client *client) +{ + u8 buf[ISL1208_USR_SECTION_LEN] = { 0, }; + int ret; + + ret = isl1208_i2c_read_regs (client, ISL1208_REG_USR1, buf, + ISL1208_USR_SECTION_LEN); + if (ret < 0) + return ret; + + return (buf[1] << 8) | buf[0]; +} + +static int isl1208_i2c_set_usr(struct i2c_client *client, u16 usr) +{ + u8 buf[ISL1208_USR_SECTION_LEN]; + + buf[0] = usr & 0xff; + buf[1] = (usr >> 8) & 0xff; + + return isl1208_i2c_set_regs (client, ISL1208_REG_USR1, buf, + ISL1208_USR_SECTION_LEN); +} + +static int isl1208_rtc_proc(struct device *dev, struct seq_file *seq) +{ + struct i2c_client *const client = to_i2c_client(dev); + int sr, dtr, atr, usr; + + sr = isl1208_i2c_get_sr(client); + if (sr < 0) { + dev_err(&client->dev, "%s: reading SR failed\n", __func__); + return sr; + } + + seq_printf(seq, "status_reg\t:%s%s%s%s%s%s (0x%.2x)\n", + (sr & ISL1208_REG_SR_RTCF) ? " RTCF" : "", + (sr & ISL1208_REG_SR_BAT) ? " BAT" : "", + (sr & ISL1208_REG_SR_ALM) ? " ALM" : "", + (sr & ISL1208_REG_SR_WRTC) ? " WRTC" : "", + (sr & ISL1208_REG_SR_XTOSCB) ? " XTOSCB" : "", + (sr & ISL1208_REG_SR_ARST) ? " ARST" : "", + sr); + + seq_printf(seq, "batt_status\t: %s\n", + (sr & ISL1208_REG_SR_RTCF) ? 
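The ATR decoding in isl1208_i2c_get_atr() above is easiest to check with concrete values; the bit-5 inversion plus the fixed 4.5 pF offset reproduces the numbers quoted in its comment. A small worked sketch, kept in integer hundredths of a pF (the helper is illustrative, the constants are the ones used by the driver):

/* Returns load capacitance in hundredths of pF, e.g. 450 for 4.50 pF. */
static int example_atr_to_centi_pf(u8 raw_atr)
{
	int atr = raw_atr & 0x3f;	/* 6-bit field */

	atr ^= 1 << 5;			/* bit 5 is inverted by the hardware */
	atr += 2 * 9;			/* +4.5 pF offset, in 0.25 pF steps */

	return atr * 25;		/* one step is 0.25 pF, i.e. 25 centi-pF */
}

/*
 * example_atr_to_centi_pf(0x00) == (32 + 18) * 25 == 1250  -> 12.50 pF
 * example_atr_to_centi_pf(0x20) == ( 0 + 18) * 25 ==  450  ->  4.50 pF
 * example_atr_to_centi_pf(0x1f) == (63 + 18) * 25 == 2025  -> 20.25 pF
 */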
"bad" : "okay"); + + dtr = isl1208_i2c_get_dtr(client); + if (dtr >= 0 -1) + seq_printf(seq, "digital_trim\t: %d ppm\n", dtr); + + atr = isl1208_i2c_get_atr(client); + if (atr >= 0) + seq_printf(seq, "analog_trim\t: %d.%.2d pF\n", + atr>>2, (atr&0x3)*25); + + usr = isl1208_i2c_get_usr(client); + if (usr >= 0) + seq_printf(seq, "user_data\t: 0x%.4x\n", usr); + + return 0; +} + + +static int isl1208_i2c_read_time(struct i2c_client *client, + struct rtc_time *tm) +{ + int sr; + u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, }; + + sr = isl1208_i2c_get_sr(client); + if (sr < 0) { + dev_err(&client->dev, "%s: reading SR failed\n", __func__); + return -EIO; + } + + sr = isl1208_i2c_read_regs(client, 0, regs, ISL1208_RTC_SECTION_LEN); + if (sr < 0) { + dev_err(&client->dev, "%s: reading RTC section failed\n", + __func__); + return sr; + } + + tm->tm_sec = BCD2BIN(regs[ISL1208_REG_SC]); + tm->tm_min = BCD2BIN(regs[ISL1208_REG_MN]); + { /* HR field has a more complex interpretation */ + const u8 _hr = regs[ISL1208_REG_HR]; + if (_hr & ISL1208_REG_HR_MIL) /* 24h format */ + tm->tm_hour = BCD2BIN(_hr & 0x3f); + else { // 12h format + tm->tm_hour = BCD2BIN(_hr & 0x1f); + if (_hr & ISL1208_REG_HR_PM) /* PM flag set */ + tm->tm_hour += 12; + } + } + + tm->tm_mday = BCD2BIN(regs[ISL1208_REG_DT]); + tm->tm_mon = BCD2BIN(regs[ISL1208_REG_MO]) - 1; /* rtc starts at 1 */ + tm->tm_year = BCD2BIN(regs[ISL1208_REG_YR]) + 100; + tm->tm_wday = BCD2BIN(regs[ISL1208_REG_DW]); + + return 0; +} + +static int isl1208_i2c_read_alarm(struct i2c_client *client, + struct rtc_wkalrm *alarm) +{ + struct rtc_time *const tm = &alarm->time; + u8 regs[ISL1208_ALARM_SECTION_LEN] = { 0, }; + int sr; + + sr = isl1208_i2c_get_sr(client); + if (sr < 0) { + dev_err(&client->dev, "%s: reading SR failed\n", __func__); + return sr; + } + + sr = isl1208_i2c_read_regs(client, ISL1208_REG_SCA, regs, + ISL1208_ALARM_SECTION_LEN); + if (sr < 0) { + dev_err(&client->dev, "%s: reading alarm section failed\n", + __func__); + return sr; + } + + /* MSB of each alarm register is an enable bit */ + tm->tm_sec = BCD2BIN(regs[ISL1208_REG_SCA-ISL1208_REG_SCA] & 0x7f); + tm->tm_min = BCD2BIN(regs[ISL1208_REG_MNA-ISL1208_REG_SCA] & 0x7f); + tm->tm_hour = BCD2BIN(regs[ISL1208_REG_HRA-ISL1208_REG_SCA] & 0x3f); + tm->tm_mday = BCD2BIN(regs[ISL1208_REG_DTA-ISL1208_REG_SCA] & 0x3f); + tm->tm_mon = BCD2BIN(regs[ISL1208_REG_MOA-ISL1208_REG_SCA] & 0x1f)-1; + tm->tm_wday = BCD2BIN(regs[ISL1208_REG_DWA-ISL1208_REG_SCA] & 0x03); + + return 0; +} + +static int isl1208_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + return isl1208_i2c_read_time(to_i2c_client(dev), tm); +} + +static int isl1208_i2c_set_time(struct i2c_client *client, + struct rtc_time const *tm) +{ + int sr; + u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, }; + + regs[ISL1208_REG_SC] = BIN2BCD(tm->tm_sec); + regs[ISL1208_REG_MN] = BIN2BCD(tm->tm_min); + regs[ISL1208_REG_HR] = BIN2BCD(tm->tm_hour) | ISL1208_REG_HR_MIL; + + regs[ISL1208_REG_DT] = BIN2BCD(tm->tm_mday); + regs[ISL1208_REG_MO] = BIN2BCD(tm->tm_mon + 1); + regs[ISL1208_REG_YR] = BIN2BCD(tm->tm_year - 100); + + regs[ISL1208_REG_DW] = BIN2BCD(tm->tm_wday & 7); + + sr = isl1208_i2c_get_sr(client); + if (sr < 0) { + dev_err(&client->dev, "%s: reading SR failed\n", __func__); + return sr; + } + + /* set WRTC */ + sr = i2c_smbus_write_byte_data (client, ISL1208_REG_SR, + sr | ISL1208_REG_SR_WRTC); + if (sr < 0) { + dev_err(&client->dev, "%s: writing SR failed\n", __func__); + return sr; + } + + /* write RTC registers */ + sr = 
isl1208_i2c_set_regs(client, 0, regs, ISL1208_RTC_SECTION_LEN); + if (sr < 0) { + dev_err(&client->dev, "%s: writing RTC section failed\n", + __func__); + return sr; + } + + /* clear WRTC again */ + sr = i2c_smbus_write_byte_data (client, ISL1208_REG_SR, + sr & ~ISL1208_REG_SR_WRTC); + if (sr < 0) { + dev_err(&client->dev, "%s: writing SR failed\n", __func__); + return sr; + } + + return 0; +} + + +static int isl1208_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + return isl1208_i2c_set_time(to_i2c_client(dev), tm); +} + +static int isl1208_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + return isl1208_i2c_read_alarm(to_i2c_client(dev), alarm); +} + +static struct rtc_class_ops isl1208_rtc_ops = { + .proc = isl1208_rtc_proc, + .read_time = isl1208_rtc_read_time, + .set_time = isl1208_rtc_set_time, + .read_alarm = isl1208_rtc_read_alarm, + //.set_alarm = isl1208_rtc_set_alarm, +}; + +/* sysfs interface */ + +static ssize_t isl1208_sysfs_show_atrim(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int atr; + + atr = isl1208_i2c_get_atr(to_i2c_client(dev)); + if (atr < 0) + return atr; + + return sprintf(buf, "%d.%.2d pF\n", atr>>2, (atr&0x3)*25); +} +static DEVICE_ATTR(atrim, S_IRUGO, isl1208_sysfs_show_atrim, NULL); + +static ssize_t isl1208_sysfs_show_dtrim(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int dtr; + + dtr = isl1208_i2c_get_dtr(to_i2c_client(dev)); + if (dtr < 0) + return dtr; + + return sprintf(buf, "%d ppm\n", dtr); +} +static DEVICE_ATTR(dtrim, S_IRUGO, isl1208_sysfs_show_dtrim, NULL); + +static ssize_t isl1208_sysfs_show_usr(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int usr; + + usr = isl1208_i2c_get_usr(to_i2c_client(dev)); + if (usr < 0) + return usr; + + return sprintf(buf, "0x%.4x\n", usr); +} + +static ssize_t isl1208_sysfs_store_usr(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int usr = -1; + + if (buf[0] == '0' && (buf[1] == 'x' || buf[1] == 'X')) { + if (sscanf(buf, "%x", &usr) != 1) + return -EINVAL; + } else { + if (sscanf(buf, "%d", &usr) != 1) + return -EINVAL; + } + + if (usr < 0 || usr > 0xffff) + return -EINVAL; + + return isl1208_i2c_set_usr(to_i2c_client(dev), usr) ? 
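The HR register handling in isl1208_i2c_read_time() above is the only non-trivial BCD field: bit 7 selects 24h mode, and in 12h mode bit 5 acts as the PM flag. A hedged sketch of that decode in isolation, mirroring the driver's logic (the helper name and sample values are illustrative):

static int example_decode_hr(u8 hr)
{
	if (hr & ISL1208_REG_HR_MIL)		/* 24h mode */
		return BCD2BIN(hr & 0x3f);

	/* 12h mode: five BCD bits of hour plus the PM flag */
	return BCD2BIN(hr & 0x1f) + ((hr & ISL1208_REG_HR_PM) ? 12 : 0);
}

/* example_decode_hr(0x91) == 11 (24h mode); example_decode_hr(0x71) == 23 (11 PM) */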
-EIO : count; +} +static DEVICE_ATTR(usr, S_IRUGO | S_IWUSR, isl1208_sysfs_show_usr, + isl1208_sysfs_store_usr); + +static int +isl1208_probe(struct i2c_adapter *adapter, int addr, int kind) +{ + int rc = 0; + struct i2c_client *new_client = NULL; + struct rtc_device *rtc = NULL; + + if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) { + rc = -ENODEV; + goto failout; + } + + new_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL); + if (new_client == NULL) { + rc = -ENOMEM; + goto failout; + } + + new_client->addr = addr; + new_client->adapter = adapter; + new_client->driver = &isl1208_driver; + new_client->flags = 0; + strcpy(new_client->name, DRV_NAME); + + if (kind < 0) { + rc = isl1208_i2c_validate_client(new_client); + if (rc < 0) + goto failout; + } + + rc = i2c_attach_client(new_client); + if (rc < 0) + goto failout; + + dev_info(&new_client->dev, + "chip found, driver version " DRV_VERSION "\n"); + + rtc = rtc_device_register(isl1208_driver.driver.name, + &new_client->dev, + &isl1208_rtc_ops, THIS_MODULE); + + if (IS_ERR(rtc)) { + rc = PTR_ERR(rtc); + goto failout_detach; + } + + i2c_set_clientdata(new_client, rtc); + + rc = isl1208_i2c_get_sr(new_client); + if (rc < 0) { + dev_err(&new_client->dev, "reading status failed\n"); + goto failout_unregister; + } + + if (rc & ISL1208_REG_SR_RTCF) + dev_warn(&new_client->dev, "rtc power failure detected, " + "please set clock.\n"); + + rc = device_create_file(&new_client->dev, &dev_attr_atrim); + if (rc < 0) + goto failout_unregister; + rc = device_create_file(&new_client->dev, &dev_attr_dtrim); + if (rc < 0) + goto failout_atrim; + rc = device_create_file(&new_client->dev, &dev_attr_usr); + if (rc < 0) + goto failout_dtrim; + + return 0; + + failout_dtrim: + device_remove_file(&new_client->dev, &dev_attr_dtrim); + failout_atrim: + device_remove_file(&new_client->dev, &dev_attr_atrim); + failout_unregister: + rtc_device_unregister(rtc); + failout_detach: + i2c_detach_client(new_client); + failout: + kfree(new_client); + return rc; +} + +static int +isl1208_attach_adapter (struct i2c_adapter *adapter) +{ + return i2c_probe(adapter, &addr_data, isl1208_probe); +} + +static int +isl1208_detach_client(struct i2c_client *client) +{ + int rc; + struct rtc_device *const rtc = i2c_get_clientdata(client); + + if (rtc) + rtc_device_unregister(rtc); /* do we need to kfree? 
*/ + + rc = i2c_detach_client(client); + if (rc) + return rc; + + kfree(client); + + return 0; +} + +/* module management */ + +static int __init isl1208_init(void) +{ + return i2c_add_driver(&isl1208_driver); +} + +static void __exit isl1208_exit(void) +{ + i2c_del_driver(&isl1208_driver); +} + +MODULE_AUTHOR("Herbert Valerio Riedel <hvr@gnu.org>"); +MODULE_DESCRIPTION("Intersil ISL1208 RTC driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +module_init(isl1208_init); +module_exit(isl1208_exit); diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 4cd879cb9bdd..1140302ff11d 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -304,6 +304,7 @@ static int __init xpram_setup_sizes(unsigned long pages) { unsigned long mem_needed; unsigned long mem_auto; + unsigned long long size; int mem_auto_no; int i; @@ -321,9 +322,19 @@ static int __init xpram_setup_sizes(unsigned long pages) mem_needed = 0; mem_auto_no = 0; for (i = 0; i < xpram_devs; i++) { - if (sizes[i]) - xpram_sizes[i] = - (memparse(sizes[i], &sizes[i]) + 3) & -4UL; + if (sizes[i]) { + size = simple_strtoull(sizes[i], &sizes[i], 0); + switch (sizes[i][0]) { + case 'g': + case 'G': + size <<= 20; + break; + case 'm': + case 'M': + size <<= 10; + } + xpram_sizes[i] = (size + 3) & -4UL; + } if (xpram_sizes[i]) mem_needed += xpram_sizes[i]; else diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 95e285b2e25c..7a84014f2037 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -1106,10 +1106,10 @@ raw3270_delete_device(struct raw3270 *rp) /* Remove from device chain. */ mutex_lock(&raw3270_mutex); - if (rp->clttydev) + if (rp->clttydev && !IS_ERR(rp->clttydev)) class_device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor)); - if (rp->cltubdev) + if (rp->cltubdev && !IS_ERR(rp->cltubdev)) class_device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, rp->minor)); list_del_init(&rp->list); @@ -1173,21 +1173,37 @@ static struct attribute_group raw3270_attr_group = { .attrs = raw3270_attrs, }; -static void -raw3270_create_attributes(struct raw3270 *rp) +static int raw3270_create_attributes(struct raw3270 *rp) { - //FIXME: check return code - sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group); - rp->clttydev = - class_device_create(class3270, NULL, - MKDEV(IBM_TTY3270_MAJOR, rp->minor), - &rp->cdev->dev, "tty%s", - rp->cdev->dev.bus_id); - rp->cltubdev = - class_device_create(class3270, NULL, - MKDEV(IBM_FS3270_MAJOR, rp->minor), - &rp->cdev->dev, "tub%s", - rp->cdev->dev.bus_id); + int rc; + + rc = sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group); + if (rc) + goto out; + + rp->clttydev = class_device_create(class3270, NULL, + MKDEV(IBM_TTY3270_MAJOR, rp->minor), + &rp->cdev->dev, "tty%s", + rp->cdev->dev.bus_id); + if (IS_ERR(rp->clttydev)) { + rc = PTR_ERR(rp->clttydev); + goto out_ttydev; + } + + rp->cltubdev = class_device_create(class3270, NULL, + MKDEV(IBM_FS3270_MAJOR, rp->minor), + &rp->cdev->dev, "tub%s", + rp->cdev->dev.bus_id); + if (!IS_ERR(rp->cltubdev)) + goto out; + + rc = PTR_ERR(rp->cltubdev); + class_device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor)); + +out_ttydev: + sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group); +out: + return rc; } /* @@ -1255,7 +1271,9 @@ raw3270_set_online (struct ccw_device *cdev) rc = raw3270_reset_device(rp); if (rc) goto failure; - raw3270_create_attributes(rp); + rc = raw3270_create_attributes(rp); + if (rc) + goto failure; 
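raw3270_create_attributes() above, like the tape and ctc hunks that follow, converts fire-and-forget sysfs calls into the usual create-then-unwind pattern, so a partial failure removes whatever was already registered. A generic hedged sketch of that pattern (all names are illustrative; dev_attr_first/dev_attr_second would come from DEVICE_ATTR() declarations):

static int example_create_files(struct device *dev)
{
	int rc;

	rc = device_create_file(dev, &dev_attr_first);
	if (rc)
		goto out;

	rc = device_create_file(dev, &dev_attr_second);
	if (rc)
		goto out_first;

	return 0;

out_first:
	/* undo in reverse order of creation */
	device_remove_file(dev, &dev_attr_first);
out:
	return rc;
}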
set_bit(RAW3270_FLAGS_READY, &rp->flags); mutex_lock(&raw3270_mutex); list_for_each_entry(np, &raw3270_notifier, list) diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c index a5c68e60fcf4..643b6d078563 100644 --- a/drivers/s390/char/tape_class.c +++ b/drivers/s390/char/tape_class.c @@ -76,14 +76,22 @@ struct tape_class_device *register_tape_dev( device, "%s", tcd->device_name ); - sysfs_create_link( + rc = PTR_ERR(tcd->class_device); + if (rc) + goto fail_with_cdev; + rc = sysfs_create_link( &device->kobj, &tcd->class_device->kobj, tcd->mode_name ); + if (rc) + goto fail_with_class_device; return tcd; +fail_with_class_device: + class_device_destroy(tape_class, tcd->char_device->dev); + fail_with_cdev: cdev_del(tcd->char_device); diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 122b4d8965c3..2826aed91043 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c @@ -543,20 +543,24 @@ int tape_generic_probe(struct ccw_device *cdev) { struct tape_device *device; + int ret; device = tape_alloc_device(); if (IS_ERR(device)) return -ENODEV; - PRINT_INFO("tape device %s found\n", cdev->dev.bus_id); + ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP); + ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group); + if (ret) { + tape_put_device(device); + PRINT_ERR("probe failed for tape device %s\n", cdev->dev.bus_id); + return ret; + } cdev->dev.driver_data = device; + cdev->handler = __tape_do_irq; device->cdev = cdev; device->cdev_id = busid_to_int(cdev->dev.bus_id); - cdev->handler = __tape_do_irq; - - ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP); - sysfs_create_group(&cdev->dev.kobj, &tape_attr_group); - - return 0; + PRINT_INFO("tape device %s found\n", cdev->dev.bus_id); + return ret; } static inline void diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index f26a2ee3aad8..3cba6c9fab11 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -152,7 +152,6 @@ ccwgroup_create(struct device *root, struct ccwgroup_device *gdev; int i; int rc; - int del_drvdata; if (argc > 256) /* disallow dumb users */ return -EINVAL; @@ -163,7 +162,6 @@ ccwgroup_create(struct device *root, atomic_set(&gdev->onoff, 0); - del_drvdata = 0; for (i = 0; i < argc; i++) { gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); @@ -180,10 +178,8 @@ ccwgroup_create(struct device *root, rc = -EINVAL; goto free_dev; } - } - for (i = 0; i < argc; i++) gdev->cdev[i]->dev.driver_data = gdev; - del_drvdata = 1; + } gdev->creator_id = creator_id; gdev->count = argc; @@ -226,9 +222,9 @@ error: free_dev: for (i = 0; i < argc; i++) if (gdev->cdev[i]) { - put_device(&gdev->cdev[i]->dev); - if (del_drvdata) + if (gdev->cdev[i]->dev.driver_data == gdev) gdev->cdev[i]->dev.driver_data = NULL; + put_device(&gdev->cdev[i]->dev); } kfree(gdev); return rc; diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 0df3af1f08de..828b2d334f0a 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -1068,6 +1068,7 @@ cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr, if (count) { interval = cmb_data->last_update - cdev->private->cmb_start_time; + interval = (interval * 1000) >> 12; interval /= count; } else interval = -1; diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index ac6e0c7e43d9..7a39e0b0386c 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -152,7 +152,8 @@ ccw_device_cancel_halt_clear(struct ccw_device 
*cdev) if (cdev->private->iretry) { cdev->private->iretry--; ret = cio_halt(sch); - return (ret == 0) ? -EBUSY : ret; + if (ret != -EBUSY) + return (ret == 0) ? -EBUSY : ret; } /* halt io unsuccessful. */ cdev->private->iretry = 255; /* 255 clear retries. */ diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c index 20c8eb16f464..8a4b58120146 100644 --- a/drivers/s390/net/ctcmain.c +++ b/drivers/s390/net/ctcmain.c @@ -2686,9 +2686,17 @@ static struct attribute_group ctc_attr_group = { static int ctc_add_attributes(struct device *dev) { - device_create_file(dev, &dev_attr_loglevel); - device_create_file(dev, &dev_attr_stats); - return 0; + int rc; + + rc = device_create_file(dev, &dev_attr_loglevel); + if (rc) + goto out; + rc = device_create_file(dev, &dev_attr_stats); + if (!rc) + goto out; + device_remove_file(dev, &dev_attr_loglevel); +out: + return rc; } static void @@ -2901,7 +2909,12 @@ ctc_new_device(struct ccwgroup_device *cgdev) goto out; } - ctc_add_attributes(&cgdev->dev); + if (ctc_add_attributes(&cgdev->dev)) { + ctc_netdev_unregister(dev); + dev->priv = NULL; + ctc_free_netdevice(dev, 1); + goto out; + } strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name)); diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 103c41470bd2..5fff1f93973a 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -8451,10 +8451,11 @@ __qeth_reboot_event_card(struct device *dev, void *data) static int qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) { + int ret; - driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL, - __qeth_reboot_event_card); - return NOTIFY_DONE; + ret = driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL, + __qeth_reboot_event_card); + return ret ? NOTIFY_BAD : NOTIFY_DONE; } diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c index 16b59773c0bb..935952ef88f1 100644 --- a/drivers/sbus/sbus.c +++ b/drivers/sbus/sbus.c @@ -233,7 +233,7 @@ static void __init build_one_sbus(struct device_node *dp, int num_sbus) sbus->ofdev.node = dp; sbus->ofdev.dev.parent = NULL; sbus->ofdev.dev.bus = &sbus_bus_type; - strcpy(sbus->ofdev.dev.bus_id, dp->path_component_name); + sprintf(sbus->ofdev.dev.bus_id, "sbus%d", num_sbus); if (of_device_register(&sbus->ofdev) != 0) printk(KERN_DEBUG "sbus: device registration error for %s!\n", diff --git a/drivers/scsi/53c7xx.c b/drivers/scsi/53c7xx.c index c690c2b89e41..acf292736b4e 100644 --- a/drivers/scsi/53c7xx.c +++ b/drivers/scsi/53c7xx.c @@ -3451,12 +3451,12 @@ create_cmd (Scsi_Cmnd *cmd) { for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4, cmd_dataout += 4, ++i) { u32 vbuf = cmd->use_sg - ? (u32)page_address(((struct scatterlist *)cmd->buffer)[i].page)+ - ((struct scatterlist *)cmd->buffer)[i].offset + ? (u32)page_address(((struct scatterlist *)cmd->request_buffer)[i].page)+ + ((struct scatterlist *)cmd->request_buffer)[i].offset : (u32)(cmd->request_buffer); u32 bbuf = virt_to_bus((void *)vbuf); u32 count = cmd->use_sg ? 
- ((struct scatterlist *)cmd->buffer)[i].length : + ((struct scatterlist *)cmd->request_buffer)[i].length : cmd->request_bufflen; /* @@ -5417,7 +5417,7 @@ insn_to_offset (Scsi_Cmnd *cmd, u32 *insn) { if ((buffers = cmd->use_sg)) { for (offset = 0, - segment = (struct scatterlist *) cmd->buffer; + segment = (struct scatterlist *) cmd->request_buffer; buffers && !((found = ((ptr >= (char *)page_address(segment->page)+segment->offset) && (ptr < ((char *)page_address(segment->page)+segment->offset+segment->length))))); --buffers, offset += segment->length, ++segment) diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c index 8a4659e94105..bdc6bb262bce 100644 --- a/drivers/scsi/NCR53C9x.c +++ b/drivers/scsi/NCR53C9x.c @@ -911,7 +911,7 @@ static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp) sp->SCp.ptr = (char *) virt_to_phys(sp->request_buffer); } else { - sp->SCp.buffer = (struct scatterlist *) sp->buffer; + sp->SCp.buffer = (struct scatterlist *) sp->request_buffer; sp->SCp.buffers_residual = sp->use_sg - 1; sp->SCp.this_residual = sp->SCp.buffer->length; if (esp->dma_mmu_get_scsi_sgl) @@ -2152,29 +2152,23 @@ static int esp_do_data_finale(struct NCR_ESP *esp, */ static int esp_should_clear_sync(Scsi_Cmnd *sp) { - unchar cmd1 = sp->cmnd[0]; - unchar cmd2 = sp->data_cmnd[0]; + unchar cmd = sp->cmnd[0]; /* These cases are for spinning up a disk and * waiting for that spinup to complete. */ - if(cmd1 == START_STOP || - cmd2 == START_STOP) + if(cmd == START_STOP) return 0; - if(cmd1 == TEST_UNIT_READY || - cmd2 == TEST_UNIT_READY) + if(cmd == TEST_UNIT_READY) return 0; /* One more special case for SCSI tape drives, * this is what is used to probe the device for * completion of a rewind or tape load operation. */ - if(sp->device->type == TYPE_TAPE) { - if(cmd1 == MODE_SENSE || - cmd2 == MODE_SENSE) - return 0; - } + if(sp->device->type == TYPE_TAPE && cmd == MODE_SENSE) + return 0; return 1; } diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c index a06f547e87f7..d05681f9d81a 100644 --- a/drivers/scsi/NCR_D700.c +++ b/drivers/scsi/NCR_D700.c @@ -114,7 +114,7 @@ MODULE_DESCRIPTION("NCR Dual700 SCSI Driver"); MODULE_LICENSE("GPL"); module_param(NCR_D700, charp, 0); -static __u8 __initdata id_array[2*(MCA_MAX_SLOT_NR + 1)] = +static __u8 __devinitdata id_array[2*(MCA_MAX_SLOT_NR + 1)] = { [0 ... 
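The 53c7xx and NCR53C9x hunks above (and the atari_NCR5380 one further down) drop the obsolete cmd->buffer and cmd->data_cmnd aliases in favour of request_buffer and cmnd. A hedged sketch of the buffer convention those drivers now rely on (the walker function itself is illustrative):

static void example_walk_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->use_sg) {
		/* with use_sg set, request_buffer points at the scatterlist */
		struct scatterlist *sg = cmd->request_buffer;
		int i;

		for (i = 0; i < cmd->use_sg; i++)
			printk(KERN_DEBUG "seg %d: %u bytes\n", i, sg[i].length);
	} else {
		/* otherwise request_buffer is the flat data buffer itself */
		printk(KERN_DEBUG "flat buffer: %u bytes\n",
		       cmd->request_bufflen);
	}
}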
2*(MCA_MAX_SLOT_NR + 1)-1] = 7 }; #ifdef MODULE @@ -173,7 +173,7 @@ struct NCR_D700_private { char pad; }; -static int +static int __devinit NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq, int slot, u32 region, int differential) { @@ -243,7 +243,7 @@ NCR_D700_intr(int irq, void *data, struct pt_regs *regs) * essentially connectecd to the MCA bus independently, it is easier * to set them up as two separate host adapters, rather than one * adapter with two channels */ -static int +static int __devinit NCR_D700_probe(struct device *dev) { struct NCR_D700_private *p; @@ -329,7 +329,7 @@ NCR_D700_probe(struct device *dev) for (i = 0; i < 2; i++) { int err; - if ((err = NCR_D700_probe_one(p, i, slot, irq, + if ((err = NCR_D700_probe_one(p, i, irq, slot, offset_addr + (0x80 * i), differential)) != 0) printk("D700: SIOP%d: probe failed, error = %d\n", @@ -349,7 +349,7 @@ NCR_D700_probe(struct device *dev) return 0; } -static void +static void __devexit NCR_D700_remove_one(struct Scsi_Host *host) { scsi_remove_host(host); @@ -359,7 +359,7 @@ NCR_D700_remove_one(struct Scsi_Host *host) release_region(host->base, 64); } -static int +static int __devexit NCR_D700_remove(struct device *dev) { struct NCR_D700_private *p = dev_get_drvdata(dev); @@ -380,7 +380,7 @@ static struct mca_driver NCR_D700_driver = { .name = "NCR_D700", .bus = &mca_bus_type, .probe = NCR_D700_probe, - .remove = NCR_D700_remove, + .remove = __devexit_p(NCR_D700_remove), }, }; diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 36e63f82d9f8..f974869ea323 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -551,6 +551,11 @@ struct aha152x_hostdata { struct aha152x_scdata { Scsi_Cmnd *next; /* next sc in queue */ struct semaphore *sem; /* semaphore to block on */ + unsigned char cmd_len; + unsigned char cmnd[MAX_COMMAND_SIZE]; + unsigned short use_sg; + unsigned request_bufflen; + void *request_buffer; }; @@ -1006,11 +1011,20 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct semaphore *sem, int p return FAILED; } } else { + struct aha152x_scdata *sc; + SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC); if(SCpnt->host_scribble==0) { printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt)); return FAILED; } + + sc = SCDATA(SCpnt); + memcpy(sc->cmnd, SCpnt->cmnd, sizeof(sc->cmnd)); + sc->request_buffer = SCpnt->request_buffer; + sc->request_bufflen = SCpnt->request_bufflen; + sc->use_sg = SCpnt->use_sg; + sc->cmd_len = SCpnt->cmd_len; } SCNEXT(SCpnt) = NULL; @@ -1165,6 +1179,10 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt) DECLARE_MUTEX_LOCKED(sem); struct timer_list timer; int ret, issued, disconnected; + unsigned char old_cmd_len = SCpnt->cmd_len; + unsigned short old_use_sg = SCpnt->use_sg; + void *old_buffer = SCpnt->request_buffer; + unsigned old_bufflen = SCpnt->request_bufflen; unsigned long flags; #if defined(AHA152X_DEBUG) @@ -1198,11 +1216,11 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt) add_timer(&timer); down(&sem); del_timer(&timer); - - SCpnt->cmd_len = SCpnt->old_cmd_len; - SCpnt->use_sg = SCpnt->old_use_sg; - SCpnt->request_buffer = SCpnt->buffer; - SCpnt->request_bufflen = SCpnt->bufflen; + + SCpnt->cmd_len = old_cmd_len; + SCpnt->use_sg = old_use_sg; + SCpnt->request_buffer = old_buffer; + SCpnt->request_bufflen = old_bufflen; DO_LOCK(flags); @@ -1565,6 +1583,9 @@ static void busfree_run(struct Scsi_Host *shpnt) #endif if(DONE_SC->SCp.phase & check_condition) { + struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC; + struct 
aha152x_scdata *sc = SCDATA(cmd); + #if 0 if(HOSTDATA(shpnt)->debug & debug_eh) { printk(ERR_LEAD "received sense: ", CMDINFO(DONE_SC)); @@ -1573,13 +1594,13 @@ static void busfree_run(struct Scsi_Host *shpnt) #endif /* restore old command */ - memcpy((void *) DONE_SC->cmnd, (void *) DONE_SC->data_cmnd, sizeof(DONE_SC->data_cmnd)); - DONE_SC->request_buffer = DONE_SC->buffer; - DONE_SC->request_bufflen = DONE_SC->bufflen; - DONE_SC->use_sg = DONE_SC->old_use_sg; - DONE_SC->cmd_len = DONE_SC->old_cmd_len; + memcpy(cmd->cmnd, sc->cmnd, sizeof(sc->cmnd)); + cmd->request_buffer = sc->request_buffer; + cmd->request_bufflen = sc->request_bufflen; + cmd->use_sg = sc->use_sg; + cmd->cmd_len = sc->cmd_len; - DONE_SC->SCp.Status = 0x02; + cmd->SCp.Status = 0x02; HOSTDATA(shpnt)->commands--; if (!HOSTDATA(shpnt)->commands) diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c index a1e8ca758594..653818d2f802 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c @@ -7289,7 +7289,7 @@ ahd_reset_cmds_pending(struct ahd_softc *ahd) ahd->flags &= ~AHD_UPDATE_PEND_CMDS; } -void +static void ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status) { cam_status ostat; diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index b244c7124179..998999c0a972 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c @@ -243,25 +243,6 @@ ahd_print_path(struct ahd_softc *ahd, struct scb *scb) static uint32_t aic79xx_no_reset; /* - * Certain PCI motherboards will scan PCI devices from highest to lowest, - * others scan from lowest to highest, and they tend to do all kinds of - * strange things when they come into contact with PCI bridge chips. The - * net result of all this is that the PCI card that is actually used to boot - * the machine is very hard to detect. Most motherboards go from lowest - * PCI slot number to highest, and the first SCSI controller found is the - * one you boot from. The only exceptions to this are when a controller - * has its BIOS disabled. So, we by default sort all of our SCSI controllers - * from lowest PCI slot number to highest PCI slot number. We also force - * all controllers with their BIOS disabled to the end of the list. This - * works on *almost* all computers. Where it doesn't work, we have this - * option. Setting this option to non-0 will reverse the order of the sort - * to highest first, then lowest, but will still leave cards with their BIOS - * disabled at the very end. That should fix everyone up unless there are - * really strange cirumstances. - */ -static uint32_t aic79xx_reverse_scan; - -/* * Should we force EXTENDED translation on a controller. 
* 0 == Use whatever is in the SEEPROM or default to off * 1 == Use whatever is in the SEEPROM or default to on @@ -350,7 +331,6 @@ MODULE_PARM_DESC(aic79xx, " periodically to prevent tag starvation.\n" " This may be required by some older disk\n" " or drives/RAID arrays.\n" -" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n" " tag_info:<tag_str> Set per-target tag depth\n" " global_tag_depth:<int> Global tag depth for all targets on all buses\n" " slewrate:<slewrate_list>Set the signal slew rate (0-15).\n" @@ -1031,7 +1011,6 @@ aic79xx_setup(char *s) #ifdef AHD_DEBUG { "debug", &ahd_debug }, #endif - { "reverse_scan", &aic79xx_reverse_scan }, { "periodic_otag", &aic79xx_periodic_otag }, { "pci_parity", &aic79xx_pci_parity }, { "seltime", &aic79xx_seltime }, diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index debf3e2a0798..aa4be8a31415 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -353,7 +353,6 @@ MODULE_PARM_DESC(aic7xxx, " periodically to prevent tag starvation.\n" " This may be required by some older disk\n" " drives or RAID arrays.\n" -" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n" " tag_info:<tag_str> Set per-target tag depth\n" " global_tag_depth:<int> Global tag depth for every target\n" " on every bus\n" diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 3e1053f111dc..4cf7afc31cc7 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -2427,7 +2427,7 @@ int fas216_eh_abort(Scsi_Cmnd *SCpnt) info->stats.aborts += 1; printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no); - __scsi_print_command(SCpnt->data_cmnd); + __scsi_print_command(SCpnt->cmnd); print_debug_list(); fas216_dumpstate(info); diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c index 94b1261a259d..19745a31072b 100644 --- a/drivers/scsi/ata_piix.c +++ b/drivers/scsi/ata_piix.c @@ -105,9 +105,6 @@ enum { PIIX_FLAG_SCR = (1 << 26), /* SCR available */ PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */ PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */ - PIIX_FLAG_COMBINED = (1 << 29), /* combined mode possible */ - /* ICH6/7 use different scheme for map value */ - PIIX_FLAG_COMBINED_ICH6 = PIIX_FLAG_COMBINED | (1 << 30), /* combined mode. if set, PATA is channel 0. * if clear, PATA is channel 1. @@ -126,6 +123,7 @@ enum { ich6_sata = 4, ich6_sata_ahci = 5, ich6m_sata_ahci = 6, + ich8_sata_ahci = 7, /* constants for mapping table */ P0 = 0, /* port 0 */ @@ -141,11 +139,19 @@ enum { struct piix_map_db { const u32 mask; + const u16 port_enable; + const int present_shift; const int map[][4]; }; +struct piix_host_priv { + const int *map; + const struct piix_map_db *map_db; +}; + static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); +static void piix_host_stop(struct ata_host_set *host_set); static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev); static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev); static void piix_pata_error_handler(struct ata_port *ap); @@ -186,11 +192,11 @@ static const struct pci_device_id piix_pci_tbl[] = { /* Enterprise Southbridge 2 (where's the datasheet?) 
*/ { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, /* SATA Controller 1 IDE (ICH8, no datasheet yet) */ - { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, + { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, /* SATA Controller 2 IDE (ICH8, ditto) */ - { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, + { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, /* Mobile SATA Controller IDE (ICH8M, ditto) */ - { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci }, + { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, { } /* terminate list */ }; @@ -254,7 +260,7 @@ static const struct ata_port_operations piix_pata_ops = { .port_start = ata_port_start, .port_stop = ata_port_stop, - .host_stop = ata_host_stop, + .host_stop = piix_host_stop, }; static const struct ata_port_operations piix_sata_ops = { @@ -284,11 +290,13 @@ static const struct ata_port_operations piix_sata_ops = { .port_start = ata_port_start, .port_stop = ata_port_stop, - .host_stop = ata_host_stop, + .host_stop = piix_host_stop, }; -static struct piix_map_db ich5_map_db = { +static const struct piix_map_db ich5_map_db = { .mask = 0x7, + .port_enable = 0x3, + .present_shift = 4, .map = { /* PM PS SM SS MAP */ { P0, NA, P1, NA }, /* 000b */ @@ -302,8 +310,10 @@ static struct piix_map_db ich5_map_db = { }, }; -static struct piix_map_db ich6_map_db = { +static const struct piix_map_db ich6_map_db = { .mask = 0x3, + .port_enable = 0xf, + .present_shift = 4, .map = { /* PM PS SM SS MAP */ { P0, P2, P1, P3 }, /* 00b */ @@ -313,8 +323,10 @@ static struct piix_map_db ich6_map_db = { }, }; -static struct piix_map_db ich6m_map_db = { +static const struct piix_map_db ich6m_map_db = { .mask = 0x3, + .port_enable = 0x5, + .present_shift = 4, .map = { /* PM PS SM SS MAP */ { P0, P2, RV, RV }, /* 00b */ @@ -324,6 +336,28 @@ static struct piix_map_db ich6m_map_db = { }, }; +static const struct piix_map_db ich8_map_db = { + .mask = 0x3, + .port_enable = 0x3, + .present_shift = 8, + .map = { + /* PM PS SM SS MAP */ + { P0, NA, P1, NA }, /* 00b (hardwired) */ + { RV, RV, RV, RV }, + { RV, RV, RV, RV }, /* 10b (never) */ + { RV, RV, RV, RV }, + }, +}; + +static const struct piix_map_db *piix_map_db_table[] = { + [ich5_sata] = &ich5_map_db, + [esb_sata] = &ich5_map_db, + [ich6_sata] = &ich6_map_db, + [ich6_sata_ahci] = &ich6_map_db, + [ich6m_sata_ahci] = &ich6m_map_db, + [ich8_sata_ahci] = &ich8_map_db, +}; + static struct ata_port_info piix_port_info[] = { /* piix4_pata */ { @@ -356,63 +390,69 @@ static struct ata_port_info piix_port_info[] = { /* ich5_sata */ { .sht = &piix_sht, - .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED | - PIIX_FLAG_CHECKINTR, + .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR, .pio_mask = 0x1f, /* pio0-4 */ .mwdma_mask = 0x07, /* mwdma0-2 */ .udma_mask = 0x7f, /* udma0-6 */ .port_ops = &piix_sata_ops, - .private_data = &ich5_map_db, }, /* i6300esb_sata */ { .sht = &piix_sht, - .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED | + .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS, .pio_mask = 0x1f, /* pio0-4 */ .mwdma_mask = 0x07, /* mwdma0-2 */ .udma_mask = 0x7f, /* udma0-6 */ .port_ops = &piix_sata_ops, - .private_data = &ich5_map_db, }, /* ich6_sata */ { .sht = &piix_sht, - .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | + .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR, .pio_mask = 0x1f, /* pio0-4 */ .mwdma_mask = 0x07, /* mwdma0-2 */ .udma_mask = 0x7f, /* udma0-6 */ 
.port_ops = &piix_sata_ops, - .private_data = &ich6_map_db, }, /* ich6_sata_ahci */ { .sht = &piix_sht, - .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | + .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | PIIX_FLAG_AHCI, .pio_mask = 0x1f, /* pio0-4 */ .mwdma_mask = 0x07, /* mwdma0-2 */ .udma_mask = 0x7f, /* udma0-6 */ .port_ops = &piix_sata_ops, - .private_data = &ich6_map_db, }, /* ich6m_sata_ahci */ { .sht = &piix_sht, - .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | + .host_flags = ATA_FLAG_SATA | + PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | + PIIX_FLAG_AHCI, + .pio_mask = 0x1f, /* pio0-4 */ + .mwdma_mask = 0x07, /* mwdma0-2 */ + .udma_mask = 0x7f, /* udma0-6 */ + .port_ops = &piix_sata_ops, + }, + + /* ich8_sata_ahci */ + { + .sht = &piix_sht, + .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | PIIX_FLAG_AHCI, .pio_mask = 0x1f, /* pio0-4 */ .mwdma_mask = 0x07, /* mwdma0-2 */ .udma_mask = 0x7f, /* udma0-6 */ .port_ops = &piix_sata_ops, - .private_data = &ich6m_map_db, }, }; @@ -508,46 +548,29 @@ static void piix_pata_error_handler(struct ata_port *ap) static int piix_sata_prereset(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); - const unsigned int *map = ap->host_set->private_data; + struct piix_host_priv *hpriv = ap->host_set->private_data; + const unsigned int *map = hpriv->map; int base = 2 * ap->hard_port_no; - unsigned int present_mask = 0; + unsigned int present = 0; int port, i; - u8 pcs; + u16 pcs; - pci_read_config_byte(pdev, ICH5_PCS, &pcs); + pci_read_config_word(pdev, ICH5_PCS, &pcs); DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base); - /* enable all ports on this ap and wait for them to settle */ - for (i = 0; i < 2; i++) { - port = map[base + i]; - if (port >= 0) - pcs |= 1 << port; - } - - pci_write_config_byte(pdev, ICH5_PCS, pcs); - msleep(100); - - /* let's see which devices are present */ - pci_read_config_byte(pdev, ICH5_PCS, &pcs); - for (i = 0; i < 2; i++) { port = map[base + i]; if (port < 0) continue; - if (ap->flags & PIIX_FLAG_IGNORE_PCS || pcs & 1 << (4 + port)) - present_mask |= 1 << i; - else - pcs &= ~(1 << port); + if ((ap->flags & PIIX_FLAG_IGNORE_PCS) || + (pcs & 1 << (hpriv->map_db->present_shift + port))) + present = 1; } - /* disable offline ports on non-AHCI controllers */ - if (!(ap->flags & PIIX_FLAG_AHCI)) - pci_write_config_byte(pdev, ICH5_PCS, pcs); - DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n", ap->id, pcs, present_mask); - if (!present_mask) { + if (!present) { ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n"); ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; return 0; @@ -761,10 +784,27 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev) return no_piix_dma; } +static void __devinit piix_init_pcs(struct pci_dev *pdev, + const struct piix_map_db *map_db) +{ + u16 pcs, new_pcs; + + pci_read_config_word(pdev, ICH5_PCS, &pcs); + + new_pcs = pcs | map_db->port_enable; + + if (new_pcs != pcs) { + DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs); + pci_write_config_word(pdev, ICH5_PCS, new_pcs); + msleep(150); + } +} + static void __devinit piix_init_sata_map(struct pci_dev *pdev, - struct ata_port_info *pinfo) + struct ata_port_info *pinfo, + const struct piix_map_db *map_db) { - struct piix_map_db *map_db = pinfo[0].private_data; + struct piix_host_priv *hpriv = pinfo[0].private_data; const unsigned int *map; int i, invalid_map = 0; u8 map_value; @@ -805,8 +845,8 @@ static void __devinit 
piix_init_sata_map(struct pci_dev *pdev, dev_printk(KERN_ERR, &pdev->dev, "invalid MAP value %u\n", map_value); - pinfo[0].private_data = (void *)map; - pinfo[1].private_data = (void *)map; + hpriv->map = map; + hpriv->map_db = map_db; } /** @@ -829,6 +869,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) static int printed_version; struct ata_port_info port_info[2]; struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] }; + struct piix_host_priv *hpriv; unsigned long host_flags; if (!printed_version++) @@ -839,8 +880,14 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) if (!in_module_init) return -ENODEV; + hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL); + if (!hpriv) + return -ENOMEM; + port_info[0] = piix_port_info[ent->driver_data]; port_info[1] = piix_port_info[ent->driver_data]; + port_info[0].private_data = hpriv; + port_info[1].private_data = hpriv; host_flags = port_info[0].host_flags; @@ -855,8 +902,11 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) } /* Initialize SATA map */ - if (host_flags & ATA_FLAG_SATA) - piix_init_sata_map(pdev, port_info); + if (host_flags & ATA_FLAG_SATA) { + piix_init_sata_map(pdev, port_info, + piix_map_db_table[ent->driver_data]); + piix_init_pcs(pdev, piix_map_db_table[ent->driver_data]); + } /* On ICH5, some BIOSen disable the interrupt using the * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3. @@ -879,6 +929,13 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) return ata_pci_init_one(pdev, ppinfo, 2); } +static void piix_host_stop(struct ata_host_set *host_set) +{ + if (host_set->next == NULL) + kfree(host_set->private_data); + ata_host_stop(host_set); +} + static int __init piix_init(void) { int rc; diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index 007a14e5c3fd..e397129c90d1 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c @@ -507,7 +507,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd *cmd) */ if (cmd->use_sg) { - cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; + cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer; cmd->SCp.buffers_residual = cmd->use_sg - 1; cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page)+ cmd->SCp.buffer->offset; diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index dddd2acce76f..61f6024b61ba 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c @@ -5,6 +5,7 @@ * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002) * by D. Gilbert and aeb (20020609) * Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025 + * Update to SPC-4 T10/1713-D Rev 5a, 14 June 2006, D. 
Gilbert 20060702 */ #include <linux/blkdev.h> @@ -36,55 +37,56 @@ static const char * cdb_byte0_names[] = { /* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense", /* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL, "Reasssign Blocks", -/* 08-0d */ "Read (6)", NULL, "Write (6)", "Seek (6)", NULL, NULL, +/* 08-0d */ "Read(6)", NULL, "Write(6)", "Seek(6)", NULL, NULL, /* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry", -/* 13-16 */ "Verify (6)", "Recover Buffered Data", "Mode Select (6)", - "Reserve (6)", -/* 17-1a */ "Release (6)", "Copy", "Erase", "Mode Sense (6)", +/* 13-16 */ "Verify(6)", "Recover Buffered Data", "Mode Select(6)", + "Reserve(6)", +/* 17-1a */ "Release(6)", "Copy", "Erase", "Mode Sense(6)", /* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic", /* 1e-1f */ "Prevent/Allow Medium Removal", NULL, /* 20-22 */ NULL, NULL, NULL, /* 23-28 */ "Read Format Capacities", "Set Window", - "Read Capacity (10)", NULL, NULL, "Read (10)", -/* 29-2d */ "Read Generation", "Write (10)", "Seek (10)", "Erase (10)", - "Read updated block", -/* 2e-31 */ "Write Verify (10)", "Verify (10)", "Search High", "Search Equal", + "Read Capacity(10)", NULL, NULL, "Read(10)", +/* 29-2d */ "Read Generation", "Write(10)", "Seek(10)", "Erase(10)", + "Read updated block", +/* 2e-31 */ "Write Verify(10)", "Verify(10)", "Search High", "Search Equal", /* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position", -/* 35-37 */ "Synchronize Cache (10)", "Lock/Unlock Cache (10)", +/* 35-37 */ "Synchronize Cache(10)", "Lock/Unlock Cache(10)", "Read Defect Data(10)", /* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer", "Read Buffer", -/* 3d-3f */ "Update Block", "Read Long (10)", "Write Long (10)", -/* 40-41 */ "Change Definition", "Write Same (10)", +/* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)", +/* 40-41 */ "Change Definition", "Write Same(10)", /* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support", - "Play audio (10)", "Get configuration", "Play audio msf", + "Play audio(10)", "Get configuration", "Play audio msf", "Play audio track/index", -/* 49-4f */ "Play track relative (10)", "Get event status notification", +/* 49-4f */ "Play track relative(10)", "Get event status notification", "Pause/resume", "Log Select", "Log Sense", "Stop play/scan", NULL, /* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info", - "Reserve track", "Send OPC info", "Mode Select (10)", -/* 56-5b */ "Reserve (10)", "Release (10)", "Repair track", "Read master cue", - "Mode Sense (10)", "Close track/session", + "Reserve track", "Send OPC info", "Mode Select(10)", +/* 56-5b */ "Reserve(10)", "Release(10)", "Repair track", "Read master cue", + "Mode Sense(10)", "Close track/session", /* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in", "Persistent reserve out", /* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length", -/* 80-84 */ "Xdwrite (16)", "Rebuild (16)", "Regenerate (16)", "Extended copy", +/* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Extended copy", "Receive copy results", -/* 85-89 */ "Memory Export In (16)", "Access control in", "Access control out", - "Read (16)", "Memory Export Out (16)", -/* 8a-8f */ "Write (16)", NULL, "Read attributes", "Write 
attributes", - "Write and verify (16)", "Verify (16)", -/* 90-94 */ "Pre-fetch (16)", "Synchronize cache (16)", - "Lock/unlock cache (16)", "Write same (16)", NULL, +/* 85-89 */ "ATA command pass through(16)", "Access control in", + "Access control out", "Read(16)", "Memory Export Out(16)", +/* 8a-8f */ "Write(16)", NULL, "Read attributes", "Write attributes", + "Write and verify(16)", "Verify(16)", +/* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)", + "Lock/unlock cache(16)", "Write same(16)", NULL, /* 95-99 */ NULL, NULL, NULL, NULL, NULL, -/* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in (16)", - "Service action out (16)", -/* a0-a5 */ "Report luns", "Blank", "Send event", "Maintenance in", - "Maintenance out", "Move medium/play audio(12)", +/* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in(16)", + "Service action out(16)", +/* a0-a5 */ "Report luns", "ATA command pass through(12)/Blank", + "Security protocol in", "Maintenance in", "Maintenance out", + "Move medium/play audio(12)", /* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)", "Play track relative(12)", /* aa-ae */ "Write(12)", NULL, "Erase(12), Get Performance", @@ -92,12 +94,12 @@ static const char * cdb_byte0_names[] = { /* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)", /* b2-b4 */ "Search data low(12)", "Set limits(12)", "Read element status attached", -/* b5-b6 */ "Request volume element address", "Send volume tag, set streaming", +/* b5-b6 */ "Security protocol out", "Send volume tag, set streaming", /* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf", /* ba-bc */ "Redundancy group (in), Scan", - "Redundancy group (out), Set cd-rom speed", "Spare in, Play cd", -/* bd-bf */ "Spare out, Mechanism status", "Volume set in, Read cd", - "Volume set out, Send DVD structure", + "Redundancy group (out), Set cd-rom speed", "Spare (in), Play cd", +/* bd-bf */ "Spare (out), Mechanism status", "Volume set (in), Read cd", + "Volume set (out), Send DVD structure", }; struct value_name_pair { @@ -112,6 +114,7 @@ static const struct value_name_pair maint_in_arr[] = { {0xc, "Report supported operation codes"}, {0xd, "Report supported task management functions"}, {0xe, "Report priority"}, + {0xf, "Report timestamp"}, }; #define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr) @@ -120,6 +123,7 @@ static const struct value_name_pair maint_out_arr[] = { {0xa, "Set target port groups"}, {0xb, "Change aliases"}, {0xe, "Set priority"}, + {0xe, "Set timestamp"}, }; #define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr) @@ -427,6 +431,7 @@ static struct error_info additional[] = {0x001A, "Rewind operation in progress"}, {0x001B, "Set capacity operation in progress"}, {0x001C, "Verify operation in progress"}, + {0x001D, "ATA pass through information available"}, {0x0100, "No index/sector signal"}, @@ -438,7 +443,7 @@ static struct error_info additional[] = {0x0400, "Logical unit not ready, cause not reportable"}, {0x0401, "Logical unit is in process of becoming ready"}, - {0x0402, "Logical unit not ready, initializing cmd. 
required"}, + {0x0402, "Logical unit not ready, initializing command required"}, {0x0403, "Logical unit not ready, manual intervention required"}, {0x0404, "Logical unit not ready, format in progress"}, {0x0405, "Logical unit not ready, rebuild in progress"}, @@ -478,6 +483,9 @@ static struct error_info additional[] = {0x0B00, "Warning"}, {0x0B01, "Warning - specified temperature exceeded"}, {0x0B02, "Warning - enclosure degraded"}, + {0x0B03, "Warning - background self-test failed"}, + {0x0B04, "Warning - background pre-scan detected medium error"}, + {0x0B05, "Warning - background medium scan detected medium error"}, {0x0C00, "Write error"}, {0x0C01, "Write error - recovered with auto reallocation"}, @@ -493,6 +501,7 @@ static struct error_info additional[] = {0x0C0B, "Auxiliary memory write error"}, {0x0C0C, "Write error - unexpected unsolicited data"}, {0x0C0D, "Write error - not enough unsolicited data"}, + {0x0C0F, "Defects in error window"}, {0x0D00, "Error detected by third party temporary initiator"}, {0x0D01, "Third party device failure"}, @@ -504,11 +513,12 @@ static struct error_info additional[] = {0x0E00, "Invalid information unit"}, {0x0E01, "Information unit too short"}, {0x0E02, "Information unit too long"}, + {0x0E03, "Invalid field in command information unit"}, {0x1000, "Id CRC or ECC error"}, - {0x1001, "Data block guard check failed"}, - {0x1002, "Data block application tag check failed"}, - {0x1003, "Data block reference tag check failed"}, + {0x1001, "Logical block guard check failed"}, + {0x1002, "Logical block application tag check failed"}, + {0x1003, "Logical block reference tag check failed"}, {0x1100, "Unrecovered read error"}, {0x1101, "Read retries exhausted"}, @@ -530,6 +540,7 @@ static struct error_info additional[] = {0x1111, "Read error - loss of streaming"}, {0x1112, "Auxiliary memory read error"}, {0x1113, "Read error - failed retransmission request"}, + {0x1114, "Read error - lba marked bad by application client"}, {0x1200, "Address mark not found for id field"}, @@ -610,11 +621,14 @@ static struct error_info additional[] = {0x2100, "Logical block address out of range"}, {0x2101, "Invalid element address"}, {0x2102, "Invalid address for write"}, + {0x2103, "Invalid write crossing layer jump"}, {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, {0x2400, "Invalid field in cdb"}, {0x2401, "CDB decryption error"}, + {0x2402, "Obsolete"}, + {0x2403, "Obsolete"}, {0x2404, "Security audit value frozen"}, {0x2405, "Security working key frozen"}, {0x2406, "Nonce not unique"}, @@ -637,7 +651,10 @@ static struct error_info additional[] = {0x260C, "Invalid operation for copy source or destination"}, {0x260D, "Copy segment granularity violation"}, {0x260E, "Invalid parameter while port is enabled"}, - {0x260F, "Invalid data-out buffer integrity"}, + {0x260F, "Invalid data-out buffer integrity check value"}, + {0x2610, "Data decryption key fail limit reached"}, + {0x2611, "Incomplete key-associated data set"}, + {0x2612, "Vendor specific key reference not found"}, {0x2700, "Write protected"}, {0x2701, "Hardware write protected"}, @@ -649,6 +666,7 @@ static struct error_info additional[] = {0x2800, "Not ready to ready change, medium may have changed"}, {0x2801, "Import or export element accessed"}, + {0x2802, "Format-layer may have changed"}, {0x2900, "Power on, reset, or bus device reset occurred"}, {0x2901, "Power on occurred"}, @@ -669,6 +687,11 @@ static struct error_info additional[] = {0x2A07, "Implicit asymmetric access state transition failed"}, 
{0x2A08, "Priority changed"}, {0x2A09, "Capacity data has changed"}, + {0x2A10, "Timestamp changed"}, + {0x2A11, "Data encryption parameters changed by another i_t nexus"}, + {0x2A12, "Data encryption parameters changed by vendor specific " + "event"}, + {0x2A13, "Data encryption key instance counter has changed"}, {0x2B00, "Copy cannot execute since host cannot disconnect"}, @@ -690,6 +713,7 @@ static struct error_info additional[] = {0x2E00, "Insufficient time for operation"}, {0x2F00, "Commands cleared by another initiator"}, + {0x2F01, "Commands cleared by power loss notification"}, {0x3000, "Incompatible medium installed"}, {0x3001, "Cannot read medium - unknown format"}, @@ -702,7 +726,8 @@ static struct error_info additional[] = {0x3008, "Cannot write - application code mismatch"}, {0x3009, "Current session not fixated for append"}, {0x300A, "Cleaning request rejected"}, - {0x300C, "WORM medium, overwrite attempted"}, + {0x300C, "WORM medium - overwrite attempted"}, + {0x300D, "WORM medium - integrity check"}, {0x3010, "Medium not formatted"}, {0x3100, "Medium format corrupted"}, @@ -790,6 +815,9 @@ static struct error_info additional[] = {0x3F0F, "Echo buffer overwritten"}, {0x3F10, "Medium loadable"}, {0x3F11, "Medium auxiliary memory accessible"}, + {0x3F12, "iSCSI IP address added"}, + {0x3F13, "iSCSI IP address removed"}, + {0x3F14, "iSCSI IP address changed"}, /* * {0x40NN, "Ram failure"}, * {0x40NN, "Diagnostic failure on component nn"}, @@ -799,6 +827,7 @@ static struct error_info additional[] = {0x4300, "Message error"}, {0x4400, "Internal target failure"}, + {0x4471, "ATA device failed set features"}, {0x4500, "Select or reselect failure"}, @@ -807,9 +836,10 @@ static struct error_info additional[] = {0x4700, "Scsi parity error"}, {0x4701, "Data phase CRC error detected"}, {0x4702, "Scsi parity error detected during st data phase"}, - {0x4703, "Information unit CRC error detected"}, + {0x4703, "Information unit iuCRC error detected"}, {0x4704, "Asynchronous information protection error detected"}, {0x4705, "Protocol service CRC error"}, + {0x4706, "Phy test function in progress"}, {0x477f, "Some commands cleared by iSCSI Protocol event"}, {0x4800, "Initiator detected error message received"}, @@ -844,6 +874,8 @@ static struct error_info additional[] = {0x5300, "Media load or eject failed"}, {0x5301, "Unload tape failure"}, {0x5302, "Medium removal prevented"}, + {0x5303, "Medium removal prevented by data transfer element"}, + {0x5304, "Medium thread or unthread failure"}, {0x5400, "Scsi to host system interface failure"}, @@ -855,6 +887,7 @@ static struct error_info additional[] = {0x5505, "Insufficient access control resources"}, {0x5506, "Auxiliary memory out of space"}, {0x5507, "Quota error"}, + {0x5508, "Maximum number of supplemental decryption keys exceeded"}, {0x5700, "Unable to recover table-of-contents"}, @@ -1004,6 +1037,7 @@ static struct error_info additional[] = {0x6708, "Assign failure occurred"}, {0x6709, "Multiply assigned logical unit"}, {0x670A, "Set target port groups command failed"}, + {0x670B, "ATA device feature not enabled"}, {0x6800, "Logical unit not configured"}, @@ -1030,6 +1064,8 @@ static struct error_info additional[] = {0x6F03, "Read of scrambled sector without authentication"}, {0x6F04, "Media region code is mismatched to logical unit region"}, {0x6F05, "Drive region must be permanent/region reset count error"}, + {0x6F06, "Insufficient block count for binding nonce recording"}, + {0x6F07, "Conflict in binding nonce recording"}, /* * 
{0x70NN, "Decompression exception short algorithm id of nn"}, */ @@ -1041,6 +1077,8 @@ static struct error_info additional[] = {0x7203, "Session fixation error - incomplete track in session"}, {0x7204, "Empty or partially written reserved track"}, {0x7205, "No more track reservations allowed"}, + {0x7206, "RMZ extension is not allowed"}, + {0x7207, "No more test zone extensions are allowed"}, {0x7300, "Cd control error"}, {0x7301, "Power calibration area almost full"}, @@ -1049,6 +1087,18 @@ static struct error_info additional[] = {0x7304, "Program memory area update failure"}, {0x7305, "Program memory area is full"}, {0x7306, "RMA/PMA is almost full"}, + {0x7310, "Current power calibration area almost full"}, + {0x7311, "Current power calibration area is full"}, + {0x7317, "RDZ is full"}, + + {0x7400, "Security error"}, + {0x7401, "Unable to decrypt data"}, + {0x7402, "Unencrypted data encountered while decrypting"}, + {0x7403, "Incorrect data encryption key"}, + {0x7404, "Cryptographic integrity validation failed"}, + {0x7405, "Error decrypting data"}, + {0x7471, "Logical unit access not authorized"}, + {0, NULL} }; diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c index 10573c24a50b..98bd22714d0d 100644 --- a/drivers/scsi/esp.c +++ b/drivers/scsi/esp.c @@ -1397,7 +1397,7 @@ static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp) sp->SCp.ptr = NULL; } } else { - sp->SCp.buffer = (struct scatterlist *) sp->buffer; + sp->SCp.buffer = (struct scatterlist *) sp->request_buffer; sp->SCp.buffers_residual = sbus_map_sg(esp->sdev, sp->SCp.buffer, sp->use_sg, @@ -1410,7 +1410,7 @@ static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp) static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp) { if (sp->use_sg) { - sbus_unmap_sg(esp->sdev, sp->buffer, sp->use_sg, + sbus_unmap_sg(esp->sdev, sp->request_buffer, sp->use_sg, sp->sc_data_direction); } else if (sp->request_bufflen) { sbus_unmap_single(esp->sdev, @@ -2754,18 +2754,15 @@ static int esp_do_data_finale(struct esp *esp) */ static int esp_should_clear_sync(struct scsi_cmnd *sp) { - u8 cmd1 = sp->cmnd[0]; - u8 cmd2 = sp->data_cmnd[0]; + u8 cmd = sp->cmnd[0]; /* These cases are for spinning up a disk and * waiting for that spinup to complete. */ - if (cmd1 == START_STOP || - cmd2 == START_STOP) + if (cmd == START_STOP) return 0; - if (cmd1 == TEST_UNIT_READY || - cmd2 == TEST_UNIT_READY) + if (cmd == TEST_UNIT_READY) return 0; /* One more special case for SCSI tape drives, @@ -2773,8 +2770,7 @@ static int esp_should_clear_sync(struct scsi_cmnd *sp) * completion of a rewind or tape load operation. 
*/ if (sp->device->type == TYPE_TAPE) { - if (cmd1 == MODE_SENSE || - cmd2 == MODE_SENSE) + if (cmd == MODE_SENSE) return 0; } diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c index 7eed0b098171..6aeb5f003c3c 100644 --- a/drivers/scsi/ibmvscsi/iseries_vscsi.c +++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c @@ -81,7 +81,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue, int rc; single_host_data = hostdata; - rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, 0); + rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, max_requests); if (rc < 0) { printk("viopath_open failed with rc %d in open_event_path\n", rc); diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c index 242b8873b333..ed22b96580c6 100644 --- a/drivers/scsi/ibmvscsi/rpa_vscsi.c +++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c @@ -238,6 +238,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue, if (rc == 2) { /* Adapter is good, but other end is not ready */ printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n"); + retrc = 0; } else if (rc != 0) { printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc); goto reg_crq_failed; diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c index 3fd8a96f2af3..bfac4441d89f 100644 --- a/drivers/scsi/jazz_esp.c +++ b/drivers/scsi/jazz_esp.c @@ -257,7 +257,7 @@ static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp) static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp) { int sz = sp->use_sg - 1; - struct scatterlist *sg = (struct scatterlist *)sp->buffer; + struct scatterlist *sg = (struct scatterlist *)sp->request_buffer; while(sz >= 0) { vdma_free(sg[sz].dma_address); diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c index 4b6aa30f4d68..29f59345305d 100644 --- a/drivers/scsi/libata-eh.c +++ b/drivers/scsi/libata-eh.c @@ -764,12 +764,27 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev, unsigned int action) { unsigned long flags; + struct ata_eh_info *ehi = &ap->eh_info; + struct ata_eh_context *ehc = &ap->eh_context; spin_lock_irqsave(ap->lock, flags); - ata_eh_clear_action(dev, &ap->eh_info, action); + /* Reset is represented by combination of actions and EHI + * flags. Suck in all related bits before clearing eh_info to + * avoid losing requested action. 
+ */ + if (action & ATA_EH_RESET_MASK) { + ehc->i.action |= ehi->action & ATA_EH_RESET_MASK; + ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK; + + /* make sure all reset actions are cleared & clear EHI flags */ + action |= ATA_EH_RESET_MASK; + ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK; + } + + ata_eh_clear_action(dev, ehi, action); - if (!(ap->eh_context.i.flags & ATA_EHI_QUIET)) + if (!(ehc->i.flags & ATA_EHI_QUIET)) ap->pflags |= ATA_PFLAG_RECOVERED; spin_unlock_irqrestore(ap->lock, flags); @@ -790,6 +805,12 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev, static void ata_eh_done(struct ata_port *ap, struct ata_device *dev, unsigned int action) { + /* if reset is complete, clear all reset actions & reset modifier */ + if (action & ATA_EH_RESET_MASK) { + action |= ATA_EH_RESET_MASK; + ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK; + } + ata_eh_clear_action(dev, &ap->eh_context.i, action); } @@ -1276,8 +1297,6 @@ static int ata_eh_speed_down(struct ata_device *dev, int is_io, static void ata_eh_autopsy(struct ata_port *ap) { struct ata_eh_context *ehc = &ap->eh_context; - unsigned int action = ehc->i.action; - struct ata_device *failed_dev = NULL; unsigned int all_err_mask = 0; int tag, is_io = 0; u32 serror; @@ -1294,7 +1313,7 @@ static void ata_eh_autopsy(struct ata_port *ap) ehc->i.serror |= serror; ata_eh_analyze_serror(ap); } else if (rc != -EOPNOTSUPP) - action |= ATA_EH_HARDRESET; + ehc->i.action |= ATA_EH_HARDRESET; /* analyze NCQ failure */ ata_eh_analyze_ncq_error(ap); @@ -1315,7 +1334,7 @@ static void ata_eh_autopsy(struct ata_port *ap) qc->err_mask |= ehc->i.err_mask; /* analyze TF */ - action |= ata_eh_analyze_tf(qc, &qc->result_tf); + ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); /* DEV errors are probably spurious in case of ATA_BUS error */ if (qc->err_mask & AC_ERR_ATA_BUS) @@ -1329,11 +1348,11 @@ static void ata_eh_autopsy(struct ata_port *ap) /* SENSE_VALID trumps dev/unknown error and revalidation */ if (qc->flags & ATA_QCFLAG_SENSE_VALID) { qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); - action &= ~ATA_EH_REVALIDATE; + ehc->i.action &= ~ATA_EH_REVALIDATE; } /* accumulate error info */ - failed_dev = qc->dev; + ehc->i.dev = qc->dev; all_err_mask |= qc->err_mask; if (qc->flags & ATA_QCFLAG_IO) is_io = 1; @@ -1342,25 +1361,22 @@ static void ata_eh_autopsy(struct ata_port *ap) /* enforce default EH actions */ if (ap->pflags & ATA_PFLAG_FROZEN || all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) - action |= ATA_EH_SOFTRESET; + ehc->i.action |= ATA_EH_SOFTRESET; else if (all_err_mask) - action |= ATA_EH_REVALIDATE; + ehc->i.action |= ATA_EH_REVALIDATE; /* if we have offending qcs and the associated failed device */ - if (failed_dev) { + if (ehc->i.dev) { /* speed down */ - action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask); + ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io, + all_err_mask); /* perform per-dev EH action only on the offending device */ - ehc->i.dev_action[failed_dev->devno] |= - action & ATA_EH_PERDEV_MASK; - action &= ~ATA_EH_PERDEV_MASK; + ehc->i.dev_action[ehc->i.dev->devno] |= + ehc->i.action & ATA_EH_PERDEV_MASK; + ehc->i.action &= ~ATA_EH_PERDEV_MASK; } - /* record autopsy result */ - ehc->i.dev = failed_dev; - ehc->i.action |= action; - DPRINTK("EXIT\n"); } @@ -1483,6 +1499,9 @@ static int ata_eh_reset(struct ata_port *ap, int classify, ata_reset_fn_t reset; int i, did_followup_srst, rc; + /* about to reset */ + ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); + /* 
Determine which reset to use and record in ehc->i.action. * prereset() may examine and modify it. */ @@ -1531,8 +1550,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify, ata_port_printk(ap, KERN_INFO, "%s resetting port\n", reset == softreset ? "soft" : "hard"); - /* reset */ - ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK); + /* mark that this EH session started with reset */ ehc->i.flags |= ATA_EHI_DID_RESET; rc = ata_do_reset(ap, reset, classes); @@ -1595,7 +1613,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify, postreset(ap, classes); /* reset successful, schedule revalidation */ - ata_eh_done(ap, NULL, ATA_EH_RESET_MASK); + ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); ehc->i.action |= ATA_EH_REVALIDATE; } @@ -1848,15 +1866,16 @@ static int ata_eh_skip_recovery(struct ata_port *ap) for (i = 0; i < ata_port_max_devices(ap); i++) { struct ata_device *dev = &ap->device[i]; - if (ata_dev_absent(dev) || ata_dev_ready(dev)) + if (!(dev->flags & ATA_DFLAG_SUSPENDED)) break; } if (i == ata_port_max_devices(ap)) return 1; - /* always thaw frozen port and recover failed devices */ - if (ap->pflags & ATA_PFLAG_FROZEN || ata_port_nr_enabled(ap)) + /* thaw frozen port, resume link and recover failed devices */ + if ((ap->pflags & ATA_PFLAG_FROZEN) || + (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap)) return 0; /* skip if class codes for all vacant slots are ATA_DEV_NONE */ diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index f81691fcf177..d44f9aac6b8f 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -21,10 +21,12 @@ struct lpfc_sli2_slim; -#define LPFC_MAX_TARGET 256 /* max targets supported */ -#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els req */ -#define LPFC_MAX_NS_RETRY 3 /* max NameServer retries */ +#define LPFC_MAX_TARGET 256 /* max number of targets supported */ +#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els + requests */ +#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact + the NameServer before giving up. */ #define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */ #define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */ #define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */ @@ -41,7 +43,6 @@ struct lpfc_sli2_slim; (( (u64)(high)<<16 ) << 16)|( (u64)(low)))) /* Provide maximum configuration definitions. 
*/ #define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */ -#define MAX_FCP_TARGET 256 /* max num of FCP targets supported */ #define FC_MAX_ADPTMSG 64 #define MAX_HBAEVT 32 diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index b62a72dfab29..5c68cdd8736f 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -219,9 +219,19 @@ lpfc_issue_lip(struct Scsi_Host *host) return -ENOMEM; memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); - lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed); + pmboxq->mb.mbxCommand = MBX_DOWN_LINK; + pmboxq->mb.mbxOwner = OWN_HOST; + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { + memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); + lpfc_init_link(phba, pmboxq, phba->cfg_topology, + phba->cfg_link_speed); + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, + phba->fc_ratov * 2); + } + if (mbxstatus == MBX_TIMEOUT) pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; else @@ -233,51 +243,53 @@ lpfc_issue_lip(struct Scsi_Host *host) return 0; } -static ssize_t -lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf) +static int +lpfc_selective_reset(struct lpfc_hba *phba) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; - return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); + struct completion online_compl; + int status = 0; + + init_completion(&online_compl); + lpfc_workq_post_event(phba, &status, &online_compl, + LPFC_EVT_OFFLINE); + wait_for_completion(&online_compl); + + if (status != 0) + return -EIO; + + init_completion(&online_compl); + lpfc_workq_post_event(phba, &status, &online_compl, + LPFC_EVT_ONLINE); + wait_for_completion(&online_compl); + + if (status != 0) + return -EIO; + + return 0; } static ssize_t -lpfc_board_online_show(struct class_device *cdev, char *buf) +lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count) { struct Scsi_Host *host = class_to_shost(cdev); struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + int status = -EINVAL; - if (phba->fc_flag & FC_OFFLINE_MODE) - return snprintf(buf, PAGE_SIZE, "0\n"); + if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) + status = lpfc_selective_reset(phba); + + if (status == 0) + return strlen(buf); else - return snprintf(buf, PAGE_SIZE, "1\n"); + return status; } static ssize_t -lpfc_board_online_store(struct class_device *cdev, const char *buf, - size_t count) +lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf) { struct Scsi_Host *host = class_to_shost(cdev); struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; - struct completion online_compl; - int val=0, status=0; - - if (sscanf(buf, "%d", &val) != 1) - return -EINVAL; - - init_completion(&online_compl); - - if (val) - lpfc_workq_post_event(phba, &status, &online_compl, - LPFC_EVT_ONLINE); - else - lpfc_workq_post_event(phba, &status, &online_compl, - LPFC_EVT_OFFLINE); - wait_for_completion(&online_compl); - if (!status) - return strlen(buf); - else - return -EIO; + return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); } static ssize_t @@ -532,10 +544,9 @@ static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL); static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show, NULL); -static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR, - lpfc_board_online_show, lpfc_board_online_store); static 
CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, lpfc_board_mode_show, lpfc_board_mode_store); +static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); static int lpfc_poll = 0; module_param(lpfc_poll, int, 0); @@ -695,12 +706,12 @@ LPFC_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands " "during discovery"); /* -# lpfc_max_luns: maximum number of LUNs per target driver will support -# Value range is [1,32768]. Default value is 256. -# NOTE: The SCSI layer will scan each target for this many luns +# lpfc_max_luns: maximum allowed LUN. +# Value range is [0,65535]. Default value is 255. +# NOTE: The SCSI layer might probe all allowed LUN on some old targets. */ -LPFC_ATTR_R(max_luns, 256, 1, 32768, - "Maximum number of LUNs per target driver will support"); +LPFC_ATTR_R(max_luns, 255, 0, 65535, + "Maximum allowed LUN"); /* # lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring. @@ -739,8 +750,8 @@ struct class_device_attribute *lpfc_host_attrs[] = { &class_device_attr_lpfc_max_luns, &class_device_attr_nport_evt_cnt, &class_device_attr_management_version, - &class_device_attr_board_online, &class_device_attr_board_mode, + &class_device_attr_issue_reset, &class_device_attr_lpfc_poll, &class_device_attr_lpfc_poll_tmo, NULL, diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index ee22173fce43..517e9e4dd461 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -147,6 +147,7 @@ int lpfc_sli_hba_setup(struct lpfc_hba *); int lpfc_sli_hba_down(struct lpfc_hba *); int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); int lpfc_sli_handle_mb_event(struct lpfc_hba *); +int lpfc_sli_flush_mbox_queue(struct lpfc_hba *); int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, struct lpfc_sli_ring *, uint32_t); void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 4126fd87956f..b89f6cb641e6 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -648,33 +648,32 @@ lpfc_more_plogi(struct lpfc_hba * phba) } static struct lpfc_nodelist * -lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, +lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp, struct lpfc_nodelist *ndlp) { struct lpfc_nodelist *new_ndlp; - struct lpfc_dmabuf *pcmd, *prsp; uint32_t *lp; struct serv_parm *sp; uint8_t name[sizeof (struct lpfc_name)]; uint32_t rc; - pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; - prsp = (struct lpfc_dmabuf *) pcmd->list.next; lp = (uint32_t *) prsp->virt; sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); + memset(name, 0, sizeof (struct lpfc_name)); /* Now we to find out if the NPort we are logging into, matches the WWPN * we have for that ndlp. If not, we have some work to do. 
*/ new_ndlp = lpfc_findnode_wwpn(phba, NLP_SEARCH_ALL, &sp->portName); - memset(name, 0, sizeof (struct lpfc_name)); - rc = memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)); - if (!rc || (new_ndlp == ndlp)) { + if (new_ndlp == ndlp) return ndlp; - } if (!new_ndlp) { + rc = + memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)); + if (!rc) + return ndlp; new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); if (!new_ndlp) return ndlp; @@ -683,17 +682,21 @@ lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, } lpfc_unreg_rpi(phba, new_ndlp); - new_ndlp->nlp_prev_state = ndlp->nlp_state; new_ndlp->nlp_DID = ndlp->nlp_DID; - new_ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; - lpfc_nlp_list(phba, new_ndlp, NLP_PLOGI_LIST); + new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; + new_ndlp->nlp_state = ndlp->nlp_state; + lpfc_nlp_list(phba, new_ndlp, ndlp->nlp_flag & NLP_LIST_MASK); /* Move this back to NPR list */ - lpfc_unreg_rpi(phba, ndlp); - ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ - ndlp->nlp_state = NLP_STE_NPR_NODE; - lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); - + if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + } + else { + lpfc_unreg_rpi(phba, ndlp); + ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ + ndlp->nlp_state = NLP_STE_NPR_NODE; + lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); + } return new_ndlp; } @@ -703,6 +706,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, { IOCB_t *irsp; struct lpfc_nodelist *ndlp; + struct lpfc_dmabuf *prsp; int disc, rc, did, type; @@ -769,7 +773,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, } } else { /* Good status, call state machine */ - ndlp = lpfc_plogi_confirm_nport(phba, cmdiocb, ndlp); + prsp = list_entry(((struct lpfc_dmabuf *) + cmdiocb->context2)->list.next, + struct lpfc_dmabuf, list); + ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp); rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_PLOGI); } @@ -3282,10 +3289,9 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba) } else lpfc_sli_release_iocbq(phba, piocb); } - if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) { - phba->els_tmofunc.expires = jiffies + HZ * timeout; - add_timer(&phba->els_tmofunc); - } + if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) + mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout); + spin_unlock_irq(phba->host->host_lock); } @@ -3442,6 +3448,8 @@ lpfc_els_unsol_event(struct lpfc_hba * phba, if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { ndlp->nlp_type |= NLP_FABRIC; } + ndlp->nlp_state = NLP_STE_UNUSED_NODE; + lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST); } phba->fc_stat.elsRcvFrame++; @@ -3463,13 +3471,14 @@ lpfc_els_unsol_event(struct lpfc_hba * phba, rjt_err = 1; break; } + ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp); lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); break; case ELS_CMD_FLOGI: phba->fc_stat.elsRcvFLOGI++; lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode); if (newnode) { - mempool_free( ndlp, phba->nlp_mem_pool); + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); } break; case ELS_CMD_LOGO: @@ -3492,7 +3501,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba, phba->fc_stat.elsRcvRSCN++; lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode); if (newnode) { - mempool_free( ndlp, phba->nlp_mem_pool); + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); } break; case ELS_CMD_ADISC: @@ -3535,28 +3544,28 @@ lpfc_els_unsol_event(struct lpfc_hba * phba, 
phba->fc_stat.elsRcvLIRR++; lpfc_els_rcv_lirr(phba, elsiocb, ndlp); if (newnode) { - mempool_free( ndlp, phba->nlp_mem_pool); + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); } break; case ELS_CMD_RPS: phba->fc_stat.elsRcvRPS++; lpfc_els_rcv_rps(phba, elsiocb, ndlp); if (newnode) { - mempool_free( ndlp, phba->nlp_mem_pool); + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); } break; case ELS_CMD_RPL: phba->fc_stat.elsRcvRPL++; lpfc_els_rcv_rpl(phba, elsiocb, ndlp); if (newnode) { - mempool_free( ndlp, phba->nlp_mem_pool); + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); } break; case ELS_CMD_RNID: phba->fc_stat.elsRcvRNID++; lpfc_els_rcv_rnid(phba, elsiocb, ndlp); if (newnode) { - mempool_free( ndlp, phba->nlp_mem_pool); + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); } break; default: @@ -3568,7 +3577,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba, "%d:0115 Unknown ELS command x%x received from " "NPORT x%x\n", phba->brd_no, cmd, did); if (newnode) { - mempool_free( ndlp, phba->nlp_mem_pool); + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); } break; } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index adb086009ae0..4d6cf990c4fc 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1084,7 +1084,7 @@ lpfc_register_remote_port(struct lpfc_hba * phba, fc_remote_port_rolechg(rport, rport_ids.roles); if ((rport->scsi_target_id != -1) && - (rport->scsi_target_id < MAX_FCP_TARGET)) { + (rport->scsi_target_id < LPFC_MAX_TARGET)) { ndlp->nlp_sid = rport->scsi_target_id; } @@ -1313,7 +1313,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list) if ((rport_add == mapped) && ((!nlp->rport) || (nlp->rport->scsi_target_id == -1) || - (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) { + (nlp->rport->scsi_target_id >= LPFC_MAX_TARGET))) { nlp->nlp_state = NLP_STE_UNMAPPED_NODE; spin_lock_irq(phba->host->host_lock); nlp->nlp_flag |= NLP_TGT_NO_SCSIID; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 81755a3f7c68..ef47b824cbed 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -71,6 +71,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba) uint16_t offset = 0; static char licensed[56] = "key unlock for use with gnu public licensed code only\0"; + static int init_key = 1; pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { @@ -82,10 +83,13 @@ lpfc_config_port_prep(struct lpfc_hba * phba) phba->hba_state = LPFC_INIT_MBX_CMDS; if (lpfc_is_LC_HBA(phba->pcidev->device)) { - uint32_t *ptext = (uint32_t *) licensed; + if (init_key) { + uint32_t *ptext = (uint32_t *) licensed; - for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) - *ptext = cpu_to_be32(*ptext); + for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) + *ptext = cpu_to_be32(*ptext); + init_key = 0; + } lpfc_read_nv(phba, pmb); memset((char*)mb->un.varRDnvp.rsvd3, 0, @@ -405,19 +409,26 @@ lpfc_config_port_post(struct lpfc_hba * phba) } /* MBOX buffer will be freed in mbox compl */ - i = 0; + return (0); +} + +static int +lpfc_discovery_wait(struct lpfc_hba *phba) +{ + int i = 0; + while ((phba->hba_state != LPFC_HBA_READY) || (phba->num_disc_nodes) || (phba->fc_prli_sent) || ((phba->fc_map_cnt == 0) && (i<2)) || - (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) { + (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)) { /* Check every second for 30 retries. */ i++; if (i > 30) { - break; + return -ETIMEDOUT; } if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) { /* The link is down. 
Set linkdown timeout */ - break; + return -ETIMEDOUT; } /* Delay for 1 second to give discovery time to complete. */ @@ -425,12 +436,7 @@ lpfc_config_port_post(struct lpfc_hba * phba) } - /* Since num_disc_nodes keys off of PLOGI, delay a bit to let - * any potential PRLIs to flush thru the SLI sub-system. - */ - msleep(50); - - return (0); + return 0; } /************************************************************************/ @@ -1339,7 +1345,8 @@ lpfc_offline(struct lpfc_hba * phba) struct lpfc_sli_ring *pring; struct lpfc_sli *psli; unsigned long iflag; - int i = 0; + int i; + int cnt = 0; if (!phba) return 0; @@ -1348,17 +1355,27 @@ lpfc_offline(struct lpfc_hba * phba) return 0; psli = &phba->sli; - pring = &psli->ring[psli->fcp_ring]; lpfc_linkdown(phba); + lpfc_sli_flush_mbox_queue(phba); - /* The linkdown event takes 30 seconds to timeout. */ - while (pring->txcmplq_cnt) { - mdelay(10); - if (i++ > 3000) - break; + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->ring[i]; + /* The linkdown event takes 30 seconds to timeout. */ + while (pring->txcmplq_cnt) { + mdelay(10); + if (cnt++ > 3000) { + lpfc_printf_log(phba, + KERN_WARNING, LOG_INIT, + "%d:0466 Outstanding IO when " + "bringing Adapter offline\n", + phba->brd_no); + break; + } + } } + /* stop all timers associated with this hba */ lpfc_stop_timer(phba); phba->work_hba_events = 0; @@ -1639,6 +1656,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_free_irq; } + lpfc_discovery_wait(phba); + if (phba->cfg_poll & DISABLE_FCP_RING_INT) { spin_lock_irq(phba->host->host_lock); lpfc_poll_start_timer(phba); diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 07017658ac56..066292d3995a 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -133,6 +133,11 @@ lpfc_mem_free(struct lpfc_hba * phba) pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); pci_pool_destroy(phba->lpfc_mbuf_pool); + + /* Free the iocb lookup array */ + kfree(psli->iocbq_lookup); + psli->iocbq_lookup = NULL; + } void * diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 27d60ad897cd..bd0b0e293d63 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1110,6 +1110,17 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba, phba->brd_no, did, mb->mbxStatus, phba->hba_state); + /* + * If RegLogin failed due to lack of HBA resources do not + * retry discovery. + */ + if (mb->mbxStatus == MBXERR_RPI_FULL) { + ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; + ndlp->nlp_state = NLP_STE_UNUSED_NODE; + lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST); + return ndlp->nlp_state; + } + /* Put ndlp in npr list set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); spin_lock_irq(phba->host->host_lock); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index aea1ee472f3d..a760a44173df 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -153,22 +153,6 @@ static void lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) { unsigned long iflag = 0; - /* - * There are only two special cases to consider. (1) the scsi command - * requested scatter-gather usage or (2) the scsi command allocated - * a request buffer, but did not request use_sg. There is a third - * case, but it does not require resource deallocation. 
- */ - if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) { - dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer, - psb->seg_cnt, psb->pCmd->sc_data_direction); - } else { - if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) { - dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys, - psb->pCmd->request_bufflen, - psb->pCmd->sc_data_direction); - } - } spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); psb->pCmd = NULL; @@ -282,6 +266,27 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd) } static void +lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) +{ + /* + * There are only two special cases to consider. (1) the scsi command + * requested scatter-gather usage or (2) the scsi command allocated + * a request buffer, but did not request use_sg. There is a third + * case, but it does not require resource deallocation. + */ + if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) { + dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer, + psb->seg_cnt, psb->pCmd->sc_data_direction); + } else { + if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) { + dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys, + psb->pCmd->request_bufflen, + psb->pCmd->sc_data_direction); + } + } +} + +static void lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd) { struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; @@ -454,6 +459,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, cmd->scsi_done(cmd); if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { + lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); lpfc_release_scsi_buf(phba, lpfc_cmd); return; } @@ -511,6 +517,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, } } + lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); lpfc_release_scsi_buf(phba, lpfc_cmd); } @@ -609,6 +616,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd, static int lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, + unsigned int lun, uint8_t task_mgmt_cmd) { struct lpfc_sli *psli; @@ -627,8 +635,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, piocb = &piocbq->iocb; fcp_cmnd = lpfc_cmd->fcp_cmnd; - int_to_scsilun(lpfc_cmd->pCmd->device->lun, - &lpfc_cmd->fcp_cmnd->fcp_lun); + int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun); fcp_cmnd->fcpCntl2 = task_mgmt_cmd; piocb->ulpCommand = CMD_FCP_ICMND64_CR; @@ -655,14 +662,16 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, static int lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, - unsigned tgt_id, struct lpfc_rport_data *rdata) + unsigned tgt_id, unsigned int lun, + struct lpfc_rport_data *rdata) { struct lpfc_iocbq *iocbq; struct lpfc_iocbq *iocbqrsp; int ret; lpfc_cmd->rdata = rdata; - ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET); + ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun, + FCP_TARGET_RESET); if (!ret) return FAILED; @@ -822,6 +831,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) return 0; out_host_busy_free_buf: + lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); lpfc_release_scsi_buf(phba, lpfc_cmd); out_host_busy: return SCSI_MLQUEUE_HOST_BUSY; @@ -969,12 +979,12 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) if (lpfc_cmd == NULL) goto out; - lpfc_cmd->pCmd = cmnd; lpfc_cmd->timeout = 60; lpfc_cmd->scsi_hba = phba; lpfc_cmd->rdata = rdata; - ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET); + ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, 
cmnd->device->lun, + FCP_LUN_RESET); if (!ret) goto out_free_scsi_buf; @@ -1001,7 +1011,6 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) cmd_status = iocbqrsp->iocb.ulpStatus; lpfc_sli_release_iocbq(phba, iocbqrsp); - lpfc_release_scsi_buf(phba, lpfc_cmd); /* * All outstanding txcmplq I/Os should have been aborted by the device. @@ -1040,6 +1049,8 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) } out_free_scsi_buf: + lpfc_release_scsi_buf(phba, lpfc_cmd); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "%d:0713 SCSI layer issued LUN reset (%d, %d) " "Data: x%x x%x x%x\n", @@ -1070,7 +1081,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) /* The lpfc_cmd storage is reused. Set all loop invariants. */ lpfc_cmd->timeout = 60; - lpfc_cmd->pCmd = cmnd; lpfc_cmd->scsi_hba = phba; /* @@ -1078,7 +1088,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) * targets known to the driver. Should any target reset * fail, this routine returns failure to the midlayer. */ - for (i = 0; i < MAX_FCP_TARGET; i++) { + for (i = 0; i < LPFC_MAX_TARGET; i++) { /* Search the mapped list for this target ID */ match = 0; list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { @@ -1090,8 +1100,8 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) if (!match) continue; - ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, - i, ndlp->rport->dd_data); + ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun, + ndlp->rport->dd_data); if (ret != SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "%d:0713 Bus Reset on target %d failed\n", diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index bb69a7a1ec59..350a625fa224 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -191,35 +191,12 @@ static int lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb) { - uint16_t iotag; - list_add_tail(&piocb->list, &pring->txcmplq); pring->txcmplq_cnt++; if (unlikely(pring->ringno == LPFC_ELS_RING)) mod_timer(&phba->els_tmofunc, jiffies + HZ * (phba->fc_ratov << 1)); - if (pring->fast_lookup) { - /* Setup fast lookup based on iotag for completion */ - iotag = piocb->iocb.ulpIoTag; - if (iotag && (iotag < pring->fast_iotag)) - *(pring->fast_lookup + iotag) = piocb; - else { - - /* Cmd ring <ringno> put: iotag <iotag> greater then - configured max <fast_iotag> wd0 <icmd> */ - lpfc_printf_log(phba, - KERN_ERR, - LOG_SLI, - "%d:0316 Cmd ring %d put: iotag x%x " - "greater then configured max x%x " - "wd0 x%x\n", - phba->brd_no, - pring->ringno, iotag, - pring->fast_iotag, - *(((uint32_t *)(&piocb->iocb)) + 7)); - } - } return (0); } @@ -601,7 +578,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba) /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus <status> */ lpfc_printf_log(phba, - KERN_ERR, + KERN_WARNING, LOG_MBOX | LOG_SLI, "%d:0304 Stray Mailbox Interrupt " "mbxCommand x%x mbxStatus x%x\n", @@ -1570,8 +1547,8 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask) void lpfc_reset_barrier(struct lpfc_hba * phba) { - uint32_t * resp_buf; - uint32_t * mbox_buf; + uint32_t __iomem *resp_buf; + uint32_t __iomem *mbox_buf; volatile uint32_t mbox; uint32_t hc_copy; int i; @@ -1587,7 +1564,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba) * Tell the other part of the chip to suspend temporarily all * its DMA activity. 
*/ - resp_buf = (uint32_t *)phba->MBslimaddr; + resp_buf = phba->MBslimaddr; /* Disable the error attention */ hc_copy = readl(phba->HCregaddr); @@ -1605,7 +1582,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba) ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); - mbox_buf = (uint32_t *)phba->MBslimaddr; + mbox_buf = phba->MBslimaddr; writel(mbox, mbox_buf); for (i = 0; @@ -1805,7 +1782,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) skip_post = 0; word0 = 0; /* This is really setting up word1 */ } - to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t); + to_slim = phba->MBslimaddr + sizeof (uint32_t); writel(*(uint32_t *) mb, to_slim); readl(to_slim); /* flush */ @@ -2659,8 +2636,6 @@ lpfc_sli_hba_down(struct lpfc_hba * phba) INIT_LIST_HEAD(&(pring->txq)); - kfree(pring->fast_lookup); - pring->fast_lookup = NULL; } spin_unlock_irqrestore(phba->host->host_lock, flags); @@ -3110,6 +3085,24 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, return retval; } +int +lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) +{ + int i = 0; + + while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) { + if (i++ > LPFC_MBOX_TMO * 1000) + return 1; + + if (lpfc_sli_handle_mb_event(phba) == 0) + i = 0; + + msleep(1); + } + + return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; +} + irqreturn_t lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs) { diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index a52d6c6cf083..d8ef0d2894d4 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -135,8 +135,6 @@ struct lpfc_sli_ring { uint32_t fast_iotag; /* max fastlookup based iotag */ uint32_t iotag_ctr; /* keeps track of the next iotag to use */ uint32_t iotag_max; /* max iotag value to use */ - struct lpfc_iocbq ** fast_lookup; /* array of IOCB ptrs indexed by - iotag */ struct list_head txq; uint16_t txq_cnt; /* current length of queue */ uint16_t txq_max; /* max length */ diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 6b737568b831..10e89c6ae823 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.1.6" +#define LPFC_DRIVER_VERSION "8.1.7" #define LPFC_DRIVER_NAME "lpfc" diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c index 93edaa8696cf..89ef34df5a1d 100644 --- a/drivers/scsi/mac53c94.c +++ b/drivers/scsi/mac53c94.c @@ -378,7 +378,7 @@ static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd) int nseg; total = 0; - scl = (struct scatterlist *) cmd->buffer; + scl = (struct scatterlist *) cmd->request_buffer; nseg = pci_map_sg(state->pdev, scl, cmd->use_sg, cmd->sc_data_direction); for (i = 0; i < nseg; ++i) { diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index c88717727be8..5572981a9f92 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c @@ -1268,7 +1268,7 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd) if (cmd->use_sg > 0) { int nseg; total = 0; - scl = (struct scatterlist *) cmd->buffer; + scl = (struct scatterlist *) cmd->request_buffer; off = ms->data_ptr; nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg, cmd->sc_data_direction); diff --git a/drivers/scsi/pluto.c b/drivers/scsi/pluto.c index 7abf64d1bfc9..0bd9c60e6455 100644 --- a/drivers/scsi/pluto.c +++ b/drivers/scsi/pluto.c @@ -169,8 +169,6 @@ int __init pluto_detect(struct scsi_host_template *tpnt) SCpnt->request->rq_status = RQ_SCSI_BUSY; SCpnt->done = pluto_detect_done; - SCpnt->bufflen = 256; - SCpnt->buffer = fcs[i].inquiry; SCpnt->request_bufflen = 256; SCpnt->request_buffer = fcs[i].inquiry; PLD(("set up %d %08lx\n", i, (long)SCpnt)) diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 69e0551a81d2..5b2f0741a55b 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -874,7 +874,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd, if (Cmnd->use_sg) { int sg_count; - sg = (struct scatterlist *) Cmnd->buffer; + sg = (struct scatterlist *) Cmnd->request_buffer; sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction); ds = cmd->dataseg; @@ -1278,7 +1278,7 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti) if (Cmnd->use_sg) { sbus_unmap_sg(qpti->sdev, - (struct scatterlist *)Cmnd->buffer, + (struct scatterlist *)Cmnd->request_buffer, Cmnd->use_sg, Cmnd->sc_data_direction); } else { diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c index 64631bd38952..4776f4e55839 100644 --- a/drivers/scsi/sata_promise.c +++ b/drivers/scsi/sata_promise.c @@ -269,8 +269,15 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = { { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_20619 }, +/* TODO: remove all associated board_20771 code, as it completely + * duplicates board_2037x code, unless reason for separation can be + * divined. 
+ */ +#if 0 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_20771 }, +#endif + { } /* terminate list */ }; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 2ab7df0dcfe8..b332caddd5b3 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -346,7 +346,7 @@ void scsi_log_send(struct scsi_cmnd *cmd) if (level > 3) { printk(KERN_INFO "buffer = 0x%p, bufflen = %d," " done = 0x%p, queuecommand 0x%p\n", - cmd->buffer, cmd->bufflen, + cmd->request_buffer, cmd->request_bufflen, cmd->done, sdev->host->hostt->queuecommand); @@ -661,11 +661,6 @@ void __scsi_done(struct scsi_cmnd *cmd) */ int scsi_retry_command(struct scsi_cmnd *cmd) { - /* - * Restore the SCSI command state. - */ - scsi_setup_cmd_retry(cmd); - /* * Zero the sense information from the last time we tried * this command. @@ -711,10 +706,6 @@ void scsi_finish_command(struct scsi_cmnd *cmd) "Notifying upper driver of completion " "(result %x)\n", cmd->result)); - /* - * We can get here with use_sg=0, causing a panic in the upper level - */ - cmd->use_sg = cmd->old_use_sg; cmd->done(cmd); } EXPORT_SYMBOL(scsi_finish_command); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 9c63b00773c4..a80303c6b3fd 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -286,7 +286,7 @@ static int inquiry_evpd_83(unsigned char * arr, int target_dev_id, int dev_id_num, const char * dev_id_str, int dev_id_str_len); static int inquiry_evpd_88(unsigned char * arr, int target_dev_id); -static void do_create_driverfs_files(void); +static int do_create_driverfs_files(void); static void do_remove_driverfs_files(void); static int sdebug_add_adapter(void); @@ -2487,19 +2487,22 @@ static ssize_t sdebug_add_host_store(struct device_driver * ddp, DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, sdebug_add_host_store); -static void do_create_driverfs_files(void) +static int do_create_driverfs_files(void) { - driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host); - driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay); - driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); - driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense); - driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth); - driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns); - driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); - driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts); - driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); - driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts); - driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); + int ret; + + ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host); + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay); + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense); + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth); + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns); + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts); + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts); + ret |= driver_create_file(&sdebug_driverfs_driver, 
&driver_attr_scsi_level); + return ret; } static void do_remove_driverfs_files(void) @@ -2522,6 +2525,7 @@ static int __init scsi_debug_init(void) unsigned int sz; int host_to_add; int k; + int ret; if (scsi_debug_dev_size_mb < 1) scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ @@ -2560,12 +2564,32 @@ static int __init scsi_debug_init(void) if (scsi_debug_num_parts > 0) sdebug_build_parts(fake_storep); - init_all_queued(); + ret = device_register(&pseudo_primary); + if (ret < 0) { + printk(KERN_WARNING "scsi_debug: device_register error: %d\n", + ret); + goto free_vm; + } + ret = bus_register(&pseudo_lld_bus); + if (ret < 0) { + printk(KERN_WARNING "scsi_debug: bus_register error: %d\n", + ret); + goto dev_unreg; + } + ret = driver_register(&sdebug_driverfs_driver); + if (ret < 0) { + printk(KERN_WARNING "scsi_debug: driver_register error: %d\n", + ret); + goto bus_unreg; + } + ret = do_create_driverfs_files(); + if (ret < 0) { + printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n", + ret); + goto del_files; + } - device_register(&pseudo_primary); - bus_register(&pseudo_lld_bus); - driver_register(&sdebug_driverfs_driver); - do_create_driverfs_files(); + init_all_queued(); sdebug_driver_template.proc_name = (char *)sdebug_proc_name; @@ -2585,6 +2609,18 @@ static int __init scsi_debug_init(void) scsi_debug_add_host); } return 0; + +del_files: + do_remove_driverfs_files(); + driver_unregister(&sdebug_driverfs_driver); +bus_unreg: + bus_unregister(&pseudo_lld_bus); +dev_unreg: + device_unregister(&pseudo_primary); +free_vm: + vfree(fake_storep); + + return ret; } static void __exit scsi_debug_exit(void) diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 6683d596234a..6a5b731bd5ba 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -460,19 +460,67 @@ static void scsi_eh_done(struct scsi_cmnd *scmd) * Return value: * SUCCESS or FAILED or NEEDS_RETRY **/ -static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout) +static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense) { struct scsi_device *sdev = scmd->device; struct Scsi_Host *shost = sdev->host; + int old_result = scmd->result; DECLARE_COMPLETION(done); unsigned long timeleft; unsigned long flags; + unsigned char old_cmnd[MAX_COMMAND_SIZE]; + enum dma_data_direction old_data_direction; + unsigned short old_use_sg; + unsigned char old_cmd_len; + unsigned old_bufflen; + void *old_buffer; int rtn; + /* + * We need saved copies of a number of fields - this is because + * error handling may need to overwrite these with different values + * to run different commands, and once error handling is complete, + * we will need to restore these values prior to running the actual + * command. 
+ */ + old_buffer = scmd->request_buffer; + old_bufflen = scmd->request_bufflen; + memcpy(old_cmnd, scmd->cmnd, sizeof(scmd->cmnd)); + old_data_direction = scmd->sc_data_direction; + old_cmd_len = scmd->cmd_len; + old_use_sg = scmd->use_sg; + + if (copy_sense) { + int gfp_mask = GFP_ATOMIC; + + if (shost->hostt->unchecked_isa_dma) + gfp_mask |= __GFP_DMA; + + scmd->sc_data_direction = DMA_FROM_DEVICE; + scmd->request_bufflen = 252; + scmd->request_buffer = kzalloc(scmd->request_bufflen, gfp_mask); + if (!scmd->request_buffer) + return FAILED; + } else { + scmd->request_buffer = NULL; + scmd->request_bufflen = 0; + scmd->sc_data_direction = DMA_NONE; + } + + scmd->underflow = 0; + scmd->use_sg = 0; + scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); + if (sdev->scsi_level <= SCSI_2) scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) | (sdev->lun << 5 & 0xe0); + /* + * Zero the sense buffer. The scsi spec mandates that any + * untransferred sense data should be interpreted as being zero. + */ + memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer)); + shost->eh_action = &done; spin_lock_irqsave(shost->host_lock, flags); @@ -522,6 +570,29 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout) rtn = FAILED; } + + /* + * Last chance to have valid sense data. + */ + if (copy_sense) { + if (!SCSI_SENSE_VALID(scmd)) { + memcpy(scmd->sense_buffer, scmd->request_buffer, + sizeof(scmd->sense_buffer)); + } + kfree(scmd->request_buffer); + } + + + /* + * Restore original data + */ + scmd->request_buffer = old_buffer; + scmd->request_bufflen = old_bufflen; + memcpy(scmd->cmnd, old_cmnd, sizeof(scmd->cmnd)); + scmd->sc_data_direction = old_data_direction; + scmd->cmd_len = old_cmd_len; + scmd->use_sg = old_use_sg; + scmd->result = old_result; return rtn; } @@ -537,56 +608,10 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout) static int scsi_request_sense(struct scsi_cmnd *scmd) { static unsigned char generic_sense[6] = - {REQUEST_SENSE, 0, 0, 0, 252, 0}; - unsigned char *scsi_result; - int saved_result; - int rtn; + {REQUEST_SENSE, 0, 0, 0, 252, 0}; memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense)); - - scsi_result = kmalloc(252, GFP_ATOMIC | ((scmd->device->host->hostt->unchecked_isa_dma) ? __GFP_DMA : 0)); - - - if (unlikely(!scsi_result)) { - printk(KERN_ERR "%s: cannot allocate scsi_result.\n", - __FUNCTION__); - return FAILED; - } - - /* - * zero the sense buffer. some host adapters automatically always - * request sense, so it is not a good idea that - * scmd->request_buffer and scmd->sense_buffer point to the same - * address (db). 0 is not a valid sense code. - */ - memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer)); - memset(scsi_result, 0, 252); - - saved_result = scmd->result; - scmd->request_buffer = scsi_result; - scmd->request_bufflen = 252; - scmd->use_sg = 0; - scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); - scmd->sc_data_direction = DMA_FROM_DEVICE; - scmd->underflow = 0; - - rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT); - - /* last chance to have valid sense data */ - if(!SCSI_SENSE_VALID(scmd)) { - memcpy(scmd->sense_buffer, scmd->request_buffer, - sizeof(scmd->sense_buffer)); - } - - kfree(scsi_result); - - /* - * when we eventually call scsi_finish, we really wish to complete - * the original request, so let's restore the original data. 
(db) - */ - scsi_setup_cmd_retry(scmd); - scmd->result = saved_result; - return rtn; + return scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 1); } /** @@ -605,12 +630,6 @@ void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q) { scmd->device->host->host_failed--; scmd->eh_eflags = 0; - - /* - * set this back so that the upper level can correctly free up - * things. - */ - scsi_setup_cmd_retry(scmd); list_move_tail(&scmd->eh_entry, done_q); } EXPORT_SYMBOL(scsi_eh_finish_cmd); @@ -715,47 +734,26 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd) { static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0}; int retry_cnt = 1, rtn; - int saved_result; retry_tur: memcpy(scmd->cmnd, tur_command, sizeof(tur_command)); - /* - * zero the sense buffer. the scsi spec mandates that any - * untransferred sense data should be interpreted as being zero. - */ - memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer)); - - saved_result = scmd->result; - scmd->request_buffer = NULL; - scmd->request_bufflen = 0; - scmd->use_sg = 0; - scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); - scmd->underflow = 0; - scmd->sc_data_direction = DMA_NONE; - rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT); + rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 0); - /* - * when we eventually call scsi_finish, we really wish to complete - * the original request, so let's restore the original data. (db) - */ - scsi_setup_cmd_retry(scmd); - scmd->result = saved_result; - - /* - * hey, we are done. let's look to see what happened. - */ SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", __FUNCTION__, scmd, rtn)); - if (rtn == SUCCESS) - return 0; - else if (rtn == NEEDS_RETRY) { + + switch (rtn) { + case NEEDS_RETRY: if (retry_cnt--) goto retry_tur; + /*FALLTHRU*/ + case SUCCESS: return 0; + default: + return 1; } - return 1; } /** @@ -837,44 +835,16 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd) static int scsi_eh_try_stu(struct scsi_cmnd *scmd) { static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0}; - int rtn; - int saved_result; - if (!scmd->device->allow_restart) - return 1; - - memcpy(scmd->cmnd, stu_command, sizeof(stu_command)); - - /* - * zero the sense buffer. the scsi spec mandates that any - * untransferred sense data should be interpreted as being zero. - */ - memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer)); - - saved_result = scmd->result; - scmd->request_buffer = NULL; - scmd->request_bufflen = 0; - scmd->use_sg = 0; - scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); - scmd->underflow = 0; - scmd->sc_data_direction = DMA_NONE; + if (scmd->device->allow_restart) { + int rtn; - rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT); - - /* - * when we eventually call scsi_finish, we really wish to complete - * the original request, so let's restore the original data. (db) - */ - scsi_setup_cmd_retry(scmd); - scmd->result = saved_result; + memcpy(scmd->cmnd, stu_command, sizeof(stu_command)); + rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT, 0); + if (rtn == SUCCESS) + return 0; + } - /* - * hey, we are done. let's look to see what happened. 
- */ - SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", - __FUNCTION__, scmd, rtn)); - if (rtn == SUCCESS) - return 0; return 1; } @@ -1684,8 +1654,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag) scmd->scsi_done = scsi_reset_provider_done_command; scmd->done = NULL; - scmd->buffer = NULL; - scmd->bufflen = 0; scmd->request_buffer = NULL; scmd->request_bufflen = 0; diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index a89c4115cfba..32293f451669 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -110,11 +110,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, sshdr.asc, sshdr.ascq); break; case NOT_READY: /* This happens if there is no disc in drive */ - if (sdev->removable && (cmd[0] != TEST_UNIT_READY)) { - printk(KERN_INFO "Device not ready. Make sure" - " there is a disc in the drive.\n"); + if (sdev->removable) break; - } case UNIT_ATTENTION: if (sdev->removable) { sdev->changed = 1; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 08af9aae7df3..077c1c691210 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -436,60 +436,16 @@ EXPORT_SYMBOL_GPL(scsi_execute_async); * * Arguments: cmd - command that is ready to be queued. * - * Returns: Nothing - * * Notes: This function has the job of initializing a number of * fields related to error handling. Typically this will * be called once for each command, as required. */ -static int scsi_init_cmd_errh(struct scsi_cmnd *cmd) +static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) { cmd->serial_number = 0; - memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer); - if (cmd->cmd_len == 0) cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]); - - /* - * We need saved copies of a number of fields - this is because - * error handling may need to overwrite these with different values - * to run different commands, and once error handling is complete, - * we will need to restore these values prior to running the actual - * command. - */ - cmd->old_use_sg = cmd->use_sg; - cmd->old_cmd_len = cmd->cmd_len; - cmd->sc_old_data_direction = cmd->sc_data_direction; - cmd->old_underflow = cmd->underflow; - memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd)); - cmd->buffer = cmd->request_buffer; - cmd->bufflen = cmd->request_bufflen; - - return 1; -} - -/* - * Function: scsi_setup_cmd_retry() - * - * Purpose: Restore the command state for a retry - * - * Arguments: cmd - command to be restored - * - * Returns: Nothing - * - * Notes: Immediately prior to retrying a command, we need - * to restore certain fields that we saved above. - */ -void scsi_setup_cmd_retry(struct scsi_cmnd *cmd) -{ - memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd)); - cmd->request_buffer = cmd->buffer; - cmd->request_bufflen = cmd->bufflen; - cmd->use_sg = cmd->old_use_sg; - cmd->cmd_len = cmd->old_cmd_len; - cmd->sc_data_direction = cmd->sc_old_data_direction; - cmd->underflow = cmd->old_underflow; } void scsi_device_unbusy(struct scsi_device *sdev) @@ -807,22 +763,13 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index) */ static void scsi_release_buffers(struct scsi_cmnd *cmd) { - struct request *req = cmd->request; - - /* - * Free up any indirection buffers we allocated for DMA purposes. - */ if (cmd->use_sg) scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); - else if (cmd->request_buffer != req->buffer) - kfree(cmd->request_buffer); /* * Zero these out. They now point to freed memory, and it is * dangerous to hang onto the pointers. 
*/ - cmd->buffer = NULL; - cmd->bufflen = 0; cmd->request_buffer = NULL; cmd->request_bufflen = 0; } @@ -858,7 +805,7 @@ static void scsi_release_buffers(struct scsi_cmnd *cmd) void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) { int result = cmd->result; - int this_count = cmd->bufflen; + int this_count = cmd->request_bufflen; request_queue_t *q = cmd->device->request_queue; struct request *req = cmd->request; int clear_errors = 1; @@ -866,28 +813,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) int sense_valid = 0; int sense_deferred = 0; - /* - * Free up any indirection buffers we allocated for DMA purposes. - * For the case of a READ, we need to copy the data out of the - * bounce buffer and into the real buffer. - */ - if (cmd->use_sg) - scsi_free_sgtable(cmd->buffer, cmd->sglist_len); - else if (cmd->buffer != req->buffer) { - if (rq_data_dir(req) == READ) { - unsigned long flags; - char *to = bio_kmap_irq(req->bio, &flags); - memcpy(to, cmd->buffer, cmd->bufflen); - bio_kunmap_irq(to, &flags); - } - kfree(cmd->buffer); - } + scsi_release_buffers(cmd); if (result) { sense_valid = scsi_command_normalize_sense(cmd, &sshdr); if (sense_valid) sense_deferred = scsi_sense_is_deferred(&sshdr); } + if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ req->errors = result; if (result) { @@ -908,15 +841,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) } /* - * Zero these out. They now point to freed memory, and it is - * dangerous to hang onto the pointers. - */ - cmd->buffer = NULL; - cmd->bufflen = 0; - cmd->request_buffer = NULL; - cmd->request_bufflen = 0; - - /* * Next deal with any sectors which we were able to correctly * handle. */ @@ -1012,7 +936,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) if (!(req->flags & REQ_QUIET)) { scmd_printk(KERN_INFO, cmd, "Volume overflow, CDB: "); - __scsi_print_command(cmd->data_cmnd); + __scsi_print_command(cmd->cmnd); scsi_print_sense("", cmd); } /* See SSC3rXX or current. */ @@ -1143,7 +1067,7 @@ static void scsi_blk_pc_done(struct scsi_cmnd *cmd) * successfully. 
Since this is a REQ_BLOCK_PC command the * caller should check the request's errors value */ - scsi_io_completion(cmd, cmd->bufflen); + scsi_io_completion(cmd, cmd->request_bufflen); } static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd) diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index e2fbe9a9d5a9..ae24c85aaeea 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -57,7 +57,6 @@ extern int scsi_eh_scmd_add(struct scsi_cmnd *, int); /* scsi_lib.c */ extern int scsi_maybe_unblock_host(struct scsi_device *sdev); -extern void scsi_setup_cmd_retry(struct scsi_cmnd *cmd); extern void scsi_device_unbusy(struct scsi_device *sdev); extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason); extern void scsi_next_command(struct scsi_cmnd *cmd); diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index dd075627e605..5a625c3fddae 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -41,6 +41,7 @@ struct sas_host_attrs { struct mutex lock; u32 next_target_id; u32 next_expander_id; + int next_port_id; }; #define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data) @@ -146,6 +147,7 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev, mutex_init(&sas_host->lock); sas_host->next_target_id = 0; sas_host->next_expander_id = 0; + sas_host->next_port_id = 0; return 0; } @@ -327,7 +329,7 @@ sas_phy_protocol_attr(identify.target_port_protocols, sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", unsigned long long); sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); -//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", u8); +//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int); sas_phy_linkspeed_attr(negotiated_linkrate); sas_phy_linkspeed_attr(minimum_linkrate_hw); sas_phy_linkspeed_attr(minimum_linkrate); @@ -590,6 +592,38 @@ struct sas_port *sas_port_alloc(struct device *parent, int port_id) } EXPORT_SYMBOL(sas_port_alloc); +/** sas_port_alloc_num - allocate and initialize a SAS port structure + * + * @parent: parent device + * + * Allocates a SAS port structure and a number to go with it. This + * interface is really for adapters where the port number has no + * meaning, so the sas class should manage them. It will be added to + * the device tree below the device specified by @parent which must be + * either a Scsi_Host or a sas_expander_device.
+ * + * Returns %NULL on error + */ +struct sas_port *sas_port_alloc_num(struct device *parent) +{ + int index; + struct Scsi_Host *shost = dev_to_shost(parent); + struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); + + /* FIXME: use idr for this eventually */ + mutex_lock(&sas_host->lock); + if (scsi_is_sas_expander_device(parent)) { + struct sas_rphy *rphy = dev_to_rphy(parent); + struct sas_expander_device *exp = rphy_to_expander_device(rphy); + + index = exp->next_port_id++; + } else + index = sas_host->next_port_id++; + mutex_unlock(&sas_host->lock); + return sas_port_alloc(parent, index); +} +EXPORT_SYMBOL(sas_port_alloc_num); + /** * sas_port_add - add a SAS port to the device hierarchy * @@ -658,6 +692,13 @@ void sas_port_delete(struct sas_port *port) } mutex_unlock(&port->phy_list_mutex); + if (port->is_backlink) { + struct device *parent = port->dev.parent; + + sysfs_remove_link(&port->dev.kobj, parent->bus_id); + port->is_backlink = 0; + } + transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); @@ -733,6 +774,19 @@ void sas_port_delete_phy(struct sas_port *port, struct sas_phy *phy) } EXPORT_SYMBOL(sas_port_delete_phy); +void sas_port_mark_backlink(struct sas_port *port) +{ + struct device *parent = port->dev.parent->parent->parent; + + if (port->is_backlink) + return; + port->is_backlink = 1; + sysfs_create_link(&port->dev.kobj, &parent->kobj, + parent->bus_id); + +} +EXPORT_SYMBOL(sas_port_mark_backlink); + /* * SAS remote PHY attributes. */ @@ -1140,7 +1194,7 @@ int sas_rphy_add(struct sas_rphy *rphy) if (identify->device_type == SAS_END_DEVICE && rphy->scsi_target_id != -1) { - scsi_scan_target(&rphy->dev, parent->port_identifier, + scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, ~0, 0); } @@ -1242,15 +1296,13 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel, mutex_lock(&sas_host->lock); list_for_each_entry(rphy, &sas_host->rphy_list, list) { - struct sas_port *parent = dev_to_sas_port(rphy->dev.parent); - if (rphy->identify.device_type != SAS_END_DEVICE || rphy->scsi_target_id == -1) continue; - if ((channel == SCAN_WILD_CARD || channel == parent->port_identifier) && + if ((channel == SCAN_WILD_CARD || channel == 0) && (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { - scsi_scan_target(&rphy->dev, parent->port_identifier, + scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, 1); } } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 3225d31449e1..98bd3aab9739 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -502,8 +502,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt) SCpnt->cmnd[4] = (unsigned char) this_count; SCpnt->cmnd[5] = 0; } - SCpnt->request_bufflen = SCpnt->bufflen = - this_count * sdp->sector_size; + SCpnt->request_bufflen = this_count * sdp->sector_size; /* * We shouldn't disconnect in the middle of a sector, so with a dumb diff --git a/drivers/scsi/seagate.c b/drivers/scsi/seagate.c index 3f312a84c6a7..2679ea8bff1a 100644 --- a/drivers/scsi/seagate.c +++ b/drivers/scsi/seagate.c @@ -1002,7 +1002,7 @@ connect_loop: } #endif - buffer = (struct scatterlist *) SCint->buffer; + buffer = (struct scatterlist *) SCint->request_buffer; len = buffer->length; data = page_address(buffer->page) + buffer->offset; } else { diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index fd94408577e5..fae6e95a6298 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -360,7 +360,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt) "mismatch count %d, bytes %d\n", size, 
SCpnt->request_bufflen); if (SCpnt->request_bufflen > size) - SCpnt->request_bufflen = SCpnt->bufflen = size; + SCpnt->request_bufflen = size; } } @@ -387,8 +387,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt) if (this_count > 0xffff) { this_count = 0xffff; - SCpnt->request_bufflen = SCpnt->bufflen = - this_count * s_size; + SCpnt->request_bufflen = this_count * s_size; } SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 756ceb93ddc8..7f669b600677 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -368,7 +368,7 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt) SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2], SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]); if (cmdstatp->have_sense) - __scsi_print_sense("st", SRpnt->sense, SCSI_SENSE_BUFFERSIZE); + __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE); } ) /* end DEB */ if (!debugging) { /* Abnormal conditions for tape */ if (!cmdstatp->have_sense) @@ -384,9 +384,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt) scode != VOLUME_OVERFLOW && SRpnt->cmd[0] != MODE_SENSE && SRpnt->cmd[0] != TEST_UNIT_READY) { - printk(KERN_WARNING "%s: Error with sense data: ", name); - __scsi_print_sense("st", SRpnt->sense, - SCSI_SENSE_BUFFERSIZE); + + __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE); } } diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c index 2ebe0d663899..2f8073b73bf3 100644 --- a/drivers/scsi/sun3_NCR5380.c +++ b/drivers/scsi/sun3_NCR5380.c @@ -517,7 +517,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd *cmd) */ if (cmd->use_sg) { - cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; + cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer; cmd->SCp.buffers_residual = cmd->use_sg - 1; cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer); cmd->SCp.this_residual = cmd->SCp.buffer->length; diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c index 1f328cae5c05..6b60536ac92b 100644 --- a/drivers/scsi/sun3x_esp.c +++ b/drivers/scsi/sun3x_esp.c @@ -347,7 +347,7 @@ static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp) static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp) { int sz = sp->use_sg - 1; - struct scatterlist *sg = (struct scatterlist *)sp->buffer; + struct scatterlist *sg = (struct scatterlist *)sp->request_buffer; while(sz >= 0) { dvma_unmap((char *)sg[sz].dma_address); diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c index 680f38ab60d8..2083454db511 100644 --- a/drivers/scsi/wd33c93.c +++ b/drivers/scsi/wd33c93.c @@ -373,7 +373,7 @@ wd33c93_queuecommand(struct scsi_cmnd *cmd, */ if (cmd->use_sg) { - cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; + cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer; cmd->SCp.buffers_residual = cmd->use_sg - 1; cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + cmd->SCp.buffer->offset; diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c index 0dbd4df44c05..dc673e1b6fd9 100644 --- a/drivers/serial/sunsab.c +++ b/drivers/serial/sunsab.c @@ -1047,12 +1047,13 @@ static int __devinit sab_probe(struct of_device *op, const struct of_device_id * up = &sunsab_ports[inst * 2]; err = sunsab_init_one(&up[0], op, - sizeof(union sab82532_async_regs), + 0, (inst * 2) + 0); if (err) return err; - err = sunsab_init_one(&up[0], op, 0, + err = sunsab_init_one(&up[1], op, + sizeof(union sab82532_async_regs), (inst * 2) + 1); if (err) { 
of_iounmap(up[0].port.membase, @@ -1117,7 +1118,7 @@ static int __init sunsab_init(void) int err; num_channels = 0; - for_each_node_by_name(dp, "su") + for_each_node_by_name(dp, "se") num_channels += 2; for_each_node_by_name(dp, "serial") { if (of_device_is_compatible(dp, "sab82532")) diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index 93bdaa3169fc..d3a5aeee73a3 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c @@ -1200,6 +1200,11 @@ static int __init sunsu_kbd_ms_init(struct uart_sunsu_port *up) if (up->port.type == PORT_UNKNOWN) return -ENODEV; + printk("%s: %s port at %lx, irq %u\n", + to_of_device(up->port.dev)->node->full_name, + (up->su_type == SU_PORT_KBD) ? "Keyboard" : "Mouse", + up->port.mapbase, up->port.irq); + #ifdef CONFIG_SERIO serio = &up->serio; serio->port_data = up; diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c index a1456d9352cb..47bc3d57e019 100644 --- a/drivers/serial/sunzilog.c +++ b/drivers/serial/sunzilog.c @@ -68,9 +68,6 @@ static int num_sunzilog; #define NUM_SUNZILOG num_sunzilog #define NUM_CHANNELS (NUM_SUNZILOG * 2) -#define KEYBOARD_LINE 0x2 -#define MOUSE_LINE 0x3 - #define ZS_CLOCK 4915200 /* Zilog input clock rate. */ #define ZS_CLOCK_DIVISOR 16 /* Divisor this driver uses. */ @@ -1225,12 +1222,10 @@ static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channe { int baud, brg; - if (channel == KEYBOARD_LINE) { - up->flags |= SUNZILOG_FLAG_CONS_KEYB; + if (up->flags & SUNZILOG_FLAG_CONS_KEYB) { up->cflag = B1200 | CS8 | CLOCAL | CREAD; baud = 1200; } else { - up->flags |= SUNZILOG_FLAG_CONS_MOUSE; up->cflag = B4800 | CS8 | CLOCAL | CREAD; baud = 4800; } @@ -1243,14 +1238,14 @@ static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channe } #ifdef CONFIG_SERIO -static void __init sunzilog_register_serio(struct uart_sunzilog_port *up, int channel) +static void __init sunzilog_register_serio(struct uart_sunzilog_port *up) { struct serio *serio = &up->serio; serio->port_data = up; serio->id.type = SERIO_RS232; - if (channel == KEYBOARD_LINE) { + if (up->flags & SUNZILOG_FLAG_CONS_KEYB) { serio->id.proto = SERIO_SUNKBD; strlcpy(serio->name, "zskbd", sizeof(serio->name)); } else { @@ -1259,7 +1254,8 @@ static void __init sunzilog_register_serio(struct uart_sunzilog_port *up, int ch strlcpy(serio->name, "zsms", sizeof(serio->name)); } strlcpy(serio->phys, - (channel == KEYBOARD_LINE ? "zs/serio0" : "zs/serio1"), + ((up->flags & SUNZILOG_FLAG_CONS_KEYB) ? 
+ "zs/serio0" : "zs/serio1"), sizeof(serio->phys)); serio->write = sunzilog_serio_write; @@ -1286,8 +1282,8 @@ static void __init sunzilog_init_hw(struct uart_sunzilog_port *up) (void) read_zsreg(channel, R0); } - if (up->port.line == KEYBOARD_LINE || - up->port.line == MOUSE_LINE) { + if (up->flags & (SUNZILOG_FLAG_CONS_KEYB | + SUNZILOG_FLAG_CONS_MOUSE)) { sunzilog_init_kbdms(up, up->port.line); up->curregs[R9] |= (NV | MIE); write_zsreg(channel, R9, up->curregs[R9]); @@ -1313,37 +1309,26 @@ static void __init sunzilog_init_hw(struct uart_sunzilog_port *up) spin_unlock_irqrestore(&up->port.lock, flags); #ifdef CONFIG_SERIO - if (up->port.line == KEYBOARD_LINE || up->port.line == MOUSE_LINE) - sunzilog_register_serio(up, up->port.line); + if (up->flags & (SUNZILOG_FLAG_CONS_KEYB | + SUNZILOG_FLAG_CONS_MOUSE)) + sunzilog_register_serio(up); #endif } -static int __devinit zs_get_instance(struct device_node *dp) -{ - int ret; - - ret = of_getintprop_default(dp, "slave", -1); - if (ret != -1) - return ret; - - if (of_find_property(dp, "keyboard", NULL)) - ret = 1; - else - ret = 0; - - return ret; -} - static int zilog_irq = -1; -static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *match) +static int __devinit zs_probe(struct of_device *op, const struct of_device_id *match) { - struct of_device *op = to_of_device(&dev->dev); + static int inst; struct uart_sunzilog_port *up; struct zilog_layout __iomem *rp; - int inst = zs_get_instance(dev->node); + int keyboard_mouse; int err; + keyboard_mouse = 0; + if (of_find_property(op->node, "keyboard", NULL)) + keyboard_mouse = 1; + sunzilog_chip_regs[inst] = of_ioremap(&op->resource[0], 0, sizeof(struct zilog_layout), "zs"); @@ -1352,16 +1337,8 @@ static int __devinit zs_probe(struct of_device *dev, const struct of_device_id * rp = sunzilog_chip_regs[inst]; - if (zilog_irq == -1) { + if (zilog_irq == -1) zilog_irq = op->irqs[0]; - err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED, - "zs", sunzilog_irq_chain); - if (err) { - of_iounmap(rp, sizeof(struct zilog_layout)); - - return err; - } - } up = &sunzilog_port_table[inst * 2]; @@ -1378,7 +1355,7 @@ static int __devinit zs_probe(struct of_device *dev, const struct of_device_id * up[0].port.line = (inst * 2) + 0; up[0].port.dev = &op->dev; up[0].flags |= SUNZILOG_FLAG_IS_CHANNEL_A; - if (inst == 1) + if (keyboard_mouse) up[0].flags |= SUNZILOG_FLAG_CONS_KEYB; sunzilog_init_hw(&up[0]); @@ -1395,11 +1372,11 @@ static int __devinit zs_probe(struct of_device *dev, const struct of_device_id * up[1].port.line = (inst * 2) + 1; up[1].port.dev = &op->dev; up[1].flags |= 0; - if (inst == 1) + if (keyboard_mouse) up[1].flags |= SUNZILOG_FLAG_CONS_MOUSE; sunzilog_init_hw(&up[1]); - if (inst != 1) { + if (!keyboard_mouse) { err = uart_add_one_port(&sunzilog_reg, &up[0].port); if (err) { of_iounmap(rp, sizeof(struct zilog_layout)); @@ -1411,9 +1388,18 @@ static int __devinit zs_probe(struct of_device *dev, const struct of_device_id * of_iounmap(rp, sizeof(struct zilog_layout)); return err; } + } else { + printk(KERN_INFO "%s: Keyboard at MMIO %lx (irq = %d) " + "is a zs\n", + op->dev.bus_id, up[0].port.mapbase, op->irqs[0]); + printk(KERN_INFO "%s: Mouse at MMIO %lx (irq = %d) " + "is a zs\n", + op->dev.bus_id, up[1].port.mapbase, op->irqs[0]); } - dev_set_drvdata(&dev->dev, &up[0]); + dev_set_drvdata(&op->dev, &up[0]); + + inst++; return 0; } @@ -1462,36 +1448,65 @@ static struct of_platform_driver zs_driver = { static int __init sunzilog_init(void) { struct device_node *dp; - 
int err; + int err, uart_count; + int num_keybms; NUM_SUNZILOG = 0; - for_each_node_by_name(dp, "zs") + num_keybms = 0; + for_each_node_by_name(dp, "zs") { NUM_SUNZILOG++; + if (of_find_property(dp, "keyboard", NULL)) + num_keybms++; + } + uart_count = 0; if (NUM_SUNZILOG) { int uart_count; err = sunzilog_alloc_tables(); if (err) - return err; + goto out; - /* Subtract 1 for keyboard, 1 for mouse. */ - uart_count = (NUM_SUNZILOG * 2) - 2; + uart_count = (NUM_SUNZILOG * 2) - (2 * num_keybms); sunzilog_reg.nr = uart_count; sunzilog_reg.minor = sunserial_current_minor; err = uart_register_driver(&sunzilog_reg); - if (err) { - sunzilog_free_tables(); - return err; - } + if (err) + goto out_free_tables; + sunzilog_reg.tty_driver->name_base = sunzilog_reg.minor - 64; sunzilog_reg.cons = SUNZILOG_CONSOLE(); sunserial_current_minor += uart_count; } - return of_register_driver(&zs_driver, &of_bus_type); + err = of_register_driver(&zs_driver, &of_bus_type); + if (err) + goto out_unregister_uart; + + if (zilog_irq != -1) { + err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED, + "zs", sunzilog_irq_chain); + if (err) + goto out_unregister_driver; + } + +out: + return err; + +out_unregister_driver: + of_unregister_driver(&zs_driver); + +out_unregister_uart: + if (NUM_SUNZILOG) { + uart_unregister_driver(&sunzilog_reg); + sunzilog_reg.cons = NULL; + } + +out_free_tables: + sunzilog_free_tables(); + goto out; } static void __exit sunzilog_exit(void) diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 3badb48d662b..6533b0f39231 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -1518,6 +1518,26 @@ config FB_PXA_PARAMETERS <file:Documentation/fb/pxafb.txt> describes the available parameters. +config FB_MBX + tristate "2700G LCD framebuffer support" + depends on FB && ARCH_PXA + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + ---help--- + Framebuffer driver for the Intel 2700G (Marathon) Graphics + Accelerator + +config FB_MBX_DEBUG + bool "Enable debugging info via debugfs" + depends on FB_MBX && DEBUG_FS + default n + ---help--- + Enable this if you want debugging information using the debug + filesystem (debugfs) + + If unsure, say N. + config FB_W100 tristate "W100 frame buffer support" depends on FB && PXA_SHARPSL diff --git a/drivers/video/Makefile b/drivers/video/Makefile index 6283d015f8f5..95563c9c6b9c 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_FB_SIS) += sis/ obj-$(CONFIG_FB_KYRO) += kyro/ obj-$(CONFIG_FB_SAVAGE) += savage/ obj-$(CONFIG_FB_GEODE) += geode/ +obj-$(CONFIG_FB_MBX) += mbx/ obj-$(CONFIG_FB_I810) += vgastate.o obj-$(CONFIG_FB_NEOMAGIC) += neofb.o vgastate.o obj-$(CONFIG_FB_VIRGE) += virgefb.o diff --git a/drivers/video/mbx/Makefile b/drivers/video/mbx/Makefile new file mode 100644 index 000000000000..16c1165cf9c7 --- /dev/null +++ b/drivers/video/mbx/Makefile @@ -0,0 +1,4 @@ +# Makefile for the 2700G controller driver. 
+ +obj-$(CONFIG_FB_MBX) += mbxfb.o +obj-$(CONFIG_FB_MBX_DEBUG) += mbxfbdebugfs.o diff --git a/drivers/video/mbx/mbxdebugfs.c b/drivers/video/mbx/mbxdebugfs.c new file mode 100644 index 000000000000..84aab3ad024e --- /dev/null +++ b/drivers/video/mbx/mbxdebugfs.c @@ -0,0 +1,188 @@ +#include <linux/debugfs.h> + +#define BIG_BUFFER_SIZE (1024) + +static char big_buffer[BIG_BUFFER_SIZE]; + +struct mbxfb_debugfs_data { + struct dentry *dir; + struct dentry *sysconf; + struct dentry *clock; + struct dentry *display; + struct dentry *gsctl; +}; + +static int open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->u.generic_ip; + return 0; +} + +static ssize_t write_file_dummy(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + return count; +} + +static ssize_t sysconf_read_file(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + char * s = big_buffer; + + s += sprintf(s, "SYSCFG = %08lx\n", SYSCFG); + s += sprintf(s, "PFBASE = %08lx\n", PFBASE); + s += sprintf(s, "PFCEIL = %08lx\n", PFCEIL); + s += sprintf(s, "POLLFLAG = %08lx\n", POLLFLAG); + s += sprintf(s, "SYSRST = %08lx\n", SYSRST); + + return simple_read_from_buffer(userbuf, count, ppos, + big_buffer, s-big_buffer); +} + + +static ssize_t gsctl_read_file(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + char * s = big_buffer; + + s += sprintf(s, "GSCTRL = %08lx\n", GSCTRL); + s += sprintf(s, "VSCTRL = %08lx\n", VSCTRL); + s += sprintf(s, "GBBASE = %08lx\n", GBBASE); + s += sprintf(s, "VBBASE = %08lx\n", VBBASE); + s += sprintf(s, "GDRCTRL = %08lx\n", GDRCTRL); + s += sprintf(s, "VCMSK = %08lx\n", VCMSK); + s += sprintf(s, "GSCADR = %08lx\n", GSCADR); + s += sprintf(s, "VSCADR = %08lx\n", VSCADR); + s += sprintf(s, "VUBASE = %08lx\n", VUBASE); + s += sprintf(s, "VVBASE = %08lx\n", VVBASE); + s += sprintf(s, "GSADR = %08lx\n", GSADR); + s += sprintf(s, "VSADR = %08lx\n", VSADR); + s += sprintf(s, "HCCTRL = %08lx\n", HCCTRL); + s += sprintf(s, "HCSIZE = %08lx\n", HCSIZE); + s += sprintf(s, "HCPOS = %08lx\n", HCPOS); + s += sprintf(s, "HCBADR = %08lx\n", HCBADR); + s += sprintf(s, "HCCKMSK = %08lx\n", HCCKMSK); + s += sprintf(s, "GPLUT = %08lx\n", GPLUT); + + return simple_read_from_buffer(userbuf, count, ppos, + big_buffer, s-big_buffer); +} + +static ssize_t display_read_file(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + char * s = big_buffer; + + s += sprintf(s, "DSCTRL = %08lx\n", DSCTRL); + s += sprintf(s, "DHT01 = %08lx\n", DHT01); + s += sprintf(s, "DHT02 = %08lx\n", DHT02); + s += sprintf(s, "DHT03 = %08lx\n", DHT03); + s += sprintf(s, "DVT01 = %08lx\n", DVT01); + s += sprintf(s, "DVT02 = %08lx\n", DVT02); + s += sprintf(s, "DVT03 = %08lx\n", DVT03); + s += sprintf(s, "DBCOL = %08lx\n", DBCOL); + s += sprintf(s, "BGCOLOR = %08lx\n", BGCOLOR); + s += sprintf(s, "DINTRS = %08lx\n", DINTRS); + s += sprintf(s, "DINTRE = %08lx\n", DINTRE); + s += sprintf(s, "DINTRCNT = %08lx\n", DINTRCNT); + s += sprintf(s, "DSIG = %08lx\n", DSIG); + s += sprintf(s, "DMCTRL = %08lx\n", DMCTRL); + s += sprintf(s, "CLIPCTRL = %08lx\n", CLIPCTRL); + s += sprintf(s, "SPOCTRL = %08lx\n", SPOCTRL); + s += sprintf(s, "SVCTRL = %08lx\n", SVCTRL); + s += sprintf(s, "DLSTS = %08lx\n", DLSTS); + s += sprintf(s, "DLLCTRL = %08lx\n", DLLCTRL); + s += sprintf(s, "DVLNUM = %08lx\n", DVLNUM); + s += sprintf(s, "DUCTRL = %08lx\n", DUCTRL); + s += sprintf(s, "DVECTRL = %08lx\n", DVECTRL); + s += sprintf(s, "DHDET = %08lx\n", DHDET); + s 
+= sprintf(s, "DVDET = %08lx\n", DVDET); + s += sprintf(s, "DODMSK = %08lx\n", DODMSK); + s += sprintf(s, "CSC01 = %08lx\n", CSC01); + s += sprintf(s, "CSC02 = %08lx\n", CSC02); + s += sprintf(s, "CSC03 = %08lx\n", CSC03); + s += sprintf(s, "CSC04 = %08lx\n", CSC04); + s += sprintf(s, "CSC05 = %08lx\n", CSC05); + + return simple_read_from_buffer(userbuf, count, ppos, + big_buffer, s-big_buffer); +} + +static ssize_t clock_read_file(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + char * s = big_buffer; + + s += sprintf(s, "SYSCLKSRC = %08lx\n", SYSCLKSRC); + s += sprintf(s, "PIXCLKSRC = %08lx\n", PIXCLKSRC); + s += sprintf(s, "CLKSLEEP = %08lx\n", CLKSLEEP); + s += sprintf(s, "COREPLL = %08lx\n", COREPLL); + s += sprintf(s, "DISPPLL = %08lx\n", DISPPLL); + s += sprintf(s, "PLLSTAT = %08lx\n", PLLSTAT); + s += sprintf(s, "VOVRCLK = %08lx\n", VOVRCLK); + s += sprintf(s, "PIXCLK = %08lx\n", PIXCLK); + s += sprintf(s, "MEMCLK = %08lx\n", MEMCLK); + s += sprintf(s, "M24CLK = %08lx\n", M24CLK); + s += sprintf(s, "MBXCLK = %08lx\n", MBXCLK); + s += sprintf(s, "SDCLK = %08lx\n", SDCLK); + s += sprintf(s, "PIXCLKDIV = %08lx\n", PIXCLKDIV); + + return simple_read_from_buffer(userbuf, count, ppos, + big_buffer, s-big_buffer); +} + +static struct file_operations sysconf_fops = { + .read = sysconf_read_file, + .write = write_file_dummy, + .open = open_file_generic, +}; + +static struct file_operations clock_fops = { + .read = clock_read_file, + .write = write_file_dummy, + .open = open_file_generic, +}; + +static struct file_operations display_fops = { + .read = display_read_file, + .write = write_file_dummy, + .open = open_file_generic, +}; + +static struct file_operations gsctl_fops = { + .read = gsctl_read_file, + .write = write_file_dummy, + .open = open_file_generic, +}; + + +static void __devinit mbxfb_debugfs_init(struct fb_info *fbi) +{ + struct mbxfb_info *mfbi = fbi->par; + struct mbxfb_debugfs_data *dbg; + + dbg = kzalloc(sizeof(struct mbxfb_debugfs_data), GFP_KERNEL); + mfbi->debugfs_data = dbg; + + dbg->dir = debugfs_create_dir("mbxfb", NULL); + dbg->sysconf = debugfs_create_file("sysconf", 0444, dbg->dir, + fbi, &sysconf_fops); + dbg->clock = debugfs_create_file("clock", 0444, dbg->dir, + fbi, &clock_fops); + dbg->display = debugfs_create_file("display", 0444, dbg->dir, + fbi, &display_fops); + dbg->gsctl = debugfs_create_file("gsctl", 0444, dbg->dir, + fbi, &gsctl_fops); +} + +static void __devexit mbxfb_debugfs_remove(struct fb_info *fbi) +{ + struct mbxfb_info *mfbi = fbi->par; + struct mbxfb_debugfs_data *dbg = mfbi->debugfs_data; + + debugfs_remove(dbg->gsctl); + debugfs_remove(dbg->display); + debugfs_remove(dbg->clock); + debugfs_remove(dbg->sysconf); + debugfs_remove(dbg->dir); +} diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c new file mode 100644 index 000000000000..6849ab75d403 --- /dev/null +++ b/drivers/video/mbx/mbxfb.c @@ -0,0 +1,683 @@ +/* + * linux/drivers/video/mbx/mbxfb.c + * + * Copyright (C) 2006 Compulab, Ltd. + * Mike Rapoport <mike@compulab.co.il> + * + * Based on pxafb.c + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. 
+ * + * Intel 2700G (Marathon) Graphics Accelerator Frame Buffer Driver + * + */ + +#include <linux/delay.h> +#include <linux/fb.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <asm/io.h> + +#include <video/mbxfb.h> + +#include "regs.h" +#include "reg_bits.h" + +static unsigned long virt_base_2700; + +#define MIN_XRES 16 +#define MIN_YRES 16 +#define MAX_XRES 2048 +#define MAX_YRES 2048 + +#define MAX_PALETTES 16 + +/* FIXME: take care of different chip revisions with different sizes + of ODFB */ +#define MEMORY_OFFSET 0x60000 + +struct mbxfb_info { + struct device *dev; + + struct resource *fb_res; + struct resource *fb_req; + + struct resource *reg_res; + struct resource *reg_req; + + void __iomem *fb_virt_addr; + unsigned long fb_phys_addr; + + void __iomem *reg_virt_addr; + unsigned long reg_phys_addr; + + int (*platform_probe) (struct fb_info * fb); + int (*platform_remove) (struct fb_info * fb); + + u32 pseudo_palette[MAX_PALETTES]; +#ifdef CONFIG_FB_MBX_DEBUG + void *debugfs_data; +#endif + +}; + +static struct fb_var_screeninfo mbxfb_default __devinitdata = { + .xres = 640, + .yres = 480, + .xres_virtual = 640, + .yres_virtual = 480, + .bits_per_pixel = 16, + .red = {11, 5, 0}, + .green = {5, 6, 0}, + .blue = {0, 5, 0}, + .activate = FB_ACTIVATE_TEST, + .height = -1, + .width = -1, + .pixclock = 40000, + .left_margin = 48, + .right_margin = 16, + .upper_margin = 33, + .lower_margin = 10, + .hsync_len = 96, + .vsync_len = 2, + .vmode = FB_VMODE_NONINTERLACED, + .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, +}; + +static struct fb_fix_screeninfo mbxfb_fix __devinitdata = { + .id = "MBX", + .type = FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_TRUECOLOR, + .xpanstep = 0, + .ypanstep = 0, + .ywrapstep = 0, + .accel = FB_ACCEL_NONE, +}; + +struct pixclock_div { + u8 m; + u8 n; + u8 p; +}; + +static unsigned int mbxfb_get_pixclock(unsigned int pixclock_ps, + struct pixclock_div *div) +{ + u8 m, n, p; + unsigned int err = 0; + unsigned int min_err = ~0x0; + unsigned int clk; + unsigned int best_clk = 0; + unsigned int ref_clk = 13000; /* FIXME: take from platform data */ + unsigned int pixclock; + + /* convert pixclock to KHz */ + pixclock = PICOS2KHZ(pixclock_ps); + + for (m = 1; m < 64; m++) { + for (n = 1; n < 8; n++) { + for (p = 0; p < 8; p++) { + clk = (ref_clk * m) / (n * (1 << p)); + err = (clk > pixclock) ? 
(clk - pixclock) : + (pixclock - clk); + if (err < min_err) { + min_err = err; + best_clk = clk; + div->m = m; + div->n = n; + div->p = p; + } + } + } + } + return KHZ2PICOS(best_clk); +} + +static int mbxfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, + u_int trans, struct fb_info *info) +{ + u32 val, ret = 1; + + if (regno < MAX_PALETTES) { + u32 *pal = info->pseudo_palette; + + val = (red & 0xf800) | ((green & 0xfc00) >> 5) | + ((blue & 0xf800) >> 11); + pal[regno] = val; + ret = 0; + } + + return ret; +} + +static int mbxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +{ + struct pixclock_div div; + + var->pixclock = mbxfb_get_pixclock(var->pixclock, &div); + + if (var->xres < MIN_XRES) + var->xres = MIN_XRES; + if (var->yres < MIN_YRES) + var->yres = MIN_YRES; + if (var->xres > MAX_XRES) + return -EINVAL; + if (var->yres > MAX_YRES) + return -EINVAL; + var->xres_virtual = max(var->xres_virtual, var->xres); + var->yres_virtual = max(var->yres_virtual, var->yres); + + switch (var->bits_per_pixel) { + /* 8 bits-per-pixel is not supported yet */ + case 8: + return -EINVAL; + case 16: + var->green.length = (var->green.length == 5) ? 5 : 6; + var->red.length = 5; + var->blue.length = 5; + var->transp.length = 6 - var->green.length; + var->blue.offset = 0; + var->green.offset = 5; + var->red.offset = 5 + var->green.length; + var->transp.offset = (5 + var->red.offset) & 15; + break; + case 24: /* RGB 888 */ + case 32: /* RGBA 8888 */ + var->red.offset = 16; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 0; + var->blue.length = 8; + var->transp.length = var->bits_per_pixel - 24; + var->transp.offset = (var->transp.length) ? 24 : 0; + break; + } + var->red.msb_right = 0; + var->green.msb_right = 0; + var->blue.msb_right = 0; + var->transp.msb_right = 0; + + return 0; +} + +static int mbxfb_set_par(struct fb_info *info) +{ + struct fb_var_screeninfo *var = &info->var; + struct pixclock_div div; + ushort hbps, ht, hfps, has; + ushort vbps, vt, vfps, vas; + u32 gsctrl = readl(GSCTRL); + u32 gsadr = readl(GSADR); + + info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8; + + /* setup color mode */ + gsctrl &= ~(FMsk(GSCTRL_GPIXFMT)); + /* FIXME: add *WORKING* support for 8-bits per color */ + if (info->var.bits_per_pixel == 8) { + return -EINVAL; + } else { + fb_dealloc_cmap(&info->cmap); + gsctrl &= ~GSCTRL_LUT_EN; + + info->fix.visual = FB_VISUAL_TRUECOLOR; + switch (info->var.bits_per_pixel) { + case 16: + if (info->var.green.length == 5) + gsctrl |= GSCTRL_GPIXFMT_ARGB1555; + else + gsctrl |= GSCTRL_GPIXFMT_RGB565; + break; + case 24: + gsctrl |= GSCTRL_GPIXFMT_RGB888; + break; + case 32: + gsctrl |= GSCTRL_GPIXFMT_ARGB8888; + break; + } + } + + /* setup resolution */ + gsctrl &= ~(FMsk(GSCTRL_GSWIDTH) | FMsk(GSCTRL_GSHEIGHT)); + gsctrl |= Gsctrl_Width(info->var.xres - 1) | + Gsctrl_Height(info->var.yres - 1); + writel(gsctrl, GSCTRL); + udelay(1000); + + gsadr &= ~(FMsk(GSADR_SRCSTRIDE)); + gsadr |= Gsadr_Srcstride(info->var.xres * info->var.bits_per_pixel / + (8 * 16) - 1); + writel(gsadr, GSADR); + udelay(1000); + + /* setup timings */ + var->pixclock = mbxfb_get_pixclock(info->var.pixclock, &div); + + writel((Disp_Pll_M(div.m) | Disp_Pll_N(div.n) | + Disp_Pll_P(div.p) | DISP_PLL_EN), DISPPLL); + + hbps = var->hsync_len; + has = hbps + var->left_margin; + hfps = has + var->xres; + ht = hfps + var->right_margin; + + vbps = var->vsync_len; + vas = vbps + var->upper_margin; + vfps = vas + var->yres; + 
vt = vfps + var->lower_margin; + + writel((Dht01_Hbps(hbps) | Dht01_Ht(ht)), DHT01); + writel((Dht02_Hlbs(has) | Dht02_Has(has)), DHT02); + writel((Dht03_Hfps(hfps) | Dht03_Hrbs(hfps)), DHT03); + writel((Dhdet_Hdes(has) | Dhdet_Hdef(hfps)), DHDET); + + writel((Dvt01_Vbps(vbps) | Dvt01_Vt(vt)), DVT01); + writel((Dvt02_Vtbs(vas) | Dvt02_Vas(vas)), DVT02); + writel((Dvt03_Vfps(vfps) | Dvt03_Vbbs(vfps)), DVT03); + writel((Dvdet_Vdes(vas) | Dvdet_Vdef(vfps)), DVDET); + writel((Dvectrl_Vevent(vfps) | Dvectrl_Vfetch(vbps)), DVECTRL); + + writel((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL); + + return 0; +} + +static int mbxfb_blank(int blank, struct fb_info *info) +{ + switch (blank) { + case FB_BLANK_POWERDOWN: + case FB_BLANK_VSYNC_SUSPEND: + case FB_BLANK_HSYNC_SUSPEND: + case FB_BLANK_NORMAL: + writel((readl(DSCTRL) & ~DSCTRL_SYNCGEN_EN), DSCTRL); + udelay(1000); + writel((readl(PIXCLK) & ~PIXCLK_EN), PIXCLK); + udelay(1000); + writel((readl(VOVRCLK) & ~VOVRCLK_EN), VOVRCLK); + udelay(1000); + break; + case FB_BLANK_UNBLANK: + writel((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL); + udelay(1000); + writel((readl(PIXCLK) | PIXCLK_EN), PIXCLK); + udelay(1000); + break; + } + return 0; +} + +static struct fb_ops mbxfb_ops = { + .owner = THIS_MODULE, + .fb_check_var = mbxfb_check_var, + .fb_set_par = mbxfb_set_par, + .fb_setcolreg = mbxfb_setcolreg, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_blank = mbxfb_blank, +}; + +/* + Enable external SDRAM controller. Assume that all clocks are active + by now. +*/ +static void __devinit setup_memc(struct fb_info *fbi) +{ + struct mbxfb_info *mfbi = fbi->par; + unsigned long tmp; + int i; + + /* FIXME: use platform specific parameters */ + /* setup SDRAM controller */ + writel((LMCFG_LMC_DS | LMCFG_LMC_TS | LMCFG_LMD_TS | + LMCFG_LMA_TS), + LMCFG); + udelay(1000); + + writel(LMPWR_MC_PWR_ACT, LMPWR); + udelay(1000); + + /* setup SDRAM timings */ + writel((Lmtim_Tras(7) | Lmtim_Trp(3) | Lmtim_Trcd(3) | + Lmtim_Trc(9) | Lmtim_Tdpl(2)), + LMTIM); + udelay(1000); + /* setup SDRAM refresh rate */ + writel(0xc2b, LMREFRESH); + udelay(1000); + /* setup SDRAM type parameters */ + writel((LMTYPE_CASLAT_3 | LMTYPE_BKSZ_2 | LMTYPE_ROWSZ_11 | + LMTYPE_COLSZ_8), + LMTYPE); + udelay(1000); + /* enable memory controller */ + writel(LMPWR_MC_PWR_ACT, LMPWR); + udelay(1000); + + /* perform dummy reads */ + for ( i = 0; i < 16; i++ ) { + tmp = readl(fbi->screen_base); + } +} + +static void enable_clocks(struct fb_info *fbi) +{ + /* enable clocks */ + writel(SYSCLKSRC_PLL_2, SYSCLKSRC); + udelay(1000); + writel(PIXCLKSRC_PLL_1, PIXCLKSRC); + udelay(1000); + writel(0x00000000, CLKSLEEP); + udelay(1000); + writel((Core_Pll_M(0x17) | Core_Pll_N(0x3) | Core_Pll_P(0x0) | + CORE_PLL_EN), + COREPLL); + udelay(1000); + writel((Disp_Pll_M(0x1b) | Disp_Pll_N(0x7) | Disp_Pll_P(0x1) | + DISP_PLL_EN), + DISPPLL); + + writel(0x00000000, VOVRCLK); + udelay(1000); + writel(PIXCLK_EN, PIXCLK); + udelay(1000); + writel(MEMCLK_EN, MEMCLK); + udelay(1000); + writel(0x00000006, M24CLK); + udelay(1000); + writel(0x00000006, MBXCLK); + udelay(1000); + writel(SDCLK_EN, SDCLK); + udelay(1000); + writel(0x00000001, PIXCLKDIV); + udelay(1000); +} + +static void __devinit setup_graphics(struct fb_info *fbi) +{ + unsigned long gsctrl; + + gsctrl = GSCTRL_GAMMA_EN | Gsctrl_Width(fbi->var.xres - 1) | + Gsctrl_Height(fbi->var.yres - 1); + switch (fbi->var.bits_per_pixel) { + case 16: + if (fbi->var.green.length == 5) + gsctrl |= 
GSCTRL_GPIXFMT_ARGB1555; + else + gsctrl |= GSCTRL_GPIXFMT_RGB565; + break; + case 24: + gsctrl |= GSCTRL_GPIXFMT_RGB888; + break; + case 32: + gsctrl |= GSCTRL_GPIXFMT_ARGB8888; + break; + } + + writel(gsctrl, GSCTRL); + udelay(1000); + writel(0x00000000, GBBASE); + udelay(1000); + writel(0x00ffffff, GDRCTRL); + udelay(1000); + writel((GSCADR_STR_EN | Gscadr_Gbase_Adr(0x6000)), GSCADR); + udelay(1000); + writel(0x00000000, GPLUT); + udelay(1000); +} + +static void __devinit setup_display(struct fb_info *fbi) +{ + unsigned long dsctrl = 0; + + dsctrl = DSCTRL_BLNK_POL; + if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT) + dsctrl |= DSCTRL_HS_POL; + if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT) + dsctrl |= DSCTRL_VS_POL; + writel(dsctrl, DSCTRL); + udelay(1000); + writel(0xd0303010, DMCTRL); + udelay(1000); + writel((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL); +} + +static void __devinit enable_controller(struct fb_info *fbi) +{ + writel(SYSRST_RST, SYSRST); + udelay(1000); + + + enable_clocks(fbi); + setup_memc(fbi); + setup_graphics(fbi); + setup_display(fbi); +} + +#ifdef CONFIG_PM +/* + * Power management hooks. Note that we won't be called from IRQ context, + * unlike the blank functions above, so we may sleep. + */ +static int mbxfb_suspend(struct platform_device *dev, pm_message_t state) +{ + /* make frame buffer memory enter self-refresh mode */ + writel(LMPWR_MC_PWR_SRM, LMPWR); + while (LMPWRSTAT != LMPWRSTAT_MC_PWR_SRM) + ; /* empty statement */ + + /* reset the device, since it's initial state is 'mostly sleeping' */ + writel(SYSRST_RST, SYSRST); + return 0; +} + +static int mbxfb_resume(struct platform_device *dev) +{ + struct fb_info *fbi = platform_get_drvdata(dev); + + enable_clocks(fbi); +/* setup_graphics(fbi); */ +/* setup_display(fbi); */ + + writel((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL); + return 0; +} +#else +#define mbxfb_suspend NULL +#define mbxfb_resume NULL +#endif + +/* debugfs entries */ +#ifndef CONFIG_FB_MBX_DEBUG +#define mbxfb_debugfs_init(x) do {} while(0) +#define mbxfb_debugfs_remove(x) do {} while(0) +#endif + +#define res_size(_r) (((_r)->end - (_r)->start) + 1) + +static int __devinit mbxfb_probe(struct platform_device *dev) +{ + int ret; + struct fb_info *fbi; + struct mbxfb_info *mfbi; + struct mbxfb_platform_data *pdata; + + dev_dbg(dev, "mbxfb_probe\n"); + + fbi = framebuffer_alloc(sizeof(struct mbxfb_info), &dev->dev); + if (fbi == NULL) { + dev_err(&dev->dev, "framebuffer_alloc failed\n"); + return -ENOMEM; + } + + mfbi = fbi->par; + fbi->pseudo_palette = mfbi->pseudo_palette; + pdata = dev->dev.platform_data; + if (pdata->probe) + mfbi->platform_probe = pdata->probe; + if (pdata->remove) + mfbi->platform_remove = pdata->remove; + + mfbi->fb_res = platform_get_resource(dev, IORESOURCE_MEM, 0); + mfbi->reg_res = platform_get_resource(dev, IORESOURCE_MEM, 1); + + if (!mfbi->fb_res || !mfbi->reg_res) { + dev_err(&dev->dev, "no resources found\n"); + ret = -ENODEV; + goto err1; + } + + mfbi->fb_req = request_mem_region(mfbi->fb_res->start, + res_size(mfbi->fb_res), dev->name); + if (mfbi->fb_req == NULL) { + dev_err(&dev->dev, "failed to claim framebuffer memory\n"); + ret = -EINVAL; + goto err1; + } + mfbi->fb_phys_addr = mfbi->fb_res->start; + + mfbi->reg_req = request_mem_region(mfbi->reg_res->start, + res_size(mfbi->reg_res), dev->name); + if (mfbi->reg_req == NULL) { + dev_err(&dev->dev, "failed to claim Marathon registers\n"); + ret = -EINVAL; + goto err2; + } + mfbi->reg_phys_addr = mfbi->reg_res->start; + + mfbi->reg_virt_addr = 
ioremap_nocache(mfbi->reg_phys_addr, + res_size(mfbi->reg_req)); + if (!mfbi->reg_virt_addr) { + dev_err(&dev->dev, "failed to ioremap Marathon registers\n"); + ret = -EINVAL; + goto err3; + } + virt_base_2700 = (unsigned long)mfbi->reg_virt_addr; + + mfbi->fb_virt_addr = ioremap_nocache(mfbi->fb_phys_addr, + res_size(mfbi->fb_req)); + if (!mfbi->reg_virt_addr) { + dev_err(&dev->dev, "failed to ioremap frame buffer\n"); + ret = -EINVAL; + goto err4; + } + + /* FIXME: get from platform */ + fbi->screen_base = (char __iomem *)(mfbi->fb_virt_addr + 0x60000); + fbi->screen_size = 8 * 1024 * 1024; /* 8 Megs */ + fbi->fbops = &mbxfb_ops; + + fbi->var = mbxfb_default; + fbi->fix = mbxfb_fix; + fbi->fix.smem_start = mfbi->fb_phys_addr + 0x60000; + fbi->fix.smem_len = 8 * 1024 * 1024; + fbi->fix.line_length = 640 * 2; + + ret = fb_alloc_cmap(&fbi->cmap, 256, 0); + if (ret < 0) { + dev_err(&dev->dev, "fb_alloc_cmap failed\n"); + ret = -EINVAL; + goto err5; + } + + platform_set_drvdata(dev, fbi); + + printk(KERN_INFO "fb%d: mbx frame buffer device\n", fbi->node); + + if (mfbi->platform_probe) + mfbi->platform_probe(fbi); + + enable_controller(fbi); + + mbxfb_debugfs_init(fbi); + + ret = register_framebuffer(fbi); + if (ret < 0) { + dev_err(&dev->dev, "register_framebuffer failed\n"); + ret = -EINVAL; + goto err6; + } + + return 0; + +err6: + fb_dealloc_cmap(&fbi->cmap); +err5: + iounmap(mfbi->fb_virt_addr); +err4: + iounmap(mfbi->reg_virt_addr); +err3: + release_mem_region(mfbi->reg_res->start, res_size(mfbi->reg_res)); +err2: + release_mem_region(mfbi->fb_res->start, res_size(mfbi->fb_res)); +err1: + framebuffer_release(fbi); + + return ret; +} + +static int __devexit mbxfb_remove(struct platform_device *dev) +{ + struct fb_info *fbi = platform_get_drvdata(dev); + + writel(SYSRST_RST, SYSRST); + udelay(1000); + + mbxfb_debugfs_remove(fbi); + + if (fbi) { + struct mbxfb_info *mfbi = fbi->par; + + unregister_framebuffer(fbi); + if (mfbi) { + if (mfbi->platform_remove) + mfbi->platform_remove(fbi); + + if (mfbi->fb_virt_addr) + iounmap(mfbi->fb_virt_addr); + if (mfbi->reg_virt_addr) + iounmap(mfbi->reg_virt_addr); + if (mfbi->reg_req) + release_mem_region(mfbi->reg_req->start, + res_size(mfbi->reg_req)); + if (mfbi->fb_req) + release_mem_region(mfbi->fb_req->start, + res_size(mfbi->fb_req)); + } + framebuffer_release(fbi); + } + + return 0; +} + +static struct platform_driver mbxfb_driver = { + .probe = mbxfb_probe, + .remove = mbxfb_remove, + .suspend = mbxfb_suspend, + .resume = mbxfb_resume, + .driver = { + .name = "mbx-fb", + }, +}; + +int __devinit mbxfb_init(void) +{ + return platform_driver_register(&mbxfb_driver); +} + +static void __devexit mbxfb_exit(void) +{ + platform_driver_unregister(&mbxfb_driver); +} + +module_init(mbxfb_init); +module_exit(mbxfb_exit); + +MODULE_DESCRIPTION("loadable framebuffer driver for Marathon device"); +MODULE_AUTHOR("Mike Rapoport, Compulab"); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/mbx/reg_bits.h b/drivers/video/mbx/reg_bits.h new file mode 100644 index 000000000000..c226a8e45312 --- /dev/null +++ b/drivers/video/mbx/reg_bits.h @@ -0,0 +1,418 @@ +#ifndef __REG_BITS_2700G_ +#define __REG_BITS_2700G_ + +/* use defines from asm-arm/arch-pxa/bitfields.h for bit fields access */ +#define UData(Data) ((unsigned long) (Data)) +#define Fld(Size, Shft) (((Size) << 16) + (Shft)) +#define FSize(Field) ((Field) >> 16) +#define FShft(Field) ((Field) & 0x0000FFFF) +#define FMsk(Field) (((UData (1) << FSize (Field)) - 1) << FShft (Field)) +#define FAlnMsk(Field) 
((UData (1) << FSize (Field)) - 1) +#define F1stBit(Field) (UData (1) << FShft (Field)) + +#define SYSRST_RST (1 << 0) + +/* SYSCLKSRC - SYSCLK Source Control Register */ +#define SYSCLKSRC_SEL Fld(2,0) +#define SYSCLKSRC_REF ((0x0) << FShft(SYSCLKSRC_SEL)) +#define SYSCLKSRC_PLL_1 ((0x1) << FShft(SYSCLKSRC_SEL)) +#define SYSCLKSRC_PLL_2 ((0x2) << FShft(SYSCLKSRC_SEL)) + +/* PIXCLKSRC - PIXCLK Source Control Register */ +#define PIXCLKSRC_SEL Fld(2,0) +#define PIXCLKSRC_REF ((0x0) << FShft(PIXCLKSRC_SEL)) +#define PIXCLKSRC_PLL_1 ((0x1) << FShft(PIXCLKSRC_SEL)) +#define PIXCLKSRC_PLL_2 ((0x2) << FShft(PIXCLKSRC_SEL)) + +/* Clock Disable Register */ +#define CLKSLEEP_SLP (1 << 0) + +/* Core PLL Control Register */ +#define CORE_PLL_M Fld(6,7) +#define Core_Pll_M(x) ((x) << FShft(CORE_PLL_M)) +#define CORE_PLL_N Fld(3,4) +#define Core_Pll_N(x) ((x) << FShft(CORE_PLL_N)) +#define CORE_PLL_P Fld(3,1) +#define Core_Pll_P(x) ((x) << FShft(CORE_PLL_P)) +#define CORE_PLL_EN (1 << 0) + +/* Display PLL Control Register */ +#define DISP_PLL_M Fld(6,7) +#define Disp_Pll_M(x) ((x) << FShft(DISP_PLL_M)) +#define DISP_PLL_N Fld(3,4) +#define Disp_Pll_N(x) ((x) << FShft(DISP_PLL_N)) +#define DISP_PLL_P Fld(3,1) +#define Disp_Pll_P(x) ((x) << FShft(DISP_PLL_P)) +#define DISP_PLL_EN (1 << 0) + +/* PLL status register */ +#define PLLSTAT_CORE_PLL_LOST_L (1 << 3) +#define PLLSTAT_CORE_PLL_LSTS (1 << 2) +#define PLLSTAT_DISP_PLL_LOST_L (1 << 1) +#define PLLSTAT_DISP_PLL_LSTS (1 << 0) + +/* Video and scale clock control register */ +#define VOVRCLK_EN (1 << 0) + +/* Pixel clock control register */ +#define PIXCLK_EN (1 << 0) + +/* Memory clock control register */ +#define MEMCLK_EN (1 << 0) + +/* MBX clock control register */ +#define MBXCLK_DIV Fld(2,2) +#define MBXCLK_DIV_1 ((0x0) << FShft(MBXCLK_DIV)) +#define MBXCLK_DIV_2 ((0x1) << FShft(MBXCLK_DIV)) +#define MBXCLK_DIV_3 ((0x2) << FShft(MBXCLK_DIV)) +#define MBXCLK_DIV_4 ((0x3) << FShft(MBXCLK_DIV)) +#define MBXCLK_EN Fld(2,0) +#define MBXCLK_EN_NONE ((0x0) << FShft(MBXCLK_EN)) +#define MBXCLK_EN_2D ((0x1) << FShft(MBXCLK_EN)) +#define MBXCLK_EN_BOTH ((0x2) << FShft(MBXCLK_EN)) + +/* M24 clock control register */ +#define M24CLK_DIV Fld(2,1) +#define M24CLK_DIV_1 ((0x0) << FShft(M24CLK_DIV)) +#define M24CLK_DIV_2 ((0x1) << FShft(M24CLK_DIV)) +#define M24CLK_DIV_3 ((0x2) << FShft(M24CLK_DIV)) +#define M24CLK_DIV_4 ((0x3) << FShft(M24CLK_DIV)) +#define M24CLK_EN (1 << 0) + +/* SDRAM clock control register */ +#define SDCLK_EN (1 << 0) + +/* PixClk Divisor Register */ +#define PIXCLKDIV_PD Fld(9,0) +#define Pixclkdiv_Pd(x) ((x) << FShft(PIXCLKDIV_PD)) + +/* LCD Config control register */ +#define LCDCFG_IN_FMT Fld(3,28) +#define Lcdcfg_In_Fmt(x) ((x) << FShft(LCDCFG_IN_FMT)) +#define LCDCFG_LCD1DEN_POL (1 << 27) +#define LCDCFG_LCD1FCLK_POL (1 << 26) +#define LCDCFG_LCD1LCLK_POL (1 << 25) +#define LCDCFG_LCD1D_POL (1 << 24) +#define LCDCFG_LCD2DEN_POL (1 << 23) +#define LCDCFG_LCD2FCLK_POL (1 << 22) +#define LCDCFG_LCD2LCLK_POL (1 << 21) +#define LCDCFG_LCD2D_POL (1 << 20) +#define LCDCFG_LCD1_TS (1 << 19) +#define LCDCFG_LCD1D_DS (1 << 18) +#define LCDCFG_LCD1C_DS (1 << 17) +#define LCDCFG_LCD1_IS_IN (1 << 16) +#define LCDCFG_LCD2_TS (1 << 3) +#define LCDCFG_LCD2D_DS (1 << 2) +#define LCDCFG_LCD2C_DS (1 << 1) +#define LCDCFG_LCD2_IS_IN (1 << 0) + +/* On-Die Frame Buffer Power Control Register */ +#define ODFBPWR_SLOW (1 << 2) +#define ODFBPWR_MODE Fld(2,0) +#define ODFBPWR_MODE_ACT ((0x0) << FShft(ODFBPWR_MODE)) +#define ODFBPWR_MODE_ACT_LP ((0x1) << 
FShft(ODFBPWR_MODE)) +#define ODFBPWR_MODE_SLEEP ((0x2) << FShft(ODFBPWR_MODE)) +#define ODFBPWR_MODE_SHUTD ((0x3) << FShft(ODFBPWR_MODE)) + +/* On-Die Frame Buffer Power State Status Register */ +#define ODFBSTAT_ACT (1 << 2) +#define ODFBSTAT_SLP (1 << 1) +#define ODFBSTAT_SDN (1 << 0) + +/* LMRST - Local Memory (SDRAM) Reset */ +#define LMRST_MC_RST (1 << 0) + +/* LMCFG - Local Memory (SDRAM) Configuration Register */ +#define LMCFG_LMC_DS (1 << 5) +#define LMCFG_LMD_DS (1 << 4) +#define LMCFG_LMA_DS (1 << 3) +#define LMCFG_LMC_TS (1 << 2) +#define LMCFG_LMD_TS (1 << 1) +#define LMCFG_LMA_TS (1 << 0) + +/* LMPWR - Local Memory (SDRAM) Power Control Register */ +#define LMPWR_MC_PWR_CNT Fld(2,0) +#define LMPWR_MC_PWR_ACT ((0x0) << FShft(LMPWR_MC_PWR_CNT)) /* Active */ +#define LMPWR_MC_PWR_SRM ((0x1) << FShft(LMPWR_MC_PWR_CNT)) /* Self-refresh */ +#define LMPWR_MC_PWR_DPD ((0x3) << FShft(LMPWR_MC_PWR_CNT)) /* deep power down */ + +/* LMPWRSTAT - Local Memory (SDRAM) Power Status Register */ +#define LMPWRSTAT_MC_PWR_CNT Fld(2,0) +#define LMPWRSTAT_MC_PWR_ACT ((0x0) << FShft(LMPWRSTAT_MC_PWR_CNT)) /* Active */ +#define LMPWRSTAT_MC_PWR_SRM ((0x1) << FShft(LMPWRSTAT_MC_PWR_CNT)) /* Self-refresh */ +#define LMPWRSTAT_MC_PWR_DPD ((0x3) << FShft(LMPWRSTAT_MC_PWR_CNT)) /* deep power down */ + +/* LMTYPE - Local Memory (SDRAM) Type Register */ +#define LMTYPE_CASLAT Fld(3,10) +#define LMTYPE_CASLAT_1 ((0x1) << FShft(LMTYPE_CASLAT)) +#define LMTYPE_CASLAT_2 ((0x2) << FShft(LMTYPE_CASLAT)) +#define LMTYPE_CASLAT_3 ((0x3) << FShft(LMTYPE_CASLAT)) +#define LMTYPE_BKSZ Fld(2,8) +#define LMTYPE_BKSZ_1 ((0x1) << FShft(LMTYPE_BKSZ)) +#define LMTYPE_BKSZ_2 ((0x2) << FShft(LMTYPE_BKSZ)) +#define LMTYPE_ROWSZ Fld(4,4) +#define LMTYPE_ROWSZ_11 ((0xb) << FShft(LMTYPE_ROWSZ)) +#define LMTYPE_ROWSZ_12 ((0xc) << FShft(LMTYPE_ROWSZ)) +#define LMTYPE_ROWSZ_13 ((0xd) << FShft(LMTYPE_ROWSZ)) +#define LMTYPE_COLSZ Fld(4,0) +#define LMTYPE_COLSZ_7 ((0x7) << FShft(LMTYPE_COLSZ)) +#define LMTYPE_COLSZ_8 ((0x8) << FShft(LMTYPE_COLSZ)) +#define LMTYPE_COLSZ_9 ((0x9) << FShft(LMTYPE_COLSZ)) +#define LMTYPE_COLSZ_10 ((0xa) << FShft(LMTYPE_COLSZ)) +#define LMTYPE_COLSZ_11 ((0xb) << FShft(LMTYPE_COLSZ)) +#define LMTYPE_COLSZ_12 ((0xc) << FShft(LMTYPE_COLSZ)) + +/* LMTIM - Local Memory (SDRAM) Timing Register */ +#define LMTIM_TRAS Fld(4,16) +#define Lmtim_Tras(x) ((x) << FShft(LMTIM_TRAS)) +#define LMTIM_TRP Fld(4,12) +#define Lmtim_Trp(x) ((x) << FShft(LMTIM_TRP)) +#define LMTIM_TRCD Fld(4,8) +#define Lmtim_Trcd(x) ((x) << FShft(LMTIM_TRCD)) +#define LMTIM_TRC Fld(4,4) +#define Lmtim_Trc(x) ((x) << FShft(LMTIM_TRC)) +#define LMTIM_TDPL Fld(4,0) +#define Lmtim_Tdpl(x) ((x) << FShft(LMTIM_TDPL)) + +/* LMREFRESH - Local Memory (SDRAM) tREF Control Register */ +#define LMREFRESH_TREF Fld(2,0) +#define Lmrefresh_Tref(x) ((x) << FShft(LMREFRESH_TREF)) + +/* GSCTRL - Graphics surface control register */ +#define GSCTRL_LUT_EN (1 << 31) +#define GSCTRL_GPIXFMT Fld(4,27) +#define GSCTRL_GPIXFMT_INDEXED ((0x0) << FShft(GSCTRL_GPIXFMT)) +#define GSCTRL_GPIXFMT_ARGB4444 ((0x4) << FShft(GSCTRL_GPIXFMT)) +#define GSCTRL_GPIXFMT_ARGB1555 ((0x5) << FShft(GSCTRL_GPIXFMT)) +#define GSCTRL_GPIXFMT_RGB888 ((0x6) << FShft(GSCTRL_GPIXFMT)) +#define GSCTRL_GPIXFMT_RGB565 ((0x7) << FShft(GSCTRL_GPIXFMT)) +#define GSCTRL_GPIXFMT_ARGB8888 ((0x8) << FShft(GSCTRL_GPIXFMT)) +#define GSCTRL_GAMMA_EN (1 << 26) + +#define GSCTRL_GSWIDTH Fld(11,11) +#define Gsctrl_Width(Pixel) /* Display Width [1..2048 pix.] 
*/ \ + (((Pixel) - 1) << FShft(GSCTRL_GSWIDTH)) + +#define GSCTRL_GSHEIGHT Fld(11,0) +#define Gsctrl_Height(Pixel) /* Display Height [1..2048 pix.] */ \ + (((Pixel) - 1) << FShft(GSCTRL_GSHEIGHT)) + +/* GBBASE fileds */ +#define GBBASE_GLALPHA Fld(8,24) +#define Gbbase_Glalpha(x) ((x) << FShft(GBBASE_GLALPHA)) + +#define GBBASE_COLKEY Fld(24,0) +#define Gbbase_Colkey(x) ((x) << FShft(GBBASE_COLKEY)) + +/* GDRCTRL fields */ +#define GDRCTRL_PIXDBL (1 << 31) +#define GDRCTRL_PIXHLV (1 << 30) +#define GDRCTRL_LNDBL (1 << 29) +#define GDRCTRL_LNHLV (1 << 28) +#define GDRCTRL_COLKEYM Fld(24,0) +#define Gdrctrl_Colkeym(x) ((x) << FShft(GDRCTRL_COLKEYM)) + +/* GSCADR graphics stream control address register fields */ +#define GSCADR_STR_EN (1 << 31) +#define GSCADR_COLKEY_EN (1 << 30) +#define GSCADR_COLKEYSCR (1 << 29) +#define GSCADR_BLEND_M Fld(2,27) +#define GSCADR_BLEND_NONE ((0x0) << FShft(GSCADR_BLEND_M)) +#define GSCADR_BLEND_INV ((0x1) << FShft(GSCADR_BLEND_M)) +#define GSCADR_BLEND_GLOB ((0x2) << FShft(GSCADR_BLEND_M)) +#define GSCADR_BLEND_PIX ((0x3) << FShft(GSCADR_BLEND_M)) +#define GSCADR_BLEND_POS Fld(2,24) +#define GSCADR_BLEND_GFX ((0x0) << FShft(GSCADR_BLEND_POS)) +#define GSCADR_BLEND_VID ((0x1) << FShft(GSCADR_BLEND_POS)) +#define GSCADR_BLEND_CUR ((0x2) << FShft(GSCADR_BLEND_POS)) +#define GSCADR_GBASE_ADR Fld(23,0) +#define Gscadr_Gbase_Adr(x) ((x) << FShft(GSCADR_GBASE_ADR)) + +/* GSADR graphics stride address register fields */ +#define GSADR_SRCSTRIDE Fld(10,22) +#define Gsadr_Srcstride(x) ((x) << FShft(GSADR_SRCSTRIDE)) +#define GSADR_XSTART Fld(11,11) +#define Gsadr_Xstart(x) ((x) << FShft(GSADR_XSTART)) +#define GSADR_YSTART Fld(11,0) +#define Gsadr_Ystart(y) ((y) << FShft(GSADR_YSTART)) + +/* GPLUT graphics palette register fields */ +#define GPLUT_LUTADR Fld(8,24) +#define Gplut_Lutadr(x) ((x) << FShft(GPLUT_LUTADR)) +#define GPLUT_LUTDATA Fld(24,0) +#define Gplut_Lutdata(x) ((x) << FShft(GPLUT_LUTDATA)) + +/* HCCTRL - Hardware Cursor Register fields */ +#define HCCTRL_CUR_EN (1 << 31) +#define HCCTRL_COLKEY_EN (1 << 29) +#define HCCTRL_COLKEYSRC (1 << 28) +#define HCCTRL_BLEND_M Fld(2,26) +#define HCCTRL_BLEND_NONE ((0x0) << FShft(HCCTRL_BLEND_M)) +#define HCCTRL_BLEND_INV ((0x1) << FShft(HCCTRL_BLEND_M)) +#define HCCTRL_BLEND_GLOB ((0x2) << FShft(HCCTRL_BLEND_M)) +#define HCCTRL_BLEND_PIX ((0x3) << FShft(HCCTRL_BLEND_M)) +#define HCCTRL_CPIXFMT Fld(3,23) +#define HCCTRL_CPIXFMT_RGB332 ((0x3) << FShft(HCCTRL_CPIXFMT)) +#define HCCTRL_CPIXFMT_ARGB4444 ((0x4) << FShft(HCCTRL_CPIXFMT)) +#define HCCTRL_CPIXFMT_ARGB1555 ((0x5) << FShft(HCCTRL_CPIXFMT)) +#define HCCTRL_CBASE_ADR Fld(23,0) +#define Hcctrl_Cbase_Adr(x) ((x) << FShft(HCCTRL_CBASE_ADR)) + +/* HCSIZE Hardware Cursor Size Register fields */ +#define HCSIZE_BLEND_POS Fld(2,29) +#define HCSIZE_BLEND_GFX ((0x0) << FShft(HCSIZE_BLEND_POS)) +#define HCSIZE_BLEND_VID ((0x1) << FShft(HCSIZE_BLEND_POS)) +#define HCSIZE_BLEND_CUR ((0x2) << FShft(HCSIZE_BLEND_POS)) +#define HCSIZE_CWIDTH Fld(3,16) +#define Hcsize_Cwidth(x) ((x) << FShft(HCSIZE_CWIDTH)) +#define HCSIZE_CHEIGHT Fld(3,0) +#define Hcsize_Cheight(x) ((x) << FShft(HCSIZE_CHEIGHT)) + +/* HCPOS Hardware Cursor Position Register fields */ +#define HCPOS_SWITCHSRC (1 << 30) +#define HCPOS_CURBLINK Fld(6,24) +#define Hcpos_Curblink(x) ((x) << FShft(HCPOS_CURBLINK)) +#define HCPOS_XSTART Fld(12,12) +#define Hcpos_Xstart(x) ((x) << FShft(HCPOS_XSTART)) +#define HCPOS_YSTART Fld(12,0) +#define Hcpos_Ystart(y) ((y) << FShft(HCPOS_YSTART)) + +/* HCBADR Hardware Cursor 
Blend Address Register */ +#define HCBADR_GLALPHA Fld(8,24) +#define Hcbadr_Glalpha(x) ((x) << FShft(HCBADR_GLALPHA)) +#define HCBADR_COLKEY Fld(24,0) +#define Hcbadr_Colkey(x) ((x) << FShft(HCBADR_COLKEY)) + +/* HCCKMSK - Hardware Cursor Color Key Mask Register */ +#define HCCKMSK_COLKEY_M Fld(24,0) +#define Hcckmsk_Colkey_M(x) ((x) << FShft(HCCKMSK_COLKEY_M)) + +/* DSCTRL - Display sync control register */ +#define DSCTRL_SYNCGEN_EN (1 << 31) +#define DSCTRL_DPL_RST (1 << 29) +#define DSCTRL_PWRDN_M (1 << 28) +#define DSCTRL_UPDSYNCCNT (1 << 26) +#define DSCTRL_UPDINTCNT (1 << 25) +#define DSCTRL_UPDCNT (1 << 24) +#define DSCTRL_UPDWAIT Fld(4,16) +#define Dsctrl_Updwait(x) ((x) << FShft(DSCTRL_UPDWAIT)) +#define DSCTRL_CLKPOL (1 << 11) +#define DSCTRL_CSYNC_EN (1 << 10) +#define DSCTRL_VS_SLAVE (1 << 7) +#define DSCTRL_HS_SLAVE (1 << 6) +#define DSCTRL_BLNK_POL (1 << 5) +#define DSCTRL_BLNK_DIS (1 << 4) +#define DSCTRL_VS_POL (1 << 3) +#define DSCTRL_VS_DIS (1 << 2) +#define DSCTRL_HS_POL (1 << 1) +#define DSCTRL_HS_DIS (1 << 0) + +/* DHT01 - Display horizontal timing register 01 */ +#define DHT01_HBPS Fld(12,16) +#define Dht01_Hbps(x) ((x) << FShft(DHT01_HBPS)) +#define DHT01_HT Fld(12,0) +#define Dht01_Ht(x) ((x) << FShft(DHT01_HT)) + +/* DHT02 - Display horizontal timing register 02 */ +#define DHT02_HAS Fld(12,16) +#define Dht02_Has(x) ((x) << FShft(DHT02_HAS)) +#define DHT02_HLBS Fld(12,0) +#define Dht02_Hlbs(x) ((x) << FShft(DHT02_HLBS)) + +/* DHT03 - Display horizontal timing register 03 */ +#define DHT03_HFPS Fld(12,16) +#define Dht03_Hfps(x) ((x) << FShft(DHT03_HFPS)) +#define DHT03_HRBS Fld(12,0) +#define Dht03_Hrbs(x) ((x) << FShft(DHT03_HRBS)) + +/* DVT01 - Display vertical timing register 01 */ +#define DVT01_VBPS Fld(12,16) +#define Dvt01_Vbps(x) ((x) << FShft(DVT01_VBPS)) +#define DVT01_VT Fld(12,0) +#define Dvt01_Vt(x) ((x) << FShft(DVT01_VT)) + +/* DVT02 - Display vertical timing register 02 */ +#define DVT02_VAS Fld(12,16) +#define Dvt02_Vas(x) ((x) << FShft(DVT02_VAS)) +#define DVT02_VTBS Fld(12,0) +#define Dvt02_Vtbs(x) ((x) << FShft(DVT02_VTBS)) + +/* DVT03 - Display vertical timing register 03 */ +#define DVT03_VFPS Fld(12,16) +#define Dvt03_Vfps(x) ((x) << FShft(DVT03_VFPS)) +#define DVT03_VBBS Fld(12,0) +#define Dvt03_Vbbs(x) ((x) << FShft(DVT03_VBBS)) + +/* DVECTRL - display vertical event control register */ +#define DVECTRL_VEVENT Fld(12,16) +#define Dvectrl_Vevent(x) ((x) << FShft(DVECTRL_VEVENT)) +#define DVECTRL_VFETCH Fld(12,0) +#define Dvectrl_Vfetch(x) ((x) << FShft(DVECTRL_VFETCH)) + +/* DHDET - display horizontal DE timing register */ +#define DHDET_HDES Fld(12,16) +#define Dhdet_Hdes(x) ((x) << FShft(DHDET_HDES)) +#define DHDET_HDEF Fld(12,0) +#define Dhdet_Hdef(x) ((x) << FShft(DHDET_HDEF)) + +/* DVDET - display vertical DE timing register */ +#define DVDET_VDES Fld(12,16) +#define Dvdet_Vdes(x) ((x) << FShft(DVDET_VDES)) +#define DVDET_VDEF Fld(12,0) +#define Dvdet_Vdef(x) ((x) << FShft(DVDET_VDEF)) + +/* DODMSK - display output data mask register */ +#define DODMSK_MASK_LVL (1 << 31) +#define DODMSK_BLNK_LVL (1 << 30) +#define DODMSK_MASK_B Fld(8,16) +#define Dodmsk_Mask_B(x) ((x) << FShft(DODMSK_MASK_B)) +#define DODMSK_MASK_G Fld(8,8) +#define Dodmsk_Mask_G(x) ((x) << FShft(DODMSK_MASK_G)) +#define DODMSK_MASK_R Fld(8,0) +#define Dodmsk_Mask_R(x) ((x) << FShft(DODMSK_MASK_R)) + +/* DBCOL - display border color control register */ +#define DBCOL_BORDCOL Fld(24,0) +#define Dbcol_Bordcol(x) ((x) << FShft(DBCOL_BORDCOL)) + +/* DVLNUM - display 
vertical line number register */ +#define DVLNUM_VLINE Fld(12,0) +#define Dvlnum_Vline(x) ((x) << FShft(DVLNUM_VLINE)) + +/* DMCTRL - Display Memory Control Register */ +#define DMCTRL_MEM_REF Fld(2,30) +#define DMCTRL_MEM_REF_ACT ((0x0) << FShft(DMCTRL_MEM_REF)) +#define DMCTRL_MEM_REF_HB ((0x1) << FShft(DMCTRL_MEM_REF)) +#define DMCTRL_MEM_REF_VB ((0x2) << FShft(DMCTRL_MEM_REF)) +#define DMCTRL_MEM_REF_BOTH ((0x3) << FShft(DMCTRL_MEM_REF)) +#define DMCTRL_UV_THRHLD Fld(6,24) +#define Dmctrl_Uv_Thrhld(x) ((x) << FShft(DMCTRL_UV_THRHLD)) +#define DMCTRL_V_THRHLD Fld(7,16) +#define Dmctrl_V_Thrhld(x) ((x) << FShft(DMCTRL_V_THRHLD)) +#define DMCTRL_D_THRHLD Fld(7,8) +#define Dmctrl_D_Thrhld(x) ((x) << FShft(DMCTRL_D_THRHLD)) +#define DMCTRL_BURSTLEN Fld(6,0) +#define Dmctrl_Burstlen(x) ((x) << FShft(DMCTRL_BURSTLEN)) + + +/* DLSTS - display load status register */ +#define DLSTS_RLD_ADONE (1 << 23) +/* #define DLSTS_RLD_ADOUT Fld(23,0) */ + +/* DLLCTRL - display list load control register */ +#define DLLCTRL_RLD_ADRLN Fld(8,24) +#define Dllctrl_Rld_Adrln(x) ((x) << FShft(DLLCTRL_RLD_ADRLN)) + +/* SPOCTRL - Scale Pitch/Order Control Register */ +#define SPOCTRL_H_SC_BP (1 << 31) +#define SPOCTRL_V_SC_BP (1 << 30) +#define SPOCTRL_HV_SC_OR (1 << 29) +#define SPOCTRL_VS_UR_C (1 << 27) +#define SPOCTRL_VORDER Fld(2,16) +#define SPOCTRL_VORDER_1TAP ((0x0) << FShft(SPOCTRL_VORDER)) +#define SPOCTRL_VORDER_2TAP ((0x1) << FShft(SPOCTRL_VORDER)) +#define SPOCTRL_VORDER_4TAP ((0x3) << FShft(SPOCTRL_VORDER)) +#define SPOCTRL_VPITCH Fld(16,0) +#define Spoctrl_Vpitch(x) ((x) << FShft(SPOCTRL_VPITCH)) + +#endif /* __REG_BITS_2700G_ */ diff --git a/drivers/video/mbx/regs.h b/drivers/video/mbx/regs.h new file mode 100644 index 000000000000..ad20be07666b --- /dev/null +++ b/drivers/video/mbx/regs.h @@ -0,0 +1,195 @@ +#ifndef __REGS_2700G_ +#define __REGS_2700G_ + +/* extern unsigned long virt_base_2700; */ +/* #define __REG_2700G(x) (*(volatile unsigned long*)((x)+virt_base_2700)) */ +#define __REG_2700G(x) ((x)+virt_base_2700) + +/* System Configuration Registers (0x0000_0000 0x0000_0010) */ +#define SYSCFG __REG_2700G(0x00000000) +#define PFBASE __REG_2700G(0x00000004) +#define PFCEIL __REG_2700G(0x00000008) +#define POLLFLAG __REG_2700G(0x0000000c) +#define SYSRST __REG_2700G(0x00000010) + +/* Interrupt Control Registers (0x0000_0014 0x0000_002F) */ +#define NINTPW __REG_2700G(0x00000014) +#define MINTENABLE __REG_2700G(0x00000018) +#define MINTSTAT __REG_2700G(0x0000001c) +#define SINTENABLE __REG_2700G(0x00000020) +#define SINTSTAT __REG_2700G(0x00000024) +#define SINTCLR __REG_2700G(0x00000028) + +/* Clock Control Registers (0x0000_002C 0x0000_005F) */ +#define SYSCLKSRC __REG_2700G(0x0000002c) +#define PIXCLKSRC __REG_2700G(0x00000030) +#define CLKSLEEP __REG_2700G(0x00000034) +#define COREPLL __REG_2700G(0x00000038) +#define DISPPLL __REG_2700G(0x0000003c) +#define PLLSTAT __REG_2700G(0x00000040) +#define VOVRCLK __REG_2700G(0x00000044) +#define PIXCLK __REG_2700G(0x00000048) +#define MEMCLK __REG_2700G(0x0000004c) +#define M24CLK __REG_2700G(0x00000054) +#define MBXCLK __REG_2700G(0x00000054) +#define SDCLK __REG_2700G(0x00000058) +#define PIXCLKDIV __REG_2700G(0x0000005c) + +/* LCD Port Control Register (0x0000_0060 0x0000_006F) */ +#define LCD_CONFIG __REG_2700G(0x00000060) + +/* On-Die Frame Buffer Registers (0x0000_0064 0x0000_006B) */ +#define ODFBPWR __REG_2700G(0x00000064) +#define ODFBSTAT __REG_2700G(0x00000068) + +/* GPIO Registers (0x0000_006C 0x0000_007F) */ +#define GPIOCGF 
__REG_2700G(0x0000006c) +#define GPIOHI __REG_2700G(0x00000070) +#define GPIOLO __REG_2700G(0x00000074) +#define GPIOSTAT __REG_2700G(0x00000078) + +/* Pulse Width Modulator (PWM) Registers (0x0000_0200 0x0000_02FF) */ +#define PWMRST __REG_2700G(0x00000200) +#define PWMCFG __REG_2700G(0x00000204) +#define PWM0DIV __REG_2700G(0x00000210) +#define PWM0DUTY __REG_2700G(0x00000214) +#define PWM0PER __REG_2700G(0x00000218) +#define PWM1DIV __REG_2700G(0x00000220) +#define PWM1DUTY __REG_2700G(0x00000224) +#define PWM1PER __REG_2700G(0x00000228) + +/* Identification (ID) Registers (0x0000_0300 0x0000_0FFF) */ +#define ID __REG_2700G(0x00000FF0) + +/* Local Memory (SDRAM) Interface Registers (0x0000_1000 0x0000_1FFF) */ +#define LMRST __REG_2700G(0x00001000) +#define LMCFG __REG_2700G(0x00001004) +#define LMPWR __REG_2700G(0x00001008) +#define LMPWRSTAT __REG_2700G(0x0000100c) +#define LMCEMR __REG_2700G(0x00001010) +#define LMTYPE __REG_2700G(0x00001014) +#define LMTIM __REG_2700G(0x00001018) +#define LMREFRESH __REG_2700G(0x0000101c) +#define LMPROTMIN __REG_2700G(0x00001020) +#define LMPROTMAX __REG_2700G(0x00001024) +#define LMPROTCFG __REG_2700G(0x00001028) +#define LMPROTERR __REG_2700G(0x0000102c) + +/* Plane Controller Registers (0x0000_2000 0x0000_2FFF) */ +#define GSCTRL __REG_2700G(0x00002000) +#define VSCTRL __REG_2700G(0x00002004) +#define GBBASE __REG_2700G(0x00002020) +#define VBBASE __REG_2700G(0x00002024) +#define GDRCTRL __REG_2700G(0x00002040) +#define VCMSK __REG_2700G(0x00002044) +#define GSCADR __REG_2700G(0x00002060) +#define VSCADR __REG_2700G(0x00002064) +#define VUBASE __REG_2700G(0x00002084) +#define VVBASE __REG_2700G(0x000020a4) +#define GSADR __REG_2700G(0x000020c0) +#define VSADR __REG_2700G(0x000020c4) +#define HCCTRL __REG_2700G(0x00002100) +#define HCSIZE __REG_2700G(0x00002110) +#define HCPOS __REG_2700G(0x00002120) +#define HCBADR __REG_2700G(0x00002130) +#define HCCKMSK __REG_2700G(0x00002140) +#define GPLUT __REG_2700G(0x00002150) +#define DSCTRL __REG_2700G(0x00002154) +#define DHT01 __REG_2700G(0x00002158) +#define DHT02 __REG_2700G(0x0000215c) +#define DHT03 __REG_2700G(0x00002160) +#define DVT01 __REG_2700G(0x00002164) +#define DVT02 __REG_2700G(0x00002168) +#define DVT03 __REG_2700G(0x0000216c) +#define DBCOL __REG_2700G(0x00002170) +#define BGCOLOR __REG_2700G(0x00002174) +#define DINTRS __REG_2700G(0x00002178) +#define DINTRE __REG_2700G(0x0000217c) +#define DINTRCNT __REG_2700G(0x00002180) +#define DSIG __REG_2700G(0x00002184) +#define DMCTRL __REG_2700G(0x00002188) +#define CLIPCTRL __REG_2700G(0x0000218c) +#define SPOCTRL __REG_2700G(0x00002190) +#define SVCTRL __REG_2700G(0x00002194) + +/* 0x0000_2198 */ +/* 0x0000_21A8 VSCOEFF[0:4] Video Scalar Vertical Coefficient [0:4] 4.14.5 */ +#define VSCOEFF0 __REG_2700G(0x00002198) +#define VSCOEFF1 __REG_2700G(0x0000219c) +#define VSCOEFF2 __REG_2700G(0x000021a0) +#define VSCOEFF3 __REG_2700G(0x000021a4) +#define VSCOEFF4 __REG_2700G(0x000021a8) + +#define SHCTRL __REG_2700G(0x000021b0) + +/* 0x0000_21B4 */ +/* 0x0000_21D4 HSCOEFF[0:8] Video Scalar Horizontal Coefficient [0:8] 4.14.7 */ +#define HSCOEFF0 __REG_2700G(0x000021b4) +#define HSCOEFF1 __REG_2700G(0x000021b8) +#define HSCOEFF2 __REG_2700G(0x000021bc) +#define HSCOEFF3 __REG_2700G(0x000021b0) +#define HSCOEFF4 __REG_2700G(0x000021c4) +#define HSCOEFF5 __REG_2700G(0x000021c8) +#define HSCOEFF6 __REG_2700G(0x000021cc) +#define HSCOEFF7 __REG_2700G(0x000021d0) +#define HSCOEFF8 __REG_2700G(0x000021d4) + +#define SSSIZE __REG_2700G(0x000021D8) + +/* 
0x0000_2200 */ +/* 0x0000_2240 VIDGAM[0:16] Video Gamma LUT Index [0:16] 4.15.2 */ +#define VIDGAM0 __REG_2700G(0x00002200) +#define VIDGAM1 __REG_2700G(0x00002204) +#define VIDGAM2 __REG_2700G(0x00002208) +#define VIDGAM3 __REG_2700G(0x0000220c) +#define VIDGAM4 __REG_2700G(0x00002210) +#define VIDGAM5 __REG_2700G(0x00002214) +#define VIDGAM6 __REG_2700G(0x00002218) +#define VIDGAM7 __REG_2700G(0x0000221c) +#define VIDGAM8 __REG_2700G(0x00002220) +#define VIDGAM9 __REG_2700G(0x00002224) +#define VIDGAM10 __REG_2700G(0x00002228) +#define VIDGAM11 __REG_2700G(0x0000222c) +#define VIDGAM12 __REG_2700G(0x00002230) +#define VIDGAM13 __REG_2700G(0x00002234) +#define VIDGAM14 __REG_2700G(0x00002238) +#define VIDGAM15 __REG_2700G(0x0000223c) +#define VIDGAM16 __REG_2700G(0x00002240) + +/* 0x0000_2250 */ +/* 0x0000_2290 GFXGAM[0:16] Graphics Gamma LUT Index [0:16] 4.15.3 */ +#define GFXGAM0 __REG_2700G(0x00002250) +#define GFXGAM1 __REG_2700G(0x00002254) +#define GFXGAM2 __REG_2700G(0x00002258) +#define GFXGAM3 __REG_2700G(0x0000225c) +#define GFXGAM4 __REG_2700G(0x00002260) +#define GFXGAM5 __REG_2700G(0x00002264) +#define GFXGAM6 __REG_2700G(0x00002268) +#define GFXGAM7 __REG_2700G(0x0000226c) +#define GFXGAM8 __REG_2700G(0x00002270) +#define GFXGAM9 __REG_2700G(0x00002274) +#define GFXGAM10 __REG_2700G(0x00002278) +#define GFXGAM11 __REG_2700G(0x0000227c) +#define GFXGAM12 __REG_2700G(0x00002280) +#define GFXGAM13 __REG_2700G(0x00002284) +#define GFXGAM14 __REG_2700G(0x00002288) +#define GFXGAM15 __REG_2700G(0x0000228c) +#define GFXGAM16 __REG_2700G(0x00002290) + +#define DLSTS __REG_2700G(0x00002300) +#define DLLCTRL __REG_2700G(0x00002304) +#define DVLNUM __REG_2700G(0x00002308) +#define DUCTRL __REG_2700G(0x0000230c) +#define DVECTRL __REG_2700G(0x00002310) +#define DHDET __REG_2700G(0x00002314) +#define DVDET __REG_2700G(0x00002318) +#define DODMSK __REG_2700G(0x0000231c) +#define CSC01 __REG_2700G(0x00002330) +#define CSC02 __REG_2700G(0x00002334) +#define CSC03 __REG_2700G(0x00002338) +#define CSC04 __REG_2700G(0x0000233c) +#define CSC05 __REG_2700G(0x00002340) + +#define FB_MEMORY_START __REG_2700G(0x00060000) + +#endif /* __REGS_2700G_ */ diff --git a/fs/char_dev.c b/fs/char_dev.c index a4cbc6706ef0..3483d3cf8087 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -182,6 +182,28 @@ int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count, return 0; } +/** + * register_chrdev() - Register a major number for character devices. + * @major: major device number or 0 for dynamic allocation + * @name: name of this range of devices + * @fops: file operations associated with this devices + * + * If @major == 0 this functions will dynamically allocate a major and return + * its number. + * + * If @major > 0 this function will attempt to reserve a device with the given + * major number and will return zero on success. + * + * Returns a -ve errno on failure. + * + * The name of this device has nothing to do with the name of the device in + * /dev. It only helps to keep track of the different owners of devices. If + * your module name has only one type of devices it's ok to use e.g. the name + * of the module here. + * + * This function registers a range of 256 minor numbers. The first minor number + * is 0. 
+ */ int register_chrdev(unsigned int major, const char *name, const struct file_operations *fops) { diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c index 10c46231ce15..efbb586bed4b 100644 --- a/fs/jfs/jfs_txnmgr.c +++ b/fs/jfs/jfs_txnmgr.c @@ -2944,7 +2944,7 @@ int jfs_sync(void *arg) * Inode is being freed */ list_del_init(&jfs_ip->anon_inode_list); - } else if (! !mutex_trylock(&jfs_ip->commit_mutex)) { + } else if (mutex_trylock(&jfs_ip->commit_mutex)) { /* * inode will be removed from anonymous list * when it is committed diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 09ea03f62277..295268ad231b 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -165,8 +165,8 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode, out3: txEnd(tid); - mutex_unlock(&JFS_IP(dip)->commit_mutex); mutex_unlock(&JFS_IP(ip)->commit_mutex); + mutex_unlock(&JFS_IP(dip)->commit_mutex); if (rc) { free_ea_wmap(ip); ip->i_nlink = 0; @@ -300,8 +300,8 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode) out3: txEnd(tid); - mutex_unlock(&JFS_IP(dip)->commit_mutex); mutex_unlock(&JFS_IP(ip)->commit_mutex); + mutex_unlock(&JFS_IP(dip)->commit_mutex); if (rc) { free_ea_wmap(ip); ip->i_nlink = 0; @@ -384,8 +384,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry) if (rc == -EIO) txAbort(tid, 1); txEnd(tid); - mutex_unlock(&JFS_IP(dip)->commit_mutex); mutex_unlock(&JFS_IP(ip)->commit_mutex); + mutex_unlock(&JFS_IP(dip)->commit_mutex); goto out2; } @@ -422,8 +422,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry) txEnd(tid); - mutex_unlock(&JFS_IP(dip)->commit_mutex); mutex_unlock(&JFS_IP(ip)->commit_mutex); + mutex_unlock(&JFS_IP(dip)->commit_mutex); /* * Truncating the directory index table is not guaranteed. 
It @@ -503,8 +503,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) if (rc == -EIO) txAbort(tid, 1); /* Marks FS Dirty */ txEnd(tid); - mutex_unlock(&JFS_IP(dip)->commit_mutex); mutex_unlock(&JFS_IP(ip)->commit_mutex); + mutex_unlock(&JFS_IP(dip)->commit_mutex); IWRITE_UNLOCK(ip); goto out1; } @@ -527,8 +527,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) if ((new_size = commitZeroLink(tid, ip)) < 0) { txAbort(tid, 1); /* Marks FS Dirty */ txEnd(tid); - mutex_unlock(&JFS_IP(dip)->commit_mutex); mutex_unlock(&JFS_IP(ip)->commit_mutex); + mutex_unlock(&JFS_IP(dip)->commit_mutex); IWRITE_UNLOCK(ip); rc = new_size; goto out1; @@ -556,9 +556,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) txEnd(tid); - mutex_unlock(&JFS_IP(dip)->commit_mutex); mutex_unlock(&JFS_IP(ip)->commit_mutex); - + mutex_unlock(&JFS_IP(dip)->commit_mutex); while (new_size && (rc == 0)) { tid = txBegin(dip->i_sb, 0); @@ -847,8 +846,8 @@ static int jfs_link(struct dentry *old_dentry, out: txEnd(tid); - mutex_unlock(&JFS_IP(dir)->commit_mutex); mutex_unlock(&JFS_IP(ip)->commit_mutex); + mutex_unlock(&JFS_IP(dir)->commit_mutex); jfs_info("jfs_link: rc:%d", rc); return rc; @@ -1037,8 +1036,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, out3: txEnd(tid); - mutex_unlock(&JFS_IP(dip)->commit_mutex); mutex_unlock(&JFS_IP(ip)->commit_mutex); + mutex_unlock(&JFS_IP(dip)->commit_mutex); if (rc) { free_ea_wmap(ip); ip->i_nlink = 0; @@ -1160,10 +1159,11 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (S_ISDIR(new_ip->i_mode)) { new_ip->i_nlink--; if (new_ip->i_nlink) { - mutex_unlock(&JFS_IP(new_dir)->commit_mutex); - mutex_unlock(&JFS_IP(old_ip)->commit_mutex); + mutex_unlock(&JFS_IP(new_ip)->commit_mutex); if (old_dir != new_dir) mutex_unlock(&JFS_IP(old_dir)->commit_mutex); + mutex_unlock(&JFS_IP(old_ip)->commit_mutex); + mutex_unlock(&JFS_IP(new_dir)->commit_mutex); if (!S_ISDIR(old_ip->i_mode) && new_ip) IWRITE_UNLOCK(new_ip); jfs_error(new_ip->i_sb, @@ -1281,13 +1281,12 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, out4: txEnd(tid); - - mutex_unlock(&JFS_IP(new_dir)->commit_mutex); - mutex_unlock(&JFS_IP(old_ip)->commit_mutex); - if (old_dir != new_dir) - mutex_unlock(&JFS_IP(old_dir)->commit_mutex); if (new_ip) mutex_unlock(&JFS_IP(new_ip)->commit_mutex); + if (old_dir != new_dir) + mutex_unlock(&JFS_IP(old_dir)->commit_mutex); + mutex_unlock(&JFS_IP(old_ip)->commit_mutex); + mutex_unlock(&JFS_IP(new_dir)->commit_mutex); while (new_size && (rc == 0)) { tid = txBegin(new_ip->i_sb, 0); diff --git a/fs/namei.c b/fs/namei.c index c9750d755aff..e01070d7bf58 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1712,8 +1712,14 @@ do_link: if (error) goto exit_dput; error = __do_follow_link(&path, nd); - if (error) + if (error) { + /* Does someone understand code flow here? Or it is only + * me so stupid? Anathema to whoever designed this non-sense + * with "intent.open". 
+ */ + release_open_intent(nd); return error; + } nd->flags &= ~LOOKUP_PARENT; if (nd->last_type == LAST_BIND) goto ok; diff --git a/fs/proc/array.c b/fs/proc/array.c index 7495d3e20775..0b615d62a159 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -74,6 +74,7 @@ #include <linux/times.h> #include <linux/cpuset.h> #include <linux/rcupdate.h> +#include <linux/delayacct.h> #include <asm/uaccess.h> #include <asm/pgtable.h> @@ -411,7 +412,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole) res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \ %lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \ -%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n", +%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu %llu\n", task->pid, tcomm, state, @@ -455,7 +456,8 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole) task->exit_signal, task_cpu(task), task->rt_priority, - task->policy); + task->policy, + (unsigned long long)delayacct_blkio_ticks(task)); if(mm) mmput(mm); return res; diff --git a/fs/proc/base.c b/fs/proc/base.c index 243a94af0427..fe8d55fb17cc 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -551,6 +551,27 @@ static int proc_fd_access_allowed(struct inode *inode) return allowed; } +static int proc_setattr(struct dentry *dentry, struct iattr *attr) +{ + int error; + struct inode *inode = dentry->d_inode; + + if (attr->ia_valid & ATTR_MODE) + return -EPERM; + + error = inode_change_ok(inode, attr); + if (!error) { + error = security_inode_setattr(dentry, attr); + if (!error) + error = inode_setattr(inode, attr); + } + return error; +} + +static struct inode_operations proc_def_inode_operations = { + .setattr = proc_setattr, +}; + extern struct seq_operations mounts_op; struct proc_mounts { struct seq_file m; @@ -1111,7 +1132,8 @@ out: static struct inode_operations proc_pid_link_inode_operations = { .readlink = proc_pid_readlink, - .follow_link = proc_pid_follow_link + .follow_link = proc_pid_follow_link, + .setattr = proc_setattr, }; static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir) @@ -1285,6 +1307,7 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st ei = PROC_I(inode); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode->i_ino = fake_ino(task->pid, ino); + inode->i_op = &proc_def_inode_operations; /* * grab the reference to task. 
@@ -1339,6 +1362,7 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd) inode->i_uid = 0; inode->i_gid = 0; } + inode->i_mode &= ~(S_ISUID | S_ISGID); security_task_to_inode(task, inode); put_task_struct(task); return 1; @@ -1389,6 +1413,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) inode->i_uid = 0; inode->i_gid = 0; } + inode->i_mode &= ~(S_ISUID | S_ISGID); security_task_to_inode(task, inode); put_task_struct(task); return 1; @@ -1527,11 +1552,13 @@ static struct file_operations proc_task_operations = { */ static struct inode_operations proc_fd_inode_operations = { .lookup = proc_lookupfd, + .setattr = proc_setattr, }; static struct inode_operations proc_task_inode_operations = { .lookup = proc_task_lookup, .getattr = proc_task_getattr, + .setattr = proc_setattr, }; #ifdef CONFIG_SECURITY @@ -1845,11 +1872,13 @@ static struct file_operations proc_tid_base_operations = { static struct inode_operations proc_tgid_base_inode_operations = { .lookup = proc_tgid_base_lookup, .getattr = pid_getattr, + .setattr = proc_setattr, }; static struct inode_operations proc_tid_base_inode_operations = { .lookup = proc_tid_base_lookup, .getattr = pid_getattr, + .setattr = proc_setattr, }; #ifdef CONFIG_SECURITY @@ -1892,11 +1921,13 @@ static struct dentry *proc_tid_attr_lookup(struct inode *dir, static struct inode_operations proc_tgid_attr_inode_operations = { .lookup = proc_tgid_attr_lookup, .getattr = pid_getattr, + .setattr = proc_setattr, }; static struct inode_operations proc_tid_attr_inode_operations = { .lookup = proc_tid_attr_lookup, .getattr = pid_getattr, + .setattr = proc_setattr, }; #endif diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 6dcef089e18e..49dfb2ab783e 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -192,7 +192,7 @@ int proc_fill_super(struct super_block *s, void *data, int silent) { struct inode * root_inode; - s->s_flags |= MS_NODIRATIME; + s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC; s->s_blocksize = 1024; s->s_blocksize_bits = 10; s->s_magic = PROC_SUPER_MAGIC; diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c index 5d8a8cfebc70..c533ec1bcaec 100644 --- a/fs/reiserfs/procfs.c +++ b/fs/reiserfs/procfs.c @@ -492,9 +492,17 @@ static void add_file(struct super_block *sb, char *name, int reiserfs_proc_info_init(struct super_block *sb) { + char b[BDEVNAME_SIZE]; + char *s; + + /* Some block devices use /'s */ + strlcpy(b, reiserfs_bdevname(sb), BDEVNAME_SIZE); + s = strchr(b, '/'); + if (s) + *s = '!'; + spin_lock_init(&__PINFO(sb).lock); - REISERFS_SB(sb)->procdir = - proc_mkdir(reiserfs_bdevname(sb), proc_info_root); + REISERFS_SB(sb)->procdir = proc_mkdir(b, proc_info_root); if (REISERFS_SB(sb)->procdir) { REISERFS_SB(sb)->procdir->owner = THIS_MODULE; REISERFS_SB(sb)->procdir->data = sb; @@ -508,13 +516,22 @@ int reiserfs_proc_info_init(struct super_block *sb) return 0; } reiserfs_warning(sb, "reiserfs: cannot create /proc/%s/%s", - proc_info_root_name, reiserfs_bdevname(sb)); + proc_info_root_name, b); return 1; } int reiserfs_proc_info_done(struct super_block *sb) { struct proc_dir_entry *de = REISERFS_SB(sb)->procdir; + char b[BDEVNAME_SIZE]; + char *s; + + /* Some block devices use /'s */ + strlcpy(b, reiserfs_bdevname(sb), BDEVNAME_SIZE); + s = strchr(b, '/'); + if (s) + *s = '!'; + if (de) { remove_proc_entry("journal", de); remove_proc_entry("oidmap", de); @@ -528,7 +545,7 @@ int reiserfs_proc_info_done(struct super_block *sb) __PINFO(sb).exiting = 1; spin_unlock(&__PINFO(sb).lock); if 
(proc_info_root) { - remove_proc_entry(reiserfs_bdevname(sb), proc_info_root); + remove_proc_entry(b, proc_info_root); REISERFS_SB(sb)->procdir = NULL; } return 0; diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index ceda3a2859d2..7858703ed84c 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -246,8 +246,8 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *); #define BUF_BUSY XBF_DONT_BLOCK #define XFS_BUF_BFLAGS(bp) ((bp)->b_flags) -#define XFS_BUF_ZEROFLAGS(bp) \ - ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI)) +#define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \ + ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED)) #define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE) #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE) diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 9bdef9d51900..4754f342a5d3 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -314,6 +314,13 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp) return; } + if (xfs_readonly_buftarg(mp->m_ddev_targp)) { + xfs_fs_cmn_err(CE_NOTE, mp, + "Disabling barriers, underlying device is readonly"); + mp->m_flags &= ~XFS_MOUNT_BARRIER; + return; + } + error = xfs_barrier_test(mp); if (error) { xfs_fs_cmn_err(CE_NOTE, mp, diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c index e95e99f7168f..f137856c3261 100644 --- a/fs/xfs/quota/xfs_qm_bhv.c +++ b/fs/xfs/quota/xfs_qm_bhv.c @@ -217,17 +217,24 @@ xfs_qm_statvfs( return 0; dp = &dqp->q_core; - limit = dp->d_blk_softlimit ? dp->d_blk_softlimit : dp->d_blk_hardlimit; + limit = dp->d_blk_softlimit ? + be64_to_cpu(dp->d_blk_softlimit) : + be64_to_cpu(dp->d_blk_hardlimit); if (limit && statp->f_blocks > limit) { statp->f_blocks = limit; - statp->f_bfree = (statp->f_blocks > dp->d_bcount) ? - (statp->f_blocks - dp->d_bcount) : 0; + statp->f_bfree = + (statp->f_blocks > be64_to_cpu(dp->d_bcount)) ? + (statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0; } - limit = dp->d_ino_softlimit ? dp->d_ino_softlimit : dp->d_ino_hardlimit; + + limit = dp->d_ino_softlimit ? + be64_to_cpu(dp->d_ino_softlimit) : + be64_to_cpu(dp->d_ino_hardlimit); if (limit && statp->f_files > limit) { statp->f_files = limit; - statp->f_ffree = (statp->f_files > dp->d_icount) ? - (statp->f_ffree - dp->d_icount) : 0; + statp->f_ffree = + (statp->f_files > be64_to_cpu(dp->d_icount)) ? + (statp->f_ffree - be64_to_cpu(dp->d_icount)) : 0; } xfs_qm_dqput(dqp); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 86c1bf0bba9e..1f8ecff8553a 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -334,10 +334,9 @@ xfs_itobp( #if !defined(__KERNEL__) ni = 0; #elif defined(DEBUG) - ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : - (BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog); + ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog; #else /* usual case */ - ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 
0 : 1; + ni = 1; #endif for (i = 0; i < ni; i++) { @@ -348,11 +347,15 @@ xfs_itobp( (i << mp->m_sb.sb_inodelog)); di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC && XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT)); - if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, - XFS_RANDOM_ITOBP_INOTOBP))) { + if (unlikely(XFS_TEST_ERROR(!di_ok, mp, + XFS_ERRTAG_ITOBP_INOTOBP, + XFS_RANDOM_ITOBP_INOTOBP))) { + if (imap_flags & XFS_IMAP_BULKSTAT) { + xfs_trans_brelse(tp, bp); + return XFS_ERROR(EINVAL); + } #ifdef DEBUG - if (!(imap_flags & XFS_IMAP_BULKSTAT)) - cmn_err(CE_ALERT, + cmn_err(CE_ALERT, "Device %s - bad inode magic/vsn " "daddr %lld #%d (magic=%x)", XFS_BUFTARG_NAME(mp->m_ddev_targp), diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index e730328636c3..21ac1a67e3e0 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1413,7 +1413,7 @@ xlog_sync(xlog_t *log, ops = iclog->ic_header.h_num_logops; INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops); - bp = iclog->ic_bp; + bp = iclog->ic_bp; ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT))); @@ -1430,15 +1430,14 @@ xlog_sync(xlog_t *log, } XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count); XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ + XFS_BUF_ZEROFLAGS(bp); XFS_BUF_BUSY(bp); XFS_BUF_ASYNC(bp); /* * Do an ordered write for the log block. - * - * It may not be needed to flush the first split block in the log wrap - * case, but do it anyways to be safe -AK + * Its unnecessary to flush the first split block in the log wrap case. */ - if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) + if (!split && (log->l_mp->m_flags & XFS_MOUNT_BARRIER)) XFS_BUF_ORDERED(bp); ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); @@ -1460,7 +1459,7 @@ xlog_sync(xlog_t *log, return error; } if (split) { - bp = iclog->ic_log->l_xbuf; + bp = iclog->ic_log->l_xbuf; ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); @@ -1468,6 +1467,7 @@ xlog_sync(xlog_t *log, XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ (__psint_t)count), split); XFS_BUF_SET_FSPRIVATE(bp, iclog); + XFS_BUF_ZEROFLAGS(bp); XFS_BUF_BUSY(bp); XFS_BUF_ASYNC(bp); if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index 6c96391f3f1a..b427d220a169 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c @@ -515,7 +515,7 @@ xfs_mount( if (error) goto error2; - if ((mp->m_flags & XFS_MOUNT_BARRIER) && !(vfsp->vfs_flag & VFS_RDONLY)) + if (mp->m_flags & XFS_MOUNT_BARRIER) xfs_mountfs_check_barriers(mp); error = XFS_IOINIT(vfsp, args, flags); diff --git a/include/asm-alpha/barrier.h b/include/asm-alpha/barrier.h index 681ff581afa5..384dc08d6f53 100644 --- a/include/asm-alpha/barrier.h +++ b/include/asm-alpha/barrier.h @@ -30,7 +30,4 @@ __asm__ __volatile__("mb": : :"memory") #define set_mb(var, value) \ do { var = value; mb(); } while (0) -#define set_wmb(var, value) \ -do { var = value; wmb(); } while (0) - #endif /* __BARRIER_H */ diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h index 6001febfe63b..0947cbf9b69a 100644 --- a/include/asm-arm/system.h +++ b/include/asm-arm/system.h @@ -176,7 +176,6 @@ extern unsigned int user_debug; #define wmb() mb() #define read_barrier_depends() do { } while(0) #define set_mb(var, value) do { var = value; 
mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); /* diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h index d1f69d706198..00ae32aa1dba 100644 --- a/include/asm-arm26/system.h +++ b/include/asm-arm26/system.h @@ -90,7 +90,6 @@ extern unsigned int user_debug; #define read_barrier_depends() do { } while(0) #define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) /* * We assume knowledge of how diff --git a/include/asm-cris/system.h b/include/asm-cris/system.h index b1c593b6dbff..b869f6161aaa 100644 --- a/include/asm-cris/system.h +++ b/include/asm-cris/system.h @@ -17,7 +17,6 @@ extern struct task_struct *resume(struct task_struct *prev, struct task_struct * #define wmb() mb() #define read_barrier_depends() do { } while(0) #define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) #ifdef CONFIG_SMP #define smp_mb() mb() diff --git a/include/asm-frv/system.h b/include/asm-frv/system.h index 351863dfd06e..1166899317d7 100644 --- a/include/asm-frv/system.h +++ b/include/asm-frv/system.h @@ -179,7 +179,6 @@ do { \ #define rmb() asm volatile ("membar" : : :"memory") #define wmb() asm volatile ("membar" : : :"memory") #define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) #define smp_mb() mb() #define smp_rmb() rmb() diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm index d8d0bcecd23f..6b16dda18115 100644 --- a/include/asm-generic/Kbuild.asm +++ b/include/asm-generic/Kbuild.asm @@ -1,11 +1,8 @@ unifdef-y += a.out.h auxvec.h byteorder.h errno.h fcntl.h ioctl.h \ - ioctls.h ipcbuf.h irq.h mman.h msgbuf.h param.h poll.h \ + ioctls.h ipcbuf.h mman.h msgbuf.h param.h poll.h \ posix_types.h ptrace.h resource.h sembuf.h shmbuf.h shmparam.h \ sigcontext.h siginfo.h signal.h socket.h sockios.h stat.h \ statfs.h termbits.h termios.h timex.h types.h unistd.h user.h -# These really shouldn't be exported -unifdef-y += atomic.h io.h - # These probably shouldn't be exported unifdef-y += elf.h page.h diff --git a/include/asm-h8300/system.h b/include/asm-h8300/system.h index 134e0929fce5..5084a9d42922 100644 --- a/include/asm-h8300/system.h +++ b/include/asm-h8300/system.h @@ -84,7 +84,6 @@ asmlinkage void resume(void); #define wmb() asm volatile ("" : : :"memory") #define set_rmb(var, value) do { xchg(&var, value); } while (0) #define set_mb(var, value) set_rmb(var, value) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) #ifdef CONFIG_SMP #define smp_mb() mb() diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h index 2db168ef949f..49928eb33f8b 100644 --- a/include/asm-i386/system.h +++ b/include/asm-i386/system.h @@ -453,8 +453,6 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l #define set_mb(var, value) do { var = value; barrier(); } while (0) #endif -#define set_wmb(var, value) do { var = value; wmb(); } while (0) - #include <linux/irqflags.h> /* diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index 65db43ce4de6..fc9677bc87ee 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -98,12 +98,11 @@ extern struct ia64_boot_param { #endif /* - * XXX check on these---I suspect what Linus really wants here is + * XXX check on this ---I suspect what Linus really 
wants here is * acquire vs release semantics but we can't discuss this stuff with * Linus just yet. Grrr... */ #define set_mb(var, value) do { (var) = (value); mb(); } while (0) -#define set_wmb(var, value) do { (var) = (value); mb(); } while (0) #define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */ diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h index 311cebf44eff..9e618afec6ed 100644 --- a/include/asm-m32r/system.h +++ b/include/asm-m32r/system.h @@ -336,7 +336,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) #endif #define set_mb(var, value) do { xchg(&var, value); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) #define arch_align_stack(x) (x) diff --git a/include/asm-m68k/oplib.h b/include/asm-m68k/oplib.h index c3594f473ef7..06caa2d08451 100644 --- a/include/asm-m68k/oplib.h +++ b/include/asm-m68k/oplib.h @@ -244,11 +244,6 @@ extern void prom_getstring(int node, char *prop, char *buf, int bufsize); /* Does the passed node have the given "name"? YES=1 NO=0 */ extern int prom_nodematch(int thisnode, char *name); -/* Puts in buffer a prom name in the form name@x,y or name (x for which_io - * and y for first regs phys address - */ -extern int prom_getname(int node, char *buf, int buflen); - /* Search all siblings starting at the passed node for "name" matching * the given string. Returns the node on success, zero on failure. */ diff --git a/include/asm-m68k/system.h b/include/asm-m68k/system.h index d6dd8052cd6f..131a0cb0f491 100644 --- a/include/asm-m68k/system.h +++ b/include/asm-m68k/system.h @@ -80,7 +80,6 @@ static inline int irqs_disabled(void) #define wmb() barrier() #define read_barrier_depends() do { } while(0) #define set_mb(var, value) do { xchg(&var, value); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) #define smp_mb() barrier() #define smp_rmb() barrier() diff --git a/include/asm-m68knommu/system.h b/include/asm-m68knommu/system.h index 2bbe2db00a22..2a814498672d 100644 --- a/include/asm-m68knommu/system.h +++ b/include/asm-m68knommu/system.h @@ -106,7 +106,6 @@ asmlinkage void resume(void); #define wmb() asm volatile ("" : : :"memory") #define set_rmb(var, value) do { xchg(&var, value); } while (0) #define set_mb(var, value) set_rmb(var, value) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) #ifdef CONFIG_SMP #define smp_mb() mb() diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h index 13c98dde82dc..dcb4701d5728 100644 --- a/include/asm-mips/system.h +++ b/include/asm-mips/system.h @@ -143,9 +143,6 @@ #define set_mb(var, value) \ do { var = value; mb(); } while (0) -#define set_wmb(var, value) \ -do { var = value; wmb(); } while (0) - /* * switch_to(n) should switch tasks to task nr n, first * checking that n isn't the current task, in which case it does nothing. 
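
The hunks above and below drop the set_wmb() helper from each architecture's <asm/system.h>, leaving only set_mb(). As the deleted definitions show, on most architectures set_wmb(var, value) was nothing more than a plain store followed by wmb(), so any remaining caller can open-code the pattern. A minimal sketch of that equivalent, assuming a kernel context and a hypothetical ready_flag variable and publish_ready() function (neither taken from this patch):

	/* assumed kernel context; wmb() is provided by <asm/system.h> in this era */
	#include <asm/system.h>

	static int ready_flag;		/* hypothetical flag being published */

	void publish_ready(void)
	{
		ready_flag = 1;		/* was: set_wmb(ready_flag, 1);           */
		wmb();			/* make this store visible before any     */
					/* stores issued after the barrier        */
	}
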
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h index 5fe2d2329ab5..74f037a39e6f 100644 --- a/include/asm-parisc/system.h +++ b/include/asm-parisc/system.h @@ -143,8 +143,6 @@ static inline void set_eiem(unsigned long val) #define read_barrier_depends() do { } while(0) #define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) - #ifndef CONFIG_PA20 /* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data, diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h index c6569516ba35..7307aa775671 100644 --- a/include/asm-powerpc/system.h +++ b/include/asm-powerpc/system.h @@ -39,7 +39,6 @@ #define read_barrier_depends() do { } while(0) #define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) #ifdef __KERNEL__ #ifdef CONFIG_SMP diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h index fb49c0c49ea1..738943584c01 100644 --- a/include/asm-ppc/system.h +++ b/include/asm-ppc/system.h @@ -33,7 +33,6 @@ #define read_barrier_depends() do { } while(0) #define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) #ifdef CONFIG_SMP #define smp_mb() mb() diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h index 9ab186ffde23..16040048cd1b 100644 --- a/include/asm-s390/system.h +++ b/include/asm-s390/system.h @@ -128,8 +128,13 @@ extern void account_system_vtime(struct task_struct *); #define nop() __asm__ __volatile__ ("nop") -#define xchg(ptr,x) \ - ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(void *)(ptr),sizeof(*(ptr)))) +#define xchg(ptr,x) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __ret = (__typeof__(*(ptr))) \ + __xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \ + __ret; \ +}) static inline unsigned long __xchg(unsigned long x, void * ptr, int size) { @@ -299,7 +304,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) #define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) #ifdef __s390x__ diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h index 4848057dafe4..5d0332a4c2bd 100644 --- a/include/asm-s390/timex.h +++ b/include/asm-s390/timex.h @@ -19,7 +19,7 @@ static inline cycles_t get_cycles(void) { cycles_t cycles; - __asm__("stck 0(%1)" : "=m" (cycles) : "a" (&cycles) : "cc"); + __asm__ __volatile__ ("stck 0(%1)" : "=m" (cycles) : "a" (&cycles) : "cc"); return cycles >> 2; } @@ -27,7 +27,7 @@ static inline unsigned long long get_clock (void) { unsigned long long clk; - __asm__("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc"); + __asm__ __volatile__ ("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc"); return clk; } diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h index ce2e60664a86..ad35ad4958f4 100644 --- a/include/asm-sh/system.h +++ b/include/asm-sh/system.h @@ -101,7 +101,6 @@ extern void __xchg_called_with_bad_pointer(void); #endif #define set_mb(var, value) do { xchg(&var, value); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) /* Interrupt Control */ static __inline__ void local_irq_enable(void) diff --git a/include/asm-sh64/system.h b/include/asm-sh64/system.h index 7606f6e1f01e..87ef6f1ad5a4 100644 --- a/include/asm-sh64/system.h +++ b/include/asm-sh64/system.h @@ -66,7 +66,6 @@ extern void __xchg_called_with_bad_pointer(void); #define 
set_rmb(var, value) do { xchg(&var, value); } while (0) #define set_mb(var, value) set_rmb(var, value) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) /* Interrupt Control */ #ifndef HARD_CLI diff --git a/include/asm-sparc/oplib.h b/include/asm-sparc/oplib.h index f283f8aaf6a9..91691e52c058 100644 --- a/include/asm-sparc/oplib.h +++ b/include/asm-sparc/oplib.h @@ -267,11 +267,6 @@ extern void prom_getstring(int node, char *prop, char *buf, int bufsize); /* Does the passed node have the given "name"? YES=1 NO=0 */ extern int prom_nodematch(int thisnode, char *name); -/* Puts in buffer a prom name in the form name@x,y or name (x for which_io - * and y for first regs phys address - */ -extern int prom_getname(int node, char *buf, int buflen); - /* Search all siblings starting at the passed node for "name" matching * the given string. Returns the node on success, zero on failure. */ diff --git a/include/asm-sparc/signal.h b/include/asm-sparc/signal.h index 0ae5084c427b..d03a21c97abb 100644 --- a/include/asm-sparc/signal.h +++ b/include/asm-sparc/signal.h @@ -168,7 +168,7 @@ struct sigstack { * statically allocated data.. which is NOT GOOD. * */ -#define SA_STATIC_ALLOC 0x80 +#define SA_STATIC_ALLOC 0x8000 #endif #include <asm-generic/signal.h> diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h index cb7dda1e5e91..100c3eaf3c1f 100644 --- a/include/asm-sparc/system.h +++ b/include/asm-sparc/system.h @@ -199,7 +199,6 @@ static inline unsigned long getipl(void) #define wmb() mb() #define read_barrier_depends() do { } while(0) #define set_mb(__var, __value) do { __var = __value; mb(); } while(0) -#define set_wmb(__var, __value) set_mb(__var, __value) #define smp_mb() __asm__ __volatile__("":::"memory") #define smp_rmb() __asm__ __volatile__("":::"memory") #define smp_wmb() __asm__ __volatile__("":::"memory") diff --git a/include/asm-sparc64/openprom.h b/include/asm-sparc64/openprom.h index b4959d2b0d99..e01b80559c93 100644 --- a/include/asm-sparc64/openprom.h +++ b/include/asm-sparc64/openprom.h @@ -175,7 +175,7 @@ struct linux_nodeops { }; /* More fun PROM structures for device probing. */ -#define PROMREG_MAX 16 +#define PROMREG_MAX 24 #define PROMVADDR_MAX 16 #define PROMINTR_MAX 15 diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h index a68b0bb05958..6a0da3b1695c 100644 --- a/include/asm-sparc64/oplib.h +++ b/include/asm-sparc64/oplib.h @@ -287,11 +287,6 @@ extern void prom_getstring(int node, const char *prop, char *buf, int bufsize); /* Does the passed node have the given "name"? YES=1 NO=0 */ extern int prom_nodematch(int thisnode, const char *name); -/* Puts in buffer a prom name in the form name@x,y or name (x for which_io - * and y for first regs phys address - */ -extern int prom_getname(int node, char *buf, int buflen); - /* Search all siblings starting at the passed node for "name" matching * the given string. Returns the node on success, zero on failure. 
*/ diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 03f5bc9b6bec..1ba19eb34ce3 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -339,7 +339,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot) " .section .sun4v_2insn_patch, \"ax\"\n" " .word 661b\n" " andn %0, %4, %0\n" - " or %0, %3, %0\n" + " or %0, %5, %0\n" " .previous\n" : "=r" (val) : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U), diff --git a/include/asm-sparc64/sfp-machine.h b/include/asm-sparc64/sfp-machine.h index 5015bb8d6c32..89d42431efb5 100644 --- a/include/asm-sparc64/sfp-machine.h +++ b/include/asm-sparc64/sfp-machine.h @@ -34,7 +34,7 @@ #define _FP_MUL_MEAT_D(R,X,Y) \ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) #define _FP_MUL_MEAT_Q(R,X,Y) \ - _FP_MUL_MEAT_2_wide_3mul(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) + _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) #define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm) #define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y) diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h index 4ca68600c670..a8b7432c9a70 100644 --- a/include/asm-sparc64/system.h +++ b/include/asm-sparc64/system.h @@ -123,8 +123,6 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ #define read_barrier_depends() do { } while(0) #define set_mb(__var, __value) \ do { __var = __value; membar_storeload_storestore(); } while(0) -#define set_wmb(__var, __value) \ - do { __var = __value; wmb(); } while(0) #ifdef CONFIG_SMP #define smp_mb() mb() diff --git a/include/asm-v850/system.h b/include/asm-v850/system.h index 7091af4b7866..da39916f10b0 100644 --- a/include/asm-v850/system.h +++ b/include/asm-v850/system.h @@ -68,7 +68,6 @@ static inline int irqs_disabled (void) #define read_barrier_depends() ((void)0) #define set_rmb(var, value) do { xchg (&var, value); } while (0) #define set_mb(var, value) set_rmb (var, value) -#define set_wmb(var, value) do { var = value; wmb (); } while (0) #define smp_mb() mb () #define smp_rmb() rmb () diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h index fbfb50136edb..4e3919524240 100644 --- a/include/asm-x86_64/calgary.h +++ b/include/asm-x86_64/calgary.h @@ -60,9 +60,4 @@ static inline int calgary_iommu_init(void) { return 1; } static inline void detect_calgary(void) { return; } #endif -static inline unsigned int bus_to_phb(unsigned char busno) -{ - return ((busno % 15 == 0) ? 
0 : busno / 2 + 1); -} - #endif /* _ASM_X86_64_CALGARY_H */ diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h index f7bf875aae40..10f346165cab 100644 --- a/include/asm-x86_64/page.h +++ b/include/asm-x86_64/page.h @@ -19,7 +19,7 @@ #define EXCEPTION_STACK_ORDER 0 #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) -#define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER +#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1) #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) #define IRQSTACK_ORDER 2 diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h index 5f9a01805821..ba94ab3d2673 100644 --- a/include/asm-x86_64/swiotlb.h +++ b/include/asm-x86_64/swiotlb.h @@ -42,6 +42,8 @@ extern void swiotlb_free_coherent (struct device *hwdev, size_t size, extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); extern void swiotlb_init(void); +extern int swiotlb_force; + #ifdef CONFIG_SWIOTLB extern int swiotlb; #else diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h index f67f2873a922..6bf170bceae1 100644 --- a/include/asm-x86_64/system.h +++ b/include/asm-x86_64/system.h @@ -240,7 +240,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, #endif #define read_barrier_depends() do {} while(0) #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) #define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0) diff --git a/include/asm-xtensa/system.h b/include/asm-xtensa/system.h index f986170bd2a1..932bda92a21c 100644 --- a/include/asm-xtensa/system.h +++ b/include/asm-xtensa/system.h @@ -99,7 +99,6 @@ static inline void disable_coprocessor(int i) #endif #define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) #if !defined (__ASSEMBLY__) diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 44a11f1ccaf2..8fb344a9abd8 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -48,7 +48,6 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb) { } #endif -extern int current_in_cpu_hotplug(void); int cpu_up(unsigned int cpu); @@ -61,10 +60,6 @@ static inline int register_cpu_notifier(struct notifier_block *nb) static inline void unregister_cpu_notifier(struct notifier_block *nb) { } -static inline int current_in_cpu_hotplug(void) -{ - return 0; -} #endif /* CONFIG_SMP */ extern struct sysdev_class cpu_sysdev_class; @@ -73,7 +68,6 @@ extern struct sysdev_class cpu_sysdev_class; /* Stop CPUs going up and down. 
*/ extern void lock_cpu_hotplug(void); extern void unlock_cpu_hotplug(void); -extern int lock_cpu_hotplug_interruptible(void); #define hotcpu_notifier(fn, pri) { \ static struct notifier_block fn##_nb = \ { .notifier_call = fn, .priority = pri }; \ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 35e137636b0b..4ea39fee99c7 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -172,9 +172,6 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int relation); -/* pass an event to the cpufreq governor */ -int cpufreq_governor(unsigned int cpu, unsigned int event); - int cpufreq_register_governor(struct cpufreq_governor *governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor); diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h new file mode 100644 index 000000000000..7e8b6011b8f3 --- /dev/null +++ b/include/linux/delayacct.h @@ -0,0 +1,119 @@ +/* delayacct.h - per-task delay accounting + * + * Copyright (C) Shailabh Nagar, IBM Corp. 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + */ + +#ifndef _LINUX_DELAYACCT_H +#define _LINUX_DELAYACCT_H + +#include <linux/sched.h> +#include <linux/taskstats_kern.h> + +/* + * Per-task flags relevant to delay accounting + * maintained privately to avoid exhausting similar flags in sched.h:PF_* + * Used to set current->delays->flags + */ +#define DELAYACCT_PF_SWAPIN 0x00000001 /* I am doing a swapin */ + +#ifdef CONFIG_TASK_DELAY_ACCT + +extern int delayacct_on; /* Delay accounting turned on/off */ +extern kmem_cache_t *delayacct_cache; +extern void delayacct_init(void); +extern void __delayacct_tsk_init(struct task_struct *); +extern void __delayacct_tsk_exit(struct task_struct *); +extern void __delayacct_blkio_start(void); +extern void __delayacct_blkio_end(void); +extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *); +extern __u64 __delayacct_blkio_ticks(struct task_struct *); + +static inline void delayacct_set_flag(int flag) +{ + if (current->delays) + current->delays->flags |= flag; +} + +static inline void delayacct_clear_flag(int flag) +{ + if (current->delays) + current->delays->flags &= ~flag; +} + +static inline void delayacct_tsk_init(struct task_struct *tsk) +{ + /* reinitialize in case parent's non-null pointer was dup'ed*/ + tsk->delays = NULL; + if (unlikely(delayacct_on)) + __delayacct_tsk_init(tsk); +} + +static inline void delayacct_tsk_exit(struct task_struct *tsk) +{ + if (tsk->delays) + __delayacct_tsk_exit(tsk); +} + +static inline void delayacct_blkio_start(void) +{ + if (current->delays) + __delayacct_blkio_start(); +} + +static inline void delayacct_blkio_end(void) +{ + if (current->delays) + __delayacct_blkio_end(); +} + +static inline int delayacct_add_tsk(struct taskstats *d, + struct task_struct *tsk) +{ + if (likely(!delayacct_on)) + return -EINVAL; + if (!tsk->delays) + return 0; + return __delayacct_add_tsk(d, tsk); +} + +static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) +{ + if (tsk->delays) + return 
__delayacct_blkio_ticks(tsk); + return 0; +} + +#else +static inline void delayacct_set_flag(int flag) +{} +static inline void delayacct_clear_flag(int flag) +{} +static inline void delayacct_init(void) +{} +static inline void delayacct_tsk_init(struct task_struct *tsk) +{} +static inline void delayacct_tsk_exit(struct task_struct *tsk) +{} +static inline void delayacct_blkio_start(void) +{} +static inline void delayacct_blkio_end(void) +{} +static inline int delayacct_add_tsk(struct taskstats *d, + struct task_struct *tsk) +{ return 0; } +static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) +{ return 0; } +#endif /* CONFIG_TASK_DELAY_ACCT */ + +#endif diff --git a/include/linux/futex.h b/include/linux/futex.h index 34c3a215f2cd..d097b5b72bc6 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -96,7 +96,8 @@ struct robust_list_head { long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, u32 __user *uaddr2, u32 val2, u32 val3); -extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr); +extern int +handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); #ifdef CONFIG_FUTEX extern void exit_robust_list(struct task_struct *curr); diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 21338bb3441d..9418519a55d1 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -115,6 +115,7 @@ #define I2C_DRIVERID_BT866 85 /* Conexant bt866 video encoder */ #define I2C_DRIVERID_KS0127 86 /* Samsung ks0127 video decoder */ #define I2C_DRIVERID_TLV320AIC23B 87 /* TI TLV320AIC23B audio codec */ +#define I2C_DRIVERID_ISL1208 88 /* Intersil ISL1208 RTC */ #define I2C_DRIVERID_I2CDEV 900 #define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */ diff --git a/include/linux/ide.h b/include/linux/ide.h index dc7abef10965..99620451d958 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -571,6 +571,7 @@ typedef struct ide_drive_s { u8 waiting_for_dma; /* dma currently in progress */ u8 unmask; /* okay to unmask other irqs */ u8 bswap; /* byte swap data */ + u8 noflush; /* don't attempt flushes */ u8 dsc_overlap; /* DSC overlap */ u8 nice1; /* give potential excess bandwidth */ diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index eef0876d8307..383627ad328f 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -23,8 +23,8 @@ struct vlan_collection; struct vlan_dev_info; struct hlist_node; -#include <linux/proc_fs.h> /* for proc_dir_entry */ #include <linux/netdevice.h> +#include <linux/etherdevice.h> #define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header) * that VLAN requires. @@ -185,7 +185,8 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb, * This allows the VLAN to have a different MAC than the underlying * device, and still route correctly. 
*/ - if (!memcmp(eth_hdr(skb)->h_dest, skb->dev->dev_addr, ETH_ALEN)) + if (!compare_ether_addr(eth_hdr(skb)->h_dest, + skb->dev->dev_addr)) skb->pkt_type = PACKET_HOST; break; }; diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 7cce5dfa092f..1c65e7a9f186 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -28,7 +28,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), void kthread_bind(struct task_struct *k, unsigned int cpu); int kthread_stop(struct task_struct *k); -int kthread_stop_sem(struct task_struct *k, struct semaphore *s); int kthread_should_stop(void); #endif /* _LINUX_KTHREAD_H */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 6cc497a2b6da..66c3100c2b94 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -265,12 +265,14 @@ enum { /* ata_eh_info->flags */ ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ - ATA_EHI_RESUME_LINK = (1 << 1), /* need to resume link */ + ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */ ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ ATA_EHI_QUIET = (1 << 3), /* be quiet */ ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */ + ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK, + /* max repeat if error condition is still set after ->error_handler */ ATA_EH_MAX_REPEAT = 5, diff --git a/include/linux/list.h b/include/linux/list.h index 6b74adf5297f..65a5b5ceda49 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -265,6 +265,17 @@ static inline void list_move_tail(struct list_head *list, } /** + * list_is_last - tests whether @list is the last entry in list @head + * @list: the entry to test + * @head: the head of the list + */ +static inline int list_is_last(const struct list_head *list, + const struct list_head *head) +{ + return list->next == head; +} + +/** * list_empty - tests whether a list is empty * @head: the list to test. */ diff --git a/include/linux/module.h b/include/linux/module.h index d06c74fb8c26..0dfb794c52d3 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -362,10 +362,8 @@ int is_module_address(unsigned long addr); /* Returns module and fills in value, defined and namebuf, or NULL if symnum out of range. */ -struct module *module_get_kallsym(unsigned int symnum, - unsigned long *value, - char *type, - char namebuf[128]); +struct module *module_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *name, size_t namelen); /* Look for this name: can be of form module:name. 
*/ unsigned long module_kallsyms_lookup_name(const char *name); @@ -535,8 +533,8 @@ static inline const char *module_address_lookup(unsigned long addr, static inline struct module *module_get_kallsym(unsigned int symnum, unsigned long *value, - char *type, - char namebuf[128]) + char *type, char *name, + size_t namelen) { return NULL; } diff --git a/include/linux/namei.h b/include/linux/namei.h index 58cb3d3d44b4..45511a5918d3 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -11,7 +11,7 @@ struct open_intent { struct file *file; }; -enum { MAX_NESTED_LINKS = 5 }; +enum { MAX_NESTED_LINKS = 8 }; struct nameidata { struct dentry *dentry; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 76cc099c8580..75f02d8c6ed3 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -924,10 +924,10 @@ static inline void netif_tx_lock_bh(struct net_device *dev) static inline int netif_tx_trylock(struct net_device *dev) { - int err = spin_trylock(&dev->_xmit_lock); - if (!err) + int ok = spin_trylock(&dev->_xmit_lock); + if (likely(ok)) dev->xmit_lock_owner = smp_processor_id(); - return err; + return ok; } static inline void netif_tx_unlock(struct net_device *dev) diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index 87764022cc67..31f02ba036ce 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -79,6 +79,8 @@ struct bridge_skb_cb { __u32 ipv4; } daddr; }; + +extern int brnf_deferred_hooks; #endif /* CONFIG_BRIDGE_NETFILTER */ #endif /* __KERNEL__ */ diff --git a/include/linux/nsc_gpio.h b/include/linux/nsc_gpio.h index 135742cfada5..7da0cf3702ee 100644 --- a/include/linux/nsc_gpio.h +++ b/include/linux/nsc_gpio.h @@ -25,8 +25,6 @@ struct nsc_gpio_ops { void (*gpio_dump) (struct nsc_gpio_ops *amp, unsigned iminor); int (*gpio_get) (unsigned iminor); void (*gpio_set) (unsigned iminor, int state); - void (*gpio_set_high)(unsigned iminor); - void (*gpio_set_low) (unsigned iminor); void (*gpio_change) (unsigned iminor); int (*gpio_current) (unsigned iminor); struct device* dev; /* for dev_dbg() support, set in init */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 1c876e27ff93..6afa72e080cb 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -463,6 +463,10 @@ struct signal_struct { #ifdef CONFIG_BSD_PROCESS_ACCT struct pacct_struct pacct; /* per-process accounting information */ #endif +#ifdef CONFIG_TASKSTATS + spinlock_t stats_lock; + struct taskstats *stats; +#endif }; /* Context switch must be unlocked if interrupts are to be enabled */ @@ -537,7 +541,7 @@ extern struct user_struct root_user; struct backing_dev_info; struct reclaim_state; -#ifdef CONFIG_SCHEDSTATS +#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) struct sched_info { /* cumulative counters */ unsigned long cpu_time, /* time spent on the cpu */ @@ -548,9 +552,53 @@ struct sched_info { unsigned long last_arrival, /* when we last ran on a cpu */ last_queued; /* when we were last queued to run */ }; +#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ +#ifdef CONFIG_SCHEDSTATS extern struct file_operations proc_schedstat_operations; +#endif /* CONFIG_SCHEDSTATS */ + +#ifdef CONFIG_TASK_DELAY_ACCT +struct task_delay_info { + spinlock_t lock; + unsigned int flags; /* Private per-task flags */ + + /* For each stat XXX, add following, aligned appropriately + * + * struct timespec XXX_start, XXX_end; + * u64 XXX_delay; + * u32 XXX_count; + * + * Atomicity 
of updates to XXX_delay, XXX_count protected by + * single lock above (split into XXX_lock if contention is an issue). + */ + + /* + * XXX_count is incremented on every XXX operation, the delay + * associated with the operation is added to XXX_delay. + * XXX_delay contains the accumulated delay time in nanoseconds. + */ + struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */ + u64 blkio_delay; /* wait for sync block io completion */ + u64 swapin_delay; /* wait for swapin block io completion */ + u32 blkio_count; /* total count of the number of sync block */ + /* io operations performed */ + u32 swapin_count; /* total count of the number of swapin block */ + /* io operations performed */ +}; +#endif /* CONFIG_TASK_DELAY_ACCT */ + +static inline int sched_info_on(void) +{ +#ifdef CONFIG_SCHEDSTATS + return 1; +#elif defined(CONFIG_TASK_DELAY_ACCT) + extern int delayacct_on; + return delayacct_on; +#else + return 0; #endif +} enum idle_type { @@ -747,7 +795,7 @@ struct task_struct { cpumask_t cpus_allowed; unsigned int time_slice, first_time_slice; -#ifdef CONFIG_SCHEDSTATS +#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) struct sched_info sched_info; #endif @@ -945,6 +993,10 @@ struct task_struct { * cache last used pipe for splice */ struct pipe_inode_info *splice_pipe; +#ifdef CONFIG_TASK_DELAY_ACCT + spinlock_t delays_lock; + struct task_delay_info *delays; +#endif }; static inline pid_t process_group(struct task_struct *tsk) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 0bf31b83578c..4307e764ef0a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1066,9 +1066,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list) kfree_skb(skb); } -#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB /** - * __dev_alloc_skb - allocate an skbuff for sending + * __dev_alloc_skb - allocate an skbuff for receiving * @length: length to allocate * @gfp_mask: get_free_pages mask, passed to alloc_skb * @@ -1087,12 +1086,9 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length, skb_reserve(skb, NET_SKB_PAD); return skb; } -#else -extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask); -#endif /** - * dev_alloc_skb - allocate an skbuff for sending + * dev_alloc_skb - allocate an skbuff for receiving * @length: length to allocate * * Allocate a new &sk_buff and assign it a usage count of one. The diff --git a/include/linux/taskstats.h b/include/linux/taskstats.h new file mode 100644 index 000000000000..f1cb6cddd19d --- /dev/null +++ b/include/linux/taskstats.h @@ -0,0 +1,137 @@ +/* taskstats.h - exporting per-task statistics + * + * Copyright (C) Shailabh Nagar, IBM Corp. 2006 + * (C) Balbir Singh, IBM Corp. 2006 + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + */ + +#ifndef _LINUX_TASKSTATS_H +#define _LINUX_TASKSTATS_H + +/* Format for per-task data returned to userland when + * - a task exits + * - listener requests stats for a task + * + * The struct is versioned. Newer versions should only add fields to + * the bottom of the struct to maintain backward compatibility. 
+ * + * + * To add new fields + * a) bump up TASKSTATS_VERSION + * b) add comment indicating new version number at end of struct + * c) add new fields after version comment; maintain 64-bit alignment + */ + +#define TASKSTATS_VERSION 1 + +struct taskstats { + + /* Version 1 */ + __u16 version; + __u16 padding[3]; /* Userspace should not interpret the padding + * field which can be replaced by useful + * fields if struct taskstats is extended. + */ + + /* Delay accounting fields start + * + * All values, until comment "Delay accounting fields end" are + * available only if delay accounting is enabled, even though the last + * few fields are not delays + * + * xxx_count is the number of delay values recorded + * xxx_delay_total is the corresponding cumulative delay in nanoseconds + * + * xxx_delay_total wraps around to zero on overflow + * xxx_count incremented regardless of overflow + */ + + /* Delay waiting for cpu, while runnable + * count, delay_total NOT updated atomically + */ + __u64 cpu_count; + __u64 cpu_delay_total; + + /* Following four fields atomically updated using task->delays->lock */ + + /* Delay waiting for synchronous block I/O to complete + * does not account for delays in I/O submission + */ + __u64 blkio_count; + __u64 blkio_delay_total; + + /* Delay waiting for page fault I/O (swap in only) */ + __u64 swapin_count; + __u64 swapin_delay_total; + + /* cpu "wall-clock" running time + * On some architectures, value will adjust for cpu time stolen + * from the kernel in involuntary waits due to virtualization. + * Value is cumulative, in nanoseconds, without a corresponding count + * and wraps around to zero silently on overflow + */ + __u64 cpu_run_real_total; + + /* cpu "virtual" running time + * Uses time intervals seen by the kernel i.e. no adjustment + * for kernel's involuntary waits due to virtualization. + * Value is cumulative, in nanoseconds, without a corresponding count + * and wraps around to zero silently on overflow + */ + __u64 cpu_run_virtual_total; + /* Delay accounting fields end */ + /* version 1 ends here */ +}; + + +/* + * Commands sent from userspace + * Not versioned. 
New commands should only be inserted at the enum's end + * prior to __TASKSTATS_CMD_MAX + */ + +enum { + TASKSTATS_CMD_UNSPEC = 0, /* Reserved */ + TASKSTATS_CMD_GET, /* user->kernel request/get-response */ + TASKSTATS_CMD_NEW, /* kernel->user event */ + __TASKSTATS_CMD_MAX, +}; + +#define TASKSTATS_CMD_MAX (__TASKSTATS_CMD_MAX - 1) + +enum { + TASKSTATS_TYPE_UNSPEC = 0, /* Reserved */ + TASKSTATS_TYPE_PID, /* Process id */ + TASKSTATS_TYPE_TGID, /* Thread group id */ + TASKSTATS_TYPE_STATS, /* taskstats structure */ + TASKSTATS_TYPE_AGGR_PID, /* contains pid + stats */ + TASKSTATS_TYPE_AGGR_TGID, /* contains tgid + stats */ + __TASKSTATS_TYPE_MAX, +}; + +#define TASKSTATS_TYPE_MAX (__TASKSTATS_TYPE_MAX - 1) + +enum { + TASKSTATS_CMD_ATTR_UNSPEC = 0, + TASKSTATS_CMD_ATTR_PID, + TASKSTATS_CMD_ATTR_TGID, + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK, + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK, + __TASKSTATS_CMD_ATTR_MAX, +}; + +#define TASKSTATS_CMD_ATTR_MAX (__TASKSTATS_CMD_ATTR_MAX - 1) + +/* NETLINK_GENERIC related info */ + +#define TASKSTATS_GENL_NAME "TASKSTATS" +#define TASKSTATS_GENL_VERSION 0x1 + +#endif /* _LINUX_TASKSTATS_H */ diff --git a/include/linux/taskstats_kern.h b/include/linux/taskstats_kern.h new file mode 100644 index 000000000000..16894b7edcc8 --- /dev/null +++ b/include/linux/taskstats_kern.h @@ -0,0 +1,89 @@ +/* taskstats_kern.h - kernel header for per-task statistics interface + * + * Copyright (C) Shailabh Nagar, IBM Corp. 2006 + * (C) Balbir Singh, IBM Corp. 2006 + */ + +#ifndef _LINUX_TASKSTATS_KERN_H +#define _LINUX_TASKSTATS_KERN_H + +#include <linux/taskstats.h> +#include <linux/sched.h> +#include <net/genetlink.h> + +#ifdef CONFIG_TASKSTATS +extern kmem_cache_t *taskstats_cache; +extern struct mutex taskstats_exit_mutex; + +static inline void taskstats_exit_free(struct taskstats *tidstats) +{ + if (tidstats) + kmem_cache_free(taskstats_cache, tidstats); +} + +static inline void taskstats_tgid_init(struct signal_struct *sig) +{ + spin_lock_init(&sig->stats_lock); + sig->stats = NULL; +} + +static inline void taskstats_tgid_alloc(struct signal_struct *sig) +{ + struct taskstats *stats; + unsigned long flags; + + stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL); + if (!stats) + return; + + spin_lock_irqsave(&sig->stats_lock, flags); + if (!sig->stats) { + sig->stats = stats; + stats = NULL; + } + spin_unlock_irqrestore(&sig->stats_lock, flags); + + if (stats) + kmem_cache_free(taskstats_cache, stats); +} + +static inline void taskstats_tgid_free(struct signal_struct *sig) +{ + struct taskstats *stats = NULL; + unsigned long flags; + + spin_lock_irqsave(&sig->stats_lock, flags); + if (sig->stats) { + stats = sig->stats; + sig->stats = NULL; + } + spin_unlock_irqrestore(&sig->stats_lock, flags); + if (stats) + kmem_cache_free(taskstats_cache, stats); +} + +extern void taskstats_exit_alloc(struct taskstats **, unsigned int *); +extern void taskstats_exit_send(struct task_struct *, struct taskstats *, int, unsigned int); +extern void taskstats_init_early(void); +extern void taskstats_tgid_alloc(struct signal_struct *); +#else +static inline void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu) +{} +static inline void taskstats_exit_free(struct taskstats *ptidstats) +{} +static inline void taskstats_exit_send(struct task_struct *tsk, + struct taskstats *tidstats, + int group_dead, unsigned int cpu) +{} +static inline void taskstats_tgid_init(struct signal_struct *sig) +{} +static inline void taskstats_tgid_alloc(struct signal_struct *sig) +{} 
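The TASKSTATS_CMD_*, TASKSTATS_TYPE_* and TASKSTATS_CMD_ATTR_* values above, together with TASKSTATS_GENL_NAME, are the whole userspace-visible contract of this interface. As a rough, non-authoritative sketch of a client request (written against the later libnl-3 convenience API, which is an assumption of this illustration and not part of the patch; reply parsing and most error handling are omitted):

/* Ask the TASKSTATS generic netlink family for one pid's statistics.
 * libnl-3 calls (nl_socket_alloc, genl_ctrl_resolve, ...) are used only
 * for brevity; the constants come from linux/taskstats.h above. */
#include <sys/types.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/taskstats.h>

static int request_taskstats(pid_t pid)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family, err = -1;

	if (!sk)
		return -1;
	if (genl_connect(sk) < 0)
		goto out;

	/* "TASKSTATS" -> dynamically assigned genetlink family id */
	family = genl_ctrl_resolve(sk, TASKSTATS_GENL_NAME);
	if (family < 0)
		goto out;

	msg = nlmsg_alloc();
	if (!msg)
		goto out;
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    TASKSTATS_CMD_GET, TASKSTATS_GENL_VERSION);
	nla_put_u32(msg, TASKSTATS_CMD_ATTR_PID, pid);
	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	/* The TASKSTATS_CMD_NEW reply nests struct taskstats under
	 * TASKSTATS_TYPE_AGGR_PID -> TASKSTATS_TYPE_STATS; unpacking it
	 * is left out of this sketch. */
out:
	nl_socket_free(sk);
	return err;
}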
+static inline void taskstats_tgid_free(struct signal_struct *sig) +{} +static inline void taskstats_init_early(void) +{} +#endif /* CONFIG_TASKSTATS */ + +#endif + diff --git a/include/linux/time.h b/include/linux/time.h index c05f8bb9a323..a5b739967b74 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -71,6 +71,18 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon, extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec); /* + * sub = lhs - rhs, in normalized form + */ +static inline struct timespec timespec_sub(struct timespec lhs, + struct timespec rhs) +{ + struct timespec ts_delta; + set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec, + lhs.tv_nsec - rhs.tv_nsec); + return ts_delta; +} + +/* * Returns true if the timespec is norm, false if denorm: */ #define timespec_valid(ts) \ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index f6024ab4eff0..71b6363caaaf 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -11,6 +11,7 @@ struct vm_area_struct; #define VM_ALLOC 0x00000002 /* vmalloc() */ #define VM_MAP 0x00000004 /* vmap()ed pages */ #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ +#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ /* bits [20..32] reserved for arch specific ioremap internals */ /* diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 805de50df00d..8c2287264266 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -150,4 +150,24 @@ static inline int genlmsg_unicast(struct sk_buff *skb, u32 pid) return nlmsg_unicast(genl_sock, skb, pid); } +/** + * gennlmsg_data - head of message payload + * @gnlh: genetlink messsage header + */ +static inline void *genlmsg_data(const struct genlmsghdr *gnlh) +{ + return ((unsigned char *) gnlh + GENL_HDRLEN); +} + +/** + * genlmsg_len - length of message payload + * @gnlh: genetlink message header + */ +static inline int genlmsg_len(const struct genlmsghdr *gnlh) +{ + struct nlmsghdr *nlh = (struct nlmsghdr *)((unsigned char *)gnlh - + NLMSG_HDRLEN); + return (nlh->nlmsg_len - GENL_HDRLEN - NLMSG_HDRLEN); +} + #endif /* __NET_GENERIC_NETLINK_H */ diff --git a/include/net/netdma.h b/include/net/netdma.h index 19760eb131aa..ceae5ee85c04 100644 --- a/include/net/netdma.h +++ b/include/net/netdma.h @@ -37,7 +37,7 @@ static inline struct dma_chan *get_softnet_dma(void) } int dma_skb_copy_datagram_iovec(struct dma_chan* chan, - const struct sk_buff *skb, int offset, struct iovec *to, + struct sk_buff *skb, int offset, struct iovec *to, size_t len, struct dma_pinned_list *pinned_list); #endif /* CONFIG_NET_DMA */ diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 1925c65e617b..f6afee73235d 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -169,23 +169,17 @@ psched_tod_diff(int delta_sec, int bound) #define PSCHED_TADD2(tv, delta, tv_res) \ ({ \ - int __delta = (delta); \ - (tv_res) = (tv); \ - while(__delta >= USEC_PER_SEC){ \ - (tv_res).tv_sec++; \ - __delta -= USEC_PER_SEC; \ - } \ + int __delta = (tv).tv_usec + (delta); \ + (tv_res).tv_sec = (tv).tv_sec; \ + while (__delta >= USEC_PER_SEC) { (tv_res).tv_sec++; __delta -= USEC_PER_SEC; } \ (tv_res).tv_usec = __delta; \ }) #define PSCHED_TADD(tv, delta) \ ({ \ - int __delta = (delta); \ - while(__delta >= USEC_PER_SEC){ \ - (tv).tv_sec++; \ - __delta -= USEC_PER_SEC; \ - } \ - (tv).tv_usec = __delta; \ + (tv).tv_usec += (delta); \ + while ((tv).tv_usec >= USEC_PER_SEC) { 
(tv).tv_sec++; \ + (tv).tv_usec -= USEC_PER_SEC; } \ }) /* Set/check that time is in the "past perfect"; diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 5f69158c1006..e5aa7ff1f5b5 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -445,6 +445,7 @@ typedef struct sctp_sender_hb_info { struct sctp_paramhdr param_hdr; union sctp_addr daddr; unsigned long sent_at; + __u64 hb_nonce; } __attribute__((packed)) sctp_sender_hb_info_t; /* @@ -730,13 +731,10 @@ void sctp_init_addrs(struct sctp_chunk *, union sctp_addr *, const union sctp_addr *sctp_source(const struct sctp_chunk *chunk); /* This is a structure for holding either an IPv6 or an IPv4 address. */ -/* sin_family -- AF_INET or AF_INET6 - * sin_port -- ordinary port number - * sin_addr -- cast to either (struct in_addr) or (struct in6_addr) - */ struct sctp_sockaddr_entry { struct list_head list; union sctp_addr a; + __u8 use_as_src; }; typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *); @@ -984,6 +982,9 @@ struct sctp_transport { */ char cacc_saw_newack; } cacc; + + /* 64-bit random number sent with heartbeat. */ + __u64 hb_nonce; }; struct sctp_transport *sctp_transport_new(const union sctp_addr *, @@ -1138,7 +1139,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest, sctp_scope_t scope, gfp_t gfp, int flags); int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, - gfp_t gfp); + __u8 use_as_src, gfp_t gfp); int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *); int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, struct sctp_sock *); diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h index 8a6bef6f91eb..1b7aae6cdd82 100644 --- a/include/net/sctp/user.h +++ b/include/net/sctp/user.h @@ -560,9 +560,18 @@ struct sctp_paddrinfo { } __attribute__((packed, aligned(4))); /* Peer addresses's state. */ +/* UNKNOWN: Peer address passed by the upper layer in sendmsg or connect[x] + * calls. + * UNCONFIRMED: Peer address received in INIT/INIT-ACK address parameters. + * Not yet confirmed by a heartbeat and not available for data + * transfers. + * ACTIVE : Peer address confirmed, active and available for data transfers. + * INACTIVE: Peer address inactive and not available for data transfers. 
+ */ enum sctp_spinfo_state { SCTP_INACTIVE, SCTP_ACTIVE, + SCTP_UNCONFIRMED, SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */ }; diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index fcb5ba87dcc5..0ff67398928d 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -89,9 +89,10 @@ static inline void ib_addr_set_pkey(struct rdma_dev_addr *dev_addr, u16 pkey) dev_addr->broadcast[9] = (unsigned char) pkey; } -static inline union ib_gid *ib_addr_get_sgid(struct rdma_dev_addr *dev_addr) +static inline void ib_addr_get_sgid(struct rdma_dev_addr *dev_addr, + union ib_gid *gid) { - return (union ib_gid *) (dev_addr->src_dev_addr + 4); + memcpy(gid, dev_addr->src_dev_addr + 4, sizeof *gid); } static inline void ib_addr_set_sgid(struct rdma_dev_addr *dev_addr, @@ -100,9 +101,10 @@ static inline void ib_addr_set_sgid(struct rdma_dev_addr *dev_addr, memcpy(dev_addr->src_dev_addr + 4, gid, sizeof *gid); } -static inline union ib_gid *ib_addr_get_dgid(struct rdma_dev_addr *dev_addr) +static inline void ib_addr_get_dgid(struct rdma_dev_addr *dev_addr, + union ib_gid *gid) { - return (union ib_gid *) (dev_addr->dst_dev_addr + 4); + memcpy(gid, dev_addr->dst_dev_addr + 4, sizeof *gid); } static inline void ib_addr_set_dgid(struct rdma_dev_addr *dev_addr, diff --git a/include/rdma/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h index 4ace54cd0cce..00dadbf94e1d 100644 --- a/include/rdma/ib_fmr_pool.h +++ b/include/rdma/ib_fmr_pool.h @@ -88,7 +88,7 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool); struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, u64 *page_list, int list_len, - u64 *io_virtual_address); + u64 io_virtual_address); int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr); diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 5ff77558013b..585d28e960dd 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -75,6 +75,7 @@ #define IB_MGMT_METHOD_TRAP_REPRESS 0x07 #define IB_MGMT_METHOD_RESP 0x80 +#define IB_BM_ATTR_MOD_RESP cpu_to_be32(1) #define IB_MGMT_MAX_METHODS 128 @@ -247,6 +248,12 @@ struct ib_mad_send_buf { }; /** + * ib_response_mad - Returns if the specified MAD has been generated in + * response to a sent request or trap. + */ +int ib_response_mad(struct ib_mad *mad); + +/** * ib_get_rmpp_resptime - Returns the RMPP response time. * @rmpp_hdr: An RMPP header. 
*/ diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 371f70d9aa92..58e6444eebee 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -58,9 +58,7 @@ struct scsi_cmnd { int timeout_per_command; unsigned char cmd_len; - unsigned char old_cmd_len; enum dma_data_direction sc_data_direction; - enum dma_data_direction sc_old_data_direction; /* These elements define the operation we are about to perform */ #define MAX_COMMAND_SIZE 16 @@ -71,18 +69,11 @@ struct scsi_cmnd { void *request_buffer; /* Actual requested buffer */ /* These elements define the operation we ultimately want to perform */ - unsigned char data_cmnd[MAX_COMMAND_SIZE]; - unsigned short old_use_sg; /* We save use_sg here when requesting - * sense info */ unsigned short use_sg; /* Number of pieces of scatter-gather */ unsigned short sglist_len; /* size of malloc'd scatter-gather list */ - unsigned bufflen; /* Size of data buffer */ - void *buffer; /* Data buffer */ unsigned underflow; /* Return error if less than this amount is transferred */ - unsigned old_underflow; /* save underflow here when reusing the - * command for error handling */ unsigned transfersize; /* How much we are guaranteed to transfer with each SCSI transfer diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h index e3c503cd175e..6cc2314098cf 100644 --- a/include/scsi/scsi_transport_sas.h +++ b/include/scsi/scsi_transport_sas.h @@ -106,6 +106,7 @@ struct sas_end_device { struct sas_expander_device { int level; + int next_port_id; #define SAS_EXPANDER_VENDOR_ID_LEN 8 char vendor_id[SAS_EXPANDER_VENDOR_ID_LEN+1]; @@ -127,8 +128,10 @@ struct sas_expander_device { struct sas_port { struct device dev; - u8 port_identifier; + int port_identifier; int num_phys; + /* port flags */ + unsigned int is_backlink:1; /* the other end of the link */ struct sas_rphy *rphy; @@ -168,11 +171,13 @@ extern void sas_rphy_delete(struct sas_rphy *); extern int scsi_is_sas_rphy(const struct device *); struct sas_port *sas_port_alloc(struct device *, int); +struct sas_port *sas_port_alloc_num(struct device *); int sas_port_add(struct sas_port *); void sas_port_free(struct sas_port *); void sas_port_delete(struct sas_port *); void sas_port_add_phy(struct sas_port *, struct sas_phy *); void sas_port_delete_phy(struct sas_port *, struct sas_phy *); +void sas_port_mark_backlink(struct sas_port *); int scsi_is_sas_port(const struct device *); extern struct scsi_transport_template * diff --git a/include/video/mbxfb.h b/include/video/mbxfb.h new file mode 100644 index 000000000000..3bde0f5cd55c --- /dev/null +++ b/include/video/mbxfb.h @@ -0,0 +1,28 @@ +#ifndef __MBX_FB_H +#define __MBX_FB_H + +struct mbxfb_val { + unsigned int defval; + unsigned int min; + unsigned int max; +}; + +struct fb_info; + +struct mbxfb_platform_data { + /* Screen info */ + struct mbxfb_val xres; + struct mbxfb_val yres; + struct mbxfb_val bpp; + + /* Memory info */ + unsigned long memsize; /* if 0 use ODFB? */ + unsigned long timings1; + unsigned long timings2; + unsigned long timings3; + + int (*probe)(struct fb_info *fb); + int (*remove)(struct fb_info *fb); +}; + +#endif /* __MBX_FB_H */ diff --git a/init/Kconfig b/init/Kconfig index a5b073a103e7..a099fc6526d9 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -158,6 +158,30 @@ config BSD_PROCESS_ACCT_V3 for processing it. A preliminary version of these tools is available at <http://www.physik3.uni-rostock.de/tim/kernel/utils/acct/>. 
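Purely as an illustration of the mbxfb_platform_data hook added in include/video/mbxfb.h above (all values and the variable name below are invented; the header's own "if 0 use ODFB?" comment is the only hint about what memsize == 0 means), a board file might describe its panel roughly like this:

/* Hypothetical board-support data for the MBX framebuffer driver.
 * defval/min/max presumably give the default and allowed range for
 * each screen parameter; the probe/remove hooks are optional. */
#include <video/mbxfb.h>

static struct mbxfb_platform_data example_mbxfb_pdata = {
	.xres = { .defval = 640, .min = 320, .max = 1024 },
	.yres = { .defval = 480, .min = 240, .max =  768 },
	.bpp  = { .defval =  16, .min =   8, .max =   32 },
	.memsize  = 0,	/* 0: apparently "use on-die framebuffer" (ODFB) */
	.timings1 = 0,	/* controller timing words, board specific */
	.timings2 = 0,
	.timings3 = 0,
	.probe    = NULL,
	.remove   = NULL,
};
/* ...to be attached to the matching platform device via dev.platform_data. */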
+config TASKSTATS + bool "Export task/process statistics through netlink (EXPERIMENTAL)" + depends on NET + default n + help + Export selected statistics for tasks/processes through the + generic netlink interface. Unlike BSD process accounting, the + statistics are available during the lifetime of tasks/processes as + responses to commands. Like BSD accounting, they are sent to user + space on task exit. + + Say N if unsure. + +config TASK_DELAY_ACCT + bool "Enable per-task delay accounting (EXPERIMENTAL)" + depends on TASKSTATS + help + Collect information on time spent by a task waiting for system + resources like cpu, synchronous block I/O completion and swapping + in pages. Such statistics can help in setting a task's priorities + relative to other tasks for cpu, io, rss limits etc. + + Say N if unsure. + config SYSCTL bool "Sysctl support" if EMBEDDED default y diff --git a/init/main.c b/init/main.c index 628b8e9e841a..8651a720a092 100644 --- a/init/main.c +++ b/init/main.c @@ -41,6 +41,8 @@ #include <linux/cpu.h> #include <linux/cpuset.h> #include <linux/efi.h> +#include <linux/taskstats_kern.h> +#include <linux/delayacct.h> #include <linux/unistd.h> #include <linux/rmap.h> #include <linux/mempolicy.h> @@ -574,6 +576,8 @@ asmlinkage void __init start_kernel(void) proc_root_init(); #endif cpuset_init(); + taskstats_init_early(); + delayacct_init(); check_bugs(); diff --git a/kernel/Makefile b/kernel/Makefile index 47dbcd570cd8..d62ec66c1af2 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -48,6 +48,8 @@ obj-$(CONFIG_GENERIC_HARDIRQS) += irq/ obj-$(CONFIG_SECCOMP) += seccomp.o obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o obj-$(CONFIG_RELAY) += relay.o +obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o +obj-$(CONFIG_TASKSTATS) += taskstats.o ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y) # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is diff --git a/kernel/acct.c b/kernel/acct.c index f18e0b8df3e1..2a7c933651c7 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -488,7 +488,7 @@ static void do_acct_process(struct file *file) old_encode_dev(tty_devnum(current->signal->tty)) : 0; read_unlock(&tasklist_lock); - spin_lock(¤t->sighand->siglock); + spin_lock_irq(¤t->sighand->siglock); ac.ac_utime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_utime))); ac.ac_stime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_stime))); ac.ac_flag = pacct->ac_flag; @@ -496,7 +496,7 @@ static void do_acct_process(struct file *file) ac.ac_minflt = encode_comp_t(pacct->ac_minflt); ac.ac_majflt = encode_comp_t(pacct->ac_majflt); ac.ac_exitcode = pacct->ac_exitcode; - spin_unlock(¤t->sighand->siglock); + spin_unlock_irq(¤t->sighand->siglock); ac.ac_io = encode_comp_t(0 /* current->io_usage */); /* %% */ ac.ac_rw = encode_comp_t(ac.ac_io / 1024); ac.ac_swaps = encode_comp_t(0); diff --git a/kernel/cpu.c b/kernel/cpu.c index 70fbf2e83766..f230f9ae01c2 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -16,56 +16,48 @@ #include <linux/mutex.h> /* This protects CPUs going up and down... 
*/ -static DEFINE_MUTEX(cpucontrol); +static DEFINE_MUTEX(cpu_add_remove_lock); +static DEFINE_MUTEX(cpu_bitmask_lock); static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain); #ifdef CONFIG_HOTPLUG_CPU -static struct task_struct *lock_cpu_hotplug_owner; -static int lock_cpu_hotplug_depth; -static int __lock_cpu_hotplug(int interruptible) -{ - int ret = 0; - - if (lock_cpu_hotplug_owner != current) { - if (interruptible) - ret = mutex_lock_interruptible(&cpucontrol); - else - mutex_lock(&cpucontrol); - } - - /* - * Set only if we succeed in locking - */ - if (!ret) { - lock_cpu_hotplug_depth++; - lock_cpu_hotplug_owner = current; - } - - return ret; -} +/* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */ +static struct task_struct *recursive; +static int recursive_depth; void lock_cpu_hotplug(void) { - __lock_cpu_hotplug(0); + struct task_struct *tsk = current; + + if (tsk == recursive) { + static int warnings = 10; + if (warnings) { + printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n"); + WARN_ON(1); + warnings--; + } + recursive_depth++; + return; + } + mutex_lock(&cpu_bitmask_lock); + recursive = tsk; } EXPORT_SYMBOL_GPL(lock_cpu_hotplug); void unlock_cpu_hotplug(void) { - if (--lock_cpu_hotplug_depth == 0) { - lock_cpu_hotplug_owner = NULL; - mutex_unlock(&cpucontrol); + WARN_ON(recursive != current); + if (recursive_depth) { + recursive_depth--; + return; } + mutex_unlock(&cpu_bitmask_lock); + recursive = NULL; } EXPORT_SYMBOL_GPL(unlock_cpu_hotplug); -int lock_cpu_hotplug_interruptible(void) -{ - return __lock_cpu_hotplug(1); -} -EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible); #endif /* CONFIG_HOTPLUG_CPU */ /* Need to know about CPUs going up/down? */ @@ -122,9 +114,7 @@ int cpu_down(unsigned int cpu) struct task_struct *p; cpumask_t old_allowed, tmp; - if ((err = lock_cpu_hotplug_interruptible()) != 0) - return err; - + mutex_lock(&cpu_add_remove_lock); if (num_online_cpus() == 1) { err = -EBUSY; goto out; @@ -150,7 +140,10 @@ int cpu_down(unsigned int cpu) cpu_clear(cpu, tmp); set_cpus_allowed(current, tmp); + mutex_lock(&cpu_bitmask_lock); p = __stop_machine_run(take_cpu_down, NULL, cpu); + mutex_unlock(&cpu_bitmask_lock); + if (IS_ERR(p)) { /* CPU didn't die: tell everyone. Can't complain. */ if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, @@ -187,7 +180,7 @@ out_thread: out_allowed: set_cpus_allowed(current, old_allowed); out: - unlock_cpu_hotplug(); + mutex_unlock(&cpu_add_remove_lock); return err; } #endif /*CONFIG_HOTPLUG_CPU*/ @@ -197,9 +190,7 @@ int __devinit cpu_up(unsigned int cpu) int ret; void *hcpu = (void *)(long)cpu; - if ((ret = lock_cpu_hotplug_interruptible()) != 0) - return ret; - + mutex_lock(&cpu_add_remove_lock); if (cpu_online(cpu) || !cpu_present(cpu)) { ret = -EINVAL; goto out; @@ -214,7 +205,9 @@ int __devinit cpu_up(unsigned int cpu) } /* Arch-specific enabling code. */ + mutex_lock(&cpu_bitmask_lock); ret = __cpu_up(cpu); + mutex_unlock(&cpu_bitmask_lock); if (ret != 0) goto out_notify; BUG_ON(!cpu_online(cpu)); @@ -227,6 +220,6 @@ out_notify: blocking_notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu); out: - unlock_cpu_hotplug(); + mutex_unlock(&cpu_add_remove_lock); return ret; } diff --git a/kernel/cpuset.c b/kernel/cpuset.c index c232dc077438..1a649f2bb9bb 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -762,6 +762,8 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) * * Call with manage_mutex held. 
May nest a call to the * lock_cpu_hotplug()/unlock_cpu_hotplug() pair. + * Must not be called holding callback_mutex, because we must + * not call lock_cpu_hotplug() while holding callback_mutex. */ static void update_cpu_domains(struct cpuset *cur) @@ -781,7 +783,7 @@ static void update_cpu_domains(struct cpuset *cur) if (is_cpu_exclusive(c)) cpus_andnot(pspan, pspan, c->cpus_allowed); } - if (is_removed(cur) || !is_cpu_exclusive(cur)) { + if (!is_cpu_exclusive(cur)) { cpus_or(pspan, pspan, cur->cpus_allowed); if (cpus_equal(pspan, cur->cpus_allowed)) return; @@ -1917,6 +1919,17 @@ static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode) return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR); } +/* + * Locking note on the strange update_flag() call below: + * + * If the cpuset being removed is marked cpu_exclusive, then simulate + * turning cpu_exclusive off, which will call update_cpu_domains(). + * The lock_cpu_hotplug() call in update_cpu_domains() must not be + * made while holding callback_mutex. Elsewhere the kernel nests + * callback_mutex inside lock_cpu_hotplug() calls. So the reverse + * nesting would risk an ABBA deadlock. + */ + static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) { struct cpuset *cs = dentry->d_fsdata; @@ -1936,11 +1949,16 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) mutex_unlock(&manage_mutex); return -EBUSY; } + if (is_cpu_exclusive(cs)) { + int retval = update_flag(CS_CPU_EXCLUSIVE, cs, "0"); + if (retval < 0) { + mutex_unlock(&manage_mutex); + return retval; + } + } parent = cs->parent; mutex_lock(&callback_mutex); set_bit(CS_REMOVED, &cs->flags); - if (is_cpu_exclusive(cs)) - update_cpu_domains(cs); list_del(&cs->sibling); /* delete my sibling from parent->children */ spin_lock(&cs->dentry->d_lock); d = dget(cs->dentry); diff --git a/kernel/delayacct.c b/kernel/delayacct.c new file mode 100644 index 000000000000..f05392d64267 --- /dev/null +++ b/kernel/delayacct.c @@ -0,0 +1,178 @@ +/* delayacct.c - per-task delay accounting + * + * Copyright (C) Shailabh Nagar, IBM Corp. 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. 
+ */ + +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/time.h> +#include <linux/sysctl.h> +#include <linux/delayacct.h> + +int delayacct_on __read_mostly; /* Delay accounting turned on/off */ +kmem_cache_t *delayacct_cache; + +static int __init delayacct_setup_enable(char *str) +{ + delayacct_on = 1; + return 1; +} +__setup("delayacct", delayacct_setup_enable); + +void delayacct_init(void) +{ + delayacct_cache = kmem_cache_create("delayacct_cache", + sizeof(struct task_delay_info), + 0, + SLAB_PANIC, + NULL, NULL); + delayacct_tsk_init(&init_task); +} + +void __delayacct_tsk_init(struct task_struct *tsk) +{ + spin_lock_init(&tsk->delays_lock); + /* No need to acquire tsk->delays_lock for allocation here unless + __delayacct_tsk_init called after tsk is attached to tasklist + */ + tsk->delays = kmem_cache_zalloc(delayacct_cache, SLAB_KERNEL); + if (tsk->delays) + spin_lock_init(&tsk->delays->lock); +} + +void __delayacct_tsk_exit(struct task_struct *tsk) +{ + struct task_delay_info *delays = tsk->delays; + spin_lock(&tsk->delays_lock); + tsk->delays = NULL; + spin_unlock(&tsk->delays_lock); + kmem_cache_free(delayacct_cache, delays); +} + +/* + * Start accounting for a delay statistic using + * its starting timestamp (@start) + */ + +static inline void delayacct_start(struct timespec *start) +{ + do_posix_clock_monotonic_gettime(start); +} + +/* + * Finish delay accounting for a statistic using + * its timestamps (@start, @end), accumalator (@total) and @count + */ + +static void delayacct_end(struct timespec *start, struct timespec *end, + u64 *total, u32 *count) +{ + struct timespec ts; + s64 ns; + + do_posix_clock_monotonic_gettime(end); + ts = timespec_sub(*end, *start); + ns = timespec_to_ns(&ts); + if (ns < 0) + return; + + spin_lock(¤t->delays->lock); + *total += ns; + (*count)++; + spin_unlock(¤t->delays->lock); +} + +void __delayacct_blkio_start(void) +{ + delayacct_start(¤t->delays->blkio_start); +} + +void __delayacct_blkio_end(void) +{ + if (current->delays->flags & DELAYACCT_PF_SWAPIN) + /* Swapin block I/O */ + delayacct_end(¤t->delays->blkio_start, + ¤t->delays->blkio_end, + ¤t->delays->swapin_delay, + ¤t->delays->swapin_count); + else /* Other block I/O */ + delayacct_end(¤t->delays->blkio_start, + ¤t->delays->blkio_end, + ¤t->delays->blkio_delay, + ¤t->delays->blkio_count); +} + +int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) +{ + s64 tmp; + struct timespec ts; + unsigned long t1,t2,t3; + + spin_lock(&tsk->delays_lock); + + /* Though tsk->delays accessed later, early exit avoids + * unnecessary returning of other data + */ + if (!tsk->delays) + goto done; + + tmp = (s64)d->cpu_run_real_total; + cputime_to_timespec(tsk->utime + tsk->stime, &ts); + tmp += timespec_to_ns(&ts); + d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp; + + /* + * No locking available for sched_info (and too expensive to add one) + * Mitigate by taking snapshot of values + */ + t1 = tsk->sched_info.pcnt; + t2 = tsk->sched_info.run_delay; + t3 = tsk->sched_info.cpu_time; + + d->cpu_count += t1; + + jiffies_to_timespec(t2, &ts); + tmp = (s64)d->cpu_delay_total + timespec_to_ns(&ts); + d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp; + + tmp = (s64)d->cpu_run_virtual_total + (s64)jiffies_to_usecs(t3) * 1000; + d->cpu_run_virtual_total = + (tmp < (s64)d->cpu_run_virtual_total) ? 
0 : tmp; + + /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */ + + spin_lock(&tsk->delays->lock); + tmp = d->blkio_delay_total + tsk->delays->blkio_delay; + d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp; + tmp = d->swapin_delay_total + tsk->delays->swapin_delay; + d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp; + d->blkio_count += tsk->delays->blkio_count; + d->swapin_count += tsk->delays->swapin_count; + spin_unlock(&tsk->delays->lock); + +done: + spin_unlock(&tsk->delays_lock); + return 0; +} + +__u64 __delayacct_blkio_ticks(struct task_struct *tsk) +{ + __u64 ret; + + spin_lock(&tsk->delays->lock); + ret = nsec_to_clock_t(tsk->delays->blkio_delay + + tsk->delays->swapin_delay); + spin_unlock(&tsk->delays->lock); + return ret; +} + diff --git a/kernel/exit.c b/kernel/exit.c index 6664c084783d..dba194a8d416 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -25,6 +25,8 @@ #include <linux/mount.h> #include <linux/proc_fs.h> #include <linux/mempolicy.h> +#include <linux/taskstats_kern.h> +#include <linux/delayacct.h> #include <linux/cpuset.h> #include <linux/syscalls.h> #include <linux/signal.h> @@ -843,7 +845,9 @@ static void exit_notify(struct task_struct *tsk) fastcall NORET_TYPE void do_exit(long code) { struct task_struct *tsk = current; + struct taskstats *tidstats; int group_dead; + unsigned int mycpu; profile_task_exit(tsk); @@ -881,6 +885,8 @@ fastcall NORET_TYPE void do_exit(long code) current->comm, current->pid, preempt_count()); + taskstats_exit_alloc(&tidstats, &mycpu); + acct_update_integrals(tsk); if (tsk->mm) { update_hiwater_rss(tsk->mm); @@ -900,6 +906,10 @@ fastcall NORET_TYPE void do_exit(long code) #endif if (unlikely(tsk->audit_context)) audit_free(tsk); + taskstats_exit_send(tsk, tidstats, group_dead, mycpu); + taskstats_exit_free(tidstats); + delayacct_tsk_exit(tsk); + exit_mm(tsk); if (group_dead) diff --git a/kernel/fork.c b/kernel/fork.c index 926e5a68ea9e..1b0f7b1e0881 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -43,6 +43,8 @@ #include <linux/rmap.h> #include <linux/acct.h> #include <linux/cn_proc.h> +#include <linux/delayacct.h> +#include <linux/taskstats_kern.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> @@ -818,6 +820,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts if (clone_flags & CLONE_THREAD) { atomic_inc(¤t->signal->count); atomic_inc(¤t->signal->live); + taskstats_tgid_alloc(current->signal); return 0; } sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); @@ -862,6 +865,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts INIT_LIST_HEAD(&sig->cpu_timers[0]); INIT_LIST_HEAD(&sig->cpu_timers[1]); INIT_LIST_HEAD(&sig->cpu_timers[2]); + taskstats_tgid_init(sig); task_lock(current->group_leader); memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); @@ -883,6 +887,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts void __cleanup_signal(struct signal_struct *sig) { exit_thread_group_keys(sig); + taskstats_tgid_free(sig); kmem_cache_free(signal_cachep, sig); } @@ -1000,6 +1005,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, goto bad_fork_cleanup_put_domain; p->did_exec = 0; + delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ copy_flags(clone_flags, p); p->pid = pid; retval = -EFAULT; diff --git a/kernel/futex.c b/kernel/futex.c index cf0c8e21d1ab..dda2049692a2 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -415,15 +415,15 @@ out_unlock: */ void 
exit_pi_state_list(struct task_struct *curr) { - struct futex_hash_bucket *hb; struct list_head *next, *head = &curr->pi_state_list; struct futex_pi_state *pi_state; + struct futex_hash_bucket *hb; union futex_key key; /* * We are a ZOMBIE and nobody can enqueue itself on * pi_state_list anymore, but we have to be careful - * versus waiters unqueueing themselfs + * versus waiters unqueueing themselves: */ spin_lock_irq(&curr->pi_lock); while (!list_empty(head)) { @@ -431,21 +431,24 @@ void exit_pi_state_list(struct task_struct *curr) next = head->next; pi_state = list_entry(next, struct futex_pi_state, list); key = pi_state->key; + hb = hash_futex(&key); spin_unlock_irq(&curr->pi_lock); - hb = hash_futex(&key); spin_lock(&hb->lock); spin_lock_irq(&curr->pi_lock); + /* + * We dropped the pi-lock, so re-check whether this + * task still owns the PI-state: + */ if (head->next != next) { spin_unlock(&hb->lock); continue; } - list_del_init(&pi_state->list); - WARN_ON(pi_state->owner != curr); - + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); pi_state->owner = NULL; spin_unlock_irq(&curr->pi_lock); @@ -470,7 +473,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) head = &hb->chain; list_for_each_entry_safe(this, next, head, list) { - if (match_futex (&this->key, &me->key)) { + if (match_futex(&this->key, &me->key)) { /* * Another waiter already exists - bump up * the refcount and return its pi_state: @@ -482,6 +485,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) if (unlikely(!pi_state)) return -EINVAL; + WARN_ON(!atomic_read(&pi_state->refcount)); + atomic_inc(&pi_state->refcount); me->pi_state = pi_state; @@ -490,10 +495,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) } /* - * We are the first waiter - try to look up the real owner and - * attach the new pi_state to it: + * We are the first waiter - try to look up the real owner and attach + * the new pi_state to it, but bail out when the owner died bit is set + * and TID = 0: */ pid = uval & FUTEX_TID_MASK; + if (!pid && (uval & FUTEX_OWNER_DIED)) + return -ESRCH; p = futex_find_get_task(pid); if (!p) return -ESRCH; @@ -510,6 +518,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) pi_state->key = me->key; spin_lock_irq(&p->pi_lock); + WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &p->pi_state_list); pi_state->owner = p; spin_unlock_irq(&p->pi_lock); @@ -573,20 +582,29 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) * kept enabled while there is PI state around. We must also * preserve the owner died bit.) 
*/ - newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner->pid; + if (!(uval & FUTEX_OWNER_DIED)) { + newval = FUTEX_WAITERS | new_owner->pid; - inc_preempt_count(); - curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); - dec_preempt_count(); + inc_preempt_count(); + curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); + dec_preempt_count(); + if (curval == -EFAULT) + return -EFAULT; + if (curval != uval) + return -EINVAL; + } - if (curval == -EFAULT) - return -EFAULT; - if (curval != uval) - return -EINVAL; + spin_lock_irq(&pi_state->owner->pi_lock); + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); + spin_unlock_irq(&pi_state->owner->pi_lock); - list_del_init(&pi_state->owner->pi_state_list); + spin_lock_irq(&new_owner->pi_lock); + WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &new_owner->pi_state_list); pi_state->owner = new_owner; + spin_unlock_irq(&new_owner->pi_lock); + rt_mutex_unlock(&pi_state->pi_mutex); return 0; @@ -1236,6 +1254,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock, /* Owner died? */ if (q.pi_state->owner != NULL) { spin_lock_irq(&q.pi_state->owner->pi_lock); + WARN_ON(list_empty(&q.pi_state->list)); list_del_init(&q.pi_state->list); spin_unlock_irq(&q.pi_state->owner->pi_lock); } else @@ -1244,6 +1263,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock, q.pi_state->owner = current; spin_lock_irq(¤t->pi_lock); + WARN_ON(!list_empty(&q.pi_state->list)); list_add(&q.pi_state->list, ¤t->pi_state_list); spin_unlock_irq(¤t->pi_lock); @@ -1427,9 +1447,11 @@ retry_locked: * again. If it succeeds then we can return without waking * anyone else up: */ - inc_preempt_count(); - uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0); - dec_preempt_count(); + if (!(uval & FUTEX_OWNER_DIED)) { + inc_preempt_count(); + uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0); + dec_preempt_count(); + } if (unlikely(uval == -EFAULT)) goto pi_faulted; @@ -1462,9 +1484,11 @@ retry_locked: /* * No waiters - kernel unlocks the futex: */ - ret = unlock_futex_pi(uaddr, uval); - if (ret == -EFAULT) - goto pi_faulted; + if (!(uval & FUTEX_OWNER_DIED)) { + ret = unlock_futex_pi(uaddr, uval); + if (ret == -EFAULT) + goto pi_faulted; + } out_unlock: spin_unlock(&hb->lock); @@ -1683,9 +1707,9 @@ err_unlock: * Process a futex-list entry, check whether it's owned by the * dying task, and do notification if so: */ -int handle_futex_death(u32 __user *uaddr, struct task_struct *curr) +int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) { - u32 uval, nval; + u32 uval, nval, mval; retry: if (get_user(uval, uaddr)) @@ -1702,21 +1726,45 @@ retry: * thread-death.) The rest of the cleanup is done in * userspace. */ - nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, - uval | FUTEX_OWNER_DIED); + mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; + nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval); + if (nval == -EFAULT) return -1; if (nval != uval) goto retry; - if (uval & FUTEX_WAITERS) - futex_wake(uaddr, 1); + /* + * Wake robust non-PI futexes here. The wakeup of + * PI futexes happens in exit_pi_state(): + */ + if (!pi) { + if (uval & FUTEX_WAITERS) + futex_wake(uaddr, 1); + } } return 0; } /* + * Fetch a robust-list pointer. 
Bit 0 signals PI futexes: + */ +static inline int fetch_robust_entry(struct robust_list __user **entry, + struct robust_list __user **head, int *pi) +{ + unsigned long uentry; + + if (get_user(uentry, (unsigned long *)head)) + return -EFAULT; + + *entry = (void *)(uentry & ~1UL); + *pi = uentry & 1; + + return 0; +} + +/* * Walk curr->robust_list (very carefully, it's a userspace list!) * and mark any locks found there dead, and notify any waiters. * @@ -1726,14 +1774,14 @@ void exit_robust_list(struct task_struct *curr) { struct robust_list_head __user *head = curr->robust_list; struct robust_list __user *entry, *pending; - unsigned int limit = ROBUST_LIST_LIMIT; + unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; unsigned long futex_offset; /* * Fetch the list head (which was registered earlier, via * sys_set_robust_list()): */ - if (get_user(entry, &head->list.next)) + if (fetch_robust_entry(&entry, &head->list.next, &pi)) return; /* * Fetch the relative futex offset: @@ -1744,10 +1792,11 @@ void exit_robust_list(struct task_struct *curr) * Fetch any possibly pending lock-add first, and handle it * if it exists: */ - if (get_user(pending, &head->list_op_pending)) + if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) return; + if (pending) - handle_futex_death((void *)pending + futex_offset, curr); + handle_futex_death((void *)pending + futex_offset, curr, pip); while (entry != &head->list) { /* @@ -1756,12 +1805,12 @@ void exit_robust_list(struct task_struct *curr) */ if (entry != pending) if (handle_futex_death((void *)entry + futex_offset, - curr)) + curr, pi)) return; /* * Fetch the next entry in the list: */ - if (get_user(entry, &entry->next)) + if (fetch_robust_entry(&entry, &entry->next, &pi)) return; /* * Avoid excessively long or circular lists: diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index d1d92b441fb7..d1aab1a452cc 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c @@ -12,6 +12,23 @@ #include <asm/uaccess.h> + +/* + * Fetch a robust-list pointer. Bit 0 signals PI futexes: + */ +static inline int +fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, + compat_uptr_t *head, int *pi) +{ + if (get_user(*uentry, head)) + return -EFAULT; + + *entry = compat_ptr((*uentry) & ~1); + *pi = (unsigned int)(*uentry) & 1; + + return 0; +} + /* * Walk curr->robust_list (very carefully, it's a userspace list!) * and mark any locks found there dead, and notify any waiters. 
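
The robust-list entries walked above are ordinary user-space pointers, so the only place the "is this a PI futex?" flag can live is in the pointer value itself; fetch_robust_entry() strips bit 0 and reports it through *pi. As a purely illustrative sketch (not part of this patch, and not necessarily how any particular C library encodes its entries), the user-space side could tag and untag a list pointer like this:

        #include <linux/futex.h>        /* struct robust_list */

        /* Hypothetical helpers: bit 0 of the stored pointer marks the entry
         * as belonging to a PI futex, matching the decoding performed by
         * fetch_robust_entry() in the hunk above. */
        static inline struct robust_list *
        robust_entry_encode(struct robust_list *entry, int is_pi)
        {
                return (struct robust_list *)
                        ((unsigned long)entry | (is_pi ? 1UL : 0UL));
        }

        static inline struct robust_list *
        robust_entry_decode(struct robust_list *tagged, int *is_pi)
        {
                *is_pi = (unsigned long)tagged & 1;
                return (struct robust_list *)((unsigned long)tagged & ~1UL);
        }

With the flag carried in the pointer, handle_futex_death() can wake waiters directly for ordinary robust futexes and leave PI futexes to exit_pi_state_list(), which is exactly what the new pi argument selects.
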
@@ -22,17 +39,16 @@ void compat_exit_robust_list(struct task_struct *curr) { struct compat_robust_list_head __user *head = curr->compat_robust_list; struct robust_list __user *entry, *pending; + unsigned int limit = ROBUST_LIST_LIMIT, pi; compat_uptr_t uentry, upending; - unsigned int limit = ROBUST_LIST_LIMIT; compat_long_t futex_offset; /* * Fetch the list head (which was registered earlier, via * sys_set_robust_list()): */ - if (get_user(uentry, &head->list.next)) + if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) return; - entry = compat_ptr(uentry); /* * Fetch the relative futex offset: */ @@ -42,11 +58,11 @@ void compat_exit_robust_list(struct task_struct *curr) * Fetch any possibly pending lock-add first, and handle it * if it exists: */ - if (get_user(upending, &head->list_op_pending)) + if (fetch_robust_entry(&upending, &pending, + &head->list_op_pending, &pi)) return; - pending = compat_ptr(upending); if (upending) - handle_futex_death((void *)pending + futex_offset, curr); + handle_futex_death((void *)pending + futex_offset, curr, pi); while (compat_ptr(uentry) != &head->list) { /* @@ -55,15 +71,15 @@ void compat_exit_robust_list(struct task_struct *curr) */ if (entry != pending) if (handle_futex_death((void *)entry + futex_offset, - curr)) + curr, pi)) return; /* * Fetch the next entry in the list: */ - if (get_user(uentry, (compat_uptr_t *)&entry->next)) + if (fetch_robust_entry(&uentry, &entry, + (compat_uptr_t *)&entry->next, &pi)) return; - entry = compat_ptr(uentry); /* * Avoid excessively long or circular lists: */ diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 39277dd6bf90..ab16a5a4cfe9 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -275,8 +275,8 @@ static void upcase_if_global(struct kallsym_iter *iter) static int get_ksymbol_mod(struct kallsym_iter *iter) { iter->owner = module_get_kallsym(iter->pos - kallsyms_num_syms, - &iter->value, - &iter->type, iter->name); + &iter->value, &iter->type, + iter->name, sizeof(iter->name)); if (iter->owner == NULL) return 0; diff --git a/kernel/kthread.c b/kernel/kthread.c index 24be714b04c7..4f9c60ef95e8 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -216,23 +216,6 @@ EXPORT_SYMBOL(kthread_bind); */ int kthread_stop(struct task_struct *k) { - return kthread_stop_sem(k, NULL); -} -EXPORT_SYMBOL(kthread_stop); - -/** - * kthread_stop_sem - stop a thread created by kthread_create(). - * @k: thread created by kthread_create(). - * @s: semaphore that @k waits on while idle. - * - * Does essentially the same thing as kthread_stop() above, but wakes - * @k by calling up(@s). - * - * Returns the result of threadfn(), or %-EINTR if wake_up_process() - * was never called. - */ -int kthread_stop_sem(struct task_struct *k, struct semaphore *s) -{ int ret; mutex_lock(&kthread_stop_lock); @@ -246,10 +229,7 @@ int kthread_stop_sem(struct task_struct *k, struct semaphore *s) /* Now set kthread_should_stop() to true, and wake it up. */ kthread_stop_info.k = k; - if (s) - up(s); - else - wake_up_process(k); + wake_up_process(k); put_task_struct(k); /* Once it dies, reset stop ptr, gather result and we're done. 
*/ @@ -260,7 +240,7 @@ int kthread_stop_sem(struct task_struct *k, struct semaphore *s) return ret; } -EXPORT_SYMBOL(kthread_stop_sem); +EXPORT_SYMBOL(kthread_stop); static __init int helper_init(void) { diff --git a/kernel/module.c b/kernel/module.c index 35e1b1f859d7..2a19cd47c046 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2019,10 +2019,8 @@ const char *module_address_lookup(unsigned long addr, return NULL; } -struct module *module_get_kallsym(unsigned int symnum, - unsigned long *value, - char *type, - char namebuf[128]) +struct module *module_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *name, size_t namelen) { struct module *mod; @@ -2031,9 +2029,8 @@ struct module *module_get_kallsym(unsigned int symnum, if (symnum < mod->num_symtab) { *value = mod->symtab[symnum].st_value; *type = mod->symtab[symnum].st_info; - strncpy(namebuf, - mod->strtab + mod->symtab[symnum].st_name, - 127); + strlcpy(name, mod->strtab + mod->symtab[symnum].st_name, + namelen); mutex_unlock(&module_mutex); return mod; } diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c index 494dac872a13..948bd8f643e2 100644 --- a/kernel/rtmutex-tester.c +++ b/kernel/rtmutex-tester.c @@ -275,6 +275,7 @@ static int test_func(void *data) /* Wait for the next command to be executed */ schedule(); + try_to_freeze(); if (signal_pending(current)) flush_signals(current); diff --git a/kernel/sched.c b/kernel/sched.c index d714611f1691..b44b9a43b0fc 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -51,6 +51,7 @@ #include <linux/times.h> #include <linux/acct.h> #include <linux/kprobes.h> +#include <linux/delayacct.h> #include <asm/tlb.h> #include <asm/unistd.h> @@ -501,9 +502,36 @@ struct file_operations proc_schedstat_operations = { .release = single_release, }; +/* + * Expects runqueue lock to be held for atomicity of update + */ +static inline void +rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies) +{ + if (rq) { + rq->rq_sched_info.run_delay += delta_jiffies; + rq->rq_sched_info.pcnt++; + } +} + +/* + * Expects runqueue lock to be held for atomicity of update + */ +static inline void +rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies) +{ + if (rq) + rq->rq_sched_info.cpu_time += delta_jiffies; +} # define schedstat_inc(rq, field) do { (rq)->field++; } while (0) # define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0) #else /* !CONFIG_SCHEDSTATS */ +static inline void +rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies) +{} +static inline void +rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies) +{} # define schedstat_inc(rq, field) do { } while (0) # define schedstat_add(rq, field, amt) do { } while (0) #endif @@ -523,7 +551,7 @@ static inline struct rq *this_rq_lock(void) return rq; } -#ifdef CONFIG_SCHEDSTATS +#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) /* * Called when a process is dequeued from the active array and given * the cpu. 
We should note that with the exception of interactive @@ -551,21 +579,16 @@ static inline void sched_info_dequeued(struct task_struct *t) */ static void sched_info_arrive(struct task_struct *t) { - unsigned long now = jiffies, diff = 0; - struct rq *rq = task_rq(t); + unsigned long now = jiffies, delta_jiffies = 0; if (t->sched_info.last_queued) - diff = now - t->sched_info.last_queued; + delta_jiffies = now - t->sched_info.last_queued; sched_info_dequeued(t); - t->sched_info.run_delay += diff; + t->sched_info.run_delay += delta_jiffies; t->sched_info.last_arrival = now; t->sched_info.pcnt++; - if (!rq) - return; - - rq->rq_sched_info.run_delay += diff; - rq->rq_sched_info.pcnt++; + rq_sched_info_arrive(task_rq(t), delta_jiffies); } /* @@ -585,8 +608,9 @@ static void sched_info_arrive(struct task_struct *t) */ static inline void sched_info_queued(struct task_struct *t) { - if (!t->sched_info.last_queued) - t->sched_info.last_queued = jiffies; + if (unlikely(sched_info_on())) + if (!t->sched_info.last_queued) + t->sched_info.last_queued = jiffies; } /* @@ -595,13 +619,10 @@ static inline void sched_info_queued(struct task_struct *t) */ static inline void sched_info_depart(struct task_struct *t) { - struct rq *rq = task_rq(t); - unsigned long diff = jiffies - t->sched_info.last_arrival; - - t->sched_info.cpu_time += diff; + unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival; - if (rq) - rq->rq_sched_info.cpu_time += diff; + t->sched_info.cpu_time += delta_jiffies; + rq_sched_info_depart(task_rq(t), delta_jiffies); } /* @@ -610,7 +631,7 @@ static inline void sched_info_depart(struct task_struct *t) * the idle task.) We are only called when prev != next. */ static inline void -sched_info_switch(struct task_struct *prev, struct task_struct *next) +__sched_info_switch(struct task_struct *prev, struct task_struct *next) { struct rq *rq = task_rq(prev); @@ -625,10 +646,16 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next) if (next != rq->idle) sched_info_arrive(next); } +static inline void +sched_info_switch(struct task_struct *prev, struct task_struct *next) +{ + if (unlikely(sched_info_on())) + __sched_info_switch(prev, next); +} #else #define sched_info_queued(t) do { } while (0) #define sched_info_switch(t, next) do { } while (0) -#endif /* CONFIG_SCHEDSTATS */ +#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */ /* * Adding/removing a task to/from a priority array: @@ -1530,8 +1557,9 @@ void fastcall sched_fork(struct task_struct *p, int clone_flags) INIT_LIST_HEAD(&p->run_list); p->array = NULL; -#ifdef CONFIG_SCHEDSTATS - memset(&p->sched_info, 0, sizeof(p->sched_info)); +#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) + if (unlikely(sched_info_on())) + memset(&p->sched_info, 0, sizeof(p->sched_info)); #endif #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) p->oncpu = 0; @@ -1788,7 +1816,15 @@ context_switch(struct rq *rq, struct task_struct *prev, WARN_ON(rq->prev_mm); rq->prev_mm = oldmm; } + /* + * Since the runqueue lock will be released by the next + * task (which is an invalid locking op but in the case + * of the scheduler it's an obvious special-case), so we + * do an early lockdep release here: + */ +#ifndef __ARCH_WANT_UNLOCKED_CTXSW spin_release(&rq->lock.dep_map, 1, _THIS_IP_); +#endif /* Here we just switch the register state and the stack. 
*/ switch_to(prev, next, prev); @@ -4526,9 +4562,11 @@ void __sched io_schedule(void) { struct rq *rq = &__raw_get_cpu_var(runqueues); + delayacct_blkio_start(); atomic_inc(&rq->nr_iowait); schedule(); atomic_dec(&rq->nr_iowait); + delayacct_blkio_end(); } EXPORT_SYMBOL(io_schedule); @@ -4537,9 +4575,11 @@ long __sched io_schedule_timeout(long timeout) struct rq *rq = &__raw_get_cpu_var(runqueues); long ret; + delayacct_blkio_start(); atomic_inc(&rq->nr_iowait); ret = schedule_timeout(timeout); atomic_dec(&rq->nr_iowait); + delayacct_blkio_end(); return ret; } diff --git a/kernel/softirq.c b/kernel/softirq.c index fd12f2556f0d..0f08a84ae307 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -311,8 +311,6 @@ void open_softirq(int nr, void (*action)(struct softirq_action*), void *data) softirq_vec[nr].action = action; } -EXPORT_UNUSED_SYMBOL(open_softirq); /* June 2006 */ - /* Tasklets */ struct tasklet_head { diff --git a/kernel/taskstats.c b/kernel/taskstats.c new file mode 100644 index 000000000000..f45179ce028e --- /dev/null +++ b/kernel/taskstats.c @@ -0,0 +1,568 @@ +/* + * taskstats.c - Export per-task statistics to userland + * + * Copyright (C) Shailabh Nagar, IBM Corp. 2006 + * (C) Balbir Singh, IBM Corp. 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/taskstats_kern.h> +#include <linux/delayacct.h> +#include <linux/cpumask.h> +#include <linux/percpu.h> +#include <net/genetlink.h> +#include <asm/atomic.h> + +/* + * Maximum length of a cpumask that can be specified in + * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute + */ +#define TASKSTATS_CPUMASK_MAXLEN (100+6*NR_CPUS) + +static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 }; +static int family_registered; +kmem_cache_t *taskstats_cache; + +static struct genl_family family = { + .id = GENL_ID_GENERATE, + .name = TASKSTATS_GENL_NAME, + .version = TASKSTATS_GENL_VERSION, + .maxattr = TASKSTATS_CMD_ATTR_MAX, +}; + +static struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] +__read_mostly = { + [TASKSTATS_CMD_ATTR_PID] = { .type = NLA_U32 }, + [TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 }, + [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING }, + [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },}; + +struct listener { + struct list_head list; + pid_t pid; + char valid; +}; + +struct listener_list { + struct rw_semaphore sem; + struct list_head list; +}; +static DEFINE_PER_CPU(struct listener_list, listener_array); + +enum actions { + REGISTER, + DEREGISTER, + CPU_DONT_CARE +}; + +static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp, + void **replyp, size_t size) +{ + struct sk_buff *skb; + void *reply; + + /* + * If new attributes are added, please revisit this allocation + */ + skb = nlmsg_new(size); + if (!skb) + return -ENOMEM; + + if (!info) { + int seq = get_cpu_var(taskstats_seqnum)++; + put_cpu_var(taskstats_seqnum); + + reply = genlmsg_put(skb, 0, seq, + family.id, 0, 0, + cmd, family.version); + } else + reply = 
genlmsg_put(skb, info->snd_pid, info->snd_seq, + family.id, 0, 0, + cmd, family.version); + if (reply == NULL) { + nlmsg_free(skb); + return -EINVAL; + } + + *skbp = skb; + *replyp = reply; + return 0; +} + +/* + * Send taskstats data in @skb to listener with nl_pid @pid + */ +static int send_reply(struct sk_buff *skb, pid_t pid) +{ + struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data); + void *reply = genlmsg_data(genlhdr); + int rc; + + rc = genlmsg_end(skb, reply); + if (rc < 0) { + nlmsg_free(skb); + return rc; + } + + return genlmsg_unicast(skb, pid); +} + +/* + * Send taskstats data in @skb to listeners registered for @cpu's exit data + */ +static int send_cpu_listeners(struct sk_buff *skb, unsigned int cpu) +{ + struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data); + struct listener_list *listeners; + struct listener *s, *tmp; + struct sk_buff *skb_next, *skb_cur = skb; + void *reply = genlmsg_data(genlhdr); + int rc, ret, delcount = 0; + + rc = genlmsg_end(skb, reply); + if (rc < 0) { + nlmsg_free(skb); + return rc; + } + + rc = 0; + listeners = &per_cpu(listener_array, cpu); + down_read(&listeners->sem); + list_for_each_entry_safe(s, tmp, &listeners->list, list) { + skb_next = NULL; + if (!list_is_last(&s->list, &listeners->list)) { + skb_next = skb_clone(skb_cur, GFP_KERNEL); + if (!skb_next) { + nlmsg_free(skb_cur); + rc = -ENOMEM; + break; + } + } + ret = genlmsg_unicast(skb_cur, s->pid); + if (ret == -ECONNREFUSED) { + s->valid = 0; + delcount++; + rc = ret; + } + skb_cur = skb_next; + } + up_read(&listeners->sem); + + if (!delcount) + return rc; + + /* Delete invalidated entries */ + down_write(&listeners->sem); + list_for_each_entry_safe(s, tmp, &listeners->list, list) { + if (!s->valid) { + list_del(&s->list); + kfree(s); + } + } + up_write(&listeners->sem); + return rc; +} + +static int fill_pid(pid_t pid, struct task_struct *pidtsk, + struct taskstats *stats) +{ + int rc; + struct task_struct *tsk = pidtsk; + + if (!pidtsk) { + read_lock(&tasklist_lock); + tsk = find_task_by_pid(pid); + if (!tsk) { + read_unlock(&tasklist_lock); + return -ESRCH; + } + get_task_struct(tsk); + read_unlock(&tasklist_lock); + } else + get_task_struct(tsk); + + /* + * Each accounting subsystem adds calls to its functions to + * fill in relevant parts of struct taskstsats as follows + * + * rc = per-task-foo(stats, tsk); + * if (rc) + * goto err; + */ + + rc = delayacct_add_tsk(stats, tsk); + stats->version = TASKSTATS_VERSION; + + /* Define err: label here if needed */ + put_task_struct(tsk); + return rc; + +} + +static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk, + struct taskstats *stats) +{ + struct task_struct *tsk, *first; + unsigned long flags; + + /* + * Add additional stats from live tasks except zombie thread group + * leaders who are already counted with the dead tasks + */ + first = tgidtsk; + if (!first) { + read_lock(&tasklist_lock); + first = find_task_by_pid(tgid); + if (!first) { + read_unlock(&tasklist_lock); + return -ESRCH; + } + get_task_struct(first); + read_unlock(&tasklist_lock); + } else + get_task_struct(first); + + /* Start with stats from dead tasks */ + spin_lock_irqsave(&first->signal->stats_lock, flags); + if (first->signal->stats) + memcpy(stats, first->signal->stats, sizeof(*stats)); + spin_unlock_irqrestore(&first->signal->stats_lock, flags); + + tsk = first; + read_lock(&tasklist_lock); + do { + if (tsk->exit_state == EXIT_ZOMBIE && thread_group_leader(tsk)) + continue; + /* + * Accounting subsystem can call its 
functions here to + * fill in relevant parts of struct taskstsats as follows + * + * per-task-foo(stats, tsk); + */ + delayacct_add_tsk(stats, tsk); + + } while_each_thread(first, tsk); + read_unlock(&tasklist_lock); + stats->version = TASKSTATS_VERSION; + + /* + * Accounting subsytems can also add calls here to modify + * fields of taskstats. + */ + + return 0; +} + + +static void fill_tgid_exit(struct task_struct *tsk) +{ + unsigned long flags; + + spin_lock_irqsave(&tsk->signal->stats_lock, flags); + if (!tsk->signal->stats) + goto ret; + + /* + * Each accounting subsystem calls its functions here to + * accumalate its per-task stats for tsk, into the per-tgid structure + * + * per-task-foo(tsk->signal->stats, tsk); + */ + delayacct_add_tsk(tsk->signal->stats, tsk); +ret: + spin_unlock_irqrestore(&tsk->signal->stats_lock, flags); + return; +} + +static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) +{ + struct listener_list *listeners; + struct listener *s, *tmp; + unsigned int cpu; + cpumask_t mask = *maskp; + + if (!cpus_subset(mask, cpu_possible_map)) + return -EINVAL; + + if (isadd == REGISTER) { + for_each_cpu_mask(cpu, mask) { + s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, + cpu_to_node(cpu)); + if (!s) + goto cleanup; + s->pid = pid; + INIT_LIST_HEAD(&s->list); + s->valid = 1; + + listeners = &per_cpu(listener_array, cpu); + down_write(&listeners->sem); + list_add(&s->list, &listeners->list); + up_write(&listeners->sem); + } + return 0; + } + + /* Deregister or cleanup */ +cleanup: + for_each_cpu_mask(cpu, mask) { + listeners = &per_cpu(listener_array, cpu); + down_write(&listeners->sem); + list_for_each_entry_safe(s, tmp, &listeners->list, list) { + if (s->pid == pid) { + list_del(&s->list); + kfree(s); + break; + } + } + up_write(&listeners->sem); + } + return 0; +} + +static int parse(struct nlattr *na, cpumask_t *mask) +{ + char *data; + int len; + int ret; + + if (na == NULL) + return 1; + len = nla_len(na); + if (len > TASKSTATS_CPUMASK_MAXLEN) + return -E2BIG; + if (len < 1) + return -EINVAL; + data = kmalloc(len, GFP_KERNEL); + if (!data) + return -ENOMEM; + nla_strlcpy(data, na, len); + ret = cpulist_parse(data, *mask); + kfree(data); + return ret; +} + +static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) +{ + int rc = 0; + struct sk_buff *rep_skb; + struct taskstats stats; + void *reply; + size_t size; + struct nlattr *na; + cpumask_t mask; + + rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask); + if (rc < 0) + return rc; + if (rc == 0) + return add_del_listener(info->snd_pid, &mask, REGISTER); + + rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask); + if (rc < 0) + return rc; + if (rc == 0) + return add_del_listener(info->snd_pid, &mask, DEREGISTER); + + /* + * Size includes space for nested attributes + */ + size = nla_total_size(sizeof(u32)) + + nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); + + memset(&stats, 0, sizeof(stats)); + rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, &reply, size); + if (rc < 0) + return rc; + + if (info->attrs[TASKSTATS_CMD_ATTR_PID]) { + u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]); + rc = fill_pid(pid, NULL, &stats); + if (rc < 0) + goto err; + + na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID); + NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, pid); + NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS, + stats); + } else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) { + u32 tgid = 
nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]); + rc = fill_tgid(tgid, NULL, &stats); + if (rc < 0) + goto err; + + na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID); + NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, tgid); + NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS, + stats); + } else { + rc = -EINVAL; + goto err; + } + + nla_nest_end(rep_skb, na); + + return send_reply(rep_skb, info->snd_pid); + +nla_put_failure: + return genlmsg_cancel(rep_skb, reply); +err: + nlmsg_free(rep_skb); + return rc; +} + +void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu) +{ + struct listener_list *listeners; + struct taskstats *tmp; + /* + * This is the cpu on which the task is exiting currently and will + * be the one for which the exit event is sent, even if the cpu + * on which this function is running changes later. + */ + *mycpu = raw_smp_processor_id(); + + *ptidstats = NULL; + tmp = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL); + if (!tmp) + return; + + listeners = &per_cpu(listener_array, *mycpu); + down_read(&listeners->sem); + if (!list_empty(&listeners->list)) { + *ptidstats = tmp; + tmp = NULL; + } + up_read(&listeners->sem); + kfree(tmp); +} + +/* Send pid data out on exit */ +void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats, + int group_dead, unsigned int mycpu) +{ + int rc; + struct sk_buff *rep_skb; + void *reply; + size_t size; + int is_thread_group; + struct nlattr *na; + unsigned long flags; + + if (!family_registered || !tidstats) + return; + + spin_lock_irqsave(&tsk->signal->stats_lock, flags); + is_thread_group = tsk->signal->stats ? 1 : 0; + spin_unlock_irqrestore(&tsk->signal->stats_lock, flags); + + rc = 0; + /* + * Size includes space for nested attributes + */ + size = nla_total_size(sizeof(u32)) + + nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); + + if (is_thread_group) + size = 2 * size; /* PID + STATS + TGID + STATS */ + + rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size); + if (rc < 0) + goto ret; + + rc = fill_pid(tsk->pid, tsk, tidstats); + if (rc < 0) + goto err_skb; + + na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID); + NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, (u32)tsk->pid); + NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS, + *tidstats); + nla_nest_end(rep_skb, na); + + if (!is_thread_group) + goto send; + + /* + * tsk has/had a thread group so fill the tsk->signal->stats structure + * Doesn't matter if tsk is the leader or the last group member leaving + */ + + fill_tgid_exit(tsk); + if (!group_dead) + goto send; + + na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID); + NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, (u32)tsk->tgid); + /* No locking needed for tsk->signal->stats since group is dead */ + NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS, + *tsk->signal->stats); + nla_nest_end(rep_skb, na); + +send: + send_cpu_listeners(rep_skb, mycpu); + return; + +nla_put_failure: + genlmsg_cancel(rep_skb, reply); + goto ret; +err_skb: + nlmsg_free(rep_skb); +ret: + return; +} + +static struct genl_ops taskstats_ops = { + .cmd = TASKSTATS_CMD_GET, + .doit = taskstats_user_cmd, + .policy = taskstats_cmd_get_policy, +}; + +/* Needed early in initialization */ +void __init taskstats_init_early(void) +{ + unsigned int i; + + taskstats_cache = kmem_cache_create("taskstats_cache", + sizeof(struct taskstats), + 0, SLAB_PANIC, NULL, NULL); + for_each_possible_cpu(i) { + INIT_LIST_HEAD(&(per_cpu(listener_array, i).list)); + 
init_rwsem(&(per_cpu(listener_array, i).sem)); + } +} + +static int __init taskstats_init(void) +{ + int rc; + + rc = genl_register_family(&family); + if (rc) + return rc; + + rc = genl_register_ops(&family, &taskstats_ops); + if (rc < 0) + goto err; + + family_registered = 1; + return 0; +err: + genl_unregister_family(&family); + return rc; +} + +/* + * late initcall ensures initialization of statistics collection + * mechanisms precedes initialization of the taskstats interface + */ +late_initcall(taskstats_init); diff --git a/kernel/timer.c b/kernel/timer.c index 2a87430a58d4..05809c2e2fd6 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -374,6 +374,7 @@ int del_timer_sync(struct timer_list *timer) int ret = try_to_del_timer_sync(timer); if (ret >= 0) return ret; + cpu_relax(); } } @@ -968,6 +969,7 @@ void __init timekeeping_init(void) } +static int timekeeping_suspended; /* * timekeeping_resume - Resumes the generic timekeeping subsystem. * @dev: unused @@ -983,6 +985,18 @@ static int timekeeping_resume(struct sys_device *dev) write_seqlock_irqsave(&xtime_lock, flags); /* restart the last cycle value */ clock->cycle_last = clocksource_read(clock); + clock->error = 0; + timekeeping_suspended = 0; + write_sequnlock_irqrestore(&xtime_lock, flags); + return 0; +} + +static int timekeeping_suspend(struct sys_device *dev, pm_message_t state) +{ + unsigned long flags; + + write_seqlock_irqsave(&xtime_lock, flags); + timekeeping_suspended = 1; write_sequnlock_irqrestore(&xtime_lock, flags); return 0; } @@ -990,6 +1004,7 @@ static int timekeeping_resume(struct sys_device *dev) /* sysfs resume/suspend bits for timekeeping */ static struct sysdev_class timekeeping_sysclass = { .resume = timekeeping_resume, + .suspend = timekeeping_suspend, set_kset_name("timekeeping"), }; @@ -1100,13 +1115,16 @@ static void update_wall_time(void) { cycle_t offset; - clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift; + /* Make sure we're fully resumed: */ + if (unlikely(timekeeping_suspended)) + return; #ifdef CONFIG_GENERIC_TIME offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask; #else offset = clock->cycle_interval; #endif + clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift; /* normally this loop will run just once, however in the * case of lost or late ticks, it will accumulate correctly. 
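
The new kernel/taskstats.c above only produces the statistics; consuming them is a user-space job done over generic netlink, either as per-pid/per-tgid queries (TASKSTATS_CMD_GET) or by registering a cpumask to receive exit records. As a rough sketch of the query path -- an assumption for illustration only, using the libnl-3 convenience library rather than the raw netlink sockets a minimal client would use -- a one-shot per-pid request could look like this (link against the libnl genl libraries, e.g. pkg-config libnl-genl-3.0):

        #include <stdio.h>
        #include <stdlib.h>
        #include <netlink/netlink.h>
        #include <netlink/genl/genl.h>
        #include <netlink/genl/ctrl.h>
        #include <linux/taskstats.h>

        int main(int argc, char **argv)
        {
                struct nl_sock *sk;
                struct nl_msg *msg;
                int family;

                if (argc < 2) {
                        fprintf(stderr, "usage: %s <pid>\n", argv[0]);
                        return 1;
                }

                /* Resolve the "TASKSTATS" generic netlink family registered
                 * by taskstats_init() and send a TASKSTATS_CMD_GET request
                 * carrying the pid of interest. */
                sk = nl_socket_alloc();
                genl_connect(sk);
                family = genl_ctrl_resolve(sk, TASKSTATS_GENL_NAME);

                msg = nlmsg_alloc();
                genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                            TASKSTATS_CMD_GET, TASKSTATS_GENL_VERSION);
                nla_put_u32(msg, TASKSTATS_CMD_ATTR_PID, atoi(argv[1]));
                nl_send_auto_complete(sk, msg);

                /* The kernel answers with a TASKSTATS_CMD_NEW message whose
                 * TASKSTATS_TYPE_AGGR_PID attribute nests the pid and a
                 * struct taskstats; a real tool would install a callback via
                 * nl_socket_modify_cb() and unpack the delay fields there.
                 * The default handlers below simply consume the reply. */
                nl_recvmsgs_default(sk);

                nlmsg_free(msg);
                nl_socket_free(sk);
                return 0;
        }

Error handling is omitted for brevity. Registering for per-cpu exit records works the same way, except the request carries TASKSTATS_CMD_ATTR_REGISTER_CPUMASK with a cpulist string (e.g. "0-3"); after that, send_cpu_listeners() above unicasts one record per exiting task to every listener registered for that CPU.
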
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index e5889b1a33ff..554ee688a9f8 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -158,7 +158,7 @@ config DEBUG_RWSEMS config DEBUG_LOCK_ALLOC bool "Lock debugging: detect incorrect freeing of live locks" - depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT select DEBUG_SPINLOCK select DEBUG_MUTEXES select DEBUG_RWSEMS @@ -173,7 +173,7 @@ config DEBUG_LOCK_ALLOC config PROVE_LOCKING bool "Lock debugging: prove locking correctness" - depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT select LOCKDEP select DEBUG_SPINLOCK select DEBUG_MUTEXES @@ -216,7 +216,7 @@ config PROVE_LOCKING config LOCKDEP bool - depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT select STACKTRACE select FRAME_POINTER select KALLSYMS @@ -224,13 +224,14 @@ config LOCKDEP config DEBUG_LOCKDEP bool "Lock dependency engine debugging" - depends on LOCKDEP + depends on DEBUG_KERNEL && LOCKDEP help If you say Y here, the lock dependency engine will do additional runtime checks to debug itself, at the price of more runtime overhead. config TRACE_IRQFLAGS + depends on DEBUG_KERNEL bool default y depends on TRACE_IRQFLAGS_SUPPORT @@ -256,6 +257,7 @@ config DEBUG_LOCKING_API_SELFTESTS config STACKTRACE bool + depends on DEBUG_KERNEL depends on STACKTRACE_SUPPORT config DEBUG_KOBJECT diff --git a/lib/idr.c b/lib/idr.c index 4d096819511a..16d2143fea48 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -38,14 +38,15 @@ static kmem_cache_t *idr_layer_cache; static struct idr_layer *alloc_layer(struct idr *idp) { struct idr_layer *p; + unsigned long flags; - spin_lock(&idp->lock); + spin_lock_irqsave(&idp->lock, flags); if ((p = idp->id_free)) { idp->id_free = p->ary[0]; idp->id_free_cnt--; p->ary[0] = NULL; } - spin_unlock(&idp->lock); + spin_unlock_irqrestore(&idp->lock, flags); return(p); } @@ -59,12 +60,14 @@ static void __free_layer(struct idr *idp, struct idr_layer *p) static void free_layer(struct idr *idp, struct idr_layer *p) { + unsigned long flags; + /* * Depends on the return element being zeroed. */ - spin_lock(&idp->lock); + spin_lock_irqsave(&idp->lock, flags); __free_layer(idp, p); - spin_unlock(&idp->lock); + spin_unlock_irqrestore(&idp->lock, flags); } /** @@ -168,6 +171,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) { struct idr_layer *p, *new; int layers, v, id; + unsigned long flags; id = starting_id; build_up: @@ -191,14 +195,14 @@ build_up: * The allocation failed. If we built part of * the structure tear it down. 
*/ - spin_lock(&idp->lock); + spin_lock_irqsave(&idp->lock, flags); for (new = p; p && p != idp->top; new = p) { p = p->ary[0]; new->ary[0] = NULL; new->bitmap = new->count = 0; __free_layer(idp, new); } - spin_unlock(&idp->lock); + spin_unlock_irqrestore(&idp->lock, flags); return -1; } new->ary[0] = p; diff --git a/mm/filemap.c b/mm/filemap.c index d087fc3d3281..b9a60c43b61a 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -849,8 +849,6 @@ static void shrink_readahead_size_eio(struct file *filp, return; ra->ra_pages /= 4; - printk(KERN_WARNING "Reducing readahead size to %luK\n", - ra->ra_pages << (PAGE_CACHE_SHIFT - 10)); } /** diff --git a/mm/memory.c b/mm/memory.c index dc0d82cf2a1c..109e9866237e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -47,6 +47,7 @@ #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/module.h> +#include <linux/delayacct.h> #include <linux/init.h> #include <asm/pgalloc.h> @@ -1549,9 +1550,9 @@ gotten: flush_cache_page(vma, address, pte_pfn(orig_pte)); entry = mk_pte(new_page, vma->vm_page_prot); entry = maybe_mkwrite(pte_mkdirty(entry), vma); + lazy_mmu_prot_update(entry); ptep_establish(vma, address, page_table, entry); update_mmu_cache(vma, address, entry); - lazy_mmu_prot_update(entry); lru_cache_add_active(new_page); page_add_new_anon_rmap(new_page, vma, address); @@ -1934,6 +1935,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, migration_entry_wait(mm, pmd, address); goto out; } + delayacct_set_flag(DELAYACCT_PF_SWAPIN); page = lookup_swap_cache(entry); if (!page) { swapin_readahead(entry, address, vma); @@ -1946,6 +1948,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (likely(pte_same(*page_table, orig_pte))) ret = VM_FAULT_OOM; + delayacct_clear_flag(DELAYACCT_PF_SWAPIN); goto unlock; } @@ -1955,6 +1958,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, grab_swap_token(); } + delayacct_clear_flag(DELAYACCT_PF_SWAPIN); mark_page_accessed(page); lock_page(page); diff --git a/mm/nommu.c b/mm/nommu.c index 5151c44a8257..c576df71e3bb 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1070,6 +1070,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, vma->vm_start = vma->vm_pgoff << PAGE_SHIFT; return 0; } +EXPORT_SYMBOL(remap_pfn_range); void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) { @@ -1090,6 +1091,7 @@ void unmap_mapping_range(struct address_space *mapping, int even_cows) { } +EXPORT_SYMBOL(unmap_mapping_range); /* * Check that a process has enough memory to allocate a new virtual diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 7b450798b458..266162d2ba28 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -340,7 +340,7 @@ void __vunmap(void *addr, int deallocate_pages) __free_page(area->pages[i]); } - if (area->nr_pages > PAGE_SIZE/sizeof(struct page *)) + if (area->flags & VM_VPAGES) vfree(area->pages); else kfree(area->pages); @@ -427,9 +427,10 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, area->nr_pages = nr_pages; /* Please note that the recursion is strictly bounded. 
*/ - if (array_size > PAGE_SIZE) + if (array_size > PAGE_SIZE) { pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node); - else + area->flags |= VM_VPAGES; + } else pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node); area->pages = pages; if (!area->pages) { diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 458031bfff55..18fcb9fa518d 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -67,10 +67,6 @@ static struct packet_type vlan_packet_type = { .func = vlan_skb_recv, /* VLAN receive method */ }; -/* Bits of netdev state that are propagated from real device to virtual */ -#define VLAN_LINK_STATE_MASK \ - ((1<<__LINK_STATE_PRESENT)|(1<<__LINK_STATE_NOCARRIER)|(1<<__LINK_STATE_DORMANT)) - /* End of global variables definitions. */ /* @@ -479,7 +475,9 @@ static struct net_device *register_vlan_device(const char *eth_IF_name, new_dev->flags = real_dev->flags; new_dev->flags &= ~IFF_UP; - new_dev->state = real_dev->state & ~(1<<__LINK_STATE_START); + new_dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) | + (1<<__LINK_STATE_DORMANT))) | + (1<<__LINK_STATE_PRESENT); /* need 4 bytes for extra VLAN header info, * hope the underlying device can handle it. @@ -542,12 +540,11 @@ static struct net_device *register_vlan_device(const char *eth_IF_name, * so it cannot "appear" on us. */ if (!grp) { /* need to add a new group */ - grp = kmalloc(sizeof(struct vlan_group), GFP_KERNEL); + grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL); if (!grp) goto out_free_unregister; /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */ - memset(grp, 0, sizeof(struct vlan_group)); grp->real_dev_ifindex = real_dev->ifindex; hlist_add_head_rcu(&grp->hlist, diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 5ee96d4b40e9..96dc6bb52d14 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -227,12 +227,11 @@ static void atif_drop_device(struct net_device *dev) static struct atalk_iface *atif_add_device(struct net_device *dev, struct atalk_addr *sa) { - struct atalk_iface *iface = kmalloc(sizeof(*iface), GFP_KERNEL); + struct atalk_iface *iface = kzalloc(sizeof(*iface), GFP_KERNEL); if (!iface) goto out; - memset(iface, 0, sizeof(*iface)); dev_hold(dev); iface->dev = dev; dev->atalk_ptr = iface; @@ -559,12 +558,11 @@ static int atrtr_create(struct rtentry *r, struct net_device *devhint) } if (!rt) { - rt = kmalloc(sizeof(*rt), GFP_ATOMIC); + rt = kzalloc(sizeof(*rt), GFP_ATOMIC); retval = -ENOBUFS; if (!rt) goto out_unlock; - memset(rt, 0, sizeof(*rt)); rt->next = atalk_routes; atalk_routes = rt; diff --git a/net/atm/br2684.c b/net/atm/br2684.c index a487233dc466..d00cca97eb33 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -508,10 +508,9 @@ Note: we do not have explicit unassign, but look at _push() if (copy_from_user(&be, arg, sizeof be)) return -EFAULT; - brvcc = kmalloc(sizeof(struct br2684_vcc), GFP_KERNEL); + brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL); if (!brvcc) return -ENOMEM; - memset(brvcc, 0, sizeof(struct br2684_vcc)); write_lock_irq(&devs_lock); net_dev = br2684_find_dev(&be.ifspec); if (net_dev == NULL) { diff --git a/net/atm/clip.c b/net/atm/clip.c index 2e62105d91bd..7ce7bfe3fbad 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -929,12 +929,11 @@ static int arp_seq_open(struct inode *inode, struct file *file) struct seq_file *seq; int rc = -EAGAIN; - state = kmalloc(sizeof(*state), GFP_KERNEL); + state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) { rc = -ENOMEM; goto out_kfree; } - memset(state, 0, 
sizeof(*state)); state->ns.neigh_sub_iter = clip_seq_sub_iter; rc = seq_open(file, &arp_seq_ops); diff --git a/net/atm/lec.c b/net/atm/lec.c index 4b68a18171cf..b4aa489849df 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -1811,12 +1811,11 @@ make_entry(struct lec_priv *priv, unsigned char *mac_addr) { struct lec_arp_table *to_return; - to_return = kmalloc(sizeof(struct lec_arp_table), GFP_ATOMIC); + to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC); if (!to_return) { printk("LEC: Arp entry kmalloc failed\n"); return NULL; } - memset(to_return, 0, sizeof(struct lec_arp_table)); memcpy(to_return->mac_addr, mac_addr, ETH_ALEN); init_timer(&to_return->timer); to_return->timer.function = lec_arp_expire_arp; diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 9aafe1e2f048..00704661e83f 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -258,10 +258,9 @@ static struct mpoa_client *alloc_mpc(void) { struct mpoa_client *mpc; - mpc = kmalloc(sizeof (struct mpoa_client), GFP_KERNEL); + mpc = kzalloc(sizeof (struct mpoa_client), GFP_KERNEL); if (mpc == NULL) return NULL; - memset(mpc, 0, sizeof(struct mpoa_client)); rwlock_init(&mpc->ingress_lock); rwlock_init(&mpc->egress_lock); mpc->next = mpcs; diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c index 76a7d8ff6c0e..19d5dfc0702f 100644 --- a/net/atm/pppoatm.c +++ b/net/atm/pppoatm.c @@ -287,10 +287,9 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg) if (be.encaps != PPPOATM_ENCAPS_AUTODETECT && be.encaps != PPPOATM_ENCAPS_VC && be.encaps != PPPOATM_ENCAPS_LLC) return -EINVAL; - pvcc = kmalloc(sizeof(*pvcc), GFP_KERNEL); + pvcc = kzalloc(sizeof(*pvcc), GFP_KERNEL); if (pvcc == NULL) return -ENOMEM; - memset(pvcc, 0, sizeof(*pvcc)); pvcc->atmvcc = atmvcc; pvcc->old_push = atmvcc->push; pvcc->old_pop = atmvcc->pop; diff --git a/net/atm/resources.c b/net/atm/resources.c index de25c6408b04..529f7e64aa2c 100644 --- a/net/atm/resources.c +++ b/net/atm/resources.c @@ -33,10 +33,9 @@ static struct atm_dev *__alloc_atm_dev(const char *type) { struct atm_dev *dev; - dev = kmalloc(sizeof(*dev), GFP_KERNEL); + dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return NULL; - memset(dev, 0, sizeof(*dev)); dev->type = type; dev->signal = ATM_PHY_SIG_UNKNOWN; dev->link_rate = ATM_OC3_PCR; diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c index 369a75b160f2..867d42537979 100644 --- a/net/ax25/sysctl_net_ax25.c +++ b/net/ax25/sysctl_net_ax25.c @@ -203,13 +203,11 @@ void ax25_register_sysctl(void) for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) ax25_table_size += sizeof(ctl_table); - if ((ax25_table = kmalloc(ax25_table_size, GFP_ATOMIC)) == NULL) { + if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) { spin_unlock_bh(&ax25_dev_lock); return; } - memset(ax25_table, 0x00, ax25_table_size); - for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) { ctl_table *child = kmalloc(sizeof(ax25_param_table), GFP_ATOMIC); if (!child) { diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 77eab8f4c7fd..332dd8f436ea 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -55,6 +55,7 @@ #define VERSION "1.8" static int disable_cfc = 0; +static int channel_mtu = -1; static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; static struct task_struct *rfcomm_thread; @@ -812,7 +813,10 @@ static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d pn->credits = 0; } 
- pn->mtu = htobs(d->mtu); + if (cr && channel_mtu >= 0) + pn->mtu = htobs(channel_mtu); + else + pn->mtu = htobs(d->mtu); *ptr = __fcs(buf); ptr++; @@ -1243,7 +1247,10 @@ static int rfcomm_apply_pn(struct rfcomm_dlc *d, int cr, struct rfcomm_pn *pn) d->priority = pn->priority; - d->mtu = s->mtu = btohs(pn->mtu); + d->mtu = btohs(pn->mtu); + + if (cr && d->mtu > s->mtu) + d->mtu = s->mtu; return 0; } @@ -1770,6 +1777,11 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s) s = rfcomm_session_add(nsock, BT_OPEN); if (s) { rfcomm_session_hold(s); + + /* We should adjust MTU on incoming sessions. + * L2CAP MTU minus UIH header and FCS. */ + s->mtu = min(l2cap_pi(nsock->sk)->omtu, l2cap_pi(nsock->sk)->imtu) - 5; + rfcomm_schedule(RFCOMM_SCHED_RX); } else sock_release(nsock); @@ -2087,6 +2099,9 @@ module_exit(rfcomm_exit); module_param(disable_cfc, bool, 0644); MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control"); +module_param(channel_mtu, int, 0644); +MODULE_PARM_DESC(channel_mtu, "Default MTU for the RFCOMM channel"); + module_param(l2cap_mtu, uint, 0644); MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection"); diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index 159fb8409824..4e4119a12139 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c @@ -162,12 +162,10 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (num > BR_MAX_PORTS) num = BR_MAX_PORTS; - indices = kmalloc(num*sizeof(int), GFP_KERNEL); + indices = kcalloc(num, sizeof(int), GFP_KERNEL); if (indices == NULL) return -ENOMEM; - memset(indices, 0, num*sizeof(int)); - get_port_ifindices(br, indices, num); if (copy_to_user((void __user *)args[1], indices, num*sizeof(int))) num = -EFAULT; @@ -327,11 +325,10 @@ static int old_deviceless(void __user *uarg) if (args[2] >= 2048) return -ENOMEM; - indices = kmalloc(args[2]*sizeof(int), GFP_KERNEL); + indices = kcalloc(args[2], sizeof(int), GFP_KERNEL); if (indices == NULL) return -ENOMEM; - memset(indices, 0, args[2]*sizeof(int)); args[2] = get_bridge_ifindices(indices, args[2]); ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int)) diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index cbc8a389a0a8..05b3de888243 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -61,6 +61,9 @@ static int brnf_filter_vlan_tagged = 1; #define brnf_filter_vlan_tagged 1 #endif +int brnf_deferred_hooks; +EXPORT_SYMBOL_GPL(brnf_deferred_hooks); + static __be16 inline vlan_proto(const struct sk_buff *skb) { return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; @@ -890,6 +893,8 @@ static unsigned int ip_sabotage_out(unsigned int hook, struct sk_buff **pskb, return NF_ACCEPT; else if (ip->version == 6 && !brnf_call_ip6tables) return NF_ACCEPT; + else if (!brnf_deferred_hooks) + return NF_ACCEPT; #endif if (hook == NF_IP_POST_ROUTING) return NF_ACCEPT; diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 27ce1683caf5..2797e2815418 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -437,7 +437,7 @@ static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr) { struct ethtool_pauseparam pauseparam; - if (!dev->ethtool_ops->get_pauseparam) + if (!dev->ethtool_ops->set_pauseparam) return -EOPNOTSUPP; if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 44f6a181a754..476aa3978504 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -257,11 +257,11 @@ nodata: } 
-static void skb_drop_fraglist(struct sk_buff *skb) +static void skb_drop_list(struct sk_buff **listp) { - struct sk_buff *list = skb_shinfo(skb)->frag_list; + struct sk_buff *list = *listp; - skb_shinfo(skb)->frag_list = NULL; + *listp = NULL; do { struct sk_buff *this = list; @@ -270,6 +270,11 @@ static void skb_drop_fraglist(struct sk_buff *skb) } while (list); } +static inline void skb_drop_fraglist(struct sk_buff *skb) +{ + skb_drop_list(&skb_shinfo(skb)->frag_list); +} + static void skb_clone_fraglist(struct sk_buff *skb) { struct sk_buff *list; @@ -830,41 +835,75 @@ free_skb: int ___pskb_trim(struct sk_buff *skb, unsigned int len) { + struct sk_buff **fragp; + struct sk_buff *frag; int offset = skb_headlen(skb); int nfrags = skb_shinfo(skb)->nr_frags; int i; + int err; + + if (skb_cloned(skb) && + unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) + return err; for (i = 0; i < nfrags; i++) { int end = offset + skb_shinfo(skb)->frags[i].size; - if (end > len) { - if (skb_cloned(skb)) { - if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) - return -ENOMEM; - } - if (len <= offset) { - put_page(skb_shinfo(skb)->frags[i].page); - skb_shinfo(skb)->nr_frags--; - } else { - skb_shinfo(skb)->frags[i].size = len - offset; - } + + if (end < len) { + offset = end; + continue; } - offset = end; + + if (len > offset) + skb_shinfo(skb)->frags[i++].size = len - offset; + + skb_shinfo(skb)->nr_frags = i; + + for (; i < nfrags; i++) + put_page(skb_shinfo(skb)->frags[i].page); + + if (skb_shinfo(skb)->frag_list) + skb_drop_fraglist(skb); + break; } - if (offset < len) { + for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); + fragp = &frag->next) { + int end = offset + frag->len; + + if (skb_shared(frag)) { + struct sk_buff *nfrag; + + nfrag = skb_clone(frag, GFP_ATOMIC); + if (unlikely(!nfrag)) + return -ENOMEM; + + nfrag->next = frag->next; + frag = nfrag; + *fragp = frag; + } + + if (end < len) { + offset = end; + continue; + } + + if (end > len && + unlikely((err = pskb_trim(frag, len - offset)))) + return err; + + if (frag->next) + skb_drop_list(&frag->next); + break; + } + + if (len > skb_headlen(skb)) { skb->data_len -= skb->len - len; skb->len = len; } else { - if (len <= skb_headlen(skb)) { - skb->len = len; - skb->data_len = 0; - skb->tail = skb->data + len; - if (skb_shinfo(skb)->frag_list && !skb_cloned(skb)) - skb_drop_fraglist(skb); - } else { - skb->data_len -= skb->len - len; - skb->len = len; - } + skb->len = len; + skb->data_len = 0; + skb->tail = skb->data + len; } return 0; diff --git a/net/core/user_dma.c b/net/core/user_dma.c index b7c98dbcdb81..248a6b666aff 100644 --- a/net/core/user_dma.c +++ b/net/core/user_dma.c @@ -29,6 +29,7 @@ #include <linux/socket.h> #include <linux/rtnetlink.h> /* for BUG_TRAP */ #include <net/tcp.h> +#include <net/netdma.h> #define NET_DMA_DEFAULT_COPYBREAK 4096 diff --git a/net/dccp/feat.h b/net/dccp/feat.h index 6048373c7186..b44c45504fb6 100644 --- a/net/dccp/feat.h +++ b/net/dccp/feat.h @@ -26,4 +26,6 @@ extern void dccp_feat_clean(struct dccp_minisock *dmsk); extern int dccp_feat_clone(struct sock *oldsk, struct sock *newsk); extern int dccp_feat_init(struct dccp_minisock *dmsk); +extern int dccp_feat_default_sequence_window; + #endif /* _DCCP_FEAT_H */ diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index c3073e7e81d3..7f56f7e8f571 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -504,8 +504,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ireq = inet_rsk(req); ireq->loc_addr = daddr; ireq->rmt_addr = saddr; - 
req->rcv_wnd = 100; /* Fake, option parsing will get the - right value */ + req->rcv_wnd = dccp_feat_default_sequence_window; ireq->opt = NULL; /* diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index ff42bc43263d..9f3d4d7cd0bf 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -31,6 +31,7 @@ #include "dccp.h" #include "ipv6.h" +#include "feat.h" /* Socket used for sending RSTs and ACKs */ static struct socket *dccp_v6_ctl_socket; @@ -707,8 +708,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) ireq = inet_rsk(req); ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr); ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr); - req->rcv_wnd = 100; /* Fake, option parsing will get the - right value */ + req->rcv_wnd = dccp_feat_default_sequence_window; ireq6->pktopts = NULL; if (ipv6_opt_accepted(sk, skb) || diff --git a/net/dccp/options.c b/net/dccp/options.c index c3cda1e39aa8..daf72bb671f0 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -29,6 +29,8 @@ int dccp_feat_default_ack_ratio = DCCPF_INITIAL_ACK_RATIO; int dccp_feat_default_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR; int dccp_feat_default_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT; +EXPORT_SYMBOL_GPL(dccp_feat_default_sequence_window); + void dccp_minisock_init(struct dccp_minisock *dmsk) { dmsk->dccpms_sequence_window = dccp_feat_default_sequence_window; diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 98a25208440d..476455fbdb03 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -413,11 +413,7 @@ static struct dn_ifaddr *dn_dev_alloc_ifa(void) { struct dn_ifaddr *ifa; - ifa = kmalloc(sizeof(*ifa), GFP_KERNEL); - - if (ifa) { - memset(ifa, 0, sizeof(*ifa)); - } + ifa = kzalloc(sizeof(*ifa), GFP_KERNEL); return ifa; } @@ -1105,10 +1101,9 @@ struct dn_dev *dn_dev_create(struct net_device *dev, int *err) return NULL; *err = -ENOBUFS; - if ((dn_db = kmalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL) + if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL) return NULL; - memset(dn_db, 0, sizeof(struct dn_dev)); memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms)); smp_wmb(); dev->dn_ptr = dn_db; diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c index 0375077391b7..fa20e2efcfc1 100644 --- a/net/decnet/dn_fib.c +++ b/net/decnet/dn_fib.c @@ -283,11 +283,10 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta goto err_inval; } - fi = kmalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL); + fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL); err = -ENOBUFS; if (fi == NULL) goto failure; - memset(fi, 0, sizeof(*fi)+nhs*sizeof(struct dn_fib_nh)); fi->fib_protocol = r->rtm_protocol; fi->fib_nhs = nhs; diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c index 5ce9c9e0565c..ff0ebe99137d 100644 --- a/net/decnet/dn_neigh.c +++ b/net/decnet/dn_neigh.c @@ -580,12 +580,11 @@ static int dn_neigh_seq_open(struct inode *inode, struct file *file) { struct seq_file *seq; int rc = -ENOMEM; - struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL); + struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) goto out; - memset(s, 0, sizeof(*s)); rc = seq_open(file, &dn_neigh_seq_ops); if (rc) goto out_kfree; diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index 22f321d9bf9d..6986be754ef2 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c @@ -151,10 +151,9 @@ int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) } } - new_r = kmalloc(sizeof(*new_r), 
GFP_KERNEL); + new_r = kzalloc(sizeof(*new_r), GFP_KERNEL); if (!new_r) return -ENOMEM; - memset(new_r, 0, sizeof(*new_r)); if (rta[RTA_SRC-1]) memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 2); diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c index 37d9d0a1ac8c..e926c952e363 100644 --- a/net/decnet/dn_table.c +++ b/net/decnet/dn_table.c @@ -158,12 +158,10 @@ static void dn_rehash_zone(struct dn_zone *dz) break; } - ht = kmalloc(new_divisor*sizeof(struct dn_fib_node*), GFP_KERNEL); - + ht = kcalloc(new_divisor, sizeof(struct dn_fib_node*), GFP_KERNEL); if (ht == NULL) return; - memset(ht, 0, new_divisor*sizeof(struct dn_fib_node *)); write_lock_bh(&dn_fib_tables_lock); old_ht = dz->dz_hash; dz->dz_hash = ht; @@ -184,11 +182,10 @@ static void dn_free_node(struct dn_fib_node *f) static struct dn_zone *dn_new_zone(struct dn_hash *table, int z) { int i; - struct dn_zone *dz = kmalloc(sizeof(struct dn_zone), GFP_KERNEL); + struct dn_zone *dz = kzalloc(sizeof(struct dn_zone), GFP_KERNEL); if (!dz) return NULL; - memset(dz, 0, sizeof(struct dn_zone)); if (z) { dz->dz_divisor = 16; dz->dz_hashmask = 0x0F; @@ -197,14 +194,12 @@ static struct dn_zone *dn_new_zone(struct dn_hash *table, int z) dz->dz_hashmask = 0; } - dz->dz_hash = kmalloc(dz->dz_divisor*sizeof(struct dn_fib_node *), GFP_KERNEL); - + dz->dz_hash = kcalloc(dz->dz_divisor, sizeof(struct dn_fib_node *), GFP_KERNEL); if (!dz->dz_hash) { kfree(dz); return NULL; } - memset(dz->dz_hash, 0, dz->dz_divisor*sizeof(struct dn_fib_node*)); dz->dz_order = z; dz->dz_mask = dnet_make_mask(z); diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 309ae4c6549a..4d66aac13483 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -673,12 +673,11 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg) edev = dev->ec_ptr; if (edev == NULL) { /* Magic up a new one. */ - edev = kmalloc(sizeof(struct ec_device), GFP_KERNEL); + edev = kzalloc(sizeof(struct ec_device), GFP_KERNEL); if (edev == NULL) { err = -ENOMEM; break; } - memset(edev, 0, sizeof(struct ec_device)); dev->ec_ptr = edev; } else net2dev_map[edev->net] = NULL; diff --git a/net/ieee80211/Kconfig b/net/ieee80211/Kconfig index dbb08528ddf5..f7e84e9d13ad 100644 --- a/net/ieee80211/Kconfig +++ b/net/ieee80211/Kconfig @@ -58,6 +58,7 @@ config IEEE80211_CRYPT_TKIP depends on IEEE80211 && NET_RADIO select CRYPTO select CRYPTO_MICHAEL_MIC + select CRC32 ---help--- Include software based cipher suites in support of IEEE 802.11i (aka TGi, WPA, WPA2, WPA-PSK, etc.) 
for use with TKIP enabled diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c index cb71d794a7d1..5ed0a98b2d76 100644 --- a/net/ieee80211/ieee80211_crypt.c +++ b/net/ieee80211/ieee80211_crypt.c @@ -110,11 +110,10 @@ int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops) unsigned long flags; struct ieee80211_crypto_alg *alg; - alg = kmalloc(sizeof(*alg), GFP_KERNEL); + alg = kzalloc(sizeof(*alg), GFP_KERNEL); if (alg == NULL) return -ENOMEM; - memset(alg, 0, sizeof(*alg)); alg->ops = ops; spin_lock_irqsave(&ieee80211_crypto_lock, flags); diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index 492647382ad0..ed90a8af1444 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c @@ -76,10 +76,9 @@ static void *ieee80211_ccmp_init(int key_idx) { struct ieee80211_ccmp_data *priv; - priv = kmalloc(sizeof(*priv), GFP_ATOMIC); + priv = kzalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; - memset(priv, 0, sizeof(*priv)); priv->key_idx = key_idx; priv->tfm = crypto_alloc_tfm("aes", 0); diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c index c5a87724aabe..0ebf235f6939 100644 --- a/net/ieee80211/ieee80211_crypt_wep.c +++ b/net/ieee80211/ieee80211_crypt_wep.c @@ -39,10 +39,9 @@ static void *prism2_wep_init(int keyidx) { struct prism2_wep_data *priv; - priv = kmalloc(sizeof(*priv), GFP_ATOMIC); + priv = kzalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; - memset(priv, 0, sizeof(*priv)); priv->key_idx = keyidx; priv->tfm = crypto_alloc_tfm("arc4", 0); diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index a78c4f845f66..5cb9cfd35397 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c @@ -369,11 +369,10 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, struct ieee80211_crypt_data *new_crypt; /* take WEP into use */ - new_crypt = kmalloc(sizeof(struct ieee80211_crypt_data), + new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data), GFP_KERNEL); if (new_crypt == NULL) return -ENOMEM; - memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); new_crypt->ops = ieee80211_get_crypto_ops("WEP"); if (!new_crypt->ops) { request_module("ieee80211_crypt_wep"); @@ -616,13 +615,11 @@ int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee, ieee80211_crypt_delayed_deinit(ieee, crypt); - new_crypt = (struct ieee80211_crypt_data *) - kmalloc(sizeof(*new_crypt), GFP_KERNEL); + new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL); if (new_crypt == NULL) { ret = -ENOMEM; goto done; } - memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); new_crypt->ops = ops; if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) new_crypt->priv = new_crypt->ops->init(idx); diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c index ebc33ca6e692..4cef39e171d0 100644 --- a/net/ieee80211/softmac/ieee80211softmac_auth.c +++ b/net/ieee80211/softmac/ieee80211softmac_auth.c @@ -116,6 +116,16 @@ ieee80211softmac_auth_queue(void *data) kfree(auth); } +/* Sends a response to an auth challenge (for shared key auth). 
*/ +static void +ieee80211softmac_auth_challenge_response(void *_aq) +{ + struct ieee80211softmac_auth_queue_item *aq = _aq; + + /* Send our response */ + ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); +} + /* Handle the auth response from the AP * This should be registered with ieee80211 as handle_auth */ @@ -197,24 +207,30 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) case IEEE80211SOFTMAC_AUTH_SHARED_CHALLENGE: /* Check to make sure we have a challenge IE */ data = (u8 *)auth->info_element; - if(*data++ != MFIE_TYPE_CHALLENGE){ + if (*data++ != MFIE_TYPE_CHALLENGE) { printkl(KERN_NOTICE PFX "Shared Key Authentication failed due to a missing challenge.\n"); break; } /* Save the challenge */ spin_lock_irqsave(&mac->lock, flags); net->challenge_len = *data++; - if(net->challenge_len > WLAN_AUTH_CHALLENGE_LEN) + if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN) net->challenge_len = WLAN_AUTH_CHALLENGE_LEN; - if(net->challenge != NULL) + if (net->challenge != NULL) kfree(net->challenge); net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC); memcpy(net->challenge, data, net->challenge_len); aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE; - spin_unlock_irqrestore(&mac->lock, flags); - /* Send our response */ - ieee80211softmac_send_mgt_frame(mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); + /* We reuse the work struct from the auth request here. + * It is safe to do so as each one is per-request, and + * at this point (dealing with authentication response) + * we have obviously already sent the initial auth + * request. */ + cancel_delayed_work(&aq->work); + INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq); + schedule_work(&aq->work); + spin_unlock_irqrestore(&mac->lock, flags); return 0; case IEEE80211SOFTMAC_AUTH_SHARED_PASS: kfree(net->challenge); diff --git a/net/ieee80211/softmac/ieee80211softmac_io.c b/net/ieee80211/softmac/ieee80211softmac_io.c index 8cc8b20f5cda..6ae5a1dc7956 100644 --- a/net/ieee80211/softmac/ieee80211softmac_io.c +++ b/net/ieee80211/softmac/ieee80211softmac_io.c @@ -96,8 +96,7 @@ ieee80211softmac_alloc_mgt(u32 size) if(size > IEEE80211_DATA_LEN) return NULL; /* Allocate the frame */ - data = kmalloc(size, GFP_ATOMIC); - memset(data, 0, size); + data = kzalloc(size, GFP_ATOMIC); return data; } diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 8e748be36c5a..1366bc6ce6a5 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -215,12 +215,10 @@ static int ah_init_state(struct xfrm_state *x) if (x->encap) goto error; - ahp = kmalloc(sizeof(*ahp), GFP_KERNEL); + ahp = kzalloc(sizeof(*ahp), GFP_KERNEL); if (ahp == NULL) return -ENOMEM; - memset(ahp, 0, sizeof(*ahp)); - ahp->key = x->aalg->alg_key; ahp->key_len = (x->aalg->alg_key_len+7)/8; ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 7b51b3bdb548..c8a3723bc001 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1372,12 +1372,11 @@ static int arp_seq_open(struct inode *inode, struct file *file) { struct seq_file *seq; int rc = -ENOMEM; - struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL); + struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) goto out; - memset(s, 0, sizeof(*s)); rc = seq_open(file, &arp_seq_ops); if (rc) goto out_kfree; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index a7c65e9e5ec9..a6cc31d911eb 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -93,10 +93,9 @@ static void 
devinet_sysctl_unregister(struct ipv4_devconf *p); static struct in_ifaddr *inet_alloc_ifa(void) { - struct in_ifaddr *ifa = kmalloc(sizeof(*ifa), GFP_KERNEL); + struct in_ifaddr *ifa = kzalloc(sizeof(*ifa), GFP_KERNEL); if (ifa) { - memset(ifa, 0, sizeof(*ifa)); INIT_RCU_HEAD(&ifa->rcu_head); } @@ -140,10 +139,9 @@ struct in_device *inetdev_init(struct net_device *dev) ASSERT_RTNL(); - in_dev = kmalloc(sizeof(*in_dev), GFP_KERNEL); + in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL); if (!in_dev) goto out; - memset(in_dev, 0, sizeof(*in_dev)); INIT_RCU_HEAD(&in_dev->rcu_head); memcpy(&in_dev->cnf, &ipv4_devconf_dflt, sizeof(in_dev->cnf)); in_dev->cnf.sysctl = NULL; diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 4e112738b3fa..fc2f8ce441de 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -316,12 +316,10 @@ static int esp_init_state(struct xfrm_state *x) if (x->ealg == NULL) goto error; - esp = kmalloc(sizeof(*esp), GFP_KERNEL); + esp = kzalloc(sizeof(*esp), GFP_KERNEL); if (esp == NULL) return -ENOMEM; - memset(esp, 0, sizeof(*esp)); - if (x->aalg) { struct xfrm_algo_desc *aalg_desc; diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c index 3c1d32ad35f2..72c633b357cf 100644 --- a/net/ipv4/fib_hash.c +++ b/net/ipv4/fib_hash.c @@ -204,11 +204,10 @@ static struct fn_zone * fn_new_zone(struct fn_hash *table, int z) { int i; - struct fn_zone *fz = kmalloc(sizeof(struct fn_zone), GFP_KERNEL); + struct fn_zone *fz = kzalloc(sizeof(struct fn_zone), GFP_KERNEL); if (!fz) return NULL; - memset(fz, 0, sizeof(struct fn_zone)); if (z) { fz->fz_divisor = 16; } else { @@ -1046,7 +1045,7 @@ static int fib_seq_open(struct inode *inode, struct file *file) { struct seq_file *seq; int rc = -ENOMEM; - struct fib_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); + struct fib_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) goto out; @@ -1057,7 +1056,6 @@ static int fib_seq_open(struct inode *inode, struct file *file) seq = file->private_data; seq->private = s; - memset(s, 0, sizeof(*s)); out: return rc; out_kfree: diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 773b12ba4e3c..79b04718bdfd 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -196,10 +196,9 @@ int inet_rtm_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) } } - new_r = kmalloc(sizeof(*new_r), GFP_KERNEL); + new_r = kzalloc(sizeof(*new_r), GFP_KERNEL); if (!new_r) return -ENOMEM; - memset(new_r, 0, sizeof(*new_r)); if (rta[RTA_SRC-1]) memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 4); diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 5f87533684d5..9be53a8e72c3 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -709,11 +709,10 @@ fib_create_info(const struct rtmsg *r, struct kern_rta *rta, goto failure; } - fi = kmalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); + fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); if (fi == NULL) goto failure; fib_info_cnt++; - memset(fi, 0, sizeof(*fi)+nhs*sizeof(struct fib_nh)); fi->fib_protocol = r->rtm_protocol; @@ -962,10 +961,6 @@ fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, rtm->rtm_protocol = fi->fib_protocol; if (fi->fib_priority) RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority); -#ifdef CONFIG_NET_CLS_ROUTE - if (fi->fib_nh[0].nh_tclassid) - RTA_PUT(skb, RTA_FLOW, 4, &fi->fib_nh[0].nh_tclassid); -#endif if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) goto rtattr_failure; if (fi->fib_prefsrc) @@ -975,6 +970,10 @@ fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, 
int event, RTA_PUT(skb, RTA_GATEWAY, 4, &fi->fib_nh->nh_gw); if (fi->fib_nh->nh_oif) RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif); +#ifdef CONFIG_NET_CLS_ROUTE + if (fi->fib_nh[0].nh_tclassid) + RTA_PUT(skb, RTA_FLOW, 4, &fi->fib_nh[0].nh_tclassid); +#endif } #ifdef CONFIG_IP_ROUTE_MULTIPATH if (fi->fib_nhs > 1) { @@ -993,6 +992,10 @@ fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, nhp->rtnh_ifindex = nh->nh_oif; if (nh->nh_gw) RTA_PUT(skb, RTA_GATEWAY, 4, &nh->nh_gw); +#ifdef CONFIG_NET_CLS_ROUTE + if (nh->nh_tclassid) + RTA_PUT(skb, RTA_FLOW, 4, &nh->nh_tclassid); +#endif nhp->rtnh_len = skb->tail - (unsigned char*)nhp; } endfor_nexthops(fi); mp_head->rta_type = RTA_MULTIPATH; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index d299c8e547d6..9f4b752f5a33 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1028,10 +1028,9 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im) * for deleted items allows change reports to use common code with * non-deleted or query-response MCA's. */ - pmc = kmalloc(sizeof(*pmc), GFP_KERNEL); + pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); if (!pmc) return; - memset(pmc, 0, sizeof(*pmc)); spin_lock_bh(&im->lock); pmc->interface = im->interface; in_dev_hold(in_dev); @@ -1529,10 +1528,9 @@ static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode, psf_prev = psf; } if (!psf) { - psf = kmalloc(sizeof(*psf), GFP_ATOMIC); + psf = kzalloc(sizeof(*psf), GFP_ATOMIC); if (!psf) return -ENOBUFS; - memset(psf, 0, sizeof(*psf)); psf->sf_inaddr = *psfsrc; if (psf_prev) { psf_prev->sf_next = psf; @@ -2380,7 +2378,7 @@ static int igmp_mc_seq_open(struct inode *inode, struct file *file) { struct seq_file *seq; int rc = -ENOMEM; - struct igmp_mc_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); + struct igmp_mc_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) goto out; @@ -2390,7 +2388,6 @@ static int igmp_mc_seq_open(struct inode *inode, struct file *file) seq = file->private_data; seq->private = s; - memset(s, 0, sizeof(*s)); out: return rc; out_kfree: @@ -2555,7 +2552,7 @@ static int igmp_mcf_seq_open(struct inode *inode, struct file *file) { struct seq_file *seq; int rc = -ENOMEM; - struct igmp_mcf_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); + struct igmp_mcf_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) goto out; @@ -2565,7 +2562,6 @@ static int igmp_mcf_seq_open(struct inode *inode, struct file *file) seq = file->private_data; seq->private = s; - memset(s, 0, sizeof(*s)); out: return rc; out_kfree: diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 8e7e41b66c79..492858e6faf0 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -909,11 +909,10 @@ static int __init inet_diag_init(void) sizeof(struct inet_diag_handler *)); int err = -ENOMEM; - inet_diag_table = kmalloc(inet_diag_table_size, GFP_KERNEL); + inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL); if (!inet_diag_table) goto out; - memset(inet_diag_table, 0, inet_diag_table_size); idiagnl = netlink_kernel_create(NETLINK_INET_DIAG, 0, inet_diag_rcv, THIS_MODULE); if (idiagnl == NULL) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 6ff9b10d9563..0f9b3a31997b 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -617,7 +617,6 @@ static int ipgre_rcv(struct sk_buff *skb) skb->mac.raw = skb->nh.raw; skb->nh.raw = __pskb_pull(skb, offset); skb_postpull_rcsum(skb, skb->h.raw, offset); - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); skb->pkt_type = PACKET_HOST; #ifdef 
CONFIG_NET_IPGRE_BROADCAST if (MULTICAST(iph->daddr)) { diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index e1a7dba2fa8a..212734ca238f 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -428,6 +428,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, goto drop; } + /* Remove any debris in the socket control block */ + memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); + return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL, ip_rcv_finish); diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index cbcae6544622..406056edc02b 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -256,7 +256,6 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb) if (!opt) { opt = &(IPCB(skb)->opt); - memset(opt, 0, sizeof(struct ip_options)); iph = skb->nh.raw; opt->optlen = ((struct iphdr *)iph)->ihl*4 - sizeof(struct iphdr); optptr = iph + sizeof(struct iphdr); diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index 8a8b5cf2f7fe..a0c28b2b756e 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -410,11 +410,10 @@ static int ipcomp_init_state(struct xfrm_state *x) goto out; err = -ENOMEM; - ipcd = kmalloc(sizeof(*ipcd), GFP_KERNEL); + ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL); if (!ipcd) goto out; - memset(ipcd, 0, sizeof(*ipcd)); x->props.header_len = 0; if (x->props.mode) x->props.header_len += sizeof(struct iphdr); diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 3291d5192aad..76ab50b0d6ef 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -487,7 +487,6 @@ static int ipip_rcv(struct sk_buff *skb) skb->mac.raw = skb->nh.raw; skb->nh.raw = skb->data; - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); skb->protocol = htons(ETH_P_IP); skb->pkt_type = PACKET_HOST; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index ba33f8621c67..85893eef6b16 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1461,7 +1461,6 @@ int pim_rcv_v1(struct sk_buff * skb) skb_pull(skb, (u8*)encap - skb->data); skb->nh.iph = (struct iphdr *)skb->data; skb->dev = reg_dev; - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); skb->protocol = htons(ETH_P_IP); skb->ip_summed = 0; skb->pkt_type = PACKET_HOST; @@ -1517,7 +1516,6 @@ static int pim_rcv(struct sk_buff * skb) skb_pull(skb, (u8*)encap - skb->data); skb->nh.iph = (struct iphdr *)skb->data; skb->dev = reg_dev; - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); skb->protocol = htons(ETH_P_IP); skb->ip_summed = 0; skb->pkt_type = PACKET_HOST; @@ -1580,6 +1578,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); if (cache==NULL) { + struct sk_buff *skb2; struct net_device *dev; int vif; @@ -1593,12 +1592,18 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) read_unlock(&mrt_lock); return -ENODEV; } - skb->nh.raw = skb_push(skb, sizeof(struct iphdr)); - skb->nh.iph->ihl = sizeof(struct iphdr)>>2; - skb->nh.iph->saddr = rt->rt_src; - skb->nh.iph->daddr = rt->rt_dst; - skb->nh.iph->version = 0; - err = ipmr_cache_unresolved(vif, skb); + skb2 = skb_clone(skb, GFP_ATOMIC); + if (!skb2) { + read_unlock(&mrt_lock); + return -ENOMEM; + } + + skb2->nh.raw = skb_push(skb2, sizeof(struct iphdr)); + skb2->nh.iph->ihl = sizeof(struct iphdr)>>2; + skb2->nh.iph->saddr = rt->rt_src; + skb2->nh.iph->daddr = rt->rt_dst; + skb2->nh.iph->version = 0; + err = ipmr_cache_unresolved(vif, skb2); read_unlock(&mrt_lock); return err; } diff --git a/net/ipv4/ipvs/ip_vs_ctl.c 
b/net/ipv4/ipvs/ip_vs_ctl.c index f28ec6882162..6a28fafe910c 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c @@ -735,12 +735,11 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest, if (atype != RTN_LOCAL && atype != RTN_UNICAST) return -EINVAL; - dest = kmalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); + dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); if (dest == NULL) { IP_VS_ERR("ip_vs_new_dest: kmalloc failed.\n"); return -ENOMEM; } - memset(dest, 0, sizeof(struct ip_vs_dest)); dest->protocol = svc->protocol; dest->vaddr = svc->addr; @@ -1050,14 +1049,12 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p) goto out_mod_dec; } - svc = (struct ip_vs_service *) - kmalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); + svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); if (svc == NULL) { IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n"); ret = -ENOMEM; goto out_err; } - memset(svc, 0, sizeof(struct ip_vs_service)); /* I'm the first user of the service */ atomic_set(&svc->usecnt, 1); @@ -1797,7 +1794,7 @@ static int ip_vs_info_open(struct inode *inode, struct file *file) { struct seq_file *seq; int rc = -ENOMEM; - struct ip_vs_iter *s = kmalloc(sizeof(*s), GFP_KERNEL); + struct ip_vs_iter *s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) goto out; @@ -1808,7 +1805,6 @@ static int ip_vs_info_open(struct inode *inode, struct file *file) seq = file->private_data; seq->private = s; - memset(s, 0, sizeof(*s)); out: return rc; out_kfree: diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c index 4c1940381ba0..7d68b80c4c19 100644 --- a/net/ipv4/ipvs/ip_vs_est.c +++ b/net/ipv4/ipvs/ip_vs_est.c @@ -123,11 +123,10 @@ int ip_vs_new_estimator(struct ip_vs_stats *stats) { struct ip_vs_estimator *est; - est = kmalloc(sizeof(*est), GFP_KERNEL); + est = kzalloc(sizeof(*est), GFP_KERNEL); if (est == NULL) return -ENOMEM; - memset(est, 0, sizeof(*est)); est->stats = stats; est->last_conns = stats->conns; est->cps = stats->cps<<10; diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c index af35235672d5..9a39e2969712 100644 --- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c +++ b/net/ipv4/netfilter/ip_conntrack_helper_h323.c @@ -1200,7 +1200,7 @@ static struct ip_conntrack_expect *find_expect(struct ip_conntrack *ct, tuple.dst.protonum = IPPROTO_TCP; exp = __ip_conntrack_expect_find(&tuple); - if (exp->master == ct) + if (exp && exp->master == ct) return exp; return NULL; } diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c index 7bd3c22003a2..7a9fa04a467a 100644 --- a/net/ipv4/netfilter/ip_conntrack_standalone.c +++ b/net/ipv4/netfilter/ip_conntrack_standalone.c @@ -534,6 +534,8 @@ static struct nf_hook_ops ip_conntrack_ops[] = { /* Sysctl support */ +int ip_conntrack_checksum = 1; + #ifdef CONFIG_SYSCTL /* From ip_conntrack_core.c */ @@ -568,8 +570,6 @@ extern unsigned int ip_ct_generic_timeout; static int log_invalid_proto_min = 0; static int log_invalid_proto_max = 255; -int ip_conntrack_checksum = 1; - static struct ctl_table_header *ip_ct_sysctl_header; static ctl_table ip_ct_sysctl_table[] = { diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c index 0b1b416759cc..18b7fbdccb61 100644 --- a/net/ipv4/netfilter/ip_nat_snmp_basic.c +++ b/net/ipv4/netfilter/ip_nat_snmp_basic.c @@ -1255,9 +1255,9 @@ static int help(struct sk_buff **pskb, struct udphdr *udph = (struct 
udphdr *)((u_int32_t *)iph + iph->ihl); /* SNMP replies and originating SNMP traps get mangled */ - if (udph->source == ntohs(SNMP_PORT) && dir != IP_CT_DIR_REPLY) + if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY) return NF_ACCEPT; - if (udph->dest == ntohs(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL) + if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL) return NF_ACCEPT; /* No NAT? */ diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index cbffeae3f565..d994c5f5744c 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -172,11 +172,10 @@ clusterip_config_init(struct ipt_clusterip_tgt_info *i, u_int32_t ip, struct clusterip_config *c; char buffer[16]; - c = kmalloc(sizeof(*c), GFP_ATOMIC); + c = kzalloc(sizeof(*c), GFP_ATOMIC); if (!c) return NULL; - memset(c, 0, sizeof(*c)); c->dev = dev; c->clusterip = ip; memcpy(&c->clustermac, &i->clustermac, ETH_ALEN); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index bd221ec3f81e..62b2762a2420 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -609,6 +609,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = skb->nh.iph->saddr; + sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index a891133f00e4..f6f39e814291 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1640,10 +1640,9 @@ static int tcp_seq_open(struct inode *inode, struct file *file) if (unlikely(afinfo == NULL)) return -EINVAL; - s = kmalloc(sizeof(*s), GFP_KERNEL); + s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; - memset(s, 0, sizeof(*s)); s->family = afinfo->family; s->seq_ops.start = tcp_seq_start; s->seq_ops.next = tcp_seq_next; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 9bfcddad695b..f136cec96d95 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1468,11 +1468,10 @@ static int udp_seq_open(struct inode *inode, struct file *file) struct udp_seq_afinfo *afinfo = PDE(inode)->data; struct seq_file *seq; int rc = -ENOMEM; - struct udp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); + struct udp_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) goto out; - memset(s, 0, sizeof(*s)); s->family = afinfo->family; s->seq_ops.start = udp_seq_start; s->seq_ops.next = udp_seq_next; diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index f8d880beb12f..13cafbe56ce3 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c @@ -92,7 +92,6 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) skb->mac.raw = memmove(skb->data - skb->mac_len, skb->mac.raw, skb->mac_len); skb->nh.raw = skb->data; - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); err = 0; out: diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index df8f051c0fce..25c2a9e03895 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -71,6 +71,8 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt goto out; } + memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); + /* * Store incoming device index. When the packet will * be queued, we cannot refer to skb->dev anymore. 
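The IPv4/IPv6 input changes above (ip_gre, ipip, ipmr, xfrm4 tunnel mode, ip_options, and the ip_rcv()/ipv6_rcv() entry points) share a single idea: rather than having every tunnel receive path clear IPCB(skb)->opt after decapsulation, the socket control block is wiped once at the point where each inbound packet enters the stack. A simplified, illustrative sketch of that pattern (not the exact kernel functions):

    #include <linux/skbuff.h>
    #include <linux/string.h>
    #include <net/ip.h>

    /* Illustrative helper: clear per-packet control-block state once on
     * receive, so later decapsulation handlers no longer need their own
     * memset of IPCB(skb)->opt. */
    static void example_clear_inet_cb(struct sk_buff *skb)
    {
            /* Remove any debris left over from earlier processing before the
             * packet is handed to netfilter and the rest of the IPv4 stack. */
            memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
    }

With the control block already clean on entry, ip_options_compile() no longer has to zero IPCB(skb)->opt itself before parsing options, and ipv6_rcv() gains the equivalent memset of IP6CB(skb).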
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index bc77c0e1a943..84d7ebdb9d21 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -567,10 +567,9 @@ static inline struct ipv6_txoptions *create_tel(__u8 encap_limit) int opt_len = sizeof(*opt) + 8; - if (!(opt = kmalloc(opt_len, GFP_ATOMIC))) { + if (!(opt = kzalloc(opt_len, GFP_ATOMIC))) { return NULL; } - memset(opt, 0, opt_len); opt->tot_len = opt_len; opt->dst0opt = (struct ipv6_opt_hdr *) (opt + 1); opt->opt_nflen = 8; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index fa1ce0ae123e..d57e61ce4a7d 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -411,6 +411,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, /* Copy the address. */ if (sin6) { sin6->sin6_family = AF_INET6; + sin6->sin6_port = 0; ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr); sin6->sin6_flowinfo = 0; sin6->sin6_scope_id = 0; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index c56aeece2bf5..836eecd7e62b 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -380,7 +380,6 @@ static int ipip6_rcv(struct sk_buff *skb) secpath_reset(skb); skb->mac.raw = skb->nh.raw; skb->nh.raw = skb->data; - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); IPCB(skb)->flags = 0; skb->protocol = htons(ETH_P_IPV6); skb->pkt_type = PACKET_HOST; diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 6b44fe8516c3..c8f9369c2a87 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -31,27 +31,6 @@ #include <linux/icmpv6.h> #include <linux/mutex.h> -#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG -# define X6TDEBUG 3 -#else -# define X6TDEBUG 1 -#endif - -#define X6TPRINTK(fmt, args...) printk(fmt, ## args) -#define X6TNOPRINTK(fmt, args...) do { ; } while(0) - -#if X6TDEBUG >= 1 -# define X6TPRINTK1 X6TPRINTK -#else -# define X6TPRINTK1 X6TNOPRINTK -#endif - -#if X6TDEBUG >= 3 -# define X6TPRINTK3 X6TPRINTK -#else -# define X6TPRINTK3 X6TNOPRINTK -#endif - /* * xfrm_tunnel_spi things are for allocating unique id ("spi") * per xfrm_address_t. 
@@ -62,15 +41,8 @@ struct xfrm6_tunnel_spi { xfrm_address_t addr; u32 spi; atomic_t refcnt; -#ifdef XFRM6_TUNNEL_SPI_MAGIC - u32 magic; -#endif }; -#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG -# define XFRM6_TUNNEL_SPI_MAGIC 0xdeadbeef -#endif - static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock); static u32 xfrm6_tunnel_spi; @@ -86,43 +58,15 @@ static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly; static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE]; static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE]; -#ifdef XFRM6_TUNNEL_SPI_MAGIC -static int x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi, - const char *name) -{ - if (unlikely(x6spi->magic != XFRM6_TUNNEL_SPI_MAGIC)) { - X6TPRINTK3(KERN_DEBUG "%s(): x6spi object " - "at %p has corrupted magic %08x " - "(should be %08x)\n", - name, x6spi, x6spi->magic, XFRM6_TUNNEL_SPI_MAGIC); - return -1; - } - return 0; -} -#else -static int inline x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi, - const char *name) -{ - return 0; -} -#endif - -#define X6SPI_CHECK_MAGIC(x6spi) x6spi_check_magic((x6spi), __FUNCTION__) - - static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr) { unsigned h; - X6TPRINTK3(KERN_DEBUG "%s(addr=%p)\n", __FUNCTION__, addr); - h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]; h ^= h >> 16; h ^= h >> 8; h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1; - X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, h); - return h; } @@ -136,19 +80,13 @@ static int xfrm6_tunnel_spi_init(void) { int i; - X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__); - xfrm6_tunnel_spi = 0; xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi", sizeof(struct xfrm6_tunnel_spi), 0, SLAB_HWCACHE_ALIGN, NULL, NULL); - if (!xfrm6_tunnel_spi_kmem) { - X6TPRINTK1(KERN_ERR - "%s(): failed to allocate xfrm6_tunnel_spi_kmem\n", - __FUNCTION__); + if (!xfrm6_tunnel_spi_kmem) return -ENOMEM; - } for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]); @@ -161,22 +99,16 @@ static void xfrm6_tunnel_spi_fini(void) { int i; - X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__); - for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) { if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i])) - goto err; + return; } for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) { if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i])) - goto err; + return; } kmem_cache_destroy(xfrm6_tunnel_spi_kmem); xfrm6_tunnel_spi_kmem = NULL; - return; -err: - X6TPRINTK1(KERN_ERR "%s(): table is not empty\n", __FUNCTION__); - return; } static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) @@ -184,19 +116,13 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) struct xfrm6_tunnel_spi *x6spi; struct hlist_node *pos; - X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr); - hlist_for_each_entry(x6spi, pos, &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], list_byaddr) { - if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { - X6SPI_CHECK_MAGIC(x6spi); - X6TPRINTK3(KERN_DEBUG "%s() = %p(%u)\n", __FUNCTION__, x6spi, x6spi->spi); + if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) return x6spi; - } } - X6TPRINTK3(KERN_DEBUG "%s() = NULL(0)\n", __FUNCTION__); return NULL; } @@ -205,8 +131,6 @@ u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) struct xfrm6_tunnel_spi *x6spi; u32 spi; - X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr); - read_lock_bh(&xfrm6_tunnel_spi_lock); x6spi = 
__xfrm6_tunnel_spi_lookup(saddr); spi = x6spi ? x6spi->spi : 0; @@ -223,8 +147,6 @@ static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) struct hlist_node *pos; unsigned index; - X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr); - if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN || xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX) xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN; @@ -258,18 +180,10 @@ try_next_2:; spi = 0; goto out; alloc_spi: - X6TPRINTK3(KERN_DEBUG "%s(): allocate new spi for " NIP6_FMT "\n", - __FUNCTION__, - NIP6(*(struct in6_addr *)saddr)); x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC); - if (!x6spi) { - X6TPRINTK1(KERN_ERR "%s(): kmem_cache_alloc() failed\n", - __FUNCTION__); + if (!x6spi) goto out; - } -#ifdef XFRM6_TUNNEL_SPI_MAGIC - x6spi->magic = XFRM6_TUNNEL_SPI_MAGIC; -#endif + memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); x6spi->spi = spi; atomic_set(&x6spi->refcnt, 1); @@ -278,9 +192,7 @@ alloc_spi: index = xfrm6_tunnel_spi_hash_byaddr(saddr); hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]); - X6SPI_CHECK_MAGIC(x6spi); out: - X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi); return spi; } @@ -289,8 +201,6 @@ u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) struct xfrm6_tunnel_spi *x6spi; u32 spi; - X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr); - write_lock_bh(&xfrm6_tunnel_spi_lock); x6spi = __xfrm6_tunnel_spi_lookup(saddr); if (x6spi) { @@ -300,8 +210,6 @@ u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) spi = __xfrm6_tunnel_alloc_spi(saddr); write_unlock_bh(&xfrm6_tunnel_spi_lock); - X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi); - return spi; } @@ -312,8 +220,6 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr) struct xfrm6_tunnel_spi *x6spi; struct hlist_node *pos, *n; - X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr); - write_lock_bh(&xfrm6_tunnel_spi_lock); hlist_for_each_entry_safe(x6spi, pos, n, @@ -321,12 +227,6 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr) list_byaddr) { if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { - X6TPRINTK3(KERN_DEBUG "%s(): x6spi object for " NIP6_FMT - " found at %p\n", - __FUNCTION__, - NIP6(*(struct in6_addr *)saddr), - x6spi); - X6SPI_CHECK_MAGIC(x6spi); if (atomic_dec_and_test(&x6spi->refcnt)) { hlist_del(&x6spi->list_byaddr); hlist_del(&x6spi->list_byspi); @@ -377,20 +277,14 @@ static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, case ICMPV6_ADDR_UNREACH: case ICMPV6_PORT_UNREACH: default: - X6TPRINTK3(KERN_DEBUG - "xfrm6_tunnel: Destination Unreach.\n"); break; } break; case ICMPV6_PKT_TOOBIG: - X6TPRINTK3(KERN_DEBUG - "xfrm6_tunnel: Packet Too Big.\n"); break; case ICMPV6_TIME_EXCEED: switch (code) { case ICMPV6_EXC_HOPLIMIT: - X6TPRINTK3(KERN_DEBUG - "xfrm6_tunnel: Too small Hoplimit.\n"); break; case ICMPV6_EXC_FRAGTIME: default: @@ -447,22 +341,14 @@ static struct xfrm6_tunnel xfrm6_tunnel_handler = { static int __init xfrm6_tunnel_init(void) { - X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__); - - if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) { - X6TPRINTK1(KERN_ERR - "xfrm6_tunnel init: can't add xfrm type\n"); + if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) return -EAGAIN; - } + if (xfrm6_tunnel_register(&xfrm6_tunnel_handler)) { - X6TPRINTK1(KERN_ERR - "xfrm6_tunnel init(): can't add handler\n"); xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); return -EAGAIN; } if (xfrm6_tunnel_spi_init() < 0) { - X6TPRINTK1(KERN_ERR - "xfrm6_tunnel init: failed to 
initialize spi\n"); xfrm6_tunnel_deregister(&xfrm6_tunnel_handler); xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); return -EAGAIN; @@ -472,15 +358,9 @@ static int __init xfrm6_tunnel_init(void) static void __exit xfrm6_tunnel_fini(void) { - X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__); - xfrm6_tunnel_spi_fini(); - if (xfrm6_tunnel_deregister(&xfrm6_tunnel_handler)) - X6TPRINTK1(KERN_ERR - "xfrm6_tunnel close: can't remove handler\n"); - if (xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6) < 0) - X6TPRINTK1(KERN_ERR - "xfrm6_tunnel close: can't remove xfrm type\n"); + xfrm6_tunnel_deregister(&xfrm6_tunnel_handler); + xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); } module_init(xfrm6_tunnel_init); diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 7fae48a53bff..17699eeb64d7 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -308,7 +308,7 @@ static void irda_connect_response(struct irda_sock *self) IRDA_ASSERT(self != NULL, return;); - skb = dev_alloc_skb(64); + skb = alloc_skb(64, GFP_ATOMIC); if (skb == NULL) { IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", __FUNCTION__); diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c index 9c4a902a9dba..ad6b6af3dd97 100644 --- a/net/irda/ircomm/ircomm_core.c +++ b/net/irda/ircomm/ircomm_core.c @@ -115,12 +115,10 @@ struct ircomm_cb *ircomm_open(notify_t *notify, __u8 service_type, int line) IRDA_ASSERT(ircomm != NULL, return NULL;); - self = kmalloc(sizeof(struct ircomm_cb), GFP_ATOMIC); + self = kzalloc(sizeof(struct ircomm_cb), GFP_ATOMIC); if (self == NULL) return NULL; - memset(self, 0, sizeof(struct ircomm_cb)); - self->notify = *notify; self->magic = IRCOMM_MAGIC; diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c index d9097207aed3..959874b6451f 100644 --- a/net/irda/ircomm/ircomm_lmp.c +++ b/net/irda/ircomm/ircomm_lmp.c @@ -81,7 +81,7 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self, /* Any userdata supplied? 
*/ if (userdata == NULL) { - tx_skb = dev_alloc_skb(64); + tx_skb = alloc_skb(64, GFP_ATOMIC); if (!tx_skb) return -ENOMEM; @@ -115,7 +115,7 @@ static int ircomm_lmp_disconnect_request(struct ircomm_cb *self, IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); if (!userdata) { - tx_skb = dev_alloc_skb(64); + tx_skb = alloc_skb(64, GFP_ATOMIC); if (!tx_skb) return -ENOMEM; diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c index 6009bab05091..a39f5735a90b 100644 --- a/net/irda/ircomm/ircomm_param.c +++ b/net/irda/ircomm/ircomm_param.c @@ -121,7 +121,7 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush) skb = self->ctrl_skb; if (!skb) { - skb = dev_alloc_skb(256); + skb = alloc_skb(256, GFP_ATOMIC); if (!skb) { spin_unlock_irqrestore(&self->spinlock, flags); return -ENOMEM; diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index b400f27851fc..3bcdb467efc5 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c @@ -379,12 +379,11 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) self = hashbin_lock_find(ircomm_tty, line, NULL); if (!self) { /* No, so make new instance */ - self = kmalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL); + self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL); if (self == NULL) { IRDA_ERROR("%s(), kmalloc failed!\n", __FUNCTION__); return -ENOMEM; } - memset(self, 0, sizeof(struct ircomm_tty_cb)); self->magic = IRCOMM_TTY_MAGIC; self->flow = FLOW_STOP; @@ -759,8 +758,9 @@ static int ircomm_tty_write(struct tty_struct *tty, } } else { /* Prepare a full sized frame */ - skb = dev_alloc_skb(self->max_data_size+ - self->max_header_size); + skb = alloc_skb(self->max_data_size+ + self->max_header_size, + GFP_ATOMIC); if (!skb) { spin_unlock_irqrestore(&self->spinlock, flags); return -ENOBUFS; diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c index ba40e5495f58..7e7a31798d8d 100644 --- a/net/irda/irda_device.c +++ b/net/irda/irda_device.c @@ -401,12 +401,10 @@ dongle_t *irda_device_dongle_init(struct net_device *dev, int type) } /* Allocate dongle info for this instance */ - dongle = kmalloc(sizeof(dongle_t), GFP_KERNEL); + dongle = kzalloc(sizeof(dongle_t), GFP_KERNEL); if (!dongle) goto out; - memset(dongle, 0, sizeof(dongle_t)); - /* Bind the registration info to this particular instance */ dongle->issue = reg; dongle->dev = dev; diff --git a/net/irda/iriap.c b/net/irda/iriap.c index a0472652a44e..61128aa05b40 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c @@ -345,7 +345,7 @@ static void iriap_disconnect_request(struct iriap_cb *self) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); - tx_skb = dev_alloc_skb(64); + tx_skb = alloc_skb(64, GFP_ATOMIC); if (tx_skb == NULL) { IRDA_DEBUG(0, "%s(), Could not allocate an sk_buff of length %d\n", __FUNCTION__, 64); @@ -396,7 +396,7 @@ int iriap_getvaluebyclass_request(struct iriap_cb *self, attr_len = strlen(attr); /* Up to IAS_MAX_ATTRIBNAME = 60 */ skb_len = self->max_header_size+2+name_len+1+attr_len+4; - tx_skb = dev_alloc_skb(skb_len); + tx_skb = alloc_skb(skb_len, GFP_ATOMIC); if (!tx_skb) return -ENOMEM; @@ -562,7 +562,8 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self, * value. We add 32 bytes because of the 6 bytes for the frame and * max 5 bytes for the value coding. 
*/ - tx_skb = dev_alloc_skb(value->len + self->max_header_size + 32); + tx_skb = alloc_skb(value->len + self->max_header_size + 32, + GFP_ATOMIC); if (!tx_skb) return; @@ -700,7 +701,7 @@ void iriap_send_ack(struct iriap_cb *self) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); - tx_skb = dev_alloc_skb(64); + tx_skb = alloc_skb(64, GFP_ATOMIC); if (!tx_skb) return; diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c index a73607450de1..da17395df05a 100644 --- a/net/irda/iriap_event.c +++ b/net/irda/iriap_event.c @@ -365,7 +365,7 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event, switch (event) { case IAP_LM_CONNECT_INDICATION: - tx_skb = dev_alloc_skb(64); + tx_skb = alloc_skb(64, GFP_ATOMIC); if (tx_skb == NULL) { IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__); return; diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c index 82e665c79991..a154b1d71c0f 100644 --- a/net/irda/irias_object.c +++ b/net/irda/irias_object.c @@ -82,13 +82,12 @@ struct ias_object *irias_new_object( char *name, int id) IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); - obj = kmalloc(sizeof(struct ias_object), GFP_ATOMIC); + obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC); if (obj == NULL) { IRDA_WARNING("%s(), Unable to allocate object!\n", __FUNCTION__); return NULL; } - memset(obj, 0, sizeof( struct ias_object)); obj->magic = IAS_OBJECT_MAGIC; obj->name = strndup(name, IAS_MAX_CLASSNAME); @@ -346,13 +345,12 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value, IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;); IRDA_ASSERT(name != NULL, return;); - attrib = kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC); + attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); if (attrib == NULL) { IRDA_WARNING("%s: Unable to allocate attribute!\n", __FUNCTION__); return; } - memset(attrib, 0, sizeof( struct ias_attrib)); attrib->magic = IAS_ATTRIB_MAGIC; attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); @@ -382,13 +380,12 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets, IRDA_ASSERT(name != NULL, return;); IRDA_ASSERT(octets != NULL, return;); - attrib = kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC); + attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); if (attrib == NULL) { IRDA_WARNING("%s: Unable to allocate attribute!\n", __FUNCTION__); return; } - memset(attrib, 0, sizeof( struct ias_attrib)); attrib->magic = IAS_ATTRIB_MAGIC; attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); @@ -416,13 +413,12 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value, IRDA_ASSERT(name != NULL, return;); IRDA_ASSERT(value != NULL, return;); - attrib = kmalloc(sizeof( struct ias_attrib), GFP_ATOMIC); + attrib = kzalloc(sizeof( struct ias_attrib), GFP_ATOMIC); if (attrib == NULL) { IRDA_WARNING("%s: Unable to allocate attribute!\n", __FUNCTION__); return; } - memset(attrib, 0, sizeof( struct ias_attrib)); attrib->magic = IAS_ATTRIB_MAGIC; attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); @@ -443,12 +439,11 @@ struct ias_value *irias_new_integer_value(int integer) { struct ias_value *value; - value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); + value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); if (value == NULL) { IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); return NULL; } - memset(value, 0, sizeof(struct ias_value)); value->type = IAS_INTEGER; value->len = 4; @@ -469,12 +464,11 @@ struct ias_value *irias_new_string_value(char *string) { struct 
ias_value *value; - value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); + value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); if (value == NULL) { IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); return NULL; } - memset( value, 0, sizeof( struct ias_value)); value->type = IAS_STRING; value->charset = CS_ASCII; @@ -495,12 +489,11 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len) { struct ias_value *value; - value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); + value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); if (value == NULL) { IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); return NULL; } - memset(value, 0, sizeof(struct ias_value)); value->type = IAS_OCT_SEQ; /* Check length */ @@ -522,12 +515,11 @@ struct ias_value *irias_new_missing_value(void) { struct ias_value *value; - value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); + value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); if (value == NULL) { IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); return NULL; } - memset(value, 0, sizeof(struct ias_value)); value->type = IAS_MISSING; value->len = 0; diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c index bd659dd545ac..7dd0a2fe1d20 100644 --- a/net/irda/irlan/irlan_common.c +++ b/net/irda/irlan/irlan_common.c @@ -636,7 +636,7 @@ void irlan_get_provider_info(struct irlan_cb *self) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); - skb = dev_alloc_skb(64); + skb = alloc_skb(64, GFP_ATOMIC); if (!skb) return; @@ -668,7 +668,7 @@ void irlan_open_data_channel(struct irlan_cb *self) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); - skb = dev_alloc_skb(64); + skb = alloc_skb(64, GFP_ATOMIC); if (!skb) return; @@ -704,7 +704,7 @@ void irlan_close_data_channel(struct irlan_cb *self) if (self->client.tsap_ctrl == NULL) return; - skb = dev_alloc_skb(64); + skb = alloc_skb(64, GFP_ATOMIC); if (!skb) return; @@ -739,7 +739,7 @@ static void irlan_open_unicast_addr(struct irlan_cb *self) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); - skb = dev_alloc_skb(128); + skb = alloc_skb(128, GFP_ATOMIC); if (!skb) return; @@ -777,7 +777,7 @@ void irlan_set_broadcast_filter(struct irlan_cb *self, int status) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); - skb = dev_alloc_skb(128); + skb = alloc_skb(128, GFP_ATOMIC); if (!skb) return; @@ -816,7 +816,7 @@ void irlan_set_multicast_filter(struct irlan_cb *self, int status) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); - skb = dev_alloc_skb(128); + skb = alloc_skb(128, GFP_ATOMIC); if (!skb) return; @@ -856,7 +856,7 @@ static void irlan_get_unicast_addr(struct irlan_cb *self) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); - skb = dev_alloc_skb(128); + skb = alloc_skb(128, GFP_ATOMIC); if (!skb) return; @@ -891,7 +891,7 @@ void irlan_get_media_char(struct irlan_cb *self) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); - skb = dev_alloc_skb(64); + skb = alloc_skb(64, GFP_ATOMIC); if (!skb) return; diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c index 39c202d1c374..9c0df86044d7 100644 --- a/net/irda/irlan/irlan_provider.c +++ b/net/irda/irlan/irlan_provider.c @@ -296,7 +296,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command, IRDA_ASSERT(self != NULL, return;); 
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); - skb = dev_alloc_skb(128); + skb = alloc_skb(128, GFP_ATOMIC); if (!skb) return; diff --git a/net/irda/irlap.c b/net/irda/irlap.c index cade355ac8af..e7852a07495e 100644 --- a/net/irda/irlap.c +++ b/net/irda/irlap.c @@ -116,11 +116,10 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos, IRDA_DEBUG(4, "%s()\n", __FUNCTION__); /* Initialize the irlap structure. */ - self = kmalloc(sizeof(struct irlap_cb), GFP_KERNEL); + self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL); if (self == NULL) return NULL; - memset(self, 0, sizeof(struct irlap_cb)); self->magic = LAP_MAGIC; /* Make a binding between the layers */ @@ -882,7 +881,7 @@ static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now) /* Change speed now, or just piggyback speed on frames */ if (now) { /* Send down empty frame to trigger speed change */ - skb = dev_alloc_skb(0); + skb = alloc_skb(0, GFP_ATOMIC); if (skb) irlap_queue_xmit(self, skb); } @@ -1222,7 +1221,7 @@ static int irlap_seq_open(struct inode *inode, struct file *file) { struct seq_file *seq; int rc = -ENOMEM; - struct irlap_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); + struct irlap_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) goto out; @@ -1238,7 +1237,6 @@ static int irlap_seq_open(struct inode *inode, struct file *file) seq = file->private_data; seq->private = s; - memset(s, 0, sizeof(*s)); out: return rc; out_kfree: diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c index 3e9a06abbdd0..ccb983bf0f4a 100644 --- a/net/irda/irlap_frame.c +++ b/net/irda/irlap_frame.c @@ -117,7 +117,7 @@ void irlap_send_snrm_frame(struct irlap_cb *self, struct qos_info *qos) IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Allocate frame */ - tx_skb = dev_alloc_skb(64); + tx_skb = alloc_skb(64, GFP_ATOMIC); if (!tx_skb) return; @@ -210,7 +210,7 @@ void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos) IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Allocate frame */ - tx_skb = dev_alloc_skb(64); + tx_skb = alloc_skb(64, GFP_ATOMIC); if (!tx_skb) return; @@ -250,7 +250,7 @@ void irlap_send_dm_frame( struct irlap_cb *self) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); - tx_skb = dev_alloc_skb(32); + tx_skb = alloc_skb(32, GFP_ATOMIC); if (!tx_skb) return; @@ -282,7 +282,7 @@ void irlap_send_disc_frame(struct irlap_cb *self) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); - tx_skb = dev_alloc_skb(16); + tx_skb = alloc_skb(16, GFP_ATOMIC); if (!tx_skb) return; @@ -315,7 +315,7 @@ void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s, IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(discovery != NULL, return;); - tx_skb = dev_alloc_skb(64); + tx_skb = alloc_skb(64, GFP_ATOMIC); if (!tx_skb) return; @@ -422,11 +422,10 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self, return; } - if ((discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) { + if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) { IRDA_WARNING("%s: kmalloc failed!\n", __FUNCTION__); return; } - memset(discovery, 0, sizeof(discovery_t)); discovery->data.daddr = info->daddr; discovery->data.saddr = self->saddr; @@ -576,7 +575,7 @@ void irlap_send_rr_frame(struct irlap_cb *self, int command) struct sk_buff *tx_skb; __u8 *frame; - tx_skb = dev_alloc_skb(16); + tx_skb = alloc_skb(16, GFP_ATOMIC); if (!tx_skb) return; @@ -601,7 +600,7 @@ void 
irlap_send_rd_frame(struct irlap_cb *self) struct sk_buff *tx_skb; __u8 *frame; - tx_skb = dev_alloc_skb(16); + tx_skb = alloc_skb(16, GFP_ATOMIC); if (!tx_skb) return; @@ -1215,7 +1214,7 @@ void irlap_send_test_frame(struct irlap_cb *self, __u8 caddr, __u32 daddr, struct test_frame *frame; __u8 *info; - tx_skb = dev_alloc_skb(cmd->len+sizeof(struct test_frame)); + tx_skb = alloc_skb(cmd->len+sizeof(struct test_frame), GFP_ATOMIC); if (!tx_skb) return; diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c index 129ad64c15bb..c440913dee14 100644 --- a/net/irda/irlmp.c +++ b/net/irda/irlmp.c @@ -78,10 +78,9 @@ int __init irlmp_init(void) { IRDA_DEBUG(1, "%s()\n", __FUNCTION__); /* Initialize the irlmp structure. */ - irlmp = kmalloc( sizeof(struct irlmp_cb), GFP_KERNEL); + irlmp = kzalloc( sizeof(struct irlmp_cb), GFP_KERNEL); if (irlmp == NULL) return -ENOMEM; - memset(irlmp, 0, sizeof(struct irlmp_cb)); irlmp->magic = LMP_MAGIC; @@ -160,12 +159,11 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify, __u8 pid) return NULL; /* Allocate new instance of a LSAP connection */ - self = kmalloc(sizeof(struct lsap_cb), GFP_ATOMIC); + self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC); if (self == NULL) { IRDA_ERROR("%s: can't allocate memory\n", __FUNCTION__); return NULL; } - memset(self, 0, sizeof(struct lsap_cb)); self->magic = LMP_LSAP_MAGIC; self->slsap_sel = slsap_sel; @@ -288,12 +286,11 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify) /* * Allocate new instance of a LSAP connection */ - lap = kmalloc(sizeof(struct lap_cb), GFP_KERNEL); + lap = kzalloc(sizeof(struct lap_cb), GFP_KERNEL); if (lap == NULL) { IRDA_ERROR("%s: unable to kmalloc\n", __FUNCTION__); return; } - memset(lap, 0, sizeof(struct lap_cb)); lap->irlap = irlap; lap->magic = LMP_LAP_MAGIC; @@ -395,7 +392,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel, /* Any userdata? 
*/ if (tx_skb == NULL) { - tx_skb = dev_alloc_skb(64); + tx_skb = alloc_skb(64, GFP_ATOMIC); if (!tx_skb) return -ENOMEM; diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index e53bf9e0053e..a1e502ff9070 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c @@ -476,11 +476,10 @@ dev_irnet_open(struct inode * inode, #endif /* SECURE_DEVIRNET */ /* Allocate a private structure for this IrNET instance */ - ap = kmalloc(sizeof(*ap), GFP_KERNEL); + ap = kzalloc(sizeof(*ap), GFP_KERNEL); DABORT(ap == NULL, -ENOMEM, FS_ERROR, "Can't allocate struct irnet...\n"); /* initialize the irnet structure */ - memset(ap, 0, sizeof(*ap)); ap->file = file; /* PPP channel setup */ diff --git a/net/irda/irttp.c b/net/irda/irttp.c index 49c51c5f1a86..42acf1cde737 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c @@ -85,10 +85,9 @@ static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 }; */ int __init irttp_init(void) { - irttp = kmalloc(sizeof(struct irttp_cb), GFP_KERNEL); + irttp = kzalloc(sizeof(struct irttp_cb), GFP_KERNEL); if (irttp == NULL) return -ENOMEM; - memset(irttp, 0, sizeof(struct irttp_cb)); irttp->magic = TTP_MAGIC; @@ -306,7 +305,8 @@ static inline void irttp_fragment_skb(struct tsap_cb *self, IRDA_DEBUG(2, "%s(), fragmenting ...\n", __FUNCTION__); /* Make new segment */ - frag = dev_alloc_skb(self->max_seg_size+self->max_header_size); + frag = alloc_skb(self->max_seg_size+self->max_header_size, + GFP_ATOMIC); if (!frag) return; @@ -389,12 +389,11 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify) return NULL; } - self = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC); + self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC); if (self == NULL) { IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __FUNCTION__); return NULL; } - memset(self, 0, sizeof(struct tsap_cb)); spin_lock_init(&self->lock); /* Initialise todo timer */ @@ -805,7 +804,7 @@ static inline void irttp_give_credit(struct tsap_cb *self) self->send_credit, self->avail_credit, self->remote_credit); /* Give credit to peer */ - tx_skb = dev_alloc_skb(64); + tx_skb = alloc_skb(64, GFP_ATOMIC); if (!tx_skb) return; @@ -1094,7 +1093,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel, /* Any userdata supplied? */ if (userdata == NULL) { - tx_skb = dev_alloc_skb(64); + tx_skb = alloc_skb(64, GFP_ATOMIC); if (!tx_skb) return -ENOMEM; @@ -1342,7 +1341,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size, /* Any userdata supplied? 
*/ if (userdata == NULL) { - tx_skb = dev_alloc_skb(64); + tx_skb = alloc_skb(64, GFP_ATOMIC); if (!tx_skb) return -ENOMEM; @@ -1541,7 +1540,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata, if (!userdata) { struct sk_buff *tx_skb; - tx_skb = dev_alloc_skb(64); + tx_skb = alloc_skb(64, GFP_ATOMIC); if (!tx_skb) return -ENOMEM; @@ -1876,7 +1875,7 @@ static int irttp_seq_open(struct inode *inode, struct file *file) int rc = -ENOMEM; struct irttp_iter_state *s; - s = kmalloc(sizeof(*s), GFP_KERNEL); + s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) goto out; @@ -1886,7 +1885,6 @@ static int irttp_seq_open(struct inode *inode, struct file *file) seq = file->private_data; seq->private = s; - memset(s, 0, sizeof(*s)); out: return rc; out_kfree: diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c index aea6616cea3d..d504eed416f6 100644 --- a/net/lapb/lapb_iface.c +++ b/net/lapb/lapb_iface.c @@ -115,14 +115,12 @@ static struct lapb_cb *lapb_devtostruct(struct net_device *dev) */ static struct lapb_cb *lapb_create_cb(void) { - struct lapb_cb *lapb = kmalloc(sizeof(*lapb), GFP_ATOMIC); + struct lapb_cb *lapb = kzalloc(sizeof(*lapb), GFP_ATOMIC); if (!lapb) goto out; - memset(lapb, 0x00, sizeof(*lapb)); - skb_queue_head_init(&lapb->write_queue); skb_queue_head_init(&lapb->ack_queue); diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c index bd242a49514a..d12413cff5bd 100644 --- a/net/llc/llc_core.c +++ b/net/llc/llc_core.c @@ -33,10 +33,9 @@ unsigned char llc_station_mac_sa[ETH_ALEN]; */ static struct llc_sap *llc_sap_alloc(void) { - struct llc_sap *sap = kmalloc(sizeof(*sap), GFP_ATOMIC); + struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC); if (sap) { - memset(sap, 0, sizeof(*sap)); sap->state = LLC_SAP_STATE_ACTIVE; memcpy(sap->laddr.mac, llc_station_mac_sa, ETH_ALEN); rwlock_init(&sap->sk_list.lock); diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 42a178aa30f9..a9894ddfd72a 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -386,8 +386,8 @@ config NETFILTER_XT_MATCH_REALM <file:Documentation/modules.txt>. If unsure, say `N'. 
config NETFILTER_XT_MATCH_SCTP - tristate '"sctp" protocol match support' - depends on NETFILTER_XTABLES + tristate '"sctp" protocol match support (EXPERIMENTAL)' + depends on NETFILTER_XTABLES && EXPERIMENTAL help With this option enabled, you will be able to use the `sctp' match in order to match on SCTP source/destination ports diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 5fcab2ef231f..4ef836699962 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -428,6 +428,8 @@ static struct file_operations ct_cpu_seq_fops = { /* Sysctl support */ +int nf_conntrack_checksum = 1; + #ifdef CONFIG_SYSCTL /* From nf_conntrack_core.c */ @@ -459,8 +461,6 @@ extern unsigned int nf_ct_generic_timeout; static int log_invalid_proto_min = 0; static int log_invalid_proto_max = 255; -int nf_conntrack_checksum = 1; - static struct ctl_table_header *nf_ct_sysctl_header; static ctl_table nf_ct_sysctl_table[] = { diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index bb6fcee452ca..662a869593bf 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -219,21 +219,20 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info, switch (verdict & NF_VERDICT_MASK) { case NF_ACCEPT: + case NF_STOP: info->okfn(skb); + case NF_STOLEN: break; - case NF_QUEUE: if (!nf_queue(&skb, elem, info->pf, info->hook, info->indev, info->outdev, info->okfn, verdict >> NF_VERDICT_BITS)) goto next_hook; break; + default: + kfree_skb(skb); } rcu_read_unlock(); - - if (verdict == NF_DROP) - kfree_skb(skb); - kfree(info); return; } diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c index 5fe4c9df17f5..a9f4f6f3c628 100644 --- a/net/netfilter/xt_physdev.c +++ b/net/netfilter/xt_physdev.c @@ -113,6 +113,21 @@ checkentry(const char *tablename, if (!(info->bitmask & XT_PHYSDEV_OP_MASK) || info->bitmask & ~XT_PHYSDEV_OP_MASK) return 0; + if (brnf_deferred_hooks == 0 && + info->bitmask & XT_PHYSDEV_OP_OUT && + (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || + info->invert & XT_PHYSDEV_OP_BRIDGED) && + hook_mask & ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_FORWARD) | + (1 << NF_IP_POST_ROUTING))) { + printk(KERN_WARNING "physdev match: using --physdev-out in the " + "OUTPUT, FORWARD and POSTROUTING chains for non-bridged " + "traffic is deprecated and breaks other things, it will " + "be removed in January 2007. See Documentation/" + "feature-removal-schedule.txt for details. This doesn't " + "affect you in case you're using it for purely bridged " + "traffic.\n"); + brnf_deferred_hooks = 1; + } return 1; } diff --git a/net/netfilter/xt_pkttype.c b/net/netfilter/xt_pkttype.c index 3ac703b5cb8f..d2f5320a80bf 100644 --- a/net/netfilter/xt_pkttype.c +++ b/net/netfilter/xt_pkttype.c @@ -9,6 +9,8 @@ #include <linux/skbuff.h> #include <linux/if_ether.h> #include <linux/if_packet.h> +#include <linux/in.h> +#include <linux/ip.h> #include <linux/netfilter/xt_pkttype.h> #include <linux/netfilter/x_tables.h> @@ -28,9 +30,17 @@ static int match(const struct sk_buff *skb, unsigned int protoff, int *hotdrop) { + u_int8_t type; const struct xt_pkttype_info *info = matchinfo; - return (skb->pkt_type == info->pkttype) ^ info->invert; + if (skb->pkt_type == PACKET_LOOPBACK) + type = (MULTICAST(skb->nh.iph->daddr) + ? 
PACKET_MULTICAST + : PACKET_BROADCAST); + else + type = skb->pkt_type; + + return (type == info->pkttype) ^ info->invert; } static struct xt_match pkttype_match = { diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 55c0adc8f115..b85c1f9f1288 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -562,10 +562,9 @@ static int netlink_alloc_groups(struct sock *sk) if (err) return err; - nlk->groups = kmalloc(NLGRPSZ(groups), GFP_KERNEL); + nlk->groups = kzalloc(NLGRPSZ(groups), GFP_KERNEL); if (nlk->groups == NULL) return -ENOMEM; - memset(nlk->groups, 0, NLGRPSZ(groups)); nlk->ngroups = groups; return 0; } @@ -1393,11 +1392,10 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, struct sock *sk; struct netlink_sock *nlk; - cb = kmalloc(sizeof(*cb), GFP_KERNEL); + cb = kzalloc(sizeof(*cb), GFP_KERNEL); if (cb == NULL) return -ENOBUFS; - memset(cb, 0, sizeof(*cb)); cb->dump = dump; cb->done = done; cb->nlh = nlh; @@ -1668,7 +1666,7 @@ static int netlink_seq_open(struct inode *inode, struct file *file) struct nl_seq_iter *iter; int err; - iter = kmalloc(sizeof(*iter), GFP_KERNEL); + iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) return -ENOMEM; @@ -1678,7 +1676,6 @@ static int netlink_seq_open(struct inode *inode, struct file *file) return err; } - memset(iter, 0, sizeof(*iter)); seq = file->private_data; seq->private = iter; return 0; @@ -1747,15 +1744,13 @@ static int __init netlink_proto_init(void) if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)) netlink_skb_parms_too_large(); - nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL); + nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL); if (!nl_table) { enomem: printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n"); return -ENOMEM; } - memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS); - if (num_physpages >= (128 * 1024)) max = num_physpages >> (21 - PAGE_SHIFT); else diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c index 573b572f8f91..93d2c55ad2d5 100644 --- a/net/rxrpc/connection.c +++ b/net/rxrpc/connection.c @@ -58,13 +58,12 @@ static inline int __rxrpc_create_connection(struct rxrpc_peer *peer, _enter("%p",peer); /* allocate and initialise a connection record */ - conn = kmalloc(sizeof(struct rxrpc_connection), GFP_KERNEL); + conn = kzalloc(sizeof(struct rxrpc_connection), GFP_KERNEL); if (!conn) { _leave(" = -ENOMEM"); return -ENOMEM; } - memset(conn, 0, sizeof(struct rxrpc_connection)); atomic_set(&conn->usage, 1); INIT_LIST_HEAD(&conn->link); @@ -535,13 +534,12 @@ int rxrpc_conn_newmsg(struct rxrpc_connection *conn, return -EINVAL; } - msg = kmalloc(sizeof(struct rxrpc_message), alloc_flags); + msg = kzalloc(sizeof(struct rxrpc_message), alloc_flags); if (!msg) { _leave(" = -ENOMEM"); return -ENOMEM; } - memset(msg, 0, sizeof(*msg)); atomic_set(&msg->usage, 1); INIT_LIST_HEAD(&msg->link); diff --git a/net/rxrpc/peer.c b/net/rxrpc/peer.c index ed38f5b17c1b..8a275157a3bb 100644 --- a/net/rxrpc/peer.c +++ b/net/rxrpc/peer.c @@ -58,13 +58,12 @@ static int __rxrpc_create_peer(struct rxrpc_transport *trans, __be32 addr, _enter("%p,%08x", trans, ntohl(addr)); /* allocate and initialise a peer record */ - peer = kmalloc(sizeof(struct rxrpc_peer), GFP_KERNEL); + peer = kzalloc(sizeof(struct rxrpc_peer), GFP_KERNEL); if (!peer) { _leave(" = -ENOMEM"); return -ENOMEM; } - memset(peer, 0, sizeof(struct rxrpc_peer)); atomic_set(&peer->usage, 1); INIT_LIST_HEAD(&peer->link); diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c index 
dbe6105e83a5..465efc86fccf 100644 --- a/net/rxrpc/transport.c +++ b/net/rxrpc/transport.c @@ -68,11 +68,10 @@ int rxrpc_create_transport(unsigned short port, _enter("%hu", port); - trans = kmalloc(sizeof(struct rxrpc_transport), GFP_KERNEL); + trans = kzalloc(sizeof(struct rxrpc_transport), GFP_KERNEL); if (!trans) return -ENOMEM; - memset(trans, 0, sizeof(struct rxrpc_transport)); atomic_set(&trans->usage, 1); INIT_LIST_HEAD(&trans->services); INIT_LIST_HEAD(&trans->link); @@ -312,13 +311,12 @@ static int rxrpc_incoming_msg(struct rxrpc_transport *trans, _enter(""); - msg = kmalloc(sizeof(struct rxrpc_message), GFP_KERNEL); + msg = kzalloc(sizeof(struct rxrpc_message), GFP_KERNEL); if (!msg) { _leave(" = -ENOMEM"); return -ENOMEM; } - memset(msg, 0, sizeof(*msg)); atomic_set(&msg->usage, 1); list_add_tail(&msg->link,msgq); diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 9affeeedf107..a2587b52e531 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -312,10 +312,9 @@ struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est, } *err = -ENOMEM; - a = kmalloc(sizeof(*a), GFP_KERNEL); + a = kzalloc(sizeof(*a), GFP_KERNEL); if (a == NULL) goto err_mod; - memset(a, 0, sizeof(*a)); /* backward compatibility for policer */ if (name == NULL) @@ -492,10 +491,9 @@ tcf_action_get_1(struct rtattr *rta, struct nlmsghdr *n, u32 pid, int *err) index = *(int *)RTA_DATA(tb[TCA_ACT_INDEX - 1]); *err = -ENOMEM; - a = kmalloc(sizeof(struct tc_action), GFP_KERNEL); + a = kzalloc(sizeof(struct tc_action), GFP_KERNEL); if (a == NULL) return NULL; - memset(a, 0, sizeof(struct tc_action)); *err = -EINVAL; a->ops = tc_lookup_action(tb[TCA_ACT_KIND - 1]); @@ -531,12 +529,11 @@ static struct tc_action *create_a(int i) { struct tc_action *act; - act = kmalloc(sizeof(*act), GFP_KERNEL); + act = kzalloc(sizeof(*act), GFP_KERNEL); if (act == NULL) { printk("create_a: failed to alloc!\n"); return NULL; } - memset(act, 0, sizeof(*act)); act->order = i; return act; } diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 58b3a8652042..f257475e0e0c 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -209,10 +209,9 @@ tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,int bind, int ref) s = sizeof(*opt) + p->nkeys * sizeof(struct tc_pedit_key); /* netlink spinlocks held above us - must use ATOMIC */ - opt = kmalloc(s, GFP_ATOMIC); + opt = kzalloc(s, GFP_ATOMIC); if (opt == NULL) return -ENOBUFS; - memset(opt, 0, s); memcpy(opt->keys, p->keys, p->nkeys * sizeof(struct tc_pedit_key)); opt->index = p->index; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 47e00bd9625e..da905d7b4b40 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -196,10 +196,9 @@ static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est, return ret; } - p = kmalloc(sizeof(*p), GFP_KERNEL); + p = kzalloc(sizeof(*p), GFP_KERNEL); if (p == NULL) return -ENOMEM; - memset(p, 0, sizeof(*p)); ret = ACT_P_CREATED; p->refcnt = 1; @@ -429,11 +428,10 @@ struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est) return p; } - p = kmalloc(sizeof(*p), GFP_KERNEL); + p = kzalloc(sizeof(*p), GFP_KERNEL); if (p == NULL) return NULL; - memset(p, 0, sizeof(*p)); p->refcnt = 1; spin_lock_init(&p->lock); p->stats_lock = &p->lock; diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index 61507f006b11..86cac49a0531 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -178,19 +178,17 @@ static int basic_change(struct tcf_proto 
*tp, unsigned long base, u32 handle, err = -ENOBUFS; if (head == NULL) { - head = kmalloc(sizeof(*head), GFP_KERNEL); + head = kzalloc(sizeof(*head), GFP_KERNEL); if (head == NULL) goto errout; - memset(head, 0, sizeof(*head)); INIT_LIST_HEAD(&head->flist); tp->root = head; } - f = kmalloc(sizeof(*f), GFP_KERNEL); + f = kzalloc(sizeof(*f), GFP_KERNEL); if (f == NULL) goto errout; - memset(f, 0, sizeof(*f)); err = -EINVAL; if (handle) diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index d41de91fc4f6..e6973d9b686d 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -267,20 +267,18 @@ static int fw_change(struct tcf_proto *tp, unsigned long base, return -EINVAL; if (head == NULL) { - head = kmalloc(sizeof(struct fw_head), GFP_KERNEL); + head = kzalloc(sizeof(struct fw_head), GFP_KERNEL); if (head == NULL) return -ENOBUFS; - memset(head, 0, sizeof(*head)); tcf_tree_lock(tp); tp->root = head; tcf_tree_unlock(tp); } - f = kmalloc(sizeof(struct fw_filter), GFP_KERNEL); + f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); if (f == NULL) return -ENOBUFS; - memset(f, 0, sizeof(*f)); f->id = handle; diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index c2e71900f7bd..d3aea730d4c8 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -396,10 +396,9 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base, h1 = to_hash(nhandle); if ((b = head->table[h1]) == NULL) { err = -ENOBUFS; - b = kmalloc(sizeof(struct route4_bucket), GFP_KERNEL); + b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL); if (b == NULL) goto errout; - memset(b, 0, sizeof(*b)); tcf_tree_lock(tp); head->table[h1] = b; @@ -475,20 +474,18 @@ static int route4_change(struct tcf_proto *tp, unsigned long base, err = -ENOBUFS; if (head == NULL) { - head = kmalloc(sizeof(struct route4_head), GFP_KERNEL); + head = kzalloc(sizeof(struct route4_head), GFP_KERNEL); if (head == NULL) goto errout; - memset(head, 0, sizeof(struct route4_head)); tcf_tree_lock(tp); tp->root = head; tcf_tree_unlock(tp); } - f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL); + f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL); if (f == NULL) goto errout; - memset(f, 0, sizeof(*f)); err = route4_set_parms(tp, base, f, handle, head, tb, tca[TCA_RATE-1], 1); diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index ba8741971629..6e230ecfba05 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -240,9 +240,8 @@ static int rsvp_init(struct tcf_proto *tp) { struct rsvp_head *data; - data = kmalloc(sizeof(struct rsvp_head), GFP_KERNEL); + data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL); if (data) { - memset(data, 0, sizeof(struct rsvp_head)); tp->root = data; return 0; } @@ -446,11 +445,10 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, goto errout2; err = -ENOBUFS; - f = kmalloc(sizeof(struct rsvp_filter), GFP_KERNEL); + f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL); if (f == NULL) goto errout2; - memset(f, 0, sizeof(*f)); h2 = 16; if (tb[TCA_RSVP_SRC-1]) { err = -EINVAL; @@ -532,10 +530,9 @@ insert: /* No session found. Create new one. 
*/ err = -ENOBUFS; - s = kmalloc(sizeof(struct rsvp_session), GFP_KERNEL); + s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL); if (s == NULL) goto errout; - memset(s, 0, sizeof(*s)); memcpy(s->dst, dst, sizeof(s->dst)); if (pinfo) { diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 7870e7bb0bac..5af8a59e1503 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -148,11 +148,10 @@ static int tcindex_init(struct tcf_proto *tp) struct tcindex_data *p; DPRINTK("tcindex_init(tp %p)\n",tp); - p = kmalloc(sizeof(struct tcindex_data),GFP_KERNEL); + p = kzalloc(sizeof(struct tcindex_data),GFP_KERNEL); if (!p) return -ENOMEM; - memset(p, 0, sizeof(*p)); p->mask = 0xffff; p->hash = DEFAULT_HASH_SIZE; p->fall_through = 1; @@ -296,16 +295,14 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle, err = -ENOMEM; if (!cp.perfect && !cp.h) { if (valid_perfect_hash(&cp)) { - cp.perfect = kmalloc(cp.hash * sizeof(*r), GFP_KERNEL); + cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL); if (!cp.perfect) goto errout; - memset(cp.perfect, 0, cp.hash * sizeof(*r)); balloc = 1; } else { - cp.h = kmalloc(cp.hash * sizeof(f), GFP_KERNEL); + cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL); if (!cp.h) goto errout; - memset(cp.h, 0, cp.hash * sizeof(f)); balloc = 2; } } @@ -316,10 +313,9 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle, r = tcindex_lookup(&cp, handle) ? : &new_filter_result; if (r == &new_filter_result) { - f = kmalloc(sizeof(*f), GFP_KERNEL); + f = kzalloc(sizeof(*f), GFP_KERNEL); if (!f) goto errout_alloc; - memset(f, 0, sizeof(*f)); } if (tb[TCA_TCINDEX_CLASSID-1]) { diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index d712edcd1bcf..eea366966740 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -307,23 +307,21 @@ static int u32_init(struct tcf_proto *tp) if (tp_c->q == tp->q) break; - root_ht = kmalloc(sizeof(*root_ht), GFP_KERNEL); + root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL); if (root_ht == NULL) return -ENOBUFS; - memset(root_ht, 0, sizeof(*root_ht)); root_ht->divisor = 0; root_ht->refcnt++; root_ht->handle = tp_c ? 
gen_new_htid(tp_c) : 0x80000000; root_ht->prio = tp->prio; if (tp_c == NULL) { - tp_c = kmalloc(sizeof(*tp_c), GFP_KERNEL); + tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL); if (tp_c == NULL) { kfree(root_ht); return -ENOBUFS; } - memset(tp_c, 0, sizeof(*tp_c)); tp_c->q = tp->q; tp_c->next = u32_list; u32_list = tp_c; @@ -571,10 +569,9 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, if (handle == 0) return -ENOMEM; } - ht = kmalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL); + ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL); if (ht == NULL) return -ENOBUFS; - memset(ht, 0, sizeof(*ht) + divisor*sizeof(void*)); ht->tp_c = tp_c; ht->refcnt = 0; ht->divisor = divisor; @@ -617,18 +614,16 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, s = RTA_DATA(tb[TCA_U32_SEL-1]); - n = kmalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); + n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); if (n == NULL) return -ENOBUFS; - memset(n, 0, sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key)); #ifdef CONFIG_CLS_U32_PERF - n->pf = kmalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL); + n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL); if (n->pf == NULL) { kfree(n); return -ENOBUFS; } - memset(n->pf, 0, sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64)); #endif memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 698372954f4d..61e3b740ab1a 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -773,10 +773,9 @@ static int em_meta_change(struct tcf_proto *tp, void *data, int len, TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX) goto errout; - meta = kmalloc(sizeof(*meta), GFP_KERNEL); + meta = kzalloc(sizeof(*meta), GFP_KERNEL); if (meta == NULL) goto errout; - memset(meta, 0, sizeof(*meta)); memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left)); memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right)); diff --git a/net/sched/ematch.c b/net/sched/ematch.c index 2405a86093a2..0fd0768a17c6 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c @@ -321,10 +321,9 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct rtattr *rta, list_len = RTA_PAYLOAD(rt_list); matches_len = tree_hdr->nmatches * sizeof(*em); - tree->matches = kmalloc(matches_len, GFP_KERNEL); + tree->matches = kzalloc(matches_len, GFP_KERNEL); if (tree->matches == NULL) goto errout; - memset(tree->matches, 0, matches_len); /* We do not use rtattr_parse_nested here because the maximum * number of attributes is unknown. 
This saves us the allocation diff --git a/net/sched/estimator.c b/net/sched/estimator.c index 5d3ae03e22a7..0ebc98e9be2d 100644 --- a/net/sched/estimator.c +++ b/net/sched/estimator.c @@ -139,11 +139,10 @@ int qdisc_new_estimator(struct tc_stats *stats, spinlock_t *stats_lock, struct r if (parm->interval < -2 || parm->interval > 3) return -EINVAL; - est = kmalloc(sizeof(*est), GFP_KERNEL); + est = kzalloc(sizeof(*est), GFP_KERNEL); if (est == NULL) return -ENOBUFS; - memset(est, 0, sizeof(*est)); est->interval = parm->interval + 2; est->stats = stats; est->stats_lock = stats_lock; diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 80b7f6a8d008..bac881bfe362 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -1926,10 +1926,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t } err = -ENOBUFS; - cl = kmalloc(sizeof(*cl), GFP_KERNEL); + cl = kzalloc(sizeof(*cl), GFP_KERNEL); if (cl == NULL) goto failure; - memset(cl, 0, sizeof(*cl)); cl->R_tab = rtab; rtab = NULL; cl->refcnt = 1; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index d735f51686a1..0834c2ee9174 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -432,10 +432,9 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops) size = QDISC_ALIGN(sizeof(*sch)); size += ops->priv_size + (QDISC_ALIGNTO - 1); - p = kmalloc(size, GFP_KERNEL); + p = kzalloc(size, GFP_KERNEL); if (!p) goto errout; - memset(p, 0, size); sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); sch->padded = (char *) sch - (char *) p; diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 0cafdd5feb1b..18e81a8ffb01 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -406,10 +406,9 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp, struct gred_sched_data *q; if (table->tab[dp] == NULL) { - table->tab[dp] = kmalloc(sizeof(*q), GFP_KERNEL); + table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL); if (table->tab[dp] == NULL) return -ENOMEM; - memset(table->tab[dp], 0, sizeof(*q)); } q = table->tab[dp]; diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 6b1b4a981e88..6a6735a2ed35 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1123,10 +1123,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (rsc == NULL && fsc == NULL) return -EINVAL; - cl = kmalloc(sizeof(struct hfsc_class), GFP_KERNEL); + cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL); if (cl == NULL) return -ENOBUFS; - memset(cl, 0, sizeof(struct hfsc_class)); if (rsc != NULL) hfsc_change_rsc(cl, rsc, 0); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 34afe41fa2f3..880a3394a51f 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -196,7 +196,7 @@ struct htb_class struct qdisc_rate_table *rate; /* rate table of the class itself */ struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */ long buffer,cbuffer; /* token bucket depth/rate */ - long mbuffer; /* max wait time */ + psched_tdiff_t mbuffer; /* max wait time */ long tokens,ctokens; /* current number of tokens */ psched_time_t t_c; /* checkpoint time */ }; @@ -1559,10 +1559,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, goto failure; } err = -ENOBUFS; - if ((cl = kmalloc(sizeof(*cl), GFP_KERNEL)) == NULL) + if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL) goto failure; - memset(cl, 0, sizeof(*cl)); cl->refcnt = 1; INIT_LIST_HEAD(&cl->sibling); INIT_LIST_HEAD(&cl->hlist); @@ -1601,7 +1600,7 @@ static int 
htb_change_class(struct Qdisc *sch, u32 classid, /* set class to be in HTB_CAN_SEND state */ cl->tokens = hopt->buffer; cl->ctokens = hopt->cbuffer; - cl->mbuffer = 60000000; /* 1min */ + cl->mbuffer = PSCHED_JIFFIE2US(HZ*60); /* 1min */ PSCHED_GET_TIME(cl->t_c); cl->cmode = HTB_CAN_SEND; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index c5bd8064e6d8..a08ec4c7c55d 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -148,7 +148,8 @@ static long tabledist(unsigned long mu, long sigma, static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); - struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb; + /* We don't fill cb now as skb_unshare() may invalidate it */ + struct netem_skb_cb *cb; struct sk_buff *skb2; int ret; int count = 1; @@ -200,6 +201,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); } + cb = (struct netem_skb_cb *)skb->cb; if (q->gap == 0 /* not doing reordering */ || q->counter < q->gap /* inside last reordering gap */ || q->reorder < get_crandom(&q->reorder_cor)) { diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 9d05e13e92f6..27329ce9c311 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -441,7 +441,8 @@ void sctp_assoc_set_primary(struct sctp_association *asoc, /* If the primary path is changing, assume that the * user wants to use this new path. */ - if (transport->state != SCTP_INACTIVE) + if ((transport->state == SCTP_ACTIVE) || + (transport->state == SCTP_UNKNOWN)) asoc->peer.active_path = transport; /* @@ -532,11 +533,11 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, port = addr->v4.sin_port; SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ", - " port: %d state:%s\n", + " port: %d state:%d\n", asoc, addr, addr->v4.sin_port, - peer_state == SCTP_UNKNOWN?"UNKNOWN":"ACTIVE"); + peer_state); /* Set the port if it has not been set yet. */ if (0 == asoc->peer.port) @@ -545,9 +546,12 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, /* Check to see if this is a duplicate. */ peer = sctp_assoc_lookup_paddr(asoc, addr); if (peer) { - if (peer_state == SCTP_ACTIVE && - peer->state == SCTP_UNKNOWN) - peer->state = SCTP_ACTIVE; + if (peer->state == SCTP_UNKNOWN) { + if (peer_state == SCTP_ACTIVE) + peer->state = SCTP_ACTIVE; + if (peer_state == SCTP_UNCONFIRMED) + peer->state = SCTP_UNCONFIRMED; + } return peer; } @@ -739,7 +743,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, list_for_each(pos, &asoc->peer.transport_addr_list) { t = list_entry(pos, struct sctp_transport, transports); - if (t->state == SCTP_INACTIVE) + if ((t->state == SCTP_INACTIVE) || + (t->state == SCTP_UNCONFIRMED)) continue; if (!first || t->last_time_heard > first->last_time_heard) { second = first; @@ -759,7 +764,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, * [If the primary is active but not most recent, bump the most * recently used transport.] 
*/ - if (asoc->peer.primary_path->state != SCTP_INACTIVE && + if (((asoc->peer.primary_path->state == SCTP_ACTIVE) || + (asoc->peer.primary_path->state == SCTP_UNKNOWN)) && first != asoc->peer.primary_path) { second = first; first = asoc->peer.primary_path; @@ -1054,7 +1060,7 @@ void sctp_assoc_update(struct sctp_association *asoc, transports); if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr)) sctp_assoc_add_peer(asoc, &trans->ipaddr, - GFP_ATOMIC, SCTP_ACTIVE); + GFP_ATOMIC, trans->state); } asoc->ctsn_ack_point = asoc->next_tsn - 1; @@ -1094,7 +1100,8 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc) /* Try to find an active transport. */ - if (t->state != SCTP_INACTIVE) { + if ((t->state == SCTP_ACTIVE) || + (t->state == SCTP_UNKNOWN)) { break; } else { /* Keep track of the next transport in case diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index 2b962627f631..2b9c12a170e5 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c @@ -146,7 +146,7 @@ void sctp_bind_addr_free(struct sctp_bind_addr *bp) /* Add an address to the bind address list in the SCTP_bind_addr structure. */ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, - gfp_t gfp) + __u8 use_as_src, gfp_t gfp) { struct sctp_sockaddr_entry *addr; @@ -163,6 +163,8 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, if (!addr->a.v4.sin_port) addr->a.v4.sin_port = bp->port; + addr->use_as_src = use_as_src; + INIT_LIST_HEAD(&addr->list); list_add_tail(&addr->list, &bp->address_list); SCTP_DBG_OBJCNT_INC(addr); @@ -274,7 +276,7 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list, } af->from_addr_param(&addr, rawaddr, port, 0); - retval = sctp_add_bind_addr(bp, &addr, gfp); + retval = sctp_add_bind_addr(bp, &addr, 1, gfp); if (retval) { /* Can't finish building the list, clean up. */ sctp_bind_addr_clean(bp); @@ -367,7 +369,7 @@ static int sctp_copy_one_addr(struct sctp_bind_addr *dest, (((AF_INET6 == addr->sa.sa_family) && (flags & SCTP_ADDR6_ALLOWED) && (flags & SCTP_ADDR6_PEERSUPP)))) - error = sctp_add_bind_addr(dest, addr, gfp); + error = sctp_add_bind_addr(dest, addr, 1, gfp); } return error; diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 67bd53070ee0..ffda1d680529 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -158,6 +158,12 @@ void sctp_endpoint_add_asoc(struct sctp_endpoint *ep, void sctp_endpoint_free(struct sctp_endpoint *ep) { ep->base.dead = 1; + + ep->base.sk->sk_state = SCTP_SS_CLOSED; + + /* Unlink this endpoint, so we can't find it again! */ + sctp_unhash_endpoint(ep); + sctp_endpoint_put(ep); } @@ -166,11 +172,6 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep) { SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); - ep->base.sk->sk_state = SCTP_SS_CLOSED; - - /* Unlink this endpoint, so we can't find it again! */ - sctp_unhash_endpoint(ep); - /* Free up the HMAC transform. 
*/ sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac); diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 8ef08070c8b6..99c0cefc04e0 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -290,7 +290,8 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc, sctp_read_lock(addr_lock); list_for_each(pos, &bp->address_list) { laddr = list_entry(pos, struct sctp_sockaddr_entry, list); - if ((laddr->a.sa.sa_family == AF_INET6) && + if ((laddr->use_as_src) && + (laddr->a.sa.sa_family == AF_INET6) && (scope <= sctp_scope(&laddr->a))) { bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); if (!baddr || (matchlen < bmatchlen)) { diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index e5faa351aaad..30b710c54e64 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -691,7 +691,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) if (!new_transport) { new_transport = asoc->peer.active_path; - } else if (new_transport->state == SCTP_INACTIVE) { + } else if ((new_transport->state == SCTP_INACTIVE) || + (new_transport->state == SCTP_UNCONFIRMED)) { /* If the chunk is Heartbeat or Heartbeat Ack, * send it to chunk->transport, even if it's * inactive. @@ -848,7 +849,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) */ new_transport = chunk->transport; if (!new_transport || - new_transport->state == SCTP_INACTIVE) + ((new_transport->state == SCTP_INACTIVE) || + (new_transport->state == SCTP_UNCONFIRMED))) new_transport = asoc->peer.active_path; /* Change packets if necessary. */ @@ -1464,7 +1466,8 @@ static void sctp_check_transmitted(struct sctp_outq *q, /* Mark the destination transport address as * active if it is not so marked. */ - if (transport->state == SCTP_INACTIVE) { + if ((transport->state == SCTP_INACTIVE) || + (transport->state == SCTP_UNCONFIRMED)) { sctp_assoc_control_transport( transport->asoc, transport, diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 816c033d7886..1ab03a27a76e 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -240,7 +240,7 @@ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, (((AF_INET6 == addr->a.sa.sa_family) && (copy_flags & SCTP_ADDR6_ALLOWED) && (copy_flags & SCTP_ADDR6_PEERSUPP)))) { - error = sctp_add_bind_addr(bp, &addr->a, + error = sctp_add_bind_addr(bp, &addr->a, 1, GFP_ATOMIC); if (error) goto end_copy; @@ -486,6 +486,8 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc, list_for_each(pos, &bp->address_list) { laddr = list_entry(pos, struct sctp_sockaddr_entry, list); + if (!laddr->use_as_src) + continue; sctp_v4_dst_saddr(&dst_saddr, dst, bp->port); if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a)) goto out_unlock; @@ -506,7 +508,8 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc, list_for_each(pos, &bp->address_list) { laddr = list_entry(pos, struct sctp_sockaddr_entry, list); - if (AF_INET == laddr->a.sa.sa_family) { + if ((laddr->use_as_src) && + (AF_INET == laddr->a.sa.sa_family)) { fl.fl4_src = laddr->a.v4.sin_addr.s_addr; if (!ip_route_output_key(&rt, &fl)) { dst = &rt->u.dst; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 2a8773691695..4f11f5858209 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1493,7 +1493,7 @@ no_hmac: /* Also, add the destination address. 
*/ if (list_empty(&retval->base.bind_addr.address_list)) { - sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, + sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1, GFP_ATOMIC); } @@ -2017,7 +2017,7 @@ static int sctp_process_param(struct sctp_association *asoc, af->from_addr_param(&addr, param.addr, asoc->peer.port, 0); scope = sctp_scope(peer_addr); if (sctp_in_scope(&addr, scope)) - if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_ACTIVE)) + if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED)) return 0; break; @@ -2418,7 +2418,7 @@ static __u16 sctp_process_asconf_param(struct sctp_association *asoc, * Due to Resource Shortage'. */ - peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_ACTIVE); + peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED); if (!peer) return SCTP_ERROR_RSRC_LOW; @@ -2565,6 +2565,7 @@ static int sctp_asconf_param_success(struct sctp_association *asoc, union sctp_addr_param *addr_param; struct list_head *pos; struct sctp_transport *transport; + struct sctp_sockaddr_entry *saddr; int retval = 0; addr_param = (union sctp_addr_param *) @@ -2578,7 +2579,11 @@ static int sctp_asconf_param_success(struct sctp_association *asoc, case SCTP_PARAM_ADD_IP: sctp_local_bh_disable(); sctp_write_lock(&asoc->base.addr_lock); - retval = sctp_add_bind_addr(bp, &addr, GFP_ATOMIC); + list_for_each(pos, &bp->address_list) { + saddr = list_entry(pos, struct sctp_sockaddr_entry, list); + if (sctp_cmp_addr_exact(&saddr->a, &addr)) + saddr->use_as_src = 1; + } sctp_write_unlock(&asoc->base.addr_lock); sctp_local_bh_enable(); break; @@ -2591,6 +2596,7 @@ static int sctp_asconf_param_success(struct sctp_association *asoc, list_for_each(pos, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); + dst_release(transport->dst); sctp_transport_route(transport, NULL, sctp_sk(asoc->base.sk)); } diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index c5beb2ad7ef7..9c10bdec1afe 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -430,7 +430,11 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc, /* The check for association's overall error counter exceeding the * threshold is done in the state function. */ - asoc->overall_error_count++; + /* When probing UNCONFIRMED addresses, the association overall + * error count is NOT incremented + */ + if (transport->state != SCTP_UNCONFIRMED) + asoc->overall_error_count++; if (transport->state != SCTP_INACTIVE && (transport->error_count++ >= transport->pathmaxrxt)) { @@ -610,7 +614,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, /* Mark the destination transport address as active if it is not so * marked. */ - if (t->state == SCTP_INACTIVE) + if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP, SCTP_HEARTBEAT_SUCCESS); @@ -620,6 +624,10 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, */ hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); + + /* Update the heartbeat timer. 
*/ + if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) + sctp_transport_hold(t); } /* Helper function to do a transport reset at the expiry of the hearbeat diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 9e58144f4851..ead3f1b0ea3d 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -846,6 +846,7 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep, hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t)); hbinfo.daddr = transport->ipaddr; hbinfo.sent_at = jiffies; + hbinfo.hb_nonce = transport->hb_nonce; /* Send a heartbeat to our peer. */ paylen = sizeof(sctp_sender_hb_info_t); @@ -1048,6 +1049,10 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep, return SCTP_DISPOSITION_DISCARD; } + /* Validate the 64-bit random nonce. */ + if (hbinfo->hb_nonce != link->hb_nonce) + return SCTP_DISPOSITION_DISCARD; + max_interval = link->hbinterval + link->rto; /* Check if the timestamp looks valid. */ @@ -5278,7 +5283,6 @@ static int sctp_eat_data(const struct sctp_association *asoc, datalen -= sizeof(sctp_data_chunk_t); deliver = SCTP_CMD_CHUNK_ULP; - chunk->data_accepted = 1; /* Think about partial delivery. */ if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) { @@ -5357,6 +5361,8 @@ static int sctp_eat_data(const struct sctp_association *asoc, if (SCTP_CMD_CHUNK_ULP == deliver) sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); + chunk->data_accepted = 1; + /* Note: Some chunks may get overcounted (if we drop) or overcounted * if we renege and the chunk arrives again. */ diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 0a2c71d0d8aa..54722e622e6d 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -369,7 +369,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) /* Use GFP_ATOMIC since BHs are disabled. */ addr->v4.sin_port = ntohs(addr->v4.sin_port); - ret = sctp_add_bind_addr(bp, addr, GFP_ATOMIC); + ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC); addr->v4.sin_port = htons(addr->v4.sin_port); sctp_write_unlock(&ep->base.addr_lock); sctp_local_bh_enable(); @@ -491,6 +491,7 @@ static int sctp_send_asconf_add_ip(struct sock *sk, struct sctp_chunk *chunk; struct sctp_sockaddr_entry *laddr; union sctp_addr *addr; + union sctp_addr saveaddr; void *addr_buf; struct sctp_af *af; struct list_head *pos; @@ -558,14 +559,26 @@ static int sctp_send_asconf_add_ip(struct sock *sk, } retval = sctp_send_asconf(asoc, chunk); + if (retval) + goto out; - /* FIXME: After sending the add address ASCONF chunk, we - * cannot append the address to the association's binding - * address list, because the new address may be used as the - * source of a message sent to the peer before the ASCONF - * chunk is received by the peer. So we should wait until - * ASCONF_ACK is received. + /* Add the new addresses to the bind address list with + * use_as_src set to 0. 
*/ + sctp_local_bh_disable(); + sctp_write_lock(&asoc->base.addr_lock); + addr_buf = addrs; + for (i = 0; i < addrcnt; i++) { + addr = (union sctp_addr *)addr_buf; + af = sctp_get_af_specific(addr->v4.sin_family); + memcpy(&saveaddr, addr, af->sockaddr_len); + saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port); + retval = sctp_add_bind_addr(bp, &saveaddr, 0, + GFP_ATOMIC); + addr_buf += af->sockaddr_len; + } + sctp_write_unlock(&asoc->base.addr_lock); + sctp_local_bh_enable(); } out: @@ -676,12 +689,15 @@ static int sctp_send_asconf_del_ip(struct sock *sk, struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc; + struct sctp_transport *transport; struct sctp_bind_addr *bp; struct sctp_chunk *chunk; union sctp_addr *laddr; + union sctp_addr saveaddr; void *addr_buf; struct sctp_af *af; - struct list_head *pos; + struct list_head *pos, *pos1; + struct sctp_sockaddr_entry *saddr; int i; int retval = 0; @@ -748,14 +764,42 @@ static int sctp_send_asconf_del_ip(struct sock *sk, goto out; } - retval = sctp_send_asconf(asoc, chunk); + /* Reset use_as_src flag for the addresses in the bind address + * list that are to be deleted. + */ + sctp_local_bh_disable(); + sctp_write_lock(&asoc->base.addr_lock); + addr_buf = addrs; + for (i = 0; i < addrcnt; i++) { + laddr = (union sctp_addr *)addr_buf; + af = sctp_get_af_specific(laddr->v4.sin_family); + memcpy(&saveaddr, laddr, af->sockaddr_len); + saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port); + list_for_each(pos1, &bp->address_list) { + saddr = list_entry(pos1, + struct sctp_sockaddr_entry, + list); + if (sctp_cmp_addr_exact(&saddr->a, &saveaddr)) + saddr->use_as_src = 0; + } + addr_buf += af->sockaddr_len; + } + sctp_write_unlock(&asoc->base.addr_lock); + sctp_local_bh_enable(); - /* FIXME: After sending the delete address ASCONF chunk, we - * cannot remove the addresses from the association's bind - * address list, because there maybe some packet send to - * the delete addresses, so we should wait until ASCONF_ACK - * packet is received. + /* Update the route and saddr entries for all the transports + * as some of the addresses in the bind address list are + * about to be deleted and cannot be used as source addresses. 
*/ + list_for_each(pos1, &asoc->peer.transport_addr_list) { + transport = list_entry(pos1, struct sctp_transport, + transports); + dst_release(transport->dst); + sctp_transport_route(transport, NULL, + sctp_sk(asoc->base.sk)); + } + + retval = sctp_send_asconf(asoc, chunk); } out: return retval; @@ -4977,7 +5021,7 @@ static struct sctp_bind_bucket *sctp_bucket_create( /* Caller must hold hashbucket lock for this tb with local BH disabled */ static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) { - if (hlist_empty(&pp->owner)) { + if (pp && hlist_empty(&pp->owner)) { if (pp->next) pp->next->pprev = pp->pprev; *(pp->pprev) = pp->next; diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 160f62ad1cc5..2763aa93de1a 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -49,6 +49,7 @@ */ #include <linux/types.h> +#include <linux/random.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> @@ -85,7 +86,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, peer->init_sent_count = 0; - peer->state = SCTP_ACTIVE; peer->param_flags = SPP_HB_DISABLE | SPP_PMTUD_ENABLE | SPP_SACKDELAY_ENABLE; @@ -109,6 +109,9 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, peer->hb_timer.function = sctp_generate_heartbeat_event; peer->hb_timer.data = (unsigned long)peer; + /* Initialize the 64-bit random nonce sent with heartbeat. */ + get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); + atomic_set(&peer->refcnt, 1); peer->dead = 0; @@ -517,7 +520,9 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport, unsigned long sctp_transport_timeout(struct sctp_transport *t) { unsigned long timeout; - timeout = t->hbinterval + t->rto + sctp_jitter(t->rto); + timeout = t->rto + sctp_jitter(t->rto); + if (t->state != SCTP_UNCONFIRMED) + timeout += t->hbinterval; timeout += jiffies; return timeout; } diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 519ebc17c028..4a9aa9393b97 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -225,9 +225,8 @@ gss_alloc_context(void) { struct gss_cl_ctx *ctx; - ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (ctx != NULL) { - memset(ctx, 0, sizeof(*ctx)); ctx->gc_proc = RPC_GSS_PROC_DATA; ctx->gc_seq = 1; /* NetApp 6.4R1 doesn't accept seq. no. 
0 */ spin_lock_init(&ctx->gc_seq_lock); @@ -391,9 +390,8 @@ gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid) { struct gss_upcall_msg *gss_msg; - gss_msg = kmalloc(sizeof(*gss_msg), GFP_KERNEL); + gss_msg = kzalloc(sizeof(*gss_msg), GFP_KERNEL); if (gss_msg != NULL) { - memset(gss_msg, 0, sizeof(*gss_msg)); INIT_LIST_HEAD(&gss_msg->list); rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq"); init_waitqueue_head(&gss_msg->waitqueue); @@ -776,10 +774,9 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) dprintk("RPC: gss_create_cred for uid %d, flavor %d\n", acred->uid, auth->au_flavor); - if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL))) + if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL))) goto out_err; - memset(cred, 0, sizeof(*cred)); atomic_set(&cred->gc_count, 1); cred->gc_uid = acred->uid; /* diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index b8714a87b34c..70e1e53a632b 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -129,9 +129,8 @@ gss_import_sec_context_kerberos(const void *p, const void *end = (const void *)((const char *)p + len); struct krb5_ctx *ctx; - if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL))) + if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL))) goto out_err; - memset(ctx, 0, sizeof(*ctx)); p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); if (IS_ERR(p)) diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index d88468d21c37..3db745379d06 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c @@ -237,9 +237,8 @@ gss_import_sec_context(const void *input_token, size_t bufsize, struct gss_api_mech *mech, struct gss_ctx **ctx_id) { - if (!(*ctx_id = kmalloc(sizeof(**ctx_id), GFP_KERNEL))) + if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL))) return GSS_S_FAILURE; - memset(*ctx_id, 0, sizeof(**ctx_id)); (*ctx_id)->mech_type = gss_mech_get(mech); return mech->gm_ops diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index 3d0432aa45c1..88dcb52d171b 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c @@ -152,9 +152,8 @@ gss_import_sec_context_spkm3(const void *p, size_t len, const void *end = (const void *)((const char *)p + len); struct spkm3_ctx *ctx; - if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL))) + if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL))) goto out_err; - memset(ctx, 0, sizeof(*ctx)); p = simple_get_netobj(p, end, &ctx->ctx_id); if (IS_ERR(p)) diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c index af0d7ce74686..854a983ccf26 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_token.c +++ b/net/sunrpc/auth_gss/gss_spkm3_token.c @@ -90,10 +90,9 @@ asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits) int decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen) { - if (!(out->data = kmalloc(explen,GFP_KERNEL))) + if (!(out->data = kzalloc(explen,GFP_KERNEL))) return 0; out->len = explen; - memset(out->data, 0, explen); memcpy(out->data, in, enclen); return 1; } diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index aa8965e9d307..4ba271f892c8 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -125,10 +125,9 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname, goto out_err; err = -ENOMEM; - clnt = kmalloc(sizeof(*clnt), GFP_KERNEL); + clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); if 
(!clnt) goto out_err; - memset(clnt, 0, sizeof(*clnt)); atomic_set(&clnt->cl_users, 0); atomic_set(&clnt->cl_count, 1); clnt->cl_parent = clnt; diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index 15c2db26767b..bd98124c3a64 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c @@ -114,13 +114,8 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp) { */ struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { - unsigned int ops = clnt->cl_maxproc; - size_t size = ops * sizeof(struct rpc_iostats); struct rpc_iostats *new; - - new = kmalloc(size, GFP_KERNEL); - if (new) - memset(new, 0 , size); + new = kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL); return new; } EXPORT_SYMBOL(rpc_alloc_iostats); diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 01ba60a49572..b76a227dd3ad 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -32,9 +32,8 @@ svc_create(struct svc_program *prog, unsigned int bufsize) int vers; unsigned int xdrsize; - if (!(serv = kmalloc(sizeof(*serv), GFP_KERNEL))) + if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) return NULL; - memset(serv, 0, sizeof(*serv)); serv->sv_name = prog->pg_name; serv->sv_program = prog; serv->sv_nrthreads = 1; @@ -159,11 +158,10 @@ svc_create_thread(svc_thread_fn func, struct svc_serv *serv) struct svc_rqst *rqstp; int error = -ENOMEM; - rqstp = kmalloc(sizeof(*rqstp), GFP_KERNEL); + rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL); if (!rqstp) goto out; - memset(rqstp, 0, sizeof(*rqstp)); init_waitqueue_head(&rqstp->rq_wait); if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL)) diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index a27905a0ad27..d9a95732df46 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1322,11 +1322,10 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock, struct sock *inet; dprintk("svc: svc_setup_socket %p\n", sock); - if (!(svsk = kmalloc(sizeof(*svsk), GFP_KERNEL))) { + if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) { *errp = -ENOMEM; return NULL; } - memset(svsk, 0, sizeof(*svsk)); inet = sock->sk; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 02060d0e7be8..313b68d892c6 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -908,9 +908,8 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc struct rpc_xprt *xprt; struct rpc_rqst *req; - if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) + if ((xprt = kzalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) return ERR_PTR(-ENOMEM); - memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! 
*/ xprt->addr = *ap; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 21006b109101..ee678ed13b6f 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1276,10 +1276,9 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->max_reqs = xprt_udp_slot_table_entries; slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); - xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); + xprt->slot = kzalloc(slot_table_size, GFP_KERNEL); if (xprt->slot == NULL) return -ENOMEM; - memset(xprt->slot, 0, slot_table_size); xprt->prot = IPPROTO_UDP; xprt->port = xs_get_random_port(); @@ -1318,10 +1317,9 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->max_reqs = xprt_tcp_slot_table_entries; slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); - xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); + xprt->slot = kzalloc(slot_table_size, GFP_KERNEL); if (xprt->slot == NULL) return -ENOMEM; - memset(xprt->slot, 0, slot_table_size); xprt->prot = IPPROTO_TCP; xprt->port = xs_get_random_port(); diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 7ef17a449cfd..75a5968c2139 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -665,11 +665,9 @@ int tipc_bearer_init(void) int res; write_lock_bh(&tipc_net_lock); - tipc_bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC); - media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC); + tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC); + media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC); if (tipc_bearers && media_list) { - memset(tipc_bearers, 0, MAX_BEARERS * sizeof(struct bearer)); - memset(media_list, 0, MAX_MEDIA * sizeof(struct media)); res = TIPC_OK; } else { kfree(tipc_bearers); diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c index 1dcb6940e338..b46b5188a9fd 100644 --- a/net/tipc/cluster.c +++ b/net/tipc/cluster.c @@ -57,29 +57,25 @@ struct cluster *tipc_cltr_create(u32 addr) struct _zone *z_ptr; struct cluster *c_ptr; int max_nodes; - int alloc; - c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC); + c_ptr = kzalloc(sizeof(*c_ptr), GFP_ATOMIC); if (c_ptr == NULL) { warn("Cluster creation failure, no memory\n"); return NULL; } - memset(c_ptr, 0, sizeof(*c_ptr)); c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0); if (in_own_cluster(addr)) max_nodes = LOWEST_SLAVE + tipc_max_slaves; else max_nodes = tipc_max_nodes + 1; - alloc = sizeof(void *) * (max_nodes + 1); - c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC); + c_ptr->nodes = kcalloc(max_nodes + 1, sizeof(void*), GFP_ATOMIC); if (c_ptr->nodes == NULL) { warn("Cluster creation failure, no memory for node area\n"); kfree(c_ptr); return NULL; } - memset(c_ptr->nodes, 0, alloc); if (in_own_cluster(addr)) tipc_local_nodes = c_ptr->nodes; diff --git a/net/tipc/discover.c b/net/tipc/discover.c index 2b8441203120..ee94de92ae99 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -295,7 +295,7 @@ struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr, { struct link_req *req; - req = (struct link_req *)kmalloc(sizeof(*req), GFP_ATOMIC); + req = kmalloc(sizeof(*req), GFP_ATOMIC); if (!req) return NULL; diff --git a/net/tipc/link.c b/net/tipc/link.c index c10e18a49b96..693f02eca6d6 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -417,12 +417,11 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer, struct tipc_msg *msg; char *if_name; - l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC); + l_ptr = 
kzalloc(sizeof(*l_ptr), GFP_ATOMIC); if (!l_ptr) { warn("Link creation failed, no memory\n"); return NULL; } - memset(l_ptr, 0, sizeof(*l_ptr)); l_ptr->addr = peer; if_name = strchr(b_ptr->publ.name, ':') + 1; diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index a6926ff07bcc..049242ea5c38 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -117,14 +117,12 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 port_ref, u32 key) { - struct publication *publ = - (struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC); + struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC); if (publ == NULL) { warn("Publication creation failure, no memory\n"); return NULL; } - memset(publ, 0, sizeof(*publ)); publ->type = type; publ->lower = lower; publ->upper = upper; @@ -144,11 +142,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper, static struct sub_seq *tipc_subseq_alloc(u32 cnt) { - u32 sz = cnt * sizeof(struct sub_seq); - struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC); - - if (sseq) - memset(sseq, 0, sz); + struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC); return sseq; } @@ -160,8 +154,7 @@ static struct sub_seq *tipc_subseq_alloc(u32 cnt) static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head) { - struct name_seq *nseq = - (struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC); + struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC); struct sub_seq *sseq = tipc_subseq_alloc(1); if (!nseq || !sseq) { @@ -171,7 +164,6 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea return NULL; } - memset(nseq, 0, sizeof(*nseq)); spin_lock_init(&nseq->lock); nseq->type = type; nseq->sseqs = sseq; @@ -1060,7 +1052,7 @@ int tipc_nametbl_init(void) { int array_size = sizeof(struct hlist_head) * tipc_nametbl_size; - table.types = (struct hlist_head *)kmalloc(array_size, GFP_ATOMIC); + table.types = kmalloc(array_size, GFP_ATOMIC); if (!table.types) return -ENOMEM; diff --git a/net/tipc/net.c b/net/tipc/net.c index e5a359ab4930..a991bf8a7f74 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -160,14 +160,11 @@ void tipc_net_send_external_routes(u32 dest) static int net_init(void) { - u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1); - memset(&tipc_net, 0, sizeof(tipc_net)); - tipc_net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC); + tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC); if (!tipc_net.zones) { return -ENOMEM; } - memset(tipc_net.zones, 0, sz); return TIPC_OK; } diff --git a/net/tipc/port.c b/net/tipc/port.c index 3251c8d8e53c..b9c8c6b9e94f 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c @@ -226,12 +226,11 @@ u32 tipc_createport_raw(void *usr_handle, struct tipc_msg *msg; u32 ref; - p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC); + p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC); if (!p_ptr) { warn("Port creation failed, no memory\n"); return 0; } - memset(p_ptr, 0, sizeof(*p_ptr)); ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock); if (!ref) { warn("Port creation failed, reference table exhausted\n"); @@ -1058,7 +1057,7 @@ int tipc_createport(u32 user_ref, struct port *p_ptr; u32 ref; - up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC); + up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC); if (!up_ptr) { warn("Port creation failed, no memory\n"); return -ENOMEM; diff --git a/net/tipc/ref.c b/net/tipc/ref.c index 596d3c8ff750..e6d6ae22ea49 100644 --- 
a/net/tipc/ref.c +++ b/net/tipc/ref.c @@ -79,7 +79,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start) while (sz < requested_size) { sz <<= 1; } - table = (struct reference *)vmalloc(sz * sizeof(struct reference)); + table = vmalloc(sz * sizeof(*table)); if (table == NULL) return -ENOMEM; diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index e19b4bcd67ec..c51600ba5f4a 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -393,12 +393,11 @@ static void subscr_named_msg_event(void *usr_handle, /* Create subscriber object */ - subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC); + subscriber = kzalloc(sizeof(struct subscriber), GFP_ATOMIC); if (subscriber == NULL) { warn("Subscriber rejected, no memory\n"); return; } - memset(subscriber, 0, sizeof(struct subscriber)); INIT_LIST_HEAD(&subscriber->subscription_list); INIT_LIST_HEAD(&subscriber->subscriber_list); subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock); diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c index 1e3ae57c7228..04d1b9be9c51 100644 --- a/net/tipc/user_reg.c +++ b/net/tipc/user_reg.c @@ -82,9 +82,8 @@ static int reg_init(void) spin_lock_bh(&reg_lock); if (!users) { - users = (struct tipc_user *)kmalloc(USER_LIST_SIZE, GFP_ATOMIC); + users = kzalloc(USER_LIST_SIZE, GFP_ATOMIC); if (users) { - memset(users, 0, USER_LIST_SIZE); for (i = 1; i <= MAX_USERID; i++) { users[i].next = i - 1; } diff --git a/net/tipc/zone.c b/net/tipc/zone.c index 316c4872ff5b..f5b00ea2d5ac 100644 --- a/net/tipc/zone.c +++ b/net/tipc/zone.c @@ -52,13 +52,12 @@ struct _zone *tipc_zone_create(u32 addr) return NULL; } - z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC); + z_ptr = kzalloc(sizeof(*z_ptr), GFP_ATOMIC); if (!z_ptr) { warn("Zone creation failed, insufficient memory\n"); return NULL; } - memset(z_ptr, 0, sizeof(*z_ptr)); z_num = tipc_zone(addr); z_ptr->addr = tipc_addr(z_num, 0, 0); tipc_net.zones[z_num] = z_ptr; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index f70475bfb62a..6f2909279268 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -663,11 +663,10 @@ static int unix_autobind(struct socket *sock) goto out; err = -ENOMEM; - addr = kmalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL); + addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL); if (!addr) goto out; - memset(addr, 0, sizeof(*addr) + sizeof(short) + 16); addr->name->sun_family = AF_UNIX; atomic_set(&addr->refcnt, 1); diff --git a/net/wanrouter/af_wanpipe.c b/net/wanrouter/af_wanpipe.c index a690cf773b6a..6f39faa15832 100644 --- a/net/wanrouter/af_wanpipe.c +++ b/net/wanrouter/af_wanpipe.c @@ -370,12 +370,11 @@ static int wanpipe_listen_rcv (struct sk_buff *skb, struct sock *sk) * used by the ioctl call to read call information * and to execute commands.
*/ - if ((mbox_ptr = kmalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) { + if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) { wanpipe_kill_sock_irq (newsk); release_device(dev); return -ENOMEM; } - memset(mbox_ptr, 0, sizeof(mbox_cmd_t)); memcpy(mbox_ptr,skb->data,skb->len); /* Register the lcn on which incoming call came @@ -507,11 +506,10 @@ static struct sock *wanpipe_alloc_socket(void) if ((sk = sk_alloc(PF_WANPIPE, GFP_ATOMIC, &wanpipe_proto, 1)) == NULL) return NULL; - if ((wan_opt = kmalloc(sizeof(struct wanpipe_opt), GFP_ATOMIC)) == NULL) { + if ((wan_opt = kzalloc(sizeof(struct wanpipe_opt), GFP_ATOMIC)) == NULL) { sk_free(sk); return NULL; } - memset(wan_opt, 0x00, sizeof(struct wanpipe_opt)); wp_sk(sk) = wan_opt; @@ -2011,10 +2009,9 @@ static int set_ioctl_cmd (struct sock *sk, void *arg) dev_put(dev); - if ((mbox_ptr = kmalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) + if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) return -ENOMEM; - memset(mbox_ptr, 0, sizeof(mbox_cmd_t)); wp_sk(sk)->mbox = mbox_ptr; wanpipe_link_driver(dev,sk); diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c index ad8e8a797790..9479659277ae 100644 --- a/net/wanrouter/wanmain.c +++ b/net/wanrouter/wanmain.c @@ -642,18 +642,16 @@ static int wanrouter_device_new_if(struct wan_device *wandev, if (cnf->config_id == WANCONFIG_MPPP) { #ifdef CONFIG_WANPIPE_MULTPPP - pppdev = kmalloc(sizeof(struct ppp_device), GFP_KERNEL); + pppdev = kzalloc(sizeof(struct ppp_device), GFP_KERNEL); err = -ENOBUFS; if (pppdev == NULL) goto out; - memset(pppdev, 0, sizeof(struct ppp_device)); - pppdev->dev = kmalloc(sizeof(struct net_device), GFP_KERNEL); + pppdev->dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); if (pppdev->dev == NULL) { kfree(pppdev); err = -ENOBUFS; goto out; } - memset(pppdev->dev, 0, sizeof(struct net_device)); err = wandev->new_if(wandev, (struct net_device *)pppdev, cnf); dev = pppdev->dev; #else @@ -663,11 +661,10 @@ static int wanrouter_device_new_if(struct wan_device *wandev, goto out; #endif } else { - dev = kmalloc(sizeof(struct net_device), GFP_KERNEL); + dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); err = -ENOBUFS; if (dev == NULL) goto out; - memset(dev, 0, sizeof(struct net_device)); err = wandev->new_if(wandev, dev, cnf); } diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 405b741dff43..f35bc676128c 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -307,10 +307,9 @@ struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp) { struct xfrm_policy *policy; - policy = kmalloc(sizeof(struct xfrm_policy), gfp); + policy = kzalloc(sizeof(struct xfrm_policy), gfp); if (policy) { - memset(policy, 0, sizeof(struct xfrm_policy)); atomic_set(&policy->refcnt, 1); rwlock_init(&policy->lock); init_timer(&policy->timer); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 43f00fc28a3d..0021aad5db43 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -194,10 +194,9 @@ struct xfrm_state *xfrm_state_alloc(void) { struct xfrm_state *x; - x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC); + x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC); if (x) { - memset(x, 0, sizeof(struct xfrm_state)); atomic_set(&x->refcnt, 1); atomic_set(&x->tunnel_users, 0); INIT_LIST_HEAD(&x->bydst); diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 2e8b4dfcbc74..a91c961ba38b 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -523,12 +523,16 @@ static int try_context_mount(struct 
super_block *sb, void *data)
 			goto out_free;
 		}
 
-		rc = may_context_mount_sb_relabel(sid, sbsec, tsec);
-		if (rc)
-			goto out_free;
-
-		if (!fscontext)
+		if (!fscontext) {
+			rc = may_context_mount_sb_relabel(sid, sbsec, tsec);
+			if (rc)
+				goto out_free;
 			sbsec->sid = sid;
+		} else {
+			rc = may_context_mount_inode_relabel(sid, sbsec, tsec);
+			if (rc)
+				goto out_free;
+		}
 		sbsec->mntpoint_sid = sid;
 		sbsec->behavior = SECURITY_FS_USE_MNTPOINT;
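Several of the SCTP hunks earlier in this section replace blanket "state != SCTP_INACTIVE" tests with explicit checks for SCTP_ACTIVE or SCTP_UNKNOWN, so that peers left in the SCTP_UNCONFIRMED state are never selected as the active or retransmit path. A condensed sketch of that rule follows; the helper function is illustrative only, the patch open-codes the comparison at each call site.

#include <net/sctp/sctp.h>

/*
 * Illustrative only: the patch does not add this helper, it repeats the
 * comparison wherever a usable transport has to be chosen.
 */
static inline int sctp_transport_is_usable(const struct sctp_transport *t)
{
	/*
	 * UNCONFIRMED (like INACTIVE) addresses are skipped until a
	 * heartbeat exchange, validated by the 64-bit nonce added in this
	 * series, confirms them.
	 */
	return t->state == SCTP_ACTIVE || t->state == SCTP_UNKNOWN;
}

The local-address side of the same series is the use_as_src flag on bind-address entries: an address announced with an ASCONF ADD-IP chunk is inserted with use_as_src set to 0 and is only considered as a packet source once the peer's acknowledgement flips it to 1 in sctp_asconf_param_success().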