MAINTAINERS: Add cxgb4 and iw_cxgb4 entries

Add MAINTAINERS entries for the new cxgb4 Ethernet driver and the
iw_cxgb4 iWARP RNIC driver.

This patch also carries a batch of iw_cxgb4 fixes:

 - Defer connection-timeout handling from the (atomic) timer callback
   to the CM workqueue so endpoint teardown can sleep.
 - Replace the hard-coded T4_MAX_READ_DEPTH with a c4iw_max_read_depth
   module parameter and validate ORD/IRD in the connect and accept
   paths.
 - Make post_terminate() static, give it a gfp_t argument, and pass
   internal=1 when the async event handler moves a QP to TERMINATE, so
   atomic contexts use GFP_ATOMIC rather than GFP_KERNEL.
 - Bound CQ doorbell CIDX increments by CIDXINC_MASK and the CQ size,
   looping until the pending increment drains.
 - Detect CQ overflow in t4_next_hw_cqe() from a single read of
   bits_type_ts.
 - Fix an "stid" vs "atid" error message and clean up an AE printk.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 7a9ccda..8a32aec 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1719,6 +1719,20 @@
 S:	Supported
 F:	drivers/infiniband/hw/cxgb3/
 
+CXGB4 ETHERNET DRIVER (CXGB4)
+M:	Dimitris Michailidis <dm@chelsio.com>
+L:	netdev@vger.kernel.org
+W:	http://www.chelsio.com
+S:	Supported
+F:	drivers/net/cxgb4/
+
+CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
+M:	Steve Wise <swise@chelsio.com>
+L:	linux-rdma@vger.kernel.org
+W:	http://www.openfabrics.org
+S:	Supported
+F:	drivers/infiniband/hw/cxgb4/
+
 CYBERPRO FB DRIVER
 M:	Russell King <linux@arm.linux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 07b068b..30ce0a8 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -61,6 +61,10 @@
 	NULL,
 };
 
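+/* Upper bound enforced (and advertised) for per-QP ORD/IRD. */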
+int c4iw_max_read_depth = 8;
+module_param(c4iw_max_read_depth, int, 0644);
+MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+
 static int enable_tcp_timestamps;
 module_param(enable_tcp_timestamps, int, 0644);
 MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
@@ -113,18 +117,17 @@
 module_param(snd_win, int, 0644);
 MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
 
-static void process_work(struct work_struct *work);
 static struct workqueue_struct *workq;
-static DECLARE_WORK(skb_work, process_work);
 
 static struct sk_buff_head rxq;
-static c4iw_handler_func work_handlers[NUM_CPL_CMDS];
-c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
 
 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
 static void ep_timeout(unsigned long arg);
 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
 
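+/*
+ * Endpoints whose timers have fired are queued here and drained by
+ * the CM workqueue, where connection teardown can sleep.
+ */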
+static LIST_HEAD(timeout_list);
+static spinlock_t timeout_lock;
+
 static void start_ep_timer(struct c4iw_ep *ep)
 {
 	PDBG("%s ep %p\n", __func__, ep);
@@ -271,26 +274,6 @@
 	c4iw_put_ep(&ep->com);
 }
 
-static void process_work(struct work_struct *work)
-{
-	struct sk_buff *skb = NULL;
-	struct c4iw_dev *dev;
-	struct cpl_act_establish *rpl = cplhdr(skb);
-	unsigned int opcode;
-	int ret;
-
-	while ((skb = skb_dequeue(&rxq))) {
-		rpl = cplhdr(skb);
-		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
-		opcode = rpl->ot.opcode;
-
-		BUG_ON(!work_handlers[opcode]);
-		ret = work_handlers[opcode](dev, skb);
-		if (!ret)
-			kfree_skb(skb);
-	}
-}
-
 static int status2errno(int status)
 {
 	switch (status) {
@@ -1795,76 +1778,6 @@
 	return 0;
 }
 
-static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
-{
-	struct cpl_fw6_msg *rpl = cplhdr(skb);
-	struct c4iw_wr_wait *wr_waitp;
-	int ret;
-
-	PDBG("%s type %u\n", __func__, rpl->type);
-
-	switch (rpl->type) {
-	case 1:
-		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
-		wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
-		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
-		if (wr_waitp) {
-			wr_waitp->ret = ret;
-			wr_waitp->done = 1;
-			wake_up(&wr_waitp->wait);
-		}
-		break;
-	case 2:
-		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
-		break;
-	default:
-		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
-		       rpl->type);
-		break;
-	}
-	return 0;
-}
-
-static void ep_timeout(unsigned long arg)
-{
-	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
-	struct c4iw_qp_attributes attrs;
-	unsigned long flags;
-	int abort = 1;
-
-	spin_lock_irqsave(&ep->com.lock, flags);
-	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
-	     ep->com.state);
-	switch (ep->com.state) {
-	case MPA_REQ_SENT:
-		__state_set(&ep->com, ABORTING);
-		connect_reply_upcall(ep, -ETIMEDOUT);
-		break;
-	case MPA_REQ_WAIT:
-		__state_set(&ep->com, ABORTING);
-		break;
-	case CLOSING:
-	case MORIBUND:
-		if (ep->com.cm_id && ep->com.qp) {
-			attrs.next_state = C4IW_QP_STATE_ERROR;
-			c4iw_modify_qp(ep->com.qp->rhp,
-				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
-				     &attrs, 1);
-		}
-		__state_set(&ep->com, ABORTING);
-		break;
-	default:
-		printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
-			__func__, ep, ep->hwtid, ep->com.state);
-		WARN_ON(1);
-		abort = 0;
-	}
-	spin_unlock_irqrestore(&ep->com.lock, flags);
-	if (abort)
-		abort_connection(ep, NULL, GFP_ATOMIC);
-	c4iw_put_ep(&ep->com);
-}
-
 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 {
 	int err;
@@ -1904,8 +1817,8 @@
 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
 	BUG_ON(!qp);
 
-	if ((conn_param->ord > T4_MAX_READ_DEPTH) ||
-	    (conn_param->ird > T4_MAX_READ_DEPTH)) {
+	if ((conn_param->ord > c4iw_max_read_depth) ||
+	    (conn_param->ird > c4iw_max_read_depth)) {
 		abort_connection(ep, NULL, GFP_KERNEL);
 		err = -EINVAL;
 		goto err;
@@ -1968,6 +1881,11 @@
 	struct net_device *pdev;
 	int step;
 
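+	/* Reject ORD/IRD requests beyond what this driver advertises. */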
+	if ((conn_param->ord > c4iw_max_read_depth) ||
+	    (conn_param->ird > c4iw_max_read_depth)) {
+		err = -EINVAL;
+		goto out;
+	}
 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
 	if (!ep) {
 		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
@@ -2115,7 +2033,7 @@
 	 */
 	ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
 	if (ep->stid == -1) {
-		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
+		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
 		err = -ENOMEM;
 		goto fail2;
 	}
@@ -2244,6 +2162,116 @@
 }
 
 /*
+ * These are the real handlers that are called from a
+ * work queue.
+ */
+static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
+	[CPL_ACT_ESTABLISH] = act_establish,
+	[CPL_ACT_OPEN_RPL] = act_open_rpl,
+	[CPL_RX_DATA] = rx_data,
+	[CPL_ABORT_RPL_RSS] = abort_rpl,
+	[CPL_ABORT_RPL] = abort_rpl,
+	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
+	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
+	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
+	[CPL_PASS_ESTABLISH] = pass_establish,
+	[CPL_PEER_CLOSE] = peer_close,
+	[CPL_ABORT_REQ_RSS] = peer_abort,
+	[CPL_CLOSE_CON_RPL] = close_con_rpl,
+	[CPL_RDMA_TERMINATE] = terminate,
+	[CPL_FW4_ACK] = fw4_ack
+};
+
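+/*
+ * Handle an endpoint whose timer has fired.  Runs from the CM
+ * workqueue, so it can sleep and use GFP_KERNEL.
+ */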
+static void process_timeout(struct c4iw_ep *ep)
+{
+	struct c4iw_qp_attributes attrs;
+	int abort = 1;
+
+	spin_lock_irq(&ep->com.lock);
+	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
+	     ep->com.state);
+	switch (ep->com.state) {
+	case MPA_REQ_SENT:
+		__state_set(&ep->com, ABORTING);
+		connect_reply_upcall(ep, -ETIMEDOUT);
+		break;
+	case MPA_REQ_WAIT:
+		__state_set(&ep->com, ABORTING);
+		break;
+	case CLOSING:
+	case MORIBUND:
+		if (ep->com.cm_id && ep->com.qp) {
+			attrs.next_state = C4IW_QP_STATE_ERROR;
+			c4iw_modify_qp(ep->com.qp->rhp,
+				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
+				     &attrs, 1);
+		}
+		__state_set(&ep->com, ABORTING);
+		break;
+	default:
+		printk(KERN_ERR MOD "%s unexpected state ep %p tid %u state %u\n",
+			__func__, ep, ep->hwtid, ep->com.state);
+		WARN_ON(1);
+		abort = 0;
+	}
+	spin_unlock_irq(&ep->com.lock);
+	if (abort)
+		abort_connection(ep, NULL, GFP_KERNEL);
+	c4iw_put_ep(&ep->com);
+}
+
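+/*
+ * Drain the timeout list.  timeout_lock is dropped around each
+ * process_timeout() call, since the handler can sleep.
+ */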
+static void process_timedout_eps(void)
+{
+	struct c4iw_ep *ep;
+
+	spin_lock_irq(&timeout_lock);
+	while (!list_empty(&timeout_list)) {
+		struct list_head *tmp;
+
+		tmp = timeout_list.next;
+		list_del(tmp);
+		spin_unlock_irq(&timeout_lock);
+		ep = list_entry(tmp, struct c4iw_ep, entry);
+		process_timeout(ep);
+		spin_lock_irq(&timeout_lock);
+	}
+	spin_unlock_irq(&timeout_lock);
+}
+
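+/*
+ * Deferred CPL dispatch: run the real handler for each queued skb,
+ * then take care of any endpoints that timed out along the way.
+ */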
+static void process_work(struct work_struct *work)
+{
+	struct sk_buff *skb = NULL;
+	struct c4iw_dev *dev;
+	struct cpl_act_establish *rpl;
+	unsigned int opcode;
+	int ret;
+
+	while ((skb = skb_dequeue(&rxq))) {
+		rpl = cplhdr(skb);
+		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
+		opcode = rpl->ot.opcode;
+
+		BUG_ON(!work_handlers[opcode]);
+		ret = work_handlers[opcode](dev, skb);
+		if (!ret)
+			kfree_skb(skb);
+	}
+	process_timedout_eps();
+}
+
+static DECLARE_WORK(skb_work, process_work);
+
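+/*
+ * Timer callback: runs in atomic (timer) context, so just queue the
+ * endpoint on the timeout list and kick the workqueue.
+ */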
+static void ep_timeout(unsigned long arg)
+{
+	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+
+	spin_lock(&timeout_lock);
+	list_add_tail(&ep->entry, &timeout_list);
+	spin_unlock(&timeout_lock);
+	queue_work(workq, &skb_work);
+}
+
+/*
  * All the CM events are handled on a work queue to have a safe context.
  */
 static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
@@ -2273,58 +2301,74 @@
 	return 0;
 }
 
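+/*
+ * FW6 messages are handled here directly, without deferring to the
+ * workqueue: they only wake a waiter or dispatch a CQ error event.
+ */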
+static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+	struct cpl_fw6_msg *rpl = cplhdr(skb);
+	struct c4iw_wr_wait *wr_waitp;
+	int ret;
+
+	PDBG("%s type %u\n", __func__, rpl->type);
+
+	switch (rpl->type) {
+	case 1:
+		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
+		wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
+		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
+		if (wr_waitp) {
+			wr_waitp->ret = ret;
+			wr_waitp->done = 1;
+			wake_up(&wr_waitp->wait);
+		}
+		break;
+	case 2:
+		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+		break;
+	default:
+		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
+		       rpl->type);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Most upcalls from the T4 Core go to sched() to
+ * schedule the processing on a work queue.
+ */
+c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
+	[CPL_ACT_ESTABLISH] = sched,
+	[CPL_ACT_OPEN_RPL] = sched,
+	[CPL_RX_DATA] = sched,
+	[CPL_ABORT_RPL_RSS] = sched,
+	[CPL_ABORT_RPL] = sched,
+	[CPL_PASS_OPEN_RPL] = sched,
+	[CPL_CLOSE_LISTSRV_RPL] = sched,
+	[CPL_PASS_ACCEPT_REQ] = sched,
+	[CPL_PASS_ESTABLISH] = sched,
+	[CPL_PEER_CLOSE] = sched,
+	[CPL_CLOSE_CON_RPL] = sched,
+	[CPL_ABORT_REQ_RSS] = sched,
+	[CPL_RDMA_TERMINATE] = sched,
+	[CPL_FW4_ACK] = sched,
+	[CPL_SET_TCB_RPL] = set_tcb_rpl,
+	[CPL_FW6_MSG] = fw6_msg
+};
+
 int __init c4iw_cm_init(void)
 {
+	spin_lock_init(&timeout_lock);
 	skb_queue_head_init(&rxq);
 
 	workq = create_singlethread_workqueue("iw_cxgb4");
 	if (!workq)
 		return -ENOMEM;
 
-	/*
-	 * Most upcalls from the T4 Core go to sched() to
-	 * schedule the processing on a work queue.
-	 */
-	c4iw_handlers[CPL_ACT_ESTABLISH] = sched;
-	c4iw_handlers[CPL_ACT_OPEN_RPL] = sched;
-	c4iw_handlers[CPL_RX_DATA] = sched;
-	c4iw_handlers[CPL_ABORT_RPL_RSS] = sched;
-	c4iw_handlers[CPL_ABORT_RPL] = sched;
-	c4iw_handlers[CPL_PASS_OPEN_RPL] = sched;
-	c4iw_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
-	c4iw_handlers[CPL_PASS_ACCEPT_REQ] = sched;
-	c4iw_handlers[CPL_PASS_ESTABLISH] = sched;
-	c4iw_handlers[CPL_PEER_CLOSE] = sched;
-	c4iw_handlers[CPL_CLOSE_CON_RPL] = sched;
-	c4iw_handlers[CPL_ABORT_REQ_RSS] = sched;
-	c4iw_handlers[CPL_RDMA_TERMINATE] = sched;
-	c4iw_handlers[CPL_FW4_ACK] = sched;
-	c4iw_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
-	c4iw_handlers[CPL_FW6_MSG] = fw6_msg;
-
-	/*
-	 * These are the real handlers that are called from a
-	 * work queue.
-	 */
-	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
-	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
-	work_handlers[CPL_RX_DATA] = rx_data;
-	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
-	work_handlers[CPL_ABORT_RPL] = abort_rpl;
-	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
-	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
-	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
-	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
-	work_handlers[CPL_PEER_CLOSE] = peer_close;
-	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
-	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
-	work_handlers[CPL_RDMA_TERMINATE] = terminate;
-	work_handlers[CPL_FW4_ACK] = fw4_ack;
 	return 0;
 }
 
 void __exit c4iw_cm_term(void)
 {
+	WARN_ON(!list_empty(&timeout_list));
 	flush_workqueue(workq);
 	destroy_workqueue(workq);
 }
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 1bd6a3e..491e76a 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -51,8 +51,8 @@
 		return;
 	}
 
-	printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
-	       "type %d wrid.hi 0x%x wrid.lo 0x%x\n", __func__,
+	printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
+	       "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
 	       CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
 	       CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
 	       CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
@@ -60,7 +60,7 @@
 	if (qhp->attr.state == C4IW_QP_STATE_RTS) {
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
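+		/*
+		 * This runs from the LLD upcall, so flag the transition as
+		 * internal: the TERMINATE will then be posted with
+		 * GFP_ATOMIC.
+		 */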
 		c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
-			       &attrs, 0);
+			       &attrs, 1);
 	}
 
 	event.event = ib_event;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index ccce6fe..a626998 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -597,6 +597,7 @@
 	struct c4iw_ep_common com;
 	struct c4iw_ep *parent_ep;
 	struct timer_list timer;
+	struct list_head entry;
 	unsigned int atid;
 	u32 hwtid;
 	u32 snd_seq;
@@ -739,5 +740,6 @@
 
 extern struct cxgb4_client t4c_client;
 extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
+extern int c4iw_max_read_depth;
 
 #endif
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 3cb50af..dfc4902 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -267,8 +267,8 @@
 	props->max_qp_wr = T4_MAX_QP_DEPTH;
 	props->max_sge = T4_MAX_RECV_SGE;
 	props->max_sge_rd = 1;
-	props->max_qp_rd_atom = T4_MAX_READ_DEPTH;
-	props->max_qp_init_rd_atom = T4_MAX_READ_DEPTH;
+	props->max_qp_rd_atom = c4iw_max_read_depth;
+	props->max_qp_init_rd_atom = c4iw_max_read_depth;
 	props->max_cq = T4_MAX_NUM_CQ;
 	props->max_cqe = T4_MAX_CQ_DEPTH;
 	props->max_mr = c4iw_num_stags(&dev->rdev);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index bd56c84..83a01dc 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -856,7 +856,8 @@
 	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
 }
 
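+/*
+ * Post a TERMINATE to the peer.  The gfp argument lets atomic callers
+ * pass GFP_ATOMIC; if the allocation fails the terminate is dropped.
+ */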
-int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe)
+static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
+			   gfp_t gfp)
 {
 	struct fw_ri_wr *wqe;
 	struct sk_buff *skb;
@@ -865,9 +866,9 @@
 	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
 	     qhp->ep->hwtid);
 
-	skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(sizeof *wqe, gfp);
 	if (!skb)
-		return -ENOMEM;
+		return;
 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
 
 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
@@ -881,7 +882,7 @@
 	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
 	term = (struct terminate_message *)wqe->u.terminate.termmsg;
 	build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
-	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
+	c4iw_ofld_send(&qhp->rhp->rdev, skb);
 }
 
 /*
@@ -1130,14 +1131,14 @@
 		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
 			newattr.enable_bind = attrs->enable_bind;
 		if (mask & C4IW_QP_ATTR_MAX_ORD) {
-			if (attrs->max_ord > T4_MAX_READ_DEPTH) {
+			if (attrs->max_ord > c4iw_max_read_depth) {
 				ret = -EINVAL;
 				goto out;
 			}
 			newattr.max_ord = attrs->max_ord;
 		}
 		if (mask & C4IW_QP_ATTR_MAX_IRD) {
-			if (attrs->max_ird > T4_MAX_READ_DEPTH) {
+			if (attrs->max_ird > c4iw_max_read_depth) {
 				ret = -EINVAL;
 				goto out;
 			}
@@ -1215,12 +1216,10 @@
 			qhp->attr.state = C4IW_QP_STATE_TERMINATE;
 			if (qhp->ibqp.uobject)
 				t4_set_wq_in_error(&qhp->wq);
-			if (!internal) {
-				ep = qhp->ep;
-				c4iw_get_ep(&ep->com);
-				terminate = 1;
-				disconnect = 1;
-			}
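+			/*
+			 * Terminate and disconnect even on internal
+			 * transitions; the gfp flags used below are chosen
+			 * to match the caller's context.
+			 */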
+			ep = qhp->ep;
+			c4iw_get_ep(&ep->com);
+			terminate = 1;
+			disconnect = 1;
 			break;
 		case C4IW_QP_STATE_ERROR:
 			qhp->attr.state = C4IW_QP_STATE_ERROR;
@@ -1301,7 +1300,7 @@
 	spin_unlock_irqrestore(&qhp->lock, flag);
 
 	if (terminate)
-		c4iw_post_terminate(qhp, NULL);
+		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
 
 	/*
 	 * If disconnect is 1, then we need to initiate a disconnect
@@ -1309,7 +1308,8 @@
 	 * an abnormal close (RTS/CLOSING->ERROR).
 	 */
 	if (disconnect) {
-		c4iw_ep_disconnect(ep, abort, GFP_KERNEL);
+		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
+							 GFP_KERNEL);
 		c4iw_put_ep(&ep->com);
 	}
 
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 3f0d217..d0e8af3 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -36,7 +36,6 @@
 #include "t4_msg.h"
 #include "t4fw_ri_api.h"
 
-#define T4_MAX_READ_DEPTH 16
 #define T4_QID_BASE 1024
 #define T4_MAX_QIDS 256
 #define T4_MAX_NUM_QP (1<<16)
@@ -450,11 +449,25 @@
 static inline int t4_arm_cq(struct t4_cq *cq, int se)
 {
 	u32 val;
+	u16 inc;
 
-	val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
-	      INGRESSQID(cq->cqid);
-	cq->cidx_inc = 0;
-	writel(val, cq->gts);
+	do {
+		/*
+		 * inc must be less than both the max CIDXINC update
+		 * value -and- the size of the CQ.  If the pending
+		 * increment is larger, post a CIDX-only update (no
+		 * interrupt arm) and loop until it drains.
+		 */
+		inc = cq->cidx_inc <= CIDXINC_MASK ? cq->cidx_inc :
+						     CIDXINC_MASK;
+		inc = inc <= (cq->size - 1) ? inc : (cq->size - 1);
+		if (inc == cq->cidx_inc)
+			val = SEINTARM(se) | CIDXINC(inc) | TIMERREG(6) |
+			      INGRESSQID(cq->cqid);
+		else
+			val = SEINTARM(0) | CIDXINC(inc) | TIMERREG(7) |
+			      INGRESSQID(cq->cqid);
+		cq->cidx_inc -= inc;
+		writel(val, cq->gts);
+	} while (cq->cidx_inc);
 	return 0;
 }
 
@@ -489,11 +502,12 @@
 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 {
 	int ret = 0;
+	u64 bits_type_ts = be64_to_cpu(cq->queue[cq->cidx].bits_type_ts);
 
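+	/*
+	 * A single read of bits_type_ts yields both the generation bit
+	 * (CQE validity) and the timestamp used to detect overflow.
+	 */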
-	if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
+	if (G_CQE_GENBIT(bits_type_ts) == cq->gen) {
 		*cqe = &cq->queue[cq->cidx];
-		cq->timestamp = CQE_TS(*cqe);
-	} else if (CQE_TS(&cq->queue[cq->cidx]) > cq->timestamp)
+		cq->timestamp = G_CQE_TS(bits_type_ts);
+	} else if (G_CQE_TS(bits_type_ts) > cq->timestamp)
 		ret = -EOVERFLOW;
 	else
 		ret = -ENODATA;