1c0dd49bdSEiji Ota /* 2c0dd49bdSEiji Ota * CDDL HEADER START 3c0dd49bdSEiji Ota * 4c0dd49bdSEiji Ota * The contents of this file are subject to the terms of the 5c0dd49bdSEiji Ota * Common Development and Distribution License (the "License"). 6c0dd49bdSEiji Ota * You may not use this file except in compliance with the License. 7c0dd49bdSEiji Ota * 8c0dd49bdSEiji Ota * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9c0dd49bdSEiji Ota * or http://www.opensolaris.org/os/licensing. 10c0dd49bdSEiji Ota * See the License for the specific language governing permissions 11c0dd49bdSEiji Ota * and limitations under the License. 12c0dd49bdSEiji Ota * 13c0dd49bdSEiji Ota * When distributing Covered Code, include this CDDL HEADER in each 14c0dd49bdSEiji Ota * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15c0dd49bdSEiji Ota * If applicable, add the following below this CDDL HEADER, with the 16c0dd49bdSEiji Ota * fields enclosed by brackets "[]" replaced with your own identifying 17c0dd49bdSEiji Ota * information: Portions Copyright [yyyy] [name of copyright owner] 18c0dd49bdSEiji Ota * 19c0dd49bdSEiji Ota * CDDL HEADER END 20c0dd49bdSEiji Ota */ 21c0dd49bdSEiji Ota /* 22c0dd49bdSEiji Ota * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. 23c0dd49bdSEiji Ota */ 24c0dd49bdSEiji Ota 25c0dd49bdSEiji Ota /* 26c0dd49bdSEiji Ota * Copyright (c) 2006 Oracle. All rights reserved. 27c0dd49bdSEiji Ota * 28c0dd49bdSEiji Ota * This software is available to you under a choice of one of two 29c0dd49bdSEiji Ota * licenses. 
You may choose to be licensed under the terms of the GNU 30c0dd49bdSEiji Ota * General Public License (GPL) Version 2, available from the file 31c0dd49bdSEiji Ota * COPYING in the main directory of this source tree, or the 32c0dd49bdSEiji Ota * OpenIB.org BSD license below: 33c0dd49bdSEiji Ota * 34c0dd49bdSEiji Ota * Redistribution and use in source and binary forms, with or 35c0dd49bdSEiji Ota * without modification, are permitted provided that the following 36c0dd49bdSEiji Ota * conditions are met: 37c0dd49bdSEiji Ota * 38c0dd49bdSEiji Ota * - Redistributions of source code must retain the above 39c0dd49bdSEiji Ota * copyright notice, this list of conditions and the following 40c0dd49bdSEiji Ota * disclaimer. 41c0dd49bdSEiji Ota * 42c0dd49bdSEiji Ota * - Redistributions in binary form must reproduce the above 43c0dd49bdSEiji Ota * copyright notice, this list of conditions and the following 44c0dd49bdSEiji Ota * disclaimer in the documentation and/or other materials 45c0dd49bdSEiji Ota * provided with the distribution. 46c0dd49bdSEiji Ota * 47c0dd49bdSEiji Ota * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 48c0dd49bdSEiji Ota * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 49c0dd49bdSEiji Ota * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 50c0dd49bdSEiji Ota * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 51c0dd49bdSEiji Ota * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 52c0dd49bdSEiji Ota * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 53c0dd49bdSEiji Ota * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 54c0dd49bdSEiji Ota * SOFTWARE. 
55c0dd49bdSEiji Ota * 56c0dd49bdSEiji Ota */ 57c0dd49bdSEiji Ota #include <sys/stropts.h> 58c0dd49bdSEiji Ota #include <sys/systm.h> 59c0dd49bdSEiji Ota 60c0dd49bdSEiji Ota #include <sys/rds.h> 61c0dd49bdSEiji Ota #include <sys/socket.h> 62c0dd49bdSEiji Ota #include <sys/socketvar.h> 63c0dd49bdSEiji Ota 64c0dd49bdSEiji Ota #include <sys/ib/clients/rdsv3/rdsv3.h> 65c0dd49bdSEiji Ota #include <sys/ib/clients/rdsv3/rdma.h> 66c0dd49bdSEiji Ota #include <sys/ib/clients/rdsv3/rdsv3_debug.h> 67c0dd49bdSEiji Ota 68c0dd49bdSEiji Ota /* 69c0dd49bdSEiji Ota * When transmitting messages in rdsv3_send_xmit, we need to emerge from 70c0dd49bdSEiji Ota * time to time and briefly release the CPU. Otherwise the softlock watchdog 71c0dd49bdSEiji Ota * will kick our shin. 72c0dd49bdSEiji Ota * Also, it seems fairer to not let one busy connection stall all the 73c0dd49bdSEiji Ota * others. 74c0dd49bdSEiji Ota * 75c0dd49bdSEiji Ota * send_batch_count is the number of times we'll loop in send_xmit. Setting 76c0dd49bdSEiji Ota * it to 0 will restore the old behavior (where we looped until we had 77c0dd49bdSEiji Ota * drained the queue). 78c0dd49bdSEiji Ota */ 79c0dd49bdSEiji Ota static int send_batch_count = 64; 80c0dd49bdSEiji Ota 81c0dd49bdSEiji Ota extern void rdsv3_ib_send_unmap_rdma(void *ic, struct rdsv3_rdma_op *op); 82c0dd49bdSEiji Ota /* 83c0dd49bdSEiji Ota * Reset the send state. Caller must hold c_send_lock when calling here. 
84c0dd49bdSEiji Ota */ 85c0dd49bdSEiji Ota void 86c0dd49bdSEiji Ota rdsv3_send_reset(struct rdsv3_connection *conn) 87c0dd49bdSEiji Ota { 88c0dd49bdSEiji Ota struct rdsv3_message *rm, *tmp; 89c0dd49bdSEiji Ota struct rdsv3_rdma_op *ro; 90c0dd49bdSEiji Ota 91c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_send_reset", "Enter(conn: %p)", conn); 92c0dd49bdSEiji Ota 93*5d5562f5SEiji Ota ASSERT(MUTEX_HELD(&conn->c_send_lock)); 94*5d5562f5SEiji Ota 95c0dd49bdSEiji Ota if (conn->c_xmit_rm) { 96c0dd49bdSEiji Ota rm = conn->c_xmit_rm; 97c0dd49bdSEiji Ota ro = rm->m_rdma_op; 98c0dd49bdSEiji Ota if (ro && ro->r_mapped) { 99c0dd49bdSEiji Ota RDSV3_DPRINTF2("rdsv3_send_reset", 100c0dd49bdSEiji Ota "rm %p mflg 0x%x map %d mihdl %p sgl %p", 101c0dd49bdSEiji Ota rm, rm->m_flags, ro->r_mapped, 102c0dd49bdSEiji Ota ro->r_rdma_sg[0].mihdl, 103c0dd49bdSEiji Ota ro->r_rdma_sg[0].swr.wr_sgl); 104c0dd49bdSEiji Ota rdsv3_ib_send_unmap_rdma(conn->c_transport_data, ro); 105c0dd49bdSEiji Ota } 106c0dd49bdSEiji Ota /* 107c0dd49bdSEiji Ota * Tell the user the RDMA op is no longer mapped by the 108c0dd49bdSEiji Ota * transport. 
This isn't entirely true (it's flushed out 109c0dd49bdSEiji Ota * independently) but as the connection is down, there's 110c0dd49bdSEiji Ota * no ongoing RDMA to/from that memory 111c0dd49bdSEiji Ota */ 112c0dd49bdSEiji Ota rdsv3_message_unmapped(conn->c_xmit_rm); 113c0dd49bdSEiji Ota rdsv3_message_put(conn->c_xmit_rm); 114c0dd49bdSEiji Ota conn->c_xmit_rm = NULL; 115c0dd49bdSEiji Ota } 116*5d5562f5SEiji Ota 117c0dd49bdSEiji Ota conn->c_xmit_sg = 0; 118c0dd49bdSEiji Ota conn->c_xmit_hdr_off = 0; 119c0dd49bdSEiji Ota conn->c_xmit_data_off = 0; 120c0dd49bdSEiji Ota conn->c_xmit_rdma_sent = 0; 121c0dd49bdSEiji Ota conn->c_map_queued = 0; 122c0dd49bdSEiji Ota 123c0dd49bdSEiji Ota conn->c_unacked_packets = rdsv3_sysctl_max_unacked_packets; 124c0dd49bdSEiji Ota conn->c_unacked_bytes = rdsv3_sysctl_max_unacked_bytes; 125c0dd49bdSEiji Ota 126c0dd49bdSEiji Ota /* Mark messages as retransmissions, and move them to the send q */ 127c0dd49bdSEiji Ota mutex_enter(&conn->c_lock); 128c0dd49bdSEiji Ota RDSV3_FOR_EACH_LIST_NODE_SAFE(rm, tmp, &conn->c_retrans, m_conn_item) { 129c0dd49bdSEiji Ota set_bit(RDSV3_MSG_ACK_REQUIRED, &rm->m_flags); 130c0dd49bdSEiji Ota set_bit(RDSV3_MSG_RETRANSMITTED, &rm->m_flags); 131c0dd49bdSEiji Ota if (rm->m_rdma_op && rm->m_rdma_op->r_mapped) { 132c0dd49bdSEiji Ota RDSV3_DPRINTF4("_send_reset", 133c0dd49bdSEiji Ota "RT rm %p mflg 0x%x sgl %p", 134c0dd49bdSEiji Ota rm, rm->m_flags, 135c0dd49bdSEiji Ota rm->m_rdma_op->r_rdma_sg[0].swr.wr_sgl); 136c0dd49bdSEiji Ota } 137c0dd49bdSEiji Ota } 138c0dd49bdSEiji Ota list_move_tail(&conn->c_send_queue, &conn->c_retrans); 139c0dd49bdSEiji Ota mutex_exit(&conn->c_lock); 140c0dd49bdSEiji Ota 141c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_send_reset", "Return(conn: %p)", conn); 142c0dd49bdSEiji Ota } 143c0dd49bdSEiji Ota 144c0dd49bdSEiji Ota /* 145c0dd49bdSEiji Ota * We're making the concious trade-off here to only send one message 146c0dd49bdSEiji Ota * down the connection at a time. 
147c0dd49bdSEiji Ota * Pro: 148c0dd49bdSEiji Ota * - tx queueing is a simple fifo list 149c0dd49bdSEiji Ota * - reassembly is optional and easily done by transports per conn 150c0dd49bdSEiji Ota * - no per flow rx lookup at all, straight to the socket 151c0dd49bdSEiji Ota * - less per-frag memory and wire overhead 152c0dd49bdSEiji Ota * Con: 153c0dd49bdSEiji Ota * - queued acks can be delayed behind large messages 154c0dd49bdSEiji Ota * Depends: 155c0dd49bdSEiji Ota * - small message latency is higher behind queued large messages 156c0dd49bdSEiji Ota * - large message latency isn't starved by intervening small sends 157c0dd49bdSEiji Ota */ 158c0dd49bdSEiji Ota int 159c0dd49bdSEiji Ota rdsv3_send_xmit(struct rdsv3_connection *conn) 160c0dd49bdSEiji Ota { 161c0dd49bdSEiji Ota struct rdsv3_message *rm; 162c0dd49bdSEiji Ota unsigned int tmp; 163c0dd49bdSEiji Ota unsigned int send_quota = send_batch_count; 164c0dd49bdSEiji Ota struct rdsv3_scatterlist *sg; 165c0dd49bdSEiji Ota int ret = 0; 166c0dd49bdSEiji Ota int was_empty = 0; 167c0dd49bdSEiji Ota list_t to_be_dropped; 168c0dd49bdSEiji Ota 169*5d5562f5SEiji Ota restart: 170*5d5562f5SEiji Ota if (!rdsv3_conn_up(conn)) 171*5d5562f5SEiji Ota goto out; 172*5d5562f5SEiji Ota 173c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_send_xmit", "Enter(conn: %p)", conn); 174c0dd49bdSEiji Ota 175c0dd49bdSEiji Ota list_create(&to_be_dropped, sizeof (struct rdsv3_message), 176c0dd49bdSEiji Ota offsetof(struct rdsv3_message, m_conn_item)); 177c0dd49bdSEiji Ota 178c0dd49bdSEiji Ota /* 179c0dd49bdSEiji Ota * sendmsg calls here after having queued its message on the send 180c0dd49bdSEiji Ota * queue. We only have one task feeding the connection at a time. If 181c0dd49bdSEiji Ota * another thread is already feeding the queue then we back off. This 182c0dd49bdSEiji Ota * avoids blocking the caller and trading per-connection data between 183c0dd49bdSEiji Ota * caches per message. 
184c0dd49bdSEiji Ota */ 185c0dd49bdSEiji Ota if (!mutex_tryenter(&conn->c_send_lock)) { 186c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_send_xmit", 187c0dd49bdSEiji Ota "Another thread running(conn: %p)", conn); 188c0dd49bdSEiji Ota rdsv3_stats_inc(s_send_sem_contention); 189c0dd49bdSEiji Ota ret = -ENOMEM; 190c0dd49bdSEiji Ota goto out; 191c0dd49bdSEiji Ota } 192*5d5562f5SEiji Ota atomic_add_32(&conn->c_senders, 1); 193c0dd49bdSEiji Ota 194c0dd49bdSEiji Ota if (conn->c_trans->xmit_prepare) 195c0dd49bdSEiji Ota conn->c_trans->xmit_prepare(conn); 196c0dd49bdSEiji Ota 197c0dd49bdSEiji Ota /* 198c0dd49bdSEiji Ota * spin trying to push headers and data down the connection until 199*5d5562f5SEiji Ota * the connection doesn't make forward progress. 200c0dd49bdSEiji Ota */ 201c0dd49bdSEiji Ota while (--send_quota) { 202c0dd49bdSEiji Ota /* 203c0dd49bdSEiji Ota * See if need to send a congestion map update if we're 204c0dd49bdSEiji Ota * between sending messages. The send_sem protects our sole 205c0dd49bdSEiji Ota * use of c_map_offset and _bytes. 206c0dd49bdSEiji Ota * Note this is used only by transports that define a special 207c0dd49bdSEiji Ota * xmit_cong_map function. For all others, we create allocate 208c0dd49bdSEiji Ota * a cong_map message and treat it just like any other send. 209c0dd49bdSEiji Ota */ 210c0dd49bdSEiji Ota if (conn->c_map_bytes) { 211c0dd49bdSEiji Ota ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong, 212c0dd49bdSEiji Ota conn->c_map_offset); 213c0dd49bdSEiji Ota if (ret <= 0) 214c0dd49bdSEiji Ota break; 215c0dd49bdSEiji Ota 216c0dd49bdSEiji Ota conn->c_map_offset += ret; 217c0dd49bdSEiji Ota conn->c_map_bytes -= ret; 218c0dd49bdSEiji Ota if (conn->c_map_bytes) 219c0dd49bdSEiji Ota continue; 220c0dd49bdSEiji Ota } 221c0dd49bdSEiji Ota 222c0dd49bdSEiji Ota /* 223c0dd49bdSEiji Ota * If we're done sending the current message, clear the 224c0dd49bdSEiji Ota * offset and S/G temporaries. 
225c0dd49bdSEiji Ota */ 226c0dd49bdSEiji Ota rm = conn->c_xmit_rm; 227c0dd49bdSEiji Ota if (rm != NULL && 228c0dd49bdSEiji Ota conn->c_xmit_hdr_off == sizeof (struct rdsv3_header) && 229c0dd49bdSEiji Ota conn->c_xmit_sg == rm->m_nents) { 230c0dd49bdSEiji Ota conn->c_xmit_rm = NULL; 231c0dd49bdSEiji Ota conn->c_xmit_sg = 0; 232c0dd49bdSEiji Ota conn->c_xmit_hdr_off = 0; 233c0dd49bdSEiji Ota conn->c_xmit_data_off = 0; 234c0dd49bdSEiji Ota conn->c_xmit_rdma_sent = 0; 235c0dd49bdSEiji Ota 236c0dd49bdSEiji Ota /* Release the reference to the previous message. */ 237c0dd49bdSEiji Ota rdsv3_message_put(rm); 238c0dd49bdSEiji Ota rm = NULL; 239c0dd49bdSEiji Ota } 240c0dd49bdSEiji Ota 241c0dd49bdSEiji Ota /* If we're asked to send a cong map update, do so. */ 242c0dd49bdSEiji Ota if (rm == NULL && test_and_clear_bit(0, &conn->c_map_queued)) { 243c0dd49bdSEiji Ota if (conn->c_trans->xmit_cong_map != NULL) { 244c0dd49bdSEiji Ota conn->c_map_offset = 0; 245c0dd49bdSEiji Ota conn->c_map_bytes = 246c0dd49bdSEiji Ota sizeof (struct rdsv3_header) + 247c0dd49bdSEiji Ota RDSV3_CONG_MAP_BYTES; 248c0dd49bdSEiji Ota continue; 249c0dd49bdSEiji Ota } 250c0dd49bdSEiji Ota 251c0dd49bdSEiji Ota rm = rdsv3_cong_update_alloc(conn); 252c0dd49bdSEiji Ota if (IS_ERR(rm)) { 253c0dd49bdSEiji Ota ret = PTR_ERR(rm); 254c0dd49bdSEiji Ota break; 255c0dd49bdSEiji Ota } 256c0dd49bdSEiji Ota 257c0dd49bdSEiji Ota conn->c_xmit_rm = rm; 258c0dd49bdSEiji Ota } 259c0dd49bdSEiji Ota 260c0dd49bdSEiji Ota /* 261c0dd49bdSEiji Ota * Grab the next message from the send queue, if there is one. 262c0dd49bdSEiji Ota * 263c0dd49bdSEiji Ota * c_xmit_rm holds a ref while we're sending this message down 264c0dd49bdSEiji Ota * the connction. We can use this ref while holding the 265c0dd49bdSEiji Ota * send_sem.. rdsv3_send_reset() is serialized with it. 
266c0dd49bdSEiji Ota */ 267c0dd49bdSEiji Ota if (rm == NULL) { 268c0dd49bdSEiji Ota unsigned int len; 269c0dd49bdSEiji Ota 270c0dd49bdSEiji Ota mutex_enter(&conn->c_lock); 271c0dd49bdSEiji Ota 272c0dd49bdSEiji Ota if (!list_is_empty(&conn->c_send_queue)) { 273c0dd49bdSEiji Ota rm = list_remove_head(&conn->c_send_queue); 274c0dd49bdSEiji Ota rdsv3_message_addref(rm); 275c0dd49bdSEiji Ota 276c0dd49bdSEiji Ota /* 277c0dd49bdSEiji Ota * Move the message from the send queue to 278c0dd49bdSEiji Ota * the retransmit 279c0dd49bdSEiji Ota * list right away. 280c0dd49bdSEiji Ota */ 281c0dd49bdSEiji Ota list_insert_tail(&conn->c_retrans, rm); 282c0dd49bdSEiji Ota } 283c0dd49bdSEiji Ota 284c0dd49bdSEiji Ota mutex_exit(&conn->c_lock); 285c0dd49bdSEiji Ota 286c0dd49bdSEiji Ota if (rm == NULL) { 287c0dd49bdSEiji Ota was_empty = 1; 288c0dd49bdSEiji Ota break; 289c0dd49bdSEiji Ota } 290c0dd49bdSEiji Ota 291c0dd49bdSEiji Ota /* 292c0dd49bdSEiji Ota * Unfortunately, the way Infiniband deals with 293c0dd49bdSEiji Ota * RDMA to a bad MR key is by moving the entire 294c0dd49bdSEiji Ota * queue pair to error state. We cold possibly 295c0dd49bdSEiji Ota * recover from that, but right now we drop the 296c0dd49bdSEiji Ota * connection. 297c0dd49bdSEiji Ota * Therefore, we never retransmit messages with 298c0dd49bdSEiji Ota * RDMA ops. 
299c0dd49bdSEiji Ota */ 300c0dd49bdSEiji Ota if (rm->m_rdma_op && 301c0dd49bdSEiji Ota test_bit(RDSV3_MSG_RETRANSMITTED, &rm->m_flags)) { 302c0dd49bdSEiji Ota mutex_enter(&conn->c_lock); 303c0dd49bdSEiji Ota if (test_and_clear_bit(RDSV3_MSG_ON_CONN, 304c0dd49bdSEiji Ota &rm->m_flags)) 305c0dd49bdSEiji Ota list_remove_node(&rm->m_conn_item); 306c0dd49bdSEiji Ota list_insert_tail(&to_be_dropped, rm); 307c0dd49bdSEiji Ota mutex_exit(&conn->c_lock); 308c0dd49bdSEiji Ota rdsv3_message_put(rm); 309c0dd49bdSEiji Ota continue; 310c0dd49bdSEiji Ota } 311c0dd49bdSEiji Ota 312c0dd49bdSEiji Ota /* Require an ACK every once in a while */ 313c0dd49bdSEiji Ota len = ntohl(rm->m_inc.i_hdr.h_len); 314c0dd49bdSEiji Ota if (conn->c_unacked_packets == 0 || 315c0dd49bdSEiji Ota conn->c_unacked_bytes < len) { 316c0dd49bdSEiji Ota set_bit(RDSV3_MSG_ACK_REQUIRED, &rm->m_flags); 317c0dd49bdSEiji Ota 318c0dd49bdSEiji Ota conn->c_unacked_packets = 319c0dd49bdSEiji Ota rdsv3_sysctl_max_unacked_packets; 320c0dd49bdSEiji Ota conn->c_unacked_bytes = 321c0dd49bdSEiji Ota rdsv3_sysctl_max_unacked_bytes; 322c0dd49bdSEiji Ota rdsv3_stats_inc(s_send_ack_required); 323c0dd49bdSEiji Ota } else { 324c0dd49bdSEiji Ota conn->c_unacked_bytes -= len; 325c0dd49bdSEiji Ota conn->c_unacked_packets--; 326c0dd49bdSEiji Ota } 327c0dd49bdSEiji Ota 328c0dd49bdSEiji Ota conn->c_xmit_rm = rm; 329c0dd49bdSEiji Ota } 330c0dd49bdSEiji Ota 331c0dd49bdSEiji Ota /* 332c0dd49bdSEiji Ota * Try and send an rdma message. Let's see if we can 333c0dd49bdSEiji Ota * keep this simple and require that the transport either 334c0dd49bdSEiji Ota * send the whole rdma or none of it. 
335c0dd49bdSEiji Ota */ 336c0dd49bdSEiji Ota if (rm->m_rdma_op && !conn->c_xmit_rdma_sent) { 337c0dd49bdSEiji Ota ret = conn->c_trans->xmit_rdma(conn, rm->m_rdma_op); 338c0dd49bdSEiji Ota if (ret) 339c0dd49bdSEiji Ota break; 340c0dd49bdSEiji Ota conn->c_xmit_rdma_sent = 1; 341c0dd49bdSEiji Ota /* 342c0dd49bdSEiji Ota * The transport owns the mapped memory for now. 343c0dd49bdSEiji Ota * You can't unmap it while it's on the send queue 344c0dd49bdSEiji Ota */ 345c0dd49bdSEiji Ota set_bit(RDSV3_MSG_MAPPED, &rm->m_flags); 346c0dd49bdSEiji Ota } 347c0dd49bdSEiji Ota 348c0dd49bdSEiji Ota if (conn->c_xmit_hdr_off < sizeof (struct rdsv3_header) || 349c0dd49bdSEiji Ota conn->c_xmit_sg < rm->m_nents) { 350c0dd49bdSEiji Ota ret = conn->c_trans->xmit(conn, rm, 351c0dd49bdSEiji Ota conn->c_xmit_hdr_off, 352c0dd49bdSEiji Ota conn->c_xmit_sg, 353c0dd49bdSEiji Ota conn->c_xmit_data_off); 354c0dd49bdSEiji Ota if (ret <= 0) 355c0dd49bdSEiji Ota break; 356c0dd49bdSEiji Ota 357c0dd49bdSEiji Ota if (conn->c_xmit_hdr_off < 358c0dd49bdSEiji Ota sizeof (struct rdsv3_header)) { 359c0dd49bdSEiji Ota tmp = min(ret, 360c0dd49bdSEiji Ota sizeof (struct rdsv3_header) - 361c0dd49bdSEiji Ota conn->c_xmit_hdr_off); 362c0dd49bdSEiji Ota conn->c_xmit_hdr_off += tmp; 363c0dd49bdSEiji Ota ret -= tmp; 364c0dd49bdSEiji Ota } 365c0dd49bdSEiji Ota 366c0dd49bdSEiji Ota sg = &rm->m_sg[conn->c_xmit_sg]; 367c0dd49bdSEiji Ota while (ret) { 368c0dd49bdSEiji Ota tmp = min(ret, rdsv3_sg_len(sg) - 369c0dd49bdSEiji Ota conn->c_xmit_data_off); 370c0dd49bdSEiji Ota conn->c_xmit_data_off += tmp; 371c0dd49bdSEiji Ota ret -= tmp; 372c0dd49bdSEiji Ota if (conn->c_xmit_data_off == rdsv3_sg_len(sg)) { 373c0dd49bdSEiji Ota conn->c_xmit_data_off = 0; 374c0dd49bdSEiji Ota sg++; 375c0dd49bdSEiji Ota conn->c_xmit_sg++; 376c0dd49bdSEiji Ota ASSERT(!(ret != 0 && 377c0dd49bdSEiji Ota conn->c_xmit_sg == rm->m_nents)); 378c0dd49bdSEiji Ota } 379c0dd49bdSEiji Ota } 380c0dd49bdSEiji Ota } 381c0dd49bdSEiji Ota } 382c0dd49bdSEiji Ota 
383c0dd49bdSEiji Ota /* Nuke any messages we decided not to retransmit. */ 384c0dd49bdSEiji Ota if (!list_is_empty(&to_be_dropped)) 385c0dd49bdSEiji Ota rdsv3_send_remove_from_sock(&to_be_dropped, RDSV3_RDMA_DROPPED); 386c0dd49bdSEiji Ota 387c0dd49bdSEiji Ota if (conn->c_trans->xmit_complete) 388c0dd49bdSEiji Ota conn->c_trans->xmit_complete(conn); 389c0dd49bdSEiji Ota 390c0dd49bdSEiji Ota /* 391c0dd49bdSEiji Ota * We might be racing with another sender who queued a message but 392c0dd49bdSEiji Ota * backed off on noticing that we held the c_send_lock. If we check 393c0dd49bdSEiji Ota * for queued messages after dropping the sem then either we'll 394c0dd49bdSEiji Ota * see the queued message or the queuer will get the sem. If we 395c0dd49bdSEiji Ota * notice the queued message then we trigger an immediate retry. 396c0dd49bdSEiji Ota * 397c0dd49bdSEiji Ota * We need to be careful only to do this when we stopped processing 398c0dd49bdSEiji Ota * the send queue because it was empty. It's the only way we 399c0dd49bdSEiji Ota * stop processing the loop when the transport hasn't taken 400c0dd49bdSEiji Ota * responsibility for forward progress. 401c0dd49bdSEiji Ota */ 402c0dd49bdSEiji Ota mutex_exit(&conn->c_send_lock); 403c0dd49bdSEiji Ota 404c0dd49bdSEiji Ota if (conn->c_map_bytes || (send_quota == 0 && !was_empty)) { 405c0dd49bdSEiji Ota /* 406c0dd49bdSEiji Ota * We exhausted the send quota, but there's work left to 407c0dd49bdSEiji Ota * do. Return and (re-)schedule the send worker. 
408c0dd49bdSEiji Ota */ 409c0dd49bdSEiji Ota ret = -EAGAIN; 410c0dd49bdSEiji Ota } 411c0dd49bdSEiji Ota 412*5d5562f5SEiji Ota atomic_dec_32(&conn->c_senders); 413*5d5562f5SEiji Ota 414c0dd49bdSEiji Ota if (ret == 0 && was_empty) { 415c0dd49bdSEiji Ota /* 416c0dd49bdSEiji Ota * A simple bit test would be way faster than taking the 417c0dd49bdSEiji Ota * spin lock 418c0dd49bdSEiji Ota */ 419c0dd49bdSEiji Ota mutex_enter(&conn->c_lock); 420c0dd49bdSEiji Ota if (!list_is_empty(&conn->c_send_queue)) { 421c0dd49bdSEiji Ota rdsv3_stats_inc(s_send_sem_queue_raced); 422c0dd49bdSEiji Ota ret = -EAGAIN; 423c0dd49bdSEiji Ota } 424c0dd49bdSEiji Ota mutex_exit(&conn->c_lock); 425c0dd49bdSEiji Ota } 426c0dd49bdSEiji Ota 427c0dd49bdSEiji Ota out: 428c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_send_xmit", "Return(conn: %p, ret: %d)", 429c0dd49bdSEiji Ota conn, ret); 430c0dd49bdSEiji Ota return (ret); 431c0dd49bdSEiji Ota } 432c0dd49bdSEiji Ota 433c0dd49bdSEiji Ota static void 434c0dd49bdSEiji Ota rdsv3_send_sndbuf_remove(struct rdsv3_sock *rs, struct rdsv3_message *rm) 435c0dd49bdSEiji Ota { 436c0dd49bdSEiji Ota uint32_t len = ntohl(rm->m_inc.i_hdr.h_len); 437c0dd49bdSEiji Ota 438c0dd49bdSEiji Ota ASSERT(mutex_owned(&rs->rs_lock)); 439c0dd49bdSEiji Ota 440c0dd49bdSEiji Ota ASSERT(rs->rs_snd_bytes >= len); 441c0dd49bdSEiji Ota rs->rs_snd_bytes -= len; 442c0dd49bdSEiji Ota 443c0dd49bdSEiji Ota if (rs->rs_snd_bytes == 0) 444c0dd49bdSEiji Ota rdsv3_stats_inc(s_send_queue_empty); 445c0dd49bdSEiji Ota } 446c0dd49bdSEiji Ota 447c0dd49bdSEiji Ota static inline int 448c0dd49bdSEiji Ota rdsv3_send_is_acked(struct rdsv3_message *rm, uint64_t ack, 449c0dd49bdSEiji Ota is_acked_func is_acked) 450c0dd49bdSEiji Ota { 451c0dd49bdSEiji Ota if (is_acked) 452c0dd49bdSEiji Ota return (is_acked(rm, ack)); 453c0dd49bdSEiji Ota return (ntohll(rm->m_inc.i_hdr.h_sequence) <= ack); 454c0dd49bdSEiji Ota } 455c0dd49bdSEiji Ota 456c0dd49bdSEiji Ota /* 457c0dd49bdSEiji Ota * Returns true if there are no messages on 
the send and retransmit queues 458c0dd49bdSEiji Ota * which have a sequence number greater than or equal to the given sequence 459c0dd49bdSEiji Ota * number. 460c0dd49bdSEiji Ota */ 461c0dd49bdSEiji Ota int 462c0dd49bdSEiji Ota rdsv3_send_acked_before(struct rdsv3_connection *conn, uint64_t seq) 463c0dd49bdSEiji Ota { 464c0dd49bdSEiji Ota struct rdsv3_message *rm; 465c0dd49bdSEiji Ota int ret = 1; 466c0dd49bdSEiji Ota 467c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_send_acked_before", "Enter(conn: %p)", conn); 468c0dd49bdSEiji Ota 469c0dd49bdSEiji Ota mutex_enter(&conn->c_lock); 470c0dd49bdSEiji Ota 471c0dd49bdSEiji Ota /* XXX - original code spits out warning */ 472c0dd49bdSEiji Ota rm = list_head(&conn->c_retrans); 473c0dd49bdSEiji Ota if (ntohll(rm->m_inc.i_hdr.h_sequence) < seq) 474c0dd49bdSEiji Ota ret = 0; 475c0dd49bdSEiji Ota 476c0dd49bdSEiji Ota /* XXX - original code spits out warning */ 477c0dd49bdSEiji Ota rm = list_head(&conn->c_send_queue); 478c0dd49bdSEiji Ota if (ntohll(rm->m_inc.i_hdr.h_sequence) < seq) 479c0dd49bdSEiji Ota ret = 0; 480c0dd49bdSEiji Ota 481c0dd49bdSEiji Ota mutex_exit(&conn->c_lock); 482c0dd49bdSEiji Ota 483c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_send_acked_before", "Return(conn: %p)", conn); 484c0dd49bdSEiji Ota 485c0dd49bdSEiji Ota return (ret); 486c0dd49bdSEiji Ota } 487c0dd49bdSEiji Ota 488c0dd49bdSEiji Ota /* 489c0dd49bdSEiji Ota * This is pretty similar to what happens below in the ACK 490c0dd49bdSEiji Ota * handling code - except that we call here as soon as we get 491c0dd49bdSEiji Ota * the IB send completion on the RDMA op and the accompanying 492c0dd49bdSEiji Ota * message. 
493c0dd49bdSEiji Ota */ 494c0dd49bdSEiji Ota void 495c0dd49bdSEiji Ota rdsv3_rdma_send_complete(struct rdsv3_message *rm, int status) 496c0dd49bdSEiji Ota { 497c0dd49bdSEiji Ota struct rdsv3_sock *rs = NULL; 498c0dd49bdSEiji Ota struct rdsv3_rdma_op *ro; 499c0dd49bdSEiji Ota struct rdsv3_notifier *notifier; 500c0dd49bdSEiji Ota 501c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_rdma_send_complete", "Enter(rm: %p)", rm); 502c0dd49bdSEiji Ota 503c0dd49bdSEiji Ota mutex_enter(&rm->m_rs_lock); 504c0dd49bdSEiji Ota 505c0dd49bdSEiji Ota ro = rm->m_rdma_op; 506c0dd49bdSEiji Ota if (test_bit(RDSV3_MSG_ON_SOCK, &rm->m_flags) && 507cadbfdc3SEiji Ota ro && ro->r_notify && ro->r_notifier) { 508cadbfdc3SEiji Ota notifier = ro->r_notifier; 509c0dd49bdSEiji Ota rs = rm->m_rs; 510c0dd49bdSEiji Ota rdsv3_sk_sock_hold(rdsv3_rs_to_sk(rs)); 511c0dd49bdSEiji Ota 512c0dd49bdSEiji Ota notifier->n_status = status; 513c0dd49bdSEiji Ota mutex_enter(&rs->rs_lock); 514c0dd49bdSEiji Ota list_insert_tail(&rs->rs_notify_queue, notifier); 515c0dd49bdSEiji Ota mutex_exit(&rs->rs_lock); 516cadbfdc3SEiji Ota ro->r_notifier = NULL; 517c0dd49bdSEiji Ota } 518c0dd49bdSEiji Ota 519c0dd49bdSEiji Ota mutex_exit(&rm->m_rs_lock); 520c0dd49bdSEiji Ota 521c0dd49bdSEiji Ota if (rs) { 522c0dd49bdSEiji Ota rdsv3_wake_sk_sleep(rs); 523c0dd49bdSEiji Ota rdsv3_sk_sock_put(rdsv3_rs_to_sk(rs)); 524c0dd49bdSEiji Ota } 525c0dd49bdSEiji Ota 526c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_rdma_send_complete", "Return(rm: %p)", rm); 527c0dd49bdSEiji Ota } 528c0dd49bdSEiji Ota 529c0dd49bdSEiji Ota /* 530c0dd49bdSEiji Ota * This is the same as rdsv3_rdma_send_complete except we 531c0dd49bdSEiji Ota * don't do any locking - we have all the ingredients (message, 532c0dd49bdSEiji Ota * socket, socket lock) and can just move the notifier. 
533c0dd49bdSEiji Ota */ 534c0dd49bdSEiji Ota static inline void 535c0dd49bdSEiji Ota __rdsv3_rdma_send_complete(struct rdsv3_sock *rs, struct rdsv3_message *rm, 536c0dd49bdSEiji Ota int status) 537c0dd49bdSEiji Ota { 538c0dd49bdSEiji Ota struct rdsv3_rdma_op *ro; 539c0dd49bdSEiji Ota void *ic; 540c0dd49bdSEiji Ota 541c0dd49bdSEiji Ota RDSV3_DPRINTF4("__rdsv3_rdma_send_complete", 542c0dd49bdSEiji Ota "Enter(rs: %p, rm: %p)", rs, rm); 543c0dd49bdSEiji Ota 544c0dd49bdSEiji Ota ro = rm->m_rdma_op; 545c0dd49bdSEiji Ota if (ro && ro->r_notify && ro->r_notifier) { 546c0dd49bdSEiji Ota ro->r_notifier->n_status = status; 547c0dd49bdSEiji Ota list_insert_tail(&rs->rs_notify_queue, ro->r_notifier); 548c0dd49bdSEiji Ota ro->r_notifier = NULL; 549c0dd49bdSEiji Ota } 550c0dd49bdSEiji Ota 551c0dd49bdSEiji Ota /* No need to wake the app - caller does this */ 552c0dd49bdSEiji Ota } 553c0dd49bdSEiji Ota 554c0dd49bdSEiji Ota /* 555c0dd49bdSEiji Ota * This is called from the IB send completion when we detect 556c0dd49bdSEiji Ota * a RDMA operation that failed with remote access error. 557c0dd49bdSEiji Ota * So speed is not an issue here. 
558c0dd49bdSEiji Ota */ 559c0dd49bdSEiji Ota struct rdsv3_message * 560c0dd49bdSEiji Ota rdsv3_send_get_message(struct rdsv3_connection *conn, 561c0dd49bdSEiji Ota struct rdsv3_rdma_op *op) 562c0dd49bdSEiji Ota { 563c0dd49bdSEiji Ota struct rdsv3_message *rm, *tmp, *found = NULL; 564c0dd49bdSEiji Ota 565c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_send_get_message", "Enter(conn: %p)", conn); 566c0dd49bdSEiji Ota 567c0dd49bdSEiji Ota mutex_enter(&conn->c_lock); 568c0dd49bdSEiji Ota 569c0dd49bdSEiji Ota RDSV3_FOR_EACH_LIST_NODE_SAFE(rm, tmp, &conn->c_retrans, m_conn_item) { 570c0dd49bdSEiji Ota if (rm->m_rdma_op == op) { 571c0dd49bdSEiji Ota atomic_add_32(&rm->m_refcount, 1); 572c0dd49bdSEiji Ota found = rm; 573c0dd49bdSEiji Ota goto out; 574c0dd49bdSEiji Ota } 575c0dd49bdSEiji Ota } 576c0dd49bdSEiji Ota 577c0dd49bdSEiji Ota RDSV3_FOR_EACH_LIST_NODE_SAFE(rm, tmp, &conn->c_send_queue, 578c0dd49bdSEiji Ota m_conn_item) { 579c0dd49bdSEiji Ota if (rm->m_rdma_op == op) { 580c0dd49bdSEiji Ota atomic_add_32(&rm->m_refcount, 1); 581c0dd49bdSEiji Ota found = rm; 582c0dd49bdSEiji Ota break; 583c0dd49bdSEiji Ota } 584c0dd49bdSEiji Ota } 585c0dd49bdSEiji Ota 586c0dd49bdSEiji Ota out: 587c0dd49bdSEiji Ota mutex_exit(&conn->c_lock); 588c0dd49bdSEiji Ota 589c0dd49bdSEiji Ota return (found); 590c0dd49bdSEiji Ota } 591c0dd49bdSEiji Ota 592c0dd49bdSEiji Ota /* 593c0dd49bdSEiji Ota * This removes messages from the socket's list if they're on it. The list 594c0dd49bdSEiji Ota * argument must be private to the caller, we must be able to modify it 595c0dd49bdSEiji Ota * without locks. The messages must have a reference held for their 596c0dd49bdSEiji Ota * position on the list. This function will drop that reference after 597c0dd49bdSEiji Ota * removing the messages from the 'messages' list regardless of if it found 598c0dd49bdSEiji Ota * the messages on the socket list or not. 
599c0dd49bdSEiji Ota */ 600c0dd49bdSEiji Ota void 601c0dd49bdSEiji Ota rdsv3_send_remove_from_sock(struct list *messages, int status) 602c0dd49bdSEiji Ota { 603c0dd49bdSEiji Ota struct rdsv3_sock *rs = NULL; 604c0dd49bdSEiji Ota struct rdsv3_message *rm; 605c0dd49bdSEiji Ota 606c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_send_remove_from_sock", "Enter"); 607c0dd49bdSEiji Ota 608c0dd49bdSEiji Ota while (!list_is_empty(messages)) { 609cadbfdc3SEiji Ota int was_on_sock = 0; 610c0dd49bdSEiji Ota rm = list_remove_head(messages); 611c0dd49bdSEiji Ota 612c0dd49bdSEiji Ota /* 613c0dd49bdSEiji Ota * If we see this flag cleared then we're *sure* that someone 614c0dd49bdSEiji Ota * else beat us to removing it from the sock. If we race 615c0dd49bdSEiji Ota * with their flag update we'll get the lock and then really 616c0dd49bdSEiji Ota * see that the flag has been cleared. 617c0dd49bdSEiji Ota * 618c0dd49bdSEiji Ota * The message spinlock makes sure nobody clears rm->m_rs 619c0dd49bdSEiji Ota * while we're messing with it. It does not prevent the 620c0dd49bdSEiji Ota * message from being removed from the socket, though. 
621c0dd49bdSEiji Ota */ 622c0dd49bdSEiji Ota mutex_enter(&rm->m_rs_lock); 623c0dd49bdSEiji Ota if (!test_bit(RDSV3_MSG_ON_SOCK, &rm->m_flags)) 624c0dd49bdSEiji Ota goto unlock_and_drop; 625c0dd49bdSEiji Ota 626c0dd49bdSEiji Ota if (rs != rm->m_rs) { 627c0dd49bdSEiji Ota if (rs) { 628c0dd49bdSEiji Ota rdsv3_wake_sk_sleep(rs); 629c0dd49bdSEiji Ota rdsv3_sk_sock_put(rdsv3_rs_to_sk(rs)); 630c0dd49bdSEiji Ota } 631c0dd49bdSEiji Ota rs = rm->m_rs; 632c0dd49bdSEiji Ota rdsv3_sk_sock_hold(rdsv3_rs_to_sk(rs)); 633c0dd49bdSEiji Ota } 634c0dd49bdSEiji Ota 635c0dd49bdSEiji Ota mutex_enter(&rs->rs_lock); 636c0dd49bdSEiji Ota if (test_and_clear_bit(RDSV3_MSG_ON_SOCK, &rm->m_flags)) { 637c0dd49bdSEiji Ota struct rdsv3_rdma_op *ro = rm->m_rdma_op; 638c0dd49bdSEiji Ota struct rdsv3_notifier *notifier; 639c0dd49bdSEiji Ota 640c0dd49bdSEiji Ota list_remove_node(&rm->m_sock_item); 641c0dd49bdSEiji Ota rdsv3_send_sndbuf_remove(rs, rm); 642cadbfdc3SEiji Ota if (ro && ro->r_notifier && 643c0dd49bdSEiji Ota (status || ro->r_notify)) { 644cadbfdc3SEiji Ota notifier = ro->r_notifier; 645c0dd49bdSEiji Ota list_insert_tail(&rs->rs_notify_queue, 646c0dd49bdSEiji Ota notifier); 647c0dd49bdSEiji Ota if (!notifier->n_status) 648c0dd49bdSEiji Ota notifier->n_status = status; 649c0dd49bdSEiji Ota rm->m_rdma_op->r_notifier = NULL; 650c0dd49bdSEiji Ota } 651cadbfdc3SEiji Ota was_on_sock = 1; 652c0dd49bdSEiji Ota rm->m_rs = NULL; 653c0dd49bdSEiji Ota } 654c0dd49bdSEiji Ota mutex_exit(&rs->rs_lock); 655c0dd49bdSEiji Ota 656c0dd49bdSEiji Ota unlock_and_drop: 657c0dd49bdSEiji Ota mutex_exit(&rm->m_rs_lock); 658c0dd49bdSEiji Ota rdsv3_message_put(rm); 659cadbfdc3SEiji Ota if (was_on_sock) 660cadbfdc3SEiji Ota rdsv3_message_put(rm); 661c0dd49bdSEiji Ota } 662c0dd49bdSEiji Ota 663c0dd49bdSEiji Ota if (rs) { 664c0dd49bdSEiji Ota rdsv3_wake_sk_sleep(rs); 665c0dd49bdSEiji Ota rdsv3_sk_sock_put(rdsv3_rs_to_sk(rs)); 666c0dd49bdSEiji Ota } 667c0dd49bdSEiji Ota 668c0dd49bdSEiji Ota 
RDSV3_DPRINTF4("rdsv3_send_remove_from_sock", "Return");
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rdsv3_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDSV3_MSG_HAS_ACK_SEQ bit.
 *
 * XXX It's not clear to me how this is safely serialized with socket
 * destruction.  Maybe it should bail if it sees SOCK_DEAD.
 */
void
rdsv3_send_drop_acked(struct rdsv3_connection *conn, uint64_t ack,
    is_acked_func is_acked)
{
	struct rdsv3_message *rm, *tmp;
	list_t list;

	RDSV3_DPRINTF4("rdsv3_send_drop_acked", "Enter(conn: %p)", conn);

	/* Local list of acked messages unlinked from c_retrans. */
	list_create(&list, sizeof (struct rdsv3_message),
	    offsetof(struct rdsv3_message, m_conn_item));

	mutex_enter(&conn->c_lock);

	/*
	 * Collect the leading run of acked messages; stop at the first
	 * message is_acked rejects.
	 */
	RDSV3_FOR_EACH_LIST_NODE_SAFE(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rdsv3_send_is_acked(rm, ack, is_acked))
			break;

		list_remove_node(&rm->m_conn_item);
		list_insert_tail(&list, rm);
		clear_bit(RDSV3_MSG_ON_CONN, &rm->m_flags);
	}

#if 0
XXX
	/* order flag updates with spin locks */
	if (!list_is_empty(&list))
		smp_mb__after_clear_bit();
#endif

	mutex_exit(&conn->c_lock);

	/* now remove the messages from the sock list as needed */
	rdsv3_send_remove_from_sock(&list, RDSV3_RDMA_SUCCESS);

	RDSV3_DPRINTF4("rdsv3_send_drop_acked", "Return(conn: %p)", conn);
}

/*
 * Drop messages sitting on this socket's send queue.  If 'dest' is
 * non-NULL only messages addressed to that <addr, port> pair are
 * dropped; a NULL 'dest' drops every queued message.  Each dropped
 * message has its RDMA completion (if any) signalled with
 * RDSV3_RDMA_CANCELED and is also unlinked from its connection's
 * send/retrans list.
 */
void
rdsv3_send_drop_to(struct rdsv3_sock *rs, struct sockaddr_in *dest)
{
	struct rdsv3_message *rm, *tmp;
	struct rdsv3_connection *conn;
	list_t list;
	int wake = 0;

	RDSV3_DPRINTF4("rdsv3_send_drop_to", "Enter(rs: %p)", rs);

	list_create(&list, sizeof (struct rdsv3_message),
	    offsetof(struct rdsv3_message, m_sock_item));

	/* get all the messages we're dropping under the rs lock */
	mutex_enter(&rs->rs_lock);

	RDSV3_FOR_EACH_LIST_NODE_SAFE(rm, tmp, &rs->rs_send_queue,
	    m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
		    dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;
		wake = 1;
		list_remove(&rs->rs_send_queue, rm);
		list_insert_tail(&list, rm);
		rdsv3_send_sndbuf_remove(rs, rm);
		clear_bit(RDSV3_MSG_ON_SOCK, &rm->m_flags);
	}

	mutex_exit(&rs->rs_lock);

	conn = NULL;

	/* now remove the messages from the conn list as needed */
	RDSV3_FOR_EACH_LIST_NODE(rm, &list, m_sock_item) {
		/*
		 * We do this here rather than in the loop above, so that
		 * we don't have to nest m_rs_lock under rs->rs_lock
		 */
		mutex_enter(&rm->m_rs_lock);
		/* If this is a RDMA operation, notify the app. */
		__rdsv3_rdma_send_complete(rs, rm, RDSV3_RDMA_CANCELED);
		rm->m_rs = NULL;
		mutex_exit(&rm->m_rs_lock);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the conn.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 */
		if (!test_bit(RDSV3_MSG_ON_CONN, &rm->m_flags))
			continue;

		/*
		 * Messages may belong to different connections; only swap
		 * c_lock when we move to a new one.
		 */
		if (conn != rm->m_inc.i_conn) {
			if (conn)
				mutex_exit(&conn->c_lock);
			conn = rm->m_inc.i_conn;
			mutex_enter(&conn->c_lock);
		}

		/* Drop the conn list's reference if we won the race. */
		if (test_and_clear_bit(RDSV3_MSG_ON_CONN, &rm->m_flags)) {
			list_remove_node(&rm->m_conn_item);
			rdsv3_message_put(rm);
		}
	}

	if (conn)
		mutex_exit(&conn->c_lock);

	if (wake)
		rdsv3_wake_sk_sleep(rs);

	while (!list_is_empty(&list)) {
		rm = list_remove_head(&list);

		/*
		 * rdsv3_message_wait presumably blocks until the transport
		 * is done with the message before we drop our reference —
		 * NOTE(review): confirm against rdsv3_message_wait().
		 */
		rdsv3_message_wait(rm);
		rdsv3_message_put(rm);
	}

	RDSV3_DPRINTF4("rdsv3_send_drop_to", "Return(rs: %p)", rs);
}

/*
 * we only want this to fire once so we use the callers 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDSV3_CANCEL_SENT_TO.
806c0dd49bdSEiji Ota */ 807c0dd49bdSEiji Ota static int 808c0dd49bdSEiji Ota rdsv3_send_queue_rm(struct rdsv3_sock *rs, struct rdsv3_connection *conn, 809c0dd49bdSEiji Ota struct rdsv3_message *rm, uint16_be_t sport, 810c0dd49bdSEiji Ota uint16_be_t dport, int *queued) 811c0dd49bdSEiji Ota { 812c0dd49bdSEiji Ota uint32_t len; 813c0dd49bdSEiji Ota 814c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_send_queue_rm", "Enter(rs: %p, rm: %p)", rs, rm); 815c0dd49bdSEiji Ota 816c0dd49bdSEiji Ota if (*queued) 817c0dd49bdSEiji Ota goto out; 818c0dd49bdSEiji Ota 819c0dd49bdSEiji Ota len = ntohl(rm->m_inc.i_hdr.h_len); 820c0dd49bdSEiji Ota 821c0dd49bdSEiji Ota /* 822c0dd49bdSEiji Ota * this is the only place which holds both the socket's rs_lock 823c0dd49bdSEiji Ota * and the connection's c_lock 824c0dd49bdSEiji Ota */ 825c0dd49bdSEiji Ota mutex_enter(&rs->rs_lock); 826c0dd49bdSEiji Ota 827c0dd49bdSEiji Ota /* 828c0dd49bdSEiji Ota * If there is a little space in sndbuf, we don't queue anything, 829c0dd49bdSEiji Ota * and userspace gets -EAGAIN. But poll() indicates there's send 830c0dd49bdSEiji Ota * room. This can lead to bad behavior (spinning) if snd_bytes isn't 831c0dd49bdSEiji Ota * freed up by incoming acks. So we check the *old* value of 832c0dd49bdSEiji Ota * rs_snd_bytes here to allow the last msg to exceed the buffer, 833c0dd49bdSEiji Ota * and poll() now knows no more data can be sent. 834c0dd49bdSEiji Ota */ 835c0dd49bdSEiji Ota if (rs->rs_snd_bytes < rdsv3_sk_sndbuf(rs)) { 836c0dd49bdSEiji Ota rs->rs_snd_bytes += len; 837c0dd49bdSEiji Ota 838c0dd49bdSEiji Ota /* 839c0dd49bdSEiji Ota * let recv side know we are close to send space exhaustion. 840c0dd49bdSEiji Ota * This is probably not the optimal way to do it, as this 841c0dd49bdSEiji Ota * means we set the flag on *all* messages as soon as our 842c0dd49bdSEiji Ota * throughput hits a certain threshold. 
843c0dd49bdSEiji Ota */ 844c0dd49bdSEiji Ota if (rs->rs_snd_bytes >= rdsv3_sk_sndbuf(rs) / 2) 845c0dd49bdSEiji Ota set_bit(RDSV3_MSG_ACK_REQUIRED, &rm->m_flags); 846c0dd49bdSEiji Ota 847c0dd49bdSEiji Ota list_insert_tail(&rs->rs_send_queue, rm); 848c0dd49bdSEiji Ota set_bit(RDSV3_MSG_ON_SOCK, &rm->m_flags); 849c0dd49bdSEiji Ota 850c0dd49bdSEiji Ota rdsv3_message_addref(rm); 851c0dd49bdSEiji Ota rm->m_rs = rs; 852c0dd49bdSEiji Ota 853c0dd49bdSEiji Ota /* 854c0dd49bdSEiji Ota * The code ordering is a little weird, but we're 855c0dd49bdSEiji Ota * trying to minimize the time we hold c_lock 856c0dd49bdSEiji Ota */ 857c0dd49bdSEiji Ota rdsv3_message_populate_header(&rm->m_inc.i_hdr, sport, 858c0dd49bdSEiji Ota dport, 0); 859c0dd49bdSEiji Ota rm->m_inc.i_conn = conn; 860c0dd49bdSEiji Ota rdsv3_message_addref(rm); /* XXX - called twice */ 861c0dd49bdSEiji Ota 862c0dd49bdSEiji Ota mutex_enter(&conn->c_lock); 863c0dd49bdSEiji Ota rm->m_inc.i_hdr.h_sequence = htonll(conn->c_next_tx_seq++); 864c0dd49bdSEiji Ota list_insert_tail(&conn->c_send_queue, rm); 865c0dd49bdSEiji Ota set_bit(RDSV3_MSG_ON_CONN, &rm->m_flags); 866c0dd49bdSEiji Ota mutex_exit(&conn->c_lock); 867c0dd49bdSEiji Ota 868c0dd49bdSEiji Ota RDSV3_DPRINTF5("rdsv3_send_queue_rm", 869c0dd49bdSEiji Ota "queued msg %p len %d, rs %p bytes %d seq %llu", 870c0dd49bdSEiji Ota rm, len, rs, rs->rs_snd_bytes, 871c0dd49bdSEiji Ota (unsigned long long)ntohll( 872c0dd49bdSEiji Ota rm->m_inc.i_hdr.h_sequence)); 873c0dd49bdSEiji Ota 874c0dd49bdSEiji Ota *queued = 1; 875c0dd49bdSEiji Ota } 876c0dd49bdSEiji Ota 877c0dd49bdSEiji Ota mutex_exit(&rs->rs_lock); 878c0dd49bdSEiji Ota 879c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_send_queue_rm", "Return(rs: %p)", rs); 880c0dd49bdSEiji Ota out: 881c0dd49bdSEiji Ota return (*queued); 882c0dd49bdSEiji Ota } 883c0dd49bdSEiji Ota 884c0dd49bdSEiji Ota static int 885c0dd49bdSEiji Ota rdsv3_cmsg_send(struct rdsv3_sock *rs, struct rdsv3_message *rm, 886c0dd49bdSEiji Ota struct msghdr *msg, int 
*allocated_mr) 887c0dd49bdSEiji Ota { 888c0dd49bdSEiji Ota struct cmsghdr *cmsg; 889c0dd49bdSEiji Ota int ret = 0; 890c0dd49bdSEiji Ota 891c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_cmsg_send", "Enter(rs: %p)", rs); 892c0dd49bdSEiji Ota 893c0dd49bdSEiji Ota for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { 894c0dd49bdSEiji Ota 895c0dd49bdSEiji Ota if (cmsg->cmsg_level != SOL_RDS) 896c0dd49bdSEiji Ota continue; 897c0dd49bdSEiji Ota 898c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_cmsg_send", "cmsg(%p, %p) type %d", 899c0dd49bdSEiji Ota cmsg, rm, cmsg->cmsg_type); 900c0dd49bdSEiji Ota /* 901c0dd49bdSEiji Ota * As a side effect, RDMA_DEST and RDMA_MAP will set 902c0dd49bdSEiji Ota * rm->m_rdma_cookie and rm->m_rdma_mr. 903c0dd49bdSEiji Ota */ 904c0dd49bdSEiji Ota switch (cmsg->cmsg_type) { 905c0dd49bdSEiji Ota case RDSV3_CMSG_RDMA_ARGS: 906c0dd49bdSEiji Ota ret = rdsv3_cmsg_rdma_args(rs, rm, cmsg); 907c0dd49bdSEiji Ota break; 908c0dd49bdSEiji Ota 909c0dd49bdSEiji Ota case RDSV3_CMSG_RDMA_DEST: 910c0dd49bdSEiji Ota ret = rdsv3_cmsg_rdma_dest(rs, rm, cmsg); 911c0dd49bdSEiji Ota break; 912c0dd49bdSEiji Ota 913c0dd49bdSEiji Ota case RDSV3_CMSG_RDMA_MAP: 914c0dd49bdSEiji Ota ret = rdsv3_cmsg_rdma_map(rs, rm, cmsg); 915c0dd49bdSEiji Ota if (ret) 916c0dd49bdSEiji Ota *allocated_mr = 1; 917c0dd49bdSEiji Ota break; 918c0dd49bdSEiji Ota 919c0dd49bdSEiji Ota default: 920c0dd49bdSEiji Ota return (-EINVAL); 921c0dd49bdSEiji Ota } 922c0dd49bdSEiji Ota 923c0dd49bdSEiji Ota if (ret) 924c0dd49bdSEiji Ota break; 925c0dd49bdSEiji Ota } 926c0dd49bdSEiji Ota 927c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_cmsg_send", "Return(rs: %p)", rs); 928c0dd49bdSEiji Ota 929c0dd49bdSEiji Ota return (ret); 930c0dd49bdSEiji Ota } 931c0dd49bdSEiji Ota 932c0dd49bdSEiji Ota int 933c0dd49bdSEiji Ota rdsv3_sendmsg(struct rdsv3_sock *rs, uio_t *uio, struct nmsghdr *msg, 934c0dd49bdSEiji Ota size_t payload_len) 935c0dd49bdSEiji Ota { 936c0dd49bdSEiji Ota struct rsock *sk = rdsv3_rs_to_sk(rs); 
937c0dd49bdSEiji Ota struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name; 938c0dd49bdSEiji Ota uint32_be_t daddr; 939c0dd49bdSEiji Ota uint16_be_t dport; 940c0dd49bdSEiji Ota struct rdsv3_message *rm = NULL; 941c0dd49bdSEiji Ota struct rdsv3_connection *conn; 942c0dd49bdSEiji Ota int ret = 0; 943c0dd49bdSEiji Ota int queued = 0, allocated_mr = 0; 944c0dd49bdSEiji Ota int nonblock = msg->msg_flags & MSG_DONTWAIT; 945cadbfdc3SEiji Ota long timeo = rdsv3_sndtimeo(sk, nonblock); 946c0dd49bdSEiji Ota 947c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_sendmsg", "Enter(rs: %p)", rs); 948c0dd49bdSEiji Ota 949c0dd49bdSEiji Ota if (msg->msg_namelen) { 950c0dd49bdSEiji Ota /* XXX fail non-unicast destination IPs? */ 951c0dd49bdSEiji Ota if (msg->msg_namelen < sizeof (*usin) || 952c0dd49bdSEiji Ota usin->sin_family != AF_INET_OFFLOAD) { 953c0dd49bdSEiji Ota ret = -EINVAL; 954c0dd49bdSEiji Ota RDSV3_DPRINTF2("rdsv3_sendmsg", "returning: %d", -ret); 955c0dd49bdSEiji Ota goto out; 956c0dd49bdSEiji Ota } 957c0dd49bdSEiji Ota daddr = usin->sin_addr.s_addr; 958c0dd49bdSEiji Ota dport = usin->sin_port; 959c0dd49bdSEiji Ota } else { 960c0dd49bdSEiji Ota /* We only care about consistency with ->connect() */ 961c0dd49bdSEiji Ota mutex_enter(&sk->sk_lock); 962c0dd49bdSEiji Ota daddr = rs->rs_conn_addr; 963c0dd49bdSEiji Ota dport = rs->rs_conn_port; 964c0dd49bdSEiji Ota mutex_exit(&sk->sk_lock); 965c0dd49bdSEiji Ota } 966c0dd49bdSEiji Ota 967c0dd49bdSEiji Ota /* racing with another thread binding seems ok here */ 968c0dd49bdSEiji Ota if (daddr == 0 || rs->rs_bound_addr == 0) { 969c0dd49bdSEiji Ota ret = -ENOTCONN; /* XXX not a great errno */ 970c0dd49bdSEiji Ota RDSV3_DPRINTF2("rdsv3_sendmsg", "returning: %d", -ret); 971c0dd49bdSEiji Ota goto out; 972c0dd49bdSEiji Ota } 973c0dd49bdSEiji Ota 974c0dd49bdSEiji Ota rm = rdsv3_message_copy_from_user(uio, payload_len); 975c0dd49bdSEiji Ota if (IS_ERR(rm)) { 976c0dd49bdSEiji Ota ret = PTR_ERR(rm); 977c0dd49bdSEiji Ota 
RDSV3_DPRINTF2("rdsv3_sendmsg", 978c0dd49bdSEiji Ota "rdsv3_message_copy_from_user failed %d", -ret); 979c0dd49bdSEiji Ota rm = NULL; 980c0dd49bdSEiji Ota goto out; 981c0dd49bdSEiji Ota } 982c0dd49bdSEiji Ota 983c0dd49bdSEiji Ota rm->m_daddr = daddr; 984c0dd49bdSEiji Ota 985cadbfdc3SEiji Ota /* Parse any control messages the user may have included. */ 986cadbfdc3SEiji Ota ret = rdsv3_cmsg_send(rs, rm, msg, &allocated_mr); 987cadbfdc3SEiji Ota if (ret) { 988cadbfdc3SEiji Ota RDSV3_DPRINTF2("rdsv3_sendmsg", 989cadbfdc3SEiji Ota "rdsv3_cmsg_send(rs: %p rm: %p msg: %p) returned: %d", 990cadbfdc3SEiji Ota rs, rm, msg, ret); 991cadbfdc3SEiji Ota goto out; 992cadbfdc3SEiji Ota } 993cadbfdc3SEiji Ota 994c0dd49bdSEiji Ota /* 995c0dd49bdSEiji Ota * rdsv3_conn_create has a spinlock that runs with IRQ off. 996c0dd49bdSEiji Ota * Caching the conn in the socket helps a lot. 997c0dd49bdSEiji Ota */ 998c0dd49bdSEiji Ota mutex_enter(&rs->rs_conn_lock); 999c0dd49bdSEiji Ota if (rs->rs_conn && rs->rs_conn->c_faddr == daddr) { 1000c0dd49bdSEiji Ota conn = rs->rs_conn; 1001c0dd49bdSEiji Ota } else { 1002c0dd49bdSEiji Ota conn = rdsv3_conn_create_outgoing(rs->rs_bound_addr, 1003c0dd49bdSEiji Ota daddr, rs->rs_transport, KM_NOSLEEP); 1004c0dd49bdSEiji Ota if (IS_ERR(conn)) { 1005c0dd49bdSEiji Ota mutex_exit(&rs->rs_conn_lock); 1006c0dd49bdSEiji Ota ret = PTR_ERR(conn); 1007c0dd49bdSEiji Ota RDSV3_DPRINTF2("rdsv3_sendmsg", 1008c0dd49bdSEiji Ota "rdsv3_conn_create_outgoing failed %d", 1009c0dd49bdSEiji Ota -ret); 1010c0dd49bdSEiji Ota goto out; 1011c0dd49bdSEiji Ota } 1012c0dd49bdSEiji Ota rs->rs_conn = conn; 1013c0dd49bdSEiji Ota } 1014c0dd49bdSEiji Ota mutex_exit(&rs->rs_conn_lock); 1015c0dd49bdSEiji Ota 1016c0dd49bdSEiji Ota if ((rm->m_rdma_cookie || rm->m_rdma_op) && 1017c0dd49bdSEiji Ota conn->c_trans->xmit_rdma == NULL) { 10186e18d381Sagiri RDSV3_DPRINTF2("rdsv3_sendmsg", "rdma_op %p conn xmit_rdma %p", 1019c0dd49bdSEiji Ota rm->m_rdma_op, conn->c_trans->xmit_rdma); 1020c0dd49bdSEiji 
Ota ret = -EOPNOTSUPP; 1021c0dd49bdSEiji Ota goto out; 1022c0dd49bdSEiji Ota } 1023c0dd49bdSEiji Ota 1024c0dd49bdSEiji Ota /* 1025c0dd49bdSEiji Ota * If the connection is down, trigger a connect. We may 1026c0dd49bdSEiji Ota * have scheduled a delayed reconnect however - in this case 1027c0dd49bdSEiji Ota * we should not interfere. 1028c0dd49bdSEiji Ota */ 1029c0dd49bdSEiji Ota if (rdsv3_conn_state(conn) == RDSV3_CONN_DOWN && 1030c0dd49bdSEiji Ota !test_and_set_bit(RDSV3_RECONNECT_PENDING, &conn->c_flags)) 1031c0dd49bdSEiji Ota rdsv3_queue_delayed_work(rdsv3_wq, &conn->c_conn_w, 0); 1032c0dd49bdSEiji Ota 1033c0dd49bdSEiji Ota ret = rdsv3_cong_wait(conn->c_fcong, dport, nonblock, rs); 1034c0dd49bdSEiji Ota if (ret) { 1035*5d5562f5SEiji Ota mutex_enter(&rs->rs_congested_lock); 1036cadbfdc3SEiji Ota rs->rs_seen_congestion = 1; 1037*5d5562f5SEiji Ota cv_signal(&rs->rs_congested_cv); 1038*5d5562f5SEiji Ota mutex_exit(&rs->rs_congested_lock); 1039cadbfdc3SEiji Ota 1040c0dd49bdSEiji Ota RDSV3_DPRINTF2("rdsv3_sendmsg", 1041c0dd49bdSEiji Ota "rdsv3_cong_wait (dport: %d) returned: %d", dport, ret); 1042c0dd49bdSEiji Ota goto out; 1043c0dd49bdSEiji Ota } 1044c0dd49bdSEiji Ota 1045c0dd49bdSEiji Ota (void) rdsv3_send_queue_rm(rs, conn, rm, rs->rs_bound_port, dport, 1046c0dd49bdSEiji Ota &queued); 1047c0dd49bdSEiji Ota if (!queued) { 1048c0dd49bdSEiji Ota /* rdsv3_stats_inc(s_send_queue_full); */ 1049c0dd49bdSEiji Ota /* XXX make sure this is reasonable */ 1050c0dd49bdSEiji Ota if (payload_len > rdsv3_sk_sndbuf(rs)) { 1051c0dd49bdSEiji Ota ret = -EMSGSIZE; 1052c0dd49bdSEiji Ota RDSV3_DPRINTF2("rdsv3_sendmsg", 1053c0dd49bdSEiji Ota "msgsize(%d) too big, returning: %d", 1054c0dd49bdSEiji Ota payload_len, -ret); 1055c0dd49bdSEiji Ota goto out; 1056c0dd49bdSEiji Ota } 1057c0dd49bdSEiji Ota if (nonblock) { 1058c0dd49bdSEiji Ota ret = -EAGAIN; 1059c0dd49bdSEiji Ota RDSV3_DPRINTF3("rdsv3_sendmsg", 1060c0dd49bdSEiji Ota "send queue full (%d), returning: %d", 1061c0dd49bdSEiji Ota 
payload_len, -ret); 1062c0dd49bdSEiji Ota goto out; 1063c0dd49bdSEiji Ota } 1064c0dd49bdSEiji Ota 1065c0dd49bdSEiji Ota #if 0 10666e18d381Sagiri ret = rdsv3_wait_sig(sk->sk_sleep, 10676e18d381Sagiri (rdsv3_send_queue_rm(rs, conn, rm, rs->rs_bound_port, 10686e18d381Sagiri dport, &queued))); 10696e18d381Sagiri if (ret == 0) { 1070c0dd49bdSEiji Ota /* signal/timeout pending */ 1071c0dd49bdSEiji Ota RDSV3_DPRINTF2("rdsv3_sendmsg", 10726e18d381Sagiri "woke due to signal: %d", ret); 10736e18d381Sagiri ret = -ERESTART; 1074c0dd49bdSEiji Ota goto out; 1075c0dd49bdSEiji Ota } 1076c0dd49bdSEiji Ota #else 10776e18d381Sagiri mutex_enter(&sk->sk_sleep->waitq_mutex); 10786e18d381Sagiri sk->sk_sleep->waitq_waiters++; 10796e18d381Sagiri while (!rdsv3_send_queue_rm(rs, conn, rm, rs->rs_bound_port, 10806e18d381Sagiri dport, &queued)) { 1081c0dd49bdSEiji Ota ret = cv_wait_sig(&sk->sk_sleep->waitq_cv, 1082c0dd49bdSEiji Ota &sk->sk_sleep->waitq_mutex); 1083c0dd49bdSEiji Ota if (ret == 0) { 1084c0dd49bdSEiji Ota /* signal/timeout pending */ 1085c0dd49bdSEiji Ota RDSV3_DPRINTF2("rdsv3_sendmsg", 10866e18d381Sagiri "woke due to signal: %d", ret); 1087c0dd49bdSEiji Ota ret = -ERESTART; 10886e18d381Sagiri sk->sk_sleep->waitq_waiters--; 1089c0dd49bdSEiji Ota mutex_exit(&sk->sk_sleep->waitq_mutex); 1090c0dd49bdSEiji Ota goto out; 1091c0dd49bdSEiji Ota } 1092c0dd49bdSEiji Ota } 10936e18d381Sagiri sk->sk_sleep->waitq_waiters--; 1094c0dd49bdSEiji Ota mutex_exit(&sk->sk_sleep->waitq_mutex); 10956e18d381Sagiri #endif 1096c0dd49bdSEiji Ota 1097c0dd49bdSEiji Ota RDSV3_DPRINTF5("rdsv3_sendmsg", "sendmsg woke queued %d", 1098c0dd49bdSEiji Ota queued); 1099c0dd49bdSEiji Ota 1100c0dd49bdSEiji Ota ASSERT(queued); 1101c0dd49bdSEiji Ota ret = 0; 1102c0dd49bdSEiji Ota } 1103c0dd49bdSEiji Ota 1104c0dd49bdSEiji Ota /* 1105c0dd49bdSEiji Ota * By now we've committed to the send. We reuse rdsv3_send_worker() 1106c0dd49bdSEiji Ota * to retry sends in the rds thread if the transport asks us to. 
1107c0dd49bdSEiji Ota */ 1108c0dd49bdSEiji Ota rdsv3_stats_inc(s_send_queued); 1109c0dd49bdSEiji Ota 1110c0dd49bdSEiji Ota if (!test_bit(RDSV3_LL_SEND_FULL, &conn->c_flags)) 1111*5d5562f5SEiji Ota (void) rdsv3_send_xmit(conn); 1112c0dd49bdSEiji Ota 1113c0dd49bdSEiji Ota rdsv3_message_put(rm); 1114c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_sendmsg", "Return(rs: %p, len: %d)", 1115c0dd49bdSEiji Ota rs, payload_len); 1116c0dd49bdSEiji Ota return (payload_len); 1117c0dd49bdSEiji Ota 1118c0dd49bdSEiji Ota out: 1119c0dd49bdSEiji Ota /* 1120c0dd49bdSEiji Ota * If the user included a RDMA_MAP cmsg, we allocated a MR on the fly. 1121c0dd49bdSEiji Ota * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN 1122c0dd49bdSEiji Ota * or in any other way, we need to destroy the MR again 1123c0dd49bdSEiji Ota */ 1124c0dd49bdSEiji Ota if (allocated_mr) 1125c0dd49bdSEiji Ota rdsv3_rdma_unuse(rs, rdsv3_rdma_cookie_key(rm->m_rdma_cookie), 1126c0dd49bdSEiji Ota 1); 1127c0dd49bdSEiji Ota 1128c0dd49bdSEiji Ota if (rm) 1129c0dd49bdSEiji Ota rdsv3_message_put(rm); 1130c0dd49bdSEiji Ota return (ret); 1131c0dd49bdSEiji Ota } 1132c0dd49bdSEiji Ota 1133c0dd49bdSEiji Ota /* 1134c0dd49bdSEiji Ota * Reply to a ping packet. 1135c0dd49bdSEiji Ota */ 1136c0dd49bdSEiji Ota int 1137c0dd49bdSEiji Ota rdsv3_send_pong(struct rdsv3_connection *conn, uint16_be_t dport) 1138c0dd49bdSEiji Ota { 1139c0dd49bdSEiji Ota struct rdsv3_message *rm; 1140c0dd49bdSEiji Ota int ret = 0; 1141c0dd49bdSEiji Ota 1142c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_send_pong", "Enter(conn: %p)", conn); 1143c0dd49bdSEiji Ota 1144c0dd49bdSEiji Ota rm = rdsv3_message_alloc(0, KM_NOSLEEP); 1145*5d5562f5SEiji Ota if (!rm) { 1146c0dd49bdSEiji Ota ret = -ENOMEM; 1147c0dd49bdSEiji Ota goto out; 1148c0dd49bdSEiji Ota } 1149c0dd49bdSEiji Ota 1150c0dd49bdSEiji Ota rm->m_daddr = conn->c_faddr; 1151c0dd49bdSEiji Ota 1152c0dd49bdSEiji Ota /* 1153c0dd49bdSEiji Ota * If the connection is down, trigger a connect. 
We may 1154c0dd49bdSEiji Ota * have scheduled a delayed reconnect however - in this case 1155c0dd49bdSEiji Ota * we should not interfere. 1156c0dd49bdSEiji Ota */ 1157c0dd49bdSEiji Ota if (rdsv3_conn_state(conn) == RDSV3_CONN_DOWN && 1158c0dd49bdSEiji Ota !test_and_set_bit(RDSV3_RECONNECT_PENDING, &conn->c_flags)) 1159c0dd49bdSEiji Ota rdsv3_queue_delayed_work(rdsv3_wq, &conn->c_conn_w, 0); 1160c0dd49bdSEiji Ota 1161c0dd49bdSEiji Ota ret = rdsv3_cong_wait(conn->c_fcong, dport, 1, NULL); 1162c0dd49bdSEiji Ota if (ret) 1163c0dd49bdSEiji Ota goto out; 1164c0dd49bdSEiji Ota 1165c0dd49bdSEiji Ota mutex_enter(&conn->c_lock); 1166c0dd49bdSEiji Ota list_insert_tail(&conn->c_send_queue, rm); 1167c0dd49bdSEiji Ota set_bit(RDSV3_MSG_ON_CONN, &rm->m_flags); 1168c0dd49bdSEiji Ota rdsv3_message_addref(rm); 1169c0dd49bdSEiji Ota rm->m_inc.i_conn = conn; 1170c0dd49bdSEiji Ota 1171c0dd49bdSEiji Ota rdsv3_message_populate_header(&rm->m_inc.i_hdr, 0, dport, 1172c0dd49bdSEiji Ota conn->c_next_tx_seq); 1173c0dd49bdSEiji Ota conn->c_next_tx_seq++; 1174c0dd49bdSEiji Ota mutex_exit(&conn->c_lock); 1175c0dd49bdSEiji Ota 1176c0dd49bdSEiji Ota rdsv3_stats_inc(s_send_queued); 1177c0dd49bdSEiji Ota rdsv3_stats_inc(s_send_pong); 1178c0dd49bdSEiji Ota 1179*5d5562f5SEiji Ota if (!test_bit(RDSV3_LL_SEND_FULL, &conn->c_flags)) 1180*5d5562f5SEiji Ota (void) rdsv3_send_xmit(conn); 1181*5d5562f5SEiji Ota 1182c0dd49bdSEiji Ota rdsv3_message_put(rm); 1183c0dd49bdSEiji Ota 1184c0dd49bdSEiji Ota RDSV3_DPRINTF4("rdsv3_send_pong", "Return(conn: %p)", conn); 1185c0dd49bdSEiji Ota return (0); 1186c0dd49bdSEiji Ota 1187c0dd49bdSEiji Ota out: 1188c0dd49bdSEiji Ota if (rm) 1189c0dd49bdSEiji Ota rdsv3_message_put(rm); 1190c0dd49bdSEiji Ota return (ret); 1191c0dd49bdSEiji Ota } 1192