diff --git a/tempesta_fw/classifier/frang.c b/tempesta_fw/classifier/frang.c index 411e3a1528..c85eb6fc15 100644 --- a/tempesta_fw/classifier/frang.c +++ b/tempesta_fw/classifier/frang.c @@ -156,12 +156,12 @@ frang_conn_limit(FrangAcc *ra, struct sock *unused) } /* - * Increment connection counters even if we return TFW_BLOCK. - * Linux will call sk_free() from inet_csk_clone_lock(), - * so our frang_conn_close() is also called. conn_curr is - * decremented there, but conn_new is not changed. We count - * both failed connection attempts and connections that were - * successfully established. + * Increment connection counters even when we return TFW_BLOCK. + * Linux will call sk_free() from inet_csk_clone_lock(), so our + * frang_conn_close() is also called. @conn_curr is decremented + * there, but @conn_new is not changed. We count both failed + * connection attempts and connections that were successfully + * established. */ ra->history[i].conn_new++; ra->conn_curr++; @@ -219,14 +219,14 @@ frang_conn_new(struct sock *sk) spin_lock(&ra->lock); /* - * sk->sk_user_data references TfwConnection which in turn references - * TfwPeer, so basically we can get FrangAcc from TfwConnection. + * sk->sk_user_data references TfwConn{} which in turn references + * TfwPeer, so basically we can get FrangAcc from TfwConn{}. * However, socket can live (for a short period of time, when kernel * just allocated the socket and called tempesta_new_clntsk()) w/o - * TfwConnection and vise versa - TfwConnection can leave w/o socket + * TfwConn{} and vise versa - TfwConn{} can leave w/o socket * (e.g. server connections during failover). Thus to keep design * consistent we two references to TfwPeer: from socket and - * TfwConnection. + * TfwConn{}. 
*/ sk->sk_security = ra; @@ -601,7 +601,7 @@ do { \ } while (0) static int -frang_http_req_process(FrangAcc *ra, TfwConnection *conn, struct sk_buff *skb, +frang_http_req_process(FrangAcc *ra, TfwConn *conn, struct sk_buff *skb, unsigned int off) { int r = TFW_PASS; @@ -848,7 +848,7 @@ static int frang_http_req_handler(void *obj, struct sk_buff *skb, unsigned int off) { int r; - TfwConnection *conn = (TfwConnection *)obj; + TfwConn *conn = (TfwConn *)obj; FrangAcc *ra = conn->sk->sk_security; r = frang_http_req_process(ra, conn, skb, off); diff --git a/tempesta_fw/client.h b/tempesta_fw/client.h index 458f938543..15b345c7a1 100644 --- a/tempesta_fw/client.h +++ b/tempesta_fw/client.h @@ -46,8 +46,8 @@ typedef struct { TfwClient *tfw_client_obtain(struct sock *sk, void (*init)(TfwClient *)); void tfw_client_put(TfwClient *cli); -void tfw_cli_conn_release(TfwCliConnection *cli_conn); -int tfw_cli_conn_send(TfwCliConnection *cli_conn, TfwMsg *msg); +void tfw_cli_conn_release(TfwCliConn *cli_conn); +int tfw_cli_conn_send(TfwCliConn *cli_conn, TfwMsg *msg); int tfw_sock_check_listeners(void); #endif /* __TFW_CLIENT_H__ */ diff --git a/tempesta_fw/connection.c b/tempesta_fw/connection.c index 88cac35416..c9186bc506 100644 --- a/tempesta_fw/connection.c +++ b/tempesta_fw/connection.c @@ -32,14 +32,14 @@ TfwConnHooks *conn_hooks[TFW_CONN_MAX_PROTOS]; * It's not on any list yet, so it's safe to do so without locks. */ void -tfw_connection_init(TfwConnection *conn) +tfw_connection_init(TfwConn *conn) { memset(conn, 0, sizeof(*conn)); INIT_LIST_HEAD(&conn->list); } void -tfw_connection_link_peer(TfwConnection *conn, TfwPeer *peer) +tfw_connection_link_peer(TfwConn *conn, TfwPeer *peer) { BUG_ON(conn->peer || !list_empty(&conn->list)); conn->peer = peer; @@ -50,7 +50,7 @@ tfw_connection_link_peer(TfwConnection *conn, TfwPeer *peer) * Publish the "connection is established" event via TfwConnHooks. 
*/ int -tfw_connection_new(TfwConnection *conn) +tfw_connection_new(TfwConn *conn) { return TFW_CONN_HOOK_CALL(conn, conn_init); } @@ -59,7 +59,7 @@ tfw_connection_new(TfwConnection *conn) * Call connection repairing via TfwConnHooks. */ void -tfw_connection_repair(TfwConnection *conn) +tfw_connection_repair(TfwConn *conn) { TFW_CONN_HOOK_CALL(conn, conn_repair); } @@ -68,7 +68,7 @@ tfw_connection_repair(TfwConnection *conn) * Publish the "connection is dropped" event via TfwConnHooks. */ void -tfw_connection_drop(TfwConnection *conn) +tfw_connection_drop(TfwConn *conn) { /* Ask higher levels to free resources at connection close. */ TFW_CONN_HOOK_CALL(conn, conn_drop); @@ -79,12 +79,12 @@ tfw_connection_drop(TfwConnection *conn) * Publish the "connection is released" event via TfwConnHooks. */ void -tfw_connection_release(TfwConnection *conn) +tfw_connection_release(TfwConn *conn) { /* Ask higher levels to free resources at connection release. */ TFW_CONN_HOOK_CALL(conn, conn_release); BUG_ON((TFW_CONN_TYPE(conn) & Conn_Clnt) - && !list_empty(&((TfwCliConnection *)conn)->seq_queue)); + && !list_empty(&((TfwCliConn *)conn)->seq_queue)); } /* @@ -94,7 +94,7 @@ tfw_connection_release(TfwConnection *conn) * only on an active socket. */ int -tfw_connection_send(TfwConnection *conn, TfwMsg *msg) +tfw_connection_send(TfwConn *conn, TfwMsg *msg) { return TFW_CONN_HOOK_CALL(conn, conn_send, msg); } @@ -102,7 +102,7 @@ tfw_connection_send(TfwConnection *conn, TfwMsg *msg) int tfw_connection_recv(void *cdata, struct sk_buff *skb, unsigned int off) { - TfwConnection *conn = cdata; + TfwConn *conn = cdata; return tfw_gfsm_dispatch(&conn->state, conn, skb, off); } diff --git a/tempesta_fw/connection.h b/tempesta_fw/connection.h index a15c518607..86fdad3cdf 100644 --- a/tempesta_fw/connection.h +++ b/tempesta_fw/connection.h @@ -54,23 +54,23 @@ enum { /** * Session/Presentation layer (in OSI terms) handling. 
* - * An instance of TfwConnection{} structure links each HTTP message to - * attributes of a connection the message has come on. Some of those - * messages may stay longer in Tempesta after they're sent out to their - * destinations. Requests are kept until a paired response comes. By the - * time the request's connection is needed for sending the response, it - * may already be destroyed. With that in mind, TfwConnection{} instance - * is not destroyed along with the connection so that it can be safely - * dereferenced. It's kept around until refcnt permits freeing of the - * instance, so it may have longer lifetime than the connection itself. + * An instance of TfwConn{} structure links each HTTP message to properties + * of a connection the message has come on. Some messages may stay longer + * in Tempesta after they're sent out to their destinations. Requests are + * kept until a paired response comes. By the time the request's connection + * is needed for sending the response, it may be destroyed already. Thus, + * TfwConn{} instance is not destroyed along with the connection so that + * it can be safely dereferenced. It's kept around until refcnt's value + * permits freeing of the instance, so it may have longer lifetime than + * the connection itself. * - * @sk is an intrinsic property of TfwConnection{}. - * It has exactly the same lifetime as an instance of TfwConnection{}. + * @sk is an intrinsic property of TfwConn{}. + * It has exactly the same lifetime as an instance of TfwConn{}. * - * @peer is major property of TfwConnection{}. An instance of @peer has - * longer lifetime expectation than a connection. @peer is always valid - * while it's referenced from an instance of TfwConnection{}. That is - * supported by a separate reference counter in @peer. + * @peer is major property of TfwConn{}. An instance of @peer has longer + * lifetime expectation than a connection. @peer is always valid while + * it's referenced from an instance of TfwConn{}. 
That is supported by + * a separate reference counter in @peer. * * These are the properties of a connection that are common to client * and server connections. @@ -85,7 +85,7 @@ enum { * @sk - an appropriate sock handler; * @destructor - called when a connection is destroyed; */ -#define TFW_CONNECTION_COMMON \ +#define TFW_CONN_COMMON \ SsProto proto; \ TfwGState state; \ struct list_head list; \ @@ -97,8 +97,8 @@ enum { void (*destructor)(void *); typedef struct { - TFW_CONNECTION_COMMON; -} TfwConnection; + TFW_CONN_COMMON; +} TfwConn; #define TFW_CONN_TYPE(c) ((c)->proto.type) @@ -110,11 +110,11 @@ typedef struct { * @ret_qlock - lock for serializing sets of responses; */ typedef struct { - TFW_CONNECTION_COMMON; + TFW_CONN_COMMON; struct list_head seq_queue; spinlock_t seq_qlock; spinlock_t ret_qlock; -} TfwCliConnection; +} TfwCliConn; /* * These are specific properties that are relevant to server connections. @@ -129,7 +129,7 @@ typedef struct { * @msg_sent - request that was sent last in a server connection; */ typedef struct { - TFW_CONNECTION_COMMON; + TFW_CONN_COMMON; struct list_head fwd_queue; struct list_head nip_queue; spinlock_t fwd_qlock; @@ -137,7 +137,7 @@ typedef struct { unsigned int qsize; unsigned int recns; TfwMsg *msg_sent; -} TfwSrvConnection; +} TfwSrvConn; #define TFW_CONN_DEATHCNT (INT_MIN / 2) @@ -158,11 +158,11 @@ enum { * TLS hardened connection. */ typedef struct { - TfwConnection conn; - TfwTlsContext tls; -} TfwTlsConnection; + TfwConn conn; + TfwTlsContext tls; +} TfwTlsConn; -#define tfw_tls_context(p) (TfwTlsContext *)(&((TfwTlsConnection *)p)->tls) +#define tfw_tls_context(p) (TfwTlsContext *)(&((TfwTlsConn *)p)->tls) /* Callbacks used by l5-l7 protocols to operate on connection level. */ typedef struct { @@ -172,7 +172,7 @@ typedef struct { * This is a good place to handle Access or GEO modules * (block a client or bind its descriptor with GEO data). 
*/ - int (*conn_init)(TfwConnection *conn); + int (*conn_init)(TfwConn *conn); /* * Called when a new connection is initialized and before @@ -180,26 +180,26 @@ typedef struct { * server connections. Used to re-send requests that were * left in the connection queue. */ - void (*conn_repair)(TfwConnection *conn); + void (*conn_repair)(TfwConn *conn); /* * Called when closing a connection (client or server, * as in conn_init()). This is required for modules that * maintain the number of established client connections. */ - void (*conn_drop)(TfwConnection *conn); + void (*conn_drop)(TfwConn *conn); /* * Called when there are no more users of a connection * and the connections's resources are finally released. */ - void (*conn_release)(TfwConnection *conn); + void (*conn_release)(TfwConn *conn); /* * Called by the connection layer when there is a message * that needs to be send. */ - int (*conn_send)(TfwConnection *conn, TfwMsg *msg); + int (*conn_send)(TfwConn *conn, TfwMsg *msg); } TfwConnHooks; #define TFW_CONN_MAX_PROTOS TFW_GFSM_FSM_N @@ -224,7 +224,7 @@ extern TfwConnHooks *conn_hooks[TFW_CONN_MAX_PROTOS]; * are re-sent. */ static inline bool -tfw_srv_conn_restricted(TfwSrvConnection *srv_conn) +tfw_srv_conn_restricted(TfwSrvConn *srv_conn) { return test_bit(TFW_CONN_B_RESEND, &srv_conn->flags); } @@ -233,36 +233,36 @@ tfw_srv_conn_restricted(TfwSrvConnection *srv_conn) * Tell if a connection has non-idempotent requests. 
*/ static inline bool -tfw_srv_conn_hasnip(TfwSrvConnection *srv_conn) +tfw_srv_conn_hasnip(TfwSrvConn *srv_conn) { return test_bit(TFW_CONN_B_HASNIP, &srv_conn->flags); } static inline bool -tfw_connection_live(TfwConnection *conn) +tfw_connection_live(TfwConn *conn) { return atomic_read(&conn->refcnt) > 0; } static inline bool -tfw_srv_conn_live(TfwSrvConnection *srv_conn) +tfw_srv_conn_live(TfwSrvConn *srv_conn) { - return tfw_connection_live((TfwConnection *)srv_conn); + return tfw_connection_live((TfwConn *)srv_conn); } static inline void -tfw_connection_get(TfwConnection *conn) +tfw_connection_get(TfwConn *conn) { atomic_inc(&conn->refcnt); } static inline void -tfw_cli_conn_get(TfwCliConnection *cli_conn) +tfw_cli_conn_get(TfwCliConn *cli_conn) { - tfw_connection_get((TfwConnection *)cli_conn); + tfw_connection_get((TfwConn *)cli_conn); } static inline void -tfw_srv_conn_get(TfwSrvConnection *srv_conn) +tfw_srv_conn_get(TfwSrvConn *srv_conn) { - tfw_connection_get((TfwConnection *)srv_conn); + tfw_connection_get((TfwConn *)srv_conn); } /** @@ -270,7 +270,7 @@ tfw_srv_conn_get(TfwSrvConnection *srv_conn) * in failovering process, i.e. if @refcnt > 0. 
*/ static inline bool -__tfw_connection_get_if_live(TfwConnection *conn) +__tfw_connection_get_if_live(TfwConn *conn) { int old, rc = atomic_read(&conn->refcnt); @@ -284,13 +284,13 @@ __tfw_connection_get_if_live(TfwConnection *conn) return false; } static inline bool -tfw_srv_conn_get_if_live(TfwSrvConnection *srv_conn) +tfw_srv_conn_get_if_live(TfwSrvConn *srv_conn) { - return __tfw_connection_get_if_live((TfwConnection *)srv_conn); + return __tfw_connection_get_if_live((TfwConn *)srv_conn); } static inline void -tfw_connection_put(TfwConnection *conn) +tfw_connection_put(TfwConn *conn) { int rc; @@ -304,24 +304,24 @@ tfw_connection_put(TfwConnection *conn) conn->destructor(conn); } static inline void -tfw_cli_conn_put(TfwCliConnection *cli_conn) +tfw_cli_conn_put(TfwCliConn *cli_conn) { - tfw_connection_put((TfwConnection *)cli_conn); + tfw_connection_put((TfwConn *)cli_conn); } static inline void -tfw_srv_conn_put(TfwSrvConnection *srv_conn) +tfw_srv_conn_put(TfwSrvConn *srv_conn) { - tfw_connection_put((TfwConnection *)srv_conn); + tfw_connection_put((TfwConn *)srv_conn); } static inline void -tfw_connection_put_to_death(TfwConnection *conn) +tfw_connection_put_to_death(TfwConn *conn) { atomic_add(TFW_CONN_DEATHCNT, &conn->refcnt); } static inline void -tfw_connection_revive(TfwConnection *conn) +tfw_connection_revive(TfwConn *conn) { atomic_set(&conn->refcnt, 1); } @@ -333,7 +333,7 @@ tfw_connection_revive(TfwConnection *conn) * the reference to @conn instance for the socket can be found quickly. */ static inline void -tfw_connection_link_from_sk(TfwConnection *conn, struct sock *sk) +tfw_connection_link_from_sk(TfwConn *conn, struct sock *sk) { BUG_ON(sk->sk_user_data); sk->sk_user_data = conn; @@ -346,7 +346,7 @@ tfw_connection_link_from_sk(TfwConnection *conn, struct sock *sk) * get a hold of the socket to avoid premature socket release. 
*/ static inline void -tfw_connection_link_to_sk(TfwConnection *conn, struct sock *sk) +tfw_connection_link_to_sk(TfwConn *conn, struct sock *sk) { ss_sock_hold(sk); conn->sk = sk; @@ -373,7 +373,7 @@ tfw_connection_unlink_from_sk(struct sock *sk) * on the socket. A zeroed conn->sk is that indicator. */ static inline void -tfw_connection_unlink_to_sk(TfwConnection *conn) +tfw_connection_unlink_to_sk(TfwConn *conn) { struct sock *sk = conn->sk; @@ -382,23 +382,23 @@ tfw_connection_unlink_to_sk(TfwConnection *conn) } static inline void -tfw_connection_unlink_from_peer(TfwConnection *conn) +tfw_connection_unlink_from_peer(TfwConn *conn) { BUG_ON(!conn->peer || list_empty(&conn->list)); tfw_peer_del_conn(conn->peer, &conn->list); } static inline void -tfw_connection_unlink_msg(TfwConnection *conn) +tfw_connection_unlink_msg(TfwConn *conn) { conn->msg = NULL; } /** - * Check that TfwConnection resources are cleaned up properly. + * Check that TfwConn{} resources are cleaned up properly. */ static inline void -tfw_connection_validate_cleanup(TfwConnection *conn) +tfw_connection_validate_cleanup(TfwConn *conn) { BUG_ON(!conn); BUG_ON(!list_empty(&conn->list)); @@ -408,16 +408,16 @@ tfw_connection_validate_cleanup(TfwConnection *conn) void tfw_connection_hooks_register(TfwConnHooks *hooks, int type); void tfw_connection_hooks_unregister(int type); -int tfw_connection_send(TfwConnection *conn, TfwMsg *msg); +int tfw_connection_send(TfwConn *conn, TfwMsg *msg); /* Generic helpers, used for both client and server connections. 
*/ -void tfw_connection_init(TfwConnection *conn); -void tfw_connection_link_peer(TfwConnection *conn, TfwPeer *peer); +void tfw_connection_init(TfwConn *conn); +void tfw_connection_link_peer(TfwConn *conn, TfwPeer *peer); -int tfw_connection_new(TfwConnection *conn); -void tfw_connection_repair(TfwConnection *conn); -void tfw_connection_drop(TfwConnection *conn); -void tfw_connection_release(TfwConnection *conn); +int tfw_connection_new(TfwConn *conn); +void tfw_connection_repair(TfwConn *conn); +void tfw_connection_drop(TfwConn *conn); +void tfw_connection_release(TfwConn *conn); int tfw_connection_recv(void *cdata, struct sk_buff *skb, unsigned int off); diff --git a/tempesta_fw/http.c b/tempesta_fw/http.c index 8507f7b0c9..e0ac7656d1 100644 --- a/tempesta_fw/http.c +++ b/tempesta_fw/http.c @@ -391,7 +391,7 @@ tfw_http_req_is_nip(TfwHttpReq *req) * @req must be confirmed to be on the list. */ static inline void -__tfw_http_req_nip_delist(TfwSrvConnection *srv_conn, TfwHttpReq *req) +__tfw_http_req_nip_delist(TfwSrvConn *srv_conn, TfwHttpReq *req) { BUG_ON(list_empty(&req->nip_list)); list_del_init(&req->nip_list); @@ -404,7 +404,7 @@ __tfw_http_req_nip_delist(TfwSrvConnection *srv_conn, TfwHttpReq *req) * Raise the flag saying that the connection has non-idempotent requests. */ static inline void -__tfw_http_req_nip_enlist(TfwSrvConnection *srv_conn, TfwHttpReq *req) +__tfw_http_req_nip_enlist(TfwSrvConn *srv_conn, TfwHttpReq *req) { BUG_ON(!list_empty(&req->nip_list)); list_add_tail(&req->nip_list, &srv_conn->nip_queue); @@ -417,7 +417,7 @@ __tfw_http_req_nip_enlist(TfwSrvConnection *srv_conn, TfwHttpReq *req) * the list. 
*/ static inline void -tfw_http_req_nip_delist(TfwSrvConnection *srv_conn, TfwHttpReq *req) +tfw_http_req_nip_delist(TfwSrvConn *srv_conn, TfwHttpReq *req) { if (!list_empty(&req->nip_list)) __tfw_http_req_nip_delist(srv_conn, req); @@ -431,7 +431,7 @@ tfw_http_req_nip_delist(TfwSrvConnection *srv_conn, TfwHttpReq *req) * to tfw_http_req_add_seq_queue(). */ static inline void -tfw_http_conn_nip_delist(TfwSrvConnection *srv_conn) +tfw_http_conn_nip_delist(TfwSrvConn *srv_conn) { TfwHttpReq *req, *tmp; @@ -447,7 +447,7 @@ tfw_http_conn_nip_delist(TfwSrvConnection *srv_conn) * It's on hold it the request that was sent last was non-idempotent. */ static inline bool -tfw_http_conn_on_hold(TfwSrvConnection *srv_conn) +tfw_http_conn_on_hold(TfwSrvConn *srv_conn) { TfwHttpReq *req_sent = (TfwHttpReq *)srv_conn->msg_sent; @@ -461,7 +461,7 @@ tfw_http_conn_on_hold(TfwSrvConnection *srv_conn) * request that was sent last. */ static inline bool -tfw_http_conn_drained(TfwSrvConnection *srv_conn) +tfw_http_conn_drained(TfwSrvConn *srv_conn) { struct list_head *fwd_queue = &srv_conn->fwd_queue; TfwHttpReq *req_sent = (TfwHttpReq *)srv_conn->msg_sent; @@ -482,7 +482,7 @@ tfw_http_conn_drained(TfwSrvConnection *srv_conn) * that need to be forwarded. */ static inline bool -tfw_http_conn_need_fwd(TfwSrvConnection *srv_conn) +tfw_http_conn_need_fwd(TfwSrvConn *srv_conn) { return (!tfw_http_conn_on_hold(srv_conn) && !tfw_http_conn_drained(srv_conn)); @@ -492,7 +492,7 @@ tfw_http_conn_need_fwd(TfwSrvConnection *srv_conn) * Remove @req from the server connection's forwarding queue. */ static inline void -tfw_http_req_delist(TfwSrvConnection *srv_conn, TfwHttpReq *req) +tfw_http_req_delist(TfwSrvConn *srv_conn, TfwHttpReq *req) { tfw_http_req_nip_delist(srv_conn, req); list_del_init(&req->fwd_list); @@ -505,7 +505,7 @@ tfw_http_req_delist(TfwSrvConnection *srv_conn, TfwHttpReq *req) * in @equeue. The error code for an error response is saved as well. 
*/ static inline void -tfw_http_req_move2equeue(TfwSrvConnection *srv_conn, TfwHttpReq *req, +tfw_http_req_move2equeue(TfwSrvConn *srv_conn, TfwHttpReq *req, struct list_head *equeue, unsigned short status) { tfw_http_req_delist(srv_conn, req); @@ -552,7 +552,7 @@ tfw_http_req_zap_error(struct list_head *equeue) * move it to the error queue @equeue for sending an error response later. */ static inline bool -tfw_http_req_evict_timeout(TfwSrvConnection *srv_conn, TfwServer *srv, +tfw_http_req_evict_timeout(TfwSrvConn *srv_conn, TfwServer *srv, TfwHttpReq *req, struct list_head *equeue) { unsigned long jqage = jiffies - req->jrxtstamp; @@ -572,7 +572,7 @@ tfw_http_req_evict_timeout(TfwSrvConnection *srv_conn, TfwServer *srv, * move it to the error queue @equeue for sending an error response later. */ static inline bool -tfw_http_req_evict_retries(TfwSrvConnection *srv_conn, TfwServer *srv, +tfw_http_req_evict_retries(TfwSrvConn *srv_conn, TfwServer *srv, TfwHttpReq *req, struct list_head *equeue) { if (unlikely(req->retries++ >= srv->sg->max_refwd)) { @@ -589,12 +589,12 @@ tfw_http_req_evict_retries(TfwSrvConnection *srv_conn, TfwServer *srv, * move it to the error queue @equeue for sending an error response later. */ static inline bool -tfw_http_req_fwd_send(TfwSrvConnection *srv_conn, TfwServer *srv, +tfw_http_req_fwd_send(TfwSrvConn *srv_conn, TfwServer *srv, TfwHttpReq *req, struct list_head *equeue) { req->jtxtstamp = jiffies; - if (tfw_connection_send((TfwConnection *)srv_conn, (TfwMsg *)req)) { + if (tfw_connection_send((TfwConn *)srv_conn, (TfwMsg *)req)) { TFW_DBG2("%s: Forwarding error: conn=[%p] req=[%p]\n", __func__, srv_conn, req); tfw_http_req_move2equeue(srv_conn, req, equeue, 500); @@ -608,7 +608,7 @@ tfw_http_req_fwd_send(TfwSrvConnection *srv_conn, TfwServer *srv, * Return false if forwarding must be stopped, or true otherwise. 
*/ static inline bool -__tfw_http_req_fwd_single(TfwSrvConnection *srv_conn, TfwServer *srv, +__tfw_http_req_fwd_single(TfwSrvConn *srv_conn, TfwServer *srv, TfwHttpReq *req, struct list_head *equeue) { if (tfw_http_req_evict_timeout(srv_conn, srv, req, equeue)) @@ -627,7 +627,7 @@ __tfw_http_req_fwd_single(TfwSrvConnection *srv_conn, TfwServer *srv, * IT's also assumed that the forwarding queue is NOT drained. */ static void -__tfw_http_req_fwd_unsent(TfwSrvConnection *srv_conn, struct list_head *equeue) +__tfw_http_req_fwd_unsent(TfwSrvConn *srv_conn, struct list_head *equeue) { TfwHttpReq *req, *tmp; TfwServer *srv = (TfwServer *)srv_conn->peer; @@ -670,7 +670,7 @@ __tfw_http_req_fwd_unsent(TfwSrvConnection *srv_conn, struct list_head *equeue) * It's assumed that the forwarding queue in @srv_conn is locked. */ static inline void -tfw_http_req_fwd_unsent(TfwSrvConnection *srv_conn, struct list_head *equeue) +tfw_http_req_fwd_unsent(TfwSrvConn *srv_conn, struct list_head *equeue) { TFW_DBG2("%s: conn=[%p]\n", __func__, srv_conn); @@ -692,7 +692,7 @@ tfw_http_req_fwd_unsent(TfwSrvConnection *srv_conn, struct list_head *equeue) * See RFC 7230 6.3.2. */ static void -tfw_http_req_fwd(TfwSrvConnection *srv_conn, TfwHttpReq *req) +tfw_http_req_fwd(TfwSrvConn *srv_conn, TfwHttpReq *req) { LIST_HEAD(equeue); @@ -730,7 +730,7 @@ tfw_http_req_fwd(TfwSrvConnection *srv_conn, TfwHttpReq *req) * Note: @srv_conn->msg_sent may change in result. */ static inline void -tfw_http_req_fwd_treatnip(TfwSrvConnection *srv_conn, struct list_head *equeue) +tfw_http_req_fwd_treatnip(TfwSrvConn *srv_conn, struct list_head *equeue) { TfwServer *srv = (TfwServer *)srv_conn->peer; TfwHttpReq *req_sent = (TfwHttpReq *)srv_conn->msg_sent; @@ -752,8 +752,7 @@ tfw_http_req_fwd_treatnip(TfwSrvConnection *srv_conn, struct list_head *equeue) * the set limits are evicted. 
*/ static TfwHttpReq * -tfw_http_req_resend(TfwSrvConnection *srv_conn, - bool first, struct list_head *equeue) +tfw_http_req_resend(TfwSrvConn *srv_conn, bool first, struct list_head *equeue) { TfwHttpReq *req, *tmp, *req_resent = NULL; TfwServer *srv = (TfwServer *)srv_conn->peer; @@ -790,7 +789,7 @@ tfw_http_req_resend(TfwSrvConnection *srv_conn, * Re-send only the first unanswered request in the forwarding queue. */ static inline TfwHttpReq * -tfw_http_req_resend_first(TfwSrvConnection *srv_conn, struct list_head *equeue) +tfw_http_req_resend_first(TfwSrvConn *srv_conn, struct list_head *equeue) { return tfw_http_req_resend(srv_conn, true, equeue); } @@ -799,7 +798,7 @@ tfw_http_req_resend_first(TfwSrvConnection *srv_conn, struct list_head *equeue) * Re-send all unanswered requests in the forwarding queue. */ static inline TfwHttpReq * -tfw_http_req_resend_all(TfwSrvConnection *srv_conn, struct list_head *equeue) +tfw_http_req_resend_all(TfwSrvConn *srv_conn, struct list_head *equeue) { return tfw_http_req_resend(srv_conn, false, equeue); } @@ -810,7 +809,7 @@ tfw_http_req_resend_all(TfwSrvConnection *srv_conn, struct list_head *equeue) * The connection is not scheduled until all requests in it are re-sent. */ static void -__tfw_http_req_fwd_repair(TfwSrvConnection *srv_conn, struct list_head *equeue) +__tfw_http_req_fwd_repair(TfwSrvConn *srv_conn, struct list_head *equeue) { TFW_DBG2("%s: conn=[%p]\n", __func__, srv_conn); WARN_ON(!spin_is_locked(&srv_conn->fwd_qlock)); @@ -861,10 +860,10 @@ __tfw_http_req_fwd_repair(TfwSrvConnection *srv_conn, struct list_head *equeue) * Unlucky requests are just given another chance with minimal effort. 
*/ static void -tfw_http_req_resched(TfwSrvConnection *srv_conn, struct list_head *equeue) +tfw_http_req_resched(TfwSrvConn *srv_conn, struct list_head *equeue) { TfwHttpReq *req, *tmp; - TfwSrvConnection *sch_conn; + TfwSrvConn *sch_conn; TfwServer *srv = (TfwServer *)srv_conn->peer; struct list_head *fwd_queue = &srv_conn->fwd_queue; @@ -900,9 +899,9 @@ tfw_http_req_resched(TfwSrvConnection *srv_conn, struct list_head *equeue) * rest of those unanswered requests (__tfw_http_req_fwd_repair()). */ static void -tfw_http_conn_repair(TfwConnection *conn) +tfw_http_conn_repair(TfwConn *conn) { - TfwSrvConnection *srv_conn = (TfwSrvConnection *)conn; + TfwSrvConn *srv_conn = (TfwSrvConn *)conn; TfwHttpReq *req_resent = NULL; LIST_HEAD(equeue); @@ -961,7 +960,7 @@ tfw_http_req_destruct(void *msg) * of the connection structure. Initialize GFSM for the message. */ static TfwMsg * -tfw_http_conn_msg_alloc(TfwConnection *conn) +tfw_http_conn_msg_alloc(TfwConn *conn) { TfwHttpMsg *hm = tfw_http_msg_alloc(TFW_CONN_TYPE(conn)); if (unlikely(!hm)) @@ -974,7 +973,7 @@ tfw_http_conn_msg_alloc(TfwConnection *conn) TFW_INC_STAT_BH(clnt.rx_messages); } else { TfwHttpReq *req; - TfwSrvConnection *srv_conn = (TfwSrvConnection *)conn; + TfwSrvConn *srv_conn = (TfwSrvConn *)conn; spin_lock(&srv_conn->fwd_qlock); req = list_first_entry_or_null(&srv_conn->fwd_queue, @@ -1032,12 +1031,12 @@ tfw_http_conn_msg_free(TfwHttpMsg *hm) * state machine here. */ static int -tfw_http_conn_init(TfwConnection *conn) +tfw_http_conn_init(TfwConn *conn) { TFW_DBG2("%s: conn=[%p]\n", __func__, conn); if (TFW_CONN_TYPE(conn) & Conn_Srv) { - TfwSrvConnection *srv_conn = (TfwSrvConnection *)conn; + TfwSrvConn *srv_conn = (TfwSrvConn *)conn; if (!list_empty(&srv_conn->fwd_queue)) set_bit(TFW_CONN_B_RESEND, &srv_conn->flags); clear_bit(TFW_CONN_B_ISDEAD, &srv_conn->flags); @@ -1053,7 +1052,7 @@ tfw_http_conn_init(TfwConnection *conn) * Called only when Tempesta is stopped. 
*/ static void -tfw_http_conn_srv_release(TfwSrvConnection *srv_conn) +tfw_http_conn_srv_release(TfwSrvConn *srv_conn) { TfwHttpReq *req, *tmp; struct list_head *fwd_queue = &srv_conn->fwd_queue; @@ -1065,10 +1064,10 @@ tfw_http_conn_srv_release(TfwSrvConnection *srv_conn) list_for_each_entry_safe(req, tmp, fwd_queue, fwd_list) { tfw_http_req_delist(srv_conn, req); if (unlikely(!list_empty_careful(&req->msg.seq_list))) { - spin_lock(&((TfwCliConnection *)req->conn)->seq_qlock); + spin_lock(&((TfwCliConn *)req->conn)->seq_qlock); if (unlikely(!list_empty(&req->msg.seq_list))) list_del_init(&req->msg.seq_list); - spin_unlock(&((TfwCliConnection *)req->conn)->seq_qlock); + spin_unlock(&((TfwCliConn *)req->conn)->seq_qlock); } tfw_http_conn_msg_free((TfwHttpMsg *)req); } @@ -1086,9 +1085,9 @@ tfw_http_conn_srv_release(TfwSrvConnection *srv_conn) * so locks are not needed. */ static void -tfw_http_conn_release(TfwConnection *conn) +tfw_http_conn_release(TfwConn *conn) { - TfwSrvConnection *srv_conn = (TfwSrvConnection *)conn; + TfwSrvConn *srv_conn = (TfwSrvConn *)conn; TFW_DBG2("%s: conn=[%p]\n", __func__, srv_conn); BUG_ON(!(TFW_CONN_TYPE(srv_conn) & Conn_Srv)); @@ -1129,7 +1128,7 @@ __tfw_http_resp_pair_free(TfwHttpReq *req) * connection threads. 
*/ static void -tfw_http_conn_cli_drop(TfwCliConnection *cli_conn) +tfw_http_conn_cli_drop(TfwCliConn *cli_conn) { TfwHttpReq *req, *tmp; struct list_head *seq_queue = &cli_conn->seq_queue; @@ -1166,12 +1165,12 @@ tfw_http_conn_cli_drop(TfwCliConnection *cli_conn) static void tfw_http_resp_terminate(TfwHttpMsg *hm); static void -tfw_http_conn_drop(TfwConnection *conn) +tfw_http_conn_drop(TfwConn *conn) { TFW_DBG2("%s: conn=[%p]\n", __func__, conn); if (TFW_CONN_TYPE(conn) & Conn_Clnt) { - tfw_http_conn_cli_drop((TfwCliConnection *)conn); + tfw_http_conn_cli_drop((TfwCliConn *)conn); } else if (conn->msg) { if (tfw_http_parse_terminate((TfwHttpMsg *)conn->msg)) tfw_http_resp_terminate((TfwHttpMsg *)conn->msg); @@ -1185,7 +1184,7 @@ tfw_http_conn_drop(TfwConnection *conn) * Called when the connection is used to send a message through. */ static int -tfw_http_conn_send(TfwConnection *conn, TfwMsg *msg) +tfw_http_conn_send(TfwConn *conn, TfwMsg *msg) { return ss_send(conn->sk, &msg->skb_list, msg->ss_flags); } @@ -1458,7 +1457,7 @@ tfw_http_adjust_resp(TfwHttpResp *resp, TfwHttpReq *req) * responses are taken care of by the caller. 
*/ static void -__tfw_http_resp_fwd(TfwCliConnection *cli_conn, struct list_head *ret_queue) +__tfw_http_resp_fwd(TfwCliConn *cli_conn, struct list_head *ret_queue) { TfwHttpReq *req, *tmp; @@ -1483,7 +1482,7 @@ __tfw_http_resp_fwd(TfwCliConnection *cli_conn, struct list_head *ret_queue) void tfw_http_resp_fwd(TfwHttpReq *req, TfwHttpResp *resp) { - TfwCliConnection *cli_conn = (TfwCliConnection *)req->conn; + TfwCliConn *cli_conn = (TfwCliConn *)req->conn; struct list_head *seq_queue = &cli_conn->seq_queue; struct list_head *req_retent = NULL; LIST_HEAD(ret_queue); @@ -1585,7 +1584,7 @@ static void tfw_http_req_cache_cb(TfwHttpReq *req, TfwHttpResp *resp) { int r; - TfwSrvConnection *srv_conn = NULL; + TfwSrvConn *srv_conn = NULL; TFW_DBG2("%s: req = %p, resp = %p\n", __func__, req, resp); @@ -1625,7 +1624,7 @@ tfw_http_req_cache_cb(TfwHttpReq *req, TfwHttpResp *resp) * an appropriate scheduler. That eliminates the long generic * scheduling work flow. When the first request in a session is * scheduled by the generic logic, TfwSession->srv_conn must be - * initialized to point at the appropriate TfwConnection, so that + * initialized to point at the appropriate TfwConn{}, so that * all subsequent session hits are scheduled much faster. */ if (!(srv_conn = tfw_sched_get_srv_conn((TfwMsg *)req))) { @@ -1700,7 +1699,7 @@ static void tfw_http_req_add_seq_queue(TfwHttpReq *req) { TfwHttpReq *req_prev; - TfwCliConnection *cli_conn = (TfwCliConnection *)req->conn; + TfwCliConn *cli_conn = (TfwCliConn *)req->conn; struct list_head *seq_queue = &cli_conn->seq_queue; tfw_http_req_mark_nip(req); @@ -1728,7 +1727,7 @@ tfw_http_req_set_context(TfwHttpReq *req) * TODO enter the function depending on current GFSM state. 
*/ static int -tfw_http_req_process(TfwConnection *conn, struct sk_buff *skb, unsigned int off) +tfw_http_req_process(TfwConn *conn, struct sk_buff *skb, unsigned int off) { int r = TFW_BLOCK; unsigned int data_off = off; @@ -1996,7 +1995,7 @@ static TfwHttpReq * tfw_http_popreq(TfwHttpMsg *hmresp) { TfwHttpReq *req; - TfwSrvConnection *srv_conn = (TfwSrvConnection *)hmresp->conn; + TfwSrvConn *srv_conn = (TfwSrvConn *)hmresp->conn; struct list_head *fwd_queue = &srv_conn->fwd_queue; LIST_HEAD(equeue); @@ -2158,8 +2157,7 @@ tfw_http_resp_terminate(TfwHttpMsg *hm) * TODO enter the function depending on current GFSM state. */ static int -tfw_http_resp_process(TfwConnection *conn, struct sk_buff *skb, - unsigned int off) +tfw_http_resp_process(TfwConn *conn, struct sk_buff *skb, unsigned int off) { int r = TFW_BLOCK; unsigned int data_off = off; @@ -2318,7 +2316,7 @@ tfw_http_resp_process(TfwConnection *conn, struct sk_buff *skb, int tfw_http_msg_process(void *conn, struct sk_buff *skb, unsigned int off) { - TfwConnection *c = (TfwConnection *)conn; + TfwConn *c = (TfwConn *)conn; if (unlikely(!c->msg)) { c->msg = tfw_http_conn_msg_alloc(c); diff --git a/tempesta_fw/http.h b/tempesta_fw/http.h index 33808927b6..935f7aec0a 100644 --- a/tempesta_fw/http.h +++ b/tempesta_fw/http.h @@ -285,7 +285,7 @@ typedef struct { atomic_t users; unsigned long ts; unsigned long expires; - TfwConnection *srv_conn; + TfwSrvConn *srv_conn; } TfwHttpSess; /** @@ -315,7 +315,7 @@ typedef struct { unsigned int flags; \ unsigned long content_length; \ unsigned int keep_alive; \ - TfwConnection *conn; \ + TfwConn *conn; \ void (*destructor)(void *msg); \ TfwStr crlf; \ TfwStr body; diff --git a/tempesta_fw/http_sess.c b/tempesta_fw/http_sess.c index ad9cc6efdd..25729ee6cd 100644 --- a/tempesta_fw/http_sess.c +++ b/tempesta_fw/http_sess.c @@ -623,8 +623,8 @@ tfw_http_sess_init(void) return ret; } - sess_cache = kmem_cache_create("tfw_sess_cache", sizeof(TfwHttpSess), - 0, 0, NULL); + 
sess_cache = kmem_cache_create("tfw_sess_cache", + sizeof(TfwHttpSess), 0, 0, NULL); if (!sess_cache) { crypto_free_shash(tfw_sticky_shash); return -ENOMEM; diff --git a/tempesta_fw/procfs.c b/tempesta_fw/procfs.c index 7f49ef823d..fb21e3133d 100644 --- a/tempesta_fw/procfs.c +++ b/tempesta_fw/procfs.c @@ -147,7 +147,7 @@ tfw_srvstats_seq_show(struct seq_file *seq, void *off) #define SPRNE(m, e) seq_printf(seq, m": %dms\n", e) int i; - TfwSrvConnection *srv_conn; + TfwSrvConn *srv_conn; TfwServer *srv = seq->private; TfwPrcntl prcntl[ARRAY_SIZE(tfw_procfs_prcntl)]; TfwPrcntlStats pstats = { prcntl, ARRAY_SIZE(prcntl) }; diff --git a/tempesta_fw/sched.c b/tempesta_fw/sched.c index ed19eb0fe3..dba8a64308 100644 --- a/tempesta_fw/sched.c +++ b/tempesta_fw/sched.c @@ -51,10 +51,10 @@ static DEFINE_SPINLOCK(sched_lock); * * This function is always called in SoftIRQ context. */ -TfwSrvConnection * +TfwSrvConn * tfw_sched_get_srv_conn(TfwMsg *msg) { - TfwSrvConnection *srv_conn; + TfwSrvConn *srv_conn; TfwScheduler *sched; rcu_read_lock(); diff --git a/tempesta_fw/sched/tfw_sched_hash.c b/tempesta_fw/sched/tfw_sched_hash.c index 89116169d2..c3c0c8ed57 100644 --- a/tempesta_fw/sched/tfw_sched_hash.c +++ b/tempesta_fw/sched/tfw_sched_hash.c @@ -49,8 +49,8 @@ MODULE_VERSION("0.2.1"); MODULE_LICENSE("GPL"); typedef struct { - TfwSrvConnection *srv_conn; - unsigned long hash; + TfwSrvConn *srv_conn; + unsigned long hash; } TfwConnHash; /* The last item is used as the list teminator. */ @@ -99,8 +99,7 @@ __calc_conn_hash(TfwServer *srv, size_t conn_idx) } static void -tfw_sched_hash_add_conn(TfwSrvGroup *sg, TfwServer *srv, - TfwSrvConnection *srv_conn) +tfw_sched_hash_add_conn(TfwSrvGroup *sg, TfwServer *srv, TfwSrvConn *srv_conn) { size_t i; TfwConnHash *conn_hash = sg->sched_data; @@ -136,11 +135,11 @@ tfw_sched_hash_add_conn(TfwSrvGroup *sg, TfwServer *srv, * - For every HTTP request, we have to scan the list of all servers to find * a matching one with the highest weight. 
That adds some overhead. */ -static TfwSrvConnection * +static TfwSrvConn * tfw_sched_hash_get_srv_conn(TfwMsg *msg, TfwSrvGroup *sg) { unsigned long tries, msg_hash, curr_weight, best_weight = 0; - TfwSrvConnection *best_srv_conn = NULL; + TfwSrvConn *best_srv_conn = NULL; TfwConnHash *ch; msg_hash = tfw_http_req_key_calc((TfwHttpReq *)msg); diff --git a/tempesta_fw/sched/tfw_sched_http.c b/tempesta_fw/sched/tfw_sched_http.c index 106009f732..83ae129d09 100644 --- a/tempesta_fw/sched/tfw_sched_http.c +++ b/tempesta_fw/sched/tfw_sched_http.c @@ -99,11 +99,11 @@ static TfwHttpMatchList *tfw_sched_http_rules; * The search is based on contents of an HTTP request and match rules * that specify which Server Group the request should be forwarded to. */ -static TfwSrvConnection * +static TfwSrvConn * tfw_sched_http_sched_grp(TfwMsg *msg) { TfwSrvGroup *sg; - TfwSrvConnection *srv_conn; + TfwSrvConn *srv_conn; TfwSchedHttpRule *rule; if(!tfw_sched_http_rules || list_empty(&tfw_sched_http_rules->list)) @@ -136,7 +136,7 @@ tfw_sched_http_sched_grp(TfwMsg *msg) return srv_conn; } -static TfwSrvConnection * +static TfwSrvConn * tfw_sched_http_sched_srv(TfwMsg *msg, TfwSrvGroup *sg) { WARN_ONCE(true, "tfw_sched_http can't select a server from a group\n"); diff --git a/tempesta_fw/sched/tfw_sched_rr.c b/tempesta_fw/sched/tfw_sched_rr.c index 06a2f14674..cd107ff3fd 100644 --- a/tempesta_fw/sched/tfw_sched_rr.c +++ b/tempesta_fw/sched/tfw_sched_rr.c @@ -36,10 +36,10 @@ MODULE_LICENSE("GPL"); * taken into account by the scheduler. */ typedef struct { - atomic64_t rr_counter; - size_t conn_n; - TfwServer *srv; - TfwSrvConnection *srv_conns[TFW_SRV_MAX_CONN]; + atomic64_t rr_counter; + size_t conn_n; + TfwServer *srv; + TfwSrvConn *conns[TFW_SRV_MAX_CONN]; } TfwRrSrv; /** @@ -48,9 +48,9 @@ typedef struct { * whole run-time. This can be changed in future. 
*/ typedef struct { - atomic64_t rr_counter; - size_t srv_n; - TfwRrSrv srvs[TFW_SG_MAX_SRV]; + atomic64_t rr_counter; + size_t srv_n; + TfwRrSrv srvs[TFW_SG_MAX_SRV]; } TfwRrSrvList; static void @@ -71,8 +71,7 @@ tfw_sched_rr_free_data(TfwSrvGroup *sg) * Called at configuration phase, no synchronization is required. */ static void -tfw_sched_rr_add_conn(TfwSrvGroup *sg, TfwServer *srv, - TfwSrvConnection *srv_conn) +tfw_sched_rr_add_conn(TfwSrvGroup *sg, TfwServer *srv, TfwSrvConn *srv_conn) { size_t s, c; TfwRrSrv *srv_cl; @@ -91,12 +90,12 @@ tfw_sched_rr_add_conn(TfwSrvGroup *sg, TfwServer *srv, srv_cl = &sl->srvs[s]; for (c = 0; c < srv_cl->conn_n; ++c) - if (srv_cl->srv_conns[c] == srv_conn) { + if (srv_cl->conns[c] == srv_conn) { TFW_WARN("sched_rr: Try to add existing connection," " srv=%zu conn=%zu\n", s, c); return; } - srv_cl->srv_conns[c] = srv_conn; + srv_cl->conns[c] = srv_conn; ++srv_cl->conn_n; BUG_ON(srv_cl->conn_n > TFW_SRV_MAX_CONN); } @@ -118,7 +117,7 @@ tfw_sched_rr_add_conn(TfwSrvGroup *sg, TfwServer *srv, * optimistic in that there are not many non-idempotent requests, and * there are available server connections. 
*/ -static TfwSrvConnection * +static TfwSrvConn * tfw_sched_rr_get_srv_conn(TfwMsg *msg, TfwSrvGroup *sg) { size_t c, s; @@ -126,7 +125,7 @@ tfw_sched_rr_get_srv_conn(TfwMsg *msg, TfwSrvGroup *sg) int skipnip = 1, nipconn = 0; TfwRrSrvList *sl = sg->sched_data; TfwRrSrv *srv_cl; - TfwSrvConnection *srv_conn; + TfwSrvConn *srv_conn; BUG_ON(!sl); rerun: @@ -135,7 +134,7 @@ tfw_sched_rr_get_srv_conn(TfwMsg *msg, TfwSrvGroup *sg) srv_cl = &sl->srvs[idxval % sl->srv_n]; for (c = 0; c < srv_cl->conn_n; ++c) { idxval = atomic64_inc_return(&srv_cl->rr_counter); - srv_conn = srv_cl->srv_conns[idxval % srv_cl->conn_n]; + srv_conn = srv_cl->conns[idxval % srv_cl->conn_n]; if (unlikely(tfw_srv_conn_restricted(srv_conn) || tfw_server_queue_full(srv_conn))) continue; diff --git a/tempesta_fw/server.c b/tempesta_fw/server.c index bc4dafab38..ccd5440bae 100644 --- a/tempesta_fw/server.c +++ b/tempesta_fw/server.c @@ -185,7 +185,7 @@ tfw_sg_add(TfwSrvGroup *sg, TfwServer *srv) } void -tfw_sg_add_conn(TfwSrvGroup *sg, TfwServer *srv, TfwSrvConnection *srv_conn) +tfw_sg_add_conn(TfwSrvGroup *sg, TfwServer *srv, TfwSrvConn *srv_conn) { if (sg->sched && sg->sched->add_conn) sg->sched->add_conn(sg, srv, srv_conn); diff --git a/tempesta_fw/server.h b/tempesta_fw/server.h index e5bd9dbdaf..22d1e11b5b 100644 --- a/tempesta_fw/server.h +++ b/tempesta_fw/server.h @@ -25,8 +25,8 @@ #include "connection.h" #include "peer.h" -#define TFW_SRV_MAX_CONN 32 /* TfwSrvConnection per TfwServer */ -#define TFW_SG_MAX_SRV 32 /* TfwServer per TfwSrvGroup */ +#define TFW_SRV_MAX_CONN 32 /* TfwSrvConn{} per TfwServer{} */ +#define TFW_SG_MAX_SRV 32 /* TfwServer{} per TfwSrvGroup{} */ #define TFW_SG_MAX_CONN (TFW_SG_MAX_SRV * TFW_SRV_MAX_CONN) typedef struct tfw_srv_group_t TfwSrvGroup; @@ -112,9 +112,9 @@ struct tfw_scheduler_t { void (*add_grp)(TfwSrvGroup *sg); void (*del_grp)(TfwSrvGroup *sg); void (*add_conn)(TfwSrvGroup *sg, TfwServer *srv, - TfwSrvConnection *srv_conn); - TfwSrvConnection 
*(*sched_grp)(TfwMsg *msg); - TfwSrvConnection *(*sched_srv)(TfwMsg *msg, TfwSrvGroup *sg); + TfwSrvConn *srv_conn); + TfwSrvConn *(*sched_grp)(TfwMsg *msg); + TfwSrvConn *(*sched_srv)(TfwMsg *msg, TfwSrvGroup *sg); }; /* Server specific routines. */ @@ -122,10 +122,10 @@ TfwServer *tfw_server_create(const TfwAddr *addr); int tfw_server_apm_create(TfwServer *srv); void tfw_server_destroy(TfwServer *srv); -void tfw_srv_conn_release(TfwSrvConnection *srv_conn); +void tfw_srv_conn_release(TfwSrvConn *srv_conn); static inline bool -tfw_server_queue_full(TfwSrvConnection *srv_conn) +tfw_server_queue_full(TfwSrvConn *srv_conn) { TfwSrvGroup *sg = ((TfwServer *)srv_conn->peer)->sg; return ACCESS_ONCE(srv_conn->qsize) >= sg->max_qsize; @@ -138,14 +138,13 @@ void tfw_sg_free(TfwSrvGroup *sg); int tfw_sg_count(void); void tfw_sg_add(TfwSrvGroup *sg, TfwServer *srv); -void tfw_sg_add_conn(TfwSrvGroup *sg, TfwServer *srv, - TfwSrvConnection *srv_conn); +void tfw_sg_add_conn(TfwSrvGroup *sg, TfwServer *srv, TfwSrvConn *srv_conn); int tfw_sg_set_sched(TfwSrvGroup *sg, const char *sched); int tfw_sg_for_each_srv(int (*cb)(TfwServer *srv)); void tfw_sg_release_all(void); /* Scheduler routines. */ -TfwSrvConnection *tfw_sched_get_srv_conn(TfwMsg *msg); +TfwSrvConn *tfw_sched_get_srv_conn(TfwMsg *msg); TfwScheduler *tfw_sched_lookup(const char *name); int tfw_sched_register(TfwScheduler *sched); void tfw_sched_unregister(TfwScheduler *sched); diff --git a/tempesta_fw/sock_clnt.c b/tempesta_fw/sock_clnt.c index 180eb9d39b..c25371158d 100644 --- a/tempesta_fw/sock_clnt.c +++ b/tempesta_fw/sock_clnt.c @@ -38,20 +38,20 @@ */ static struct kmem_cache *tfw_cli_conn_cache; -static struct kmem_cache *tfw_cli_conn_tls_cache; +static struct kmem_cache *tfw_tls_conn_cache; static int tfw_cli_cfg_ka_timeout = -1; static inline struct kmem_cache * tfw_cli_cache(int type) { return type == Conn_HttpClnt ? 
- tfw_cli_conn_cache : tfw_cli_conn_tls_cache; + tfw_cli_conn_cache : tfw_tls_conn_cache; } static void tfw_sock_cli_keepalive_timer_cb(unsigned long data) { - TfwCliConnection *cli_conn = (TfwCliConnection *)data; + TfwCliConn *cli_conn = (TfwCliConn *)data; TFW_DBG("Client timeout end\n"); @@ -64,15 +64,15 @@ tfw_sock_cli_keepalive_timer_cb(unsigned long data) mod_timer(&cli_conn->timer, jiffies + msecs_to_jiffies(1000)); } -static TfwCliConnection * +static TfwCliConn * tfw_cli_conn_alloc(int type) { - TfwCliConnection *cli_conn; + TfwCliConn *cli_conn; if (!(cli_conn = kmem_cache_alloc(tfw_cli_cache(type), GFP_ATOMIC))) return NULL; - tfw_connection_init((TfwConnection *)cli_conn); + tfw_connection_init((TfwConn *)cli_conn); INIT_LIST_HEAD(&cli_conn->seq_queue); spin_lock_init(&cli_conn->seq_qlock); spin_lock_init(&cli_conn->ret_qlock); @@ -85,24 +85,24 @@ tfw_cli_conn_alloc(int type) } static void -tfw_cli_conn_free(TfwCliConnection *cli_conn) +tfw_cli_conn_free(TfwCliConn *cli_conn) { BUG_ON(timer_pending(&cli_conn->timer)); /* Check that all nested resources are freed. 
*/ - tfw_connection_validate_cleanup((TfwConnection *)cli_conn); + tfw_connection_validate_cleanup((TfwConn *)cli_conn); BUG_ON(!list_empty(&cli_conn->seq_queue)); kmem_cache_free(tfw_cli_cache(TFW_CONN_TYPE(cli_conn)), cli_conn); } void -tfw_cli_conn_release(TfwCliConnection *cli_conn) +tfw_cli_conn_release(TfwCliConn *cli_conn) { del_timer_sync(&cli_conn->timer); if (likely(cli_conn->sk)) - tfw_connection_unlink_to_sk((TfwConnection *)cli_conn); + tfw_connection_unlink_to_sk((TfwConn *)cli_conn); if (likely(cli_conn->peer)) tfw_client_put((TfwClient *)cli_conn->peer); tfw_cli_conn_free(cli_conn); @@ -110,11 +110,11 @@ tfw_cli_conn_release(TfwCliConnection *cli_conn) } int -tfw_cli_conn_send(TfwCliConnection *cli_conn, TfwMsg *msg) +tfw_cli_conn_send(TfwCliConn *cli_conn, TfwMsg *msg) { int r; - r = tfw_connection_send((TfwConnection *)cli_conn, msg); + r = tfw_connection_send((TfwConn *)cli_conn, msg); mod_timer(&cli_conn->timer, jiffies + msecs_to_jiffies(tfw_cli_cfg_ka_timeout * 1000)); @@ -131,7 +131,7 @@ tfw_sock_clnt_new(struct sock *sk) { int r = -ENOMEM; TfwClient *cli; - TfwConnection *conn; + TfwConn *conn; SsProto *listen_sock_proto; TFW_DBG3("new client socket: sk=%p, state=%u\n", sk, sk->sk_state); @@ -140,8 +140,8 @@ tfw_sock_clnt_new(struct sock *sk) /* * New sk->sk_user_data points to TfwListenSock{} of the parent * listening socket. We set it to NULL to stop other functions - * from referencing TfwListenSock{} while a new TfwConnection{} - * object is not yet allocated/initialized. + * from referencing TfwListenSock{} while a new TfwConn{} object + * is not yet allocated/initialized. 
*/ listen_sock_proto = sk->sk_user_data; tfw_connection_unlink_from_sk(sk); @@ -152,7 +152,7 @@ tfw_sock_clnt_new(struct sock *sk) return -ENOENT; } - conn = (TfwConnection *)tfw_cli_conn_alloc(listen_sock_proto->type); + conn = (TfwConn *)tfw_cli_conn_alloc(listen_sock_proto->type); if (!conn) { TFW_ERR("can't allocate a new client connection\n"); goto err_client; @@ -187,7 +187,7 @@ tfw_sock_clnt_new(struct sock *sk) err_conn: tfw_connection_drop(conn); - tfw_cli_conn_free((TfwCliConnection *)conn); + tfw_cli_conn_free((TfwCliConn *)conn); err_client: tfw_client_put(cli); return r; @@ -196,7 +196,7 @@ tfw_sock_clnt_new(struct sock *sk) static int tfw_sock_clnt_do_drop(struct sock *sk, const char *msg) { - TfwConnection *conn = sk->sk_user_data; + TfwConn *conn = sk->sk_user_data; TFW_DBG3("%s: close client socket: sk=%p, conn=%p, client=%p\n", msg, sk, conn, conn->peer); @@ -578,22 +578,20 @@ int tfw_sock_clnt_init(void) { BUG_ON(tfw_cli_conn_cache); - BUG_ON(tfw_cli_conn_tls_cache); + BUG_ON(tfw_tls_conn_cache); tfw_cli_conn_cache = kmem_cache_create("tfw_cli_conn_cache", - sizeof(TfwCliConnection), - 0, 0, NULL); - tfw_cli_conn_tls_cache = kmem_cache_create("tfw_cli_conn_tls_cache", - sizeof(TfwTlsConnection), - 0, 0, NULL); + sizeof(TfwCliConn), 0, 0, NULL); + tfw_tls_conn_cache = kmem_cache_create("tfw_tls_conn_cache", + sizeof(TfwTlsConn), 0, 0, NULL); - if (tfw_cli_conn_cache && tfw_cli_conn_tls_cache) + if (tfw_cli_conn_cache && tfw_tls_conn_cache) return 0; if (tfw_cli_conn_cache) kmem_cache_destroy(tfw_cli_conn_cache); - if (tfw_cli_conn_tls_cache) - kmem_cache_destroy(tfw_cli_conn_tls_cache); + if (tfw_tls_conn_cache) + kmem_cache_destroy(tfw_tls_conn_cache); return -ENOMEM; } @@ -601,6 +599,6 @@ tfw_sock_clnt_init(void) void tfw_sock_clnt_exit(void) { - kmem_cache_destroy(tfw_cli_conn_tls_cache); + kmem_cache_destroy(tfw_tls_conn_cache); kmem_cache_destroy(tfw_cli_conn_cache); } diff --git a/tempesta_fw/sock_srv.c b/tempesta_fw/sock_srv.c index 
e7bca68f70..ffa9c4a183 100644 --- a/tempesta_fw/sock_srv.c +++ b/tempesta_fw/sock_srv.c @@ -55,25 +55,25 @@ */ /** - * A server connection differs from a client connection. - * For clients, a new TfwCliConnection{} instance is created when a new - * client socket is accepted (the connection is established at that point). - * For servers, a socket is created first, and then some time passes while + * A server connection differs from a client connection. For clients, + * a new TfwCliConn{} instance is created when a new client socket is + * accepted (the connection is established at that point). For servers, + * a socket is created first, and then there's a period of time while * a connection is being established. * - * TfwSrvConnection{} instance goes though the following periods of life: - * - First, a TfwSrvConnection{} instance is allocated and set up with + * TfwSrvConn{} instance goes through the following periods of life: + * - First, a TfwSrvConn{} instance is allocated and set up with * data from configuration file. - * - When a server socket is created, the TfwSrvConnection{} instance + * - When a server socket is created, the TfwSrvConn{} instance * is partially initialized to allow a connect attempt to complete. - * - When a connection is established, the TfwSrvConnection{} instance + * - When a connection is established, the TfwSrvConn{} instance * is fully initialized and set up. - * - If a connect attempt has failed, or the connection has been reset - * or closed, the same TfwSrvConnection{} instance is reused with + * - If a connect attempt has failed, or the connection has been + * reset or closed, the same TfwSrvConn{} instance is reused with * a new socket. Another attempt to establish a connection is made. * - * So a TfwSrvConnection{} instance has a longer lifetime. 
In a sense, + * a TfwSrvConn{} instance is persistent. It lives from the time * it is created when Tempesta is started, and until the time it is * destroyed when Tempesta is stopped. * @@ -84,13 +84,13 @@ * reused for a new connection, and a new socket is created. Note that * @sk member is not cleared when it is no longer valid, and there is * a time frame until new connection is actually established. An old - * non-valid @sk stays a member of an TfwSrvConnection{} instance during + * non-valid @sk stays a member of an TfwSrvConn{} instance during * that time frame. However, the condition for reuse of an instance is * that there're no more users of the instance, so no thread can make * use of an old socket @sk. Should something bad happen, then having * a stale pointer in conn->sk is no different than having a NULL pointer. * - * The reference counter is still needed for TfwSrvConnection{} instances. + * The reference counter is still needed for TfwSrvConn{} instances. * It tells when an instance can be reused for a new connect attempt. * A scenario that may occur is as follows: * 1. There's a client's request, so scheduler finds a server connection @@ -99,7 +99,7 @@ * 2. At that time the server sends RST on that connection in response * to an earlier request. It starts the failover procedure that runs * in parallel. Part of the procedure is a new attempt to connect to - * the server, which requires that TfwSrvConnection{} instance can be + * the server, which requires that TfwSrvConn{} instance can be * reused. So the attempt to reconnect has to wait. It is started as * soon as the last client releases the server connection. */ @@ -124,7 +124,7 @@ static const unsigned long tfw_srv_tmo_vals[] = { 1, 10, 100, 250, 500, 1000 }; * Returns immediately without waiting until a connection is established. 
*/ static int -tfw_sock_srv_connect_try(TfwSrvConnection *srv_conn) +tfw_sock_srv_connect_try(TfwSrvConn *srv_conn) { int r; TfwAddr *addr; @@ -147,7 +147,7 @@ tfw_sock_srv_connect_try(TfwSrvConnection *srv_conn) #if defined(DEBUG) && (DEBUG >= 2) sock_set_flag(sk, SOCK_DBG); #endif - tfw_connection_link_from_sk((TfwConnection *)srv_conn, sk); + tfw_connection_link_from_sk((TfwConn *)srv_conn, sk); ss_set_callbacks(sk); /* @@ -195,7 +195,7 @@ tfw_sock_srv_connect_try(TfwSrvConnection *srv_conn) * stay dead until Tempesta is restarted. */ static inline void -tfw_sock_srv_connect_try_later(TfwSrvConnection *srv_conn) +tfw_sock_srv_connect_try_later(TfwSrvConn *srv_conn) { TfwSrvGroup *sg = ((TfwServer *)srv_conn->peer)->sg; unsigned long timeout; @@ -209,7 +209,7 @@ tfw_sock_srv_connect_try_later(TfwSrvConnection *srv_conn) TFW_WARN("The limit of [%d] on reconnect attempts exceeded. " "The server connection [%s] is down.\n", sg->max_recns, s_addr); - tfw_connection_repair((TfwConnection *)srv_conn); + tfw_connection_repair((TfwConn *)srv_conn); set_bit(TFW_CONN_B_ISDEAD, &srv_conn->flags); } if (srv_conn->recns < ARRAY_SIZE(tfw_srv_tmo_vals)) { @@ -237,7 +237,7 @@ tfw_sock_srv_connect_try_later(TfwSrvConnection *srv_conn) static void tfw_sock_srv_connect_retry_timer_cb(unsigned long data) { - TfwSrvConnection *srv_conn = (TfwSrvConnection *)data; + TfwSrvConn *srv_conn = (TfwSrvConn *)data; /* A new socket is created for each connect attempt. 
*/ if (tfw_sock_srv_connect_try(srv_conn)) @@ -245,13 +245,13 @@ tfw_sock_srv_connect_retry_timer_cb(unsigned long data) } static inline void -__reset_retry_timer(TfwSrvConnection *srv_conn) +__reset_retry_timer(TfwSrvConn *srv_conn) { srv_conn->recns = 0; } static inline void -__setup_retry_timer(TfwSrvConnection *srv_conn) +__setup_retry_timer(TfwSrvConn *srv_conn) { __reset_retry_timer(srv_conn); setup_timer(&srv_conn->timer, @@ -260,16 +260,16 @@ __setup_retry_timer(TfwSrvConnection *srv_conn) } void -tfw_srv_conn_release(TfwSrvConnection *srv_conn) +tfw_srv_conn_release(TfwSrvConn *srv_conn) { - tfw_connection_release((TfwConnection *)srv_conn); + tfw_connection_release((TfwConn *)srv_conn); /* * conn->sk may be zeroed if we get here after a failed * connect attempt. In that case no connection has been * established yet, and conn->sk has not been set. */ if (likely(srv_conn->sk)) - tfw_connection_unlink_to_sk((TfwConnection *)srv_conn); + tfw_connection_unlink_to_sk((TfwConn *)srv_conn); /* * After a disconnect, new connect attempts are started * in deferred context after a short pause (in a timer @@ -286,7 +286,7 @@ static int tfw_sock_srv_connect_complete(struct sock *sk) { int r; - TfwConnection *conn = sk->sk_user_data; + TfwConn *conn = sk->sk_user_data; TfwServer *srv = (TfwServer *)conn->peer; /* Link Tempesta with the socket. */ @@ -302,10 +302,10 @@ tfw_sock_srv_connect_complete(struct sock *sk) tfw_connection_revive(conn); /* Repair the connection if necessary. 
*/ - if (unlikely(tfw_srv_conn_restricted((TfwSrvConnection *)conn))) + if (unlikely(tfw_srv_conn_restricted((TfwSrvConn *)conn))) tfw_connection_repair(conn); - __reset_retry_timer((TfwSrvConnection *)conn); + __reset_retry_timer((TfwSrvConn *)conn); TFW_DBG_ADDR("connected", &srv->addr); TFW_INC_STAT_BH(serv.conn_established); @@ -316,7 +316,7 @@ tfw_sock_srv_connect_complete(struct sock *sk) static int tfw_sock_srv_do_failover(struct sock *sk, const char *msg) { - TfwConnection *conn = sk->sk_user_data; + TfwConn *conn = sk->sk_user_data; TfwServer *srv = (TfwServer *)conn->peer; TFW_DBG_ADDR(msg, &srv->addr); @@ -389,9 +389,9 @@ static const SsHooks tfw_sock_srv_ss_hooks = { * FIXME This function is seriously outdated and needs a complete overhaul. */ static void -tfw_sock_srv_disconnect(TfwSrvConnection *srv_conn) +tfw_sock_srv_disconnect(TfwSrvConn *srv_conn) { - TfwConnection *conn = (TfwConnection *)srv_conn; + TfwConn *conn = (TfwConn *)srv_conn; struct sock *sk = conn->sk; /* Prevent races with timer callbacks. */ @@ -436,14 +436,14 @@ tfw_sock_srv_disconnect(TfwSrvConnection *srv_conn) * This behavior may change in future for a forward proxy implementation. * Then we will have a lot of short-living connections. We should keep it in * mind to avoid possible bottlenecks. In particular, this is the reason why we - * don't have a global list of all TfwSrvConnection objects and store + * don't have a global list of all TfwSrvConn{} objects and store * not-yet-established connections in the TfwServer->conn_list. 
*/ static int tfw_sock_srv_connect_srv(TfwServer *srv) { - TfwSrvConnection *srv_conn; + TfwSrvConn *srv_conn; /* * For each server connection, schedule an immediate connect @@ -465,7 +465,7 @@ tfw_sock_srv_connect_srv(TfwServer *srv) static int tfw_sock_srv_disconnect_srv(TfwServer *srv) { - TfwSrvConnection *srv_conn; + TfwSrvConn *srv_conn; list_for_each_entry(srv_conn, &srv->conn_list, list) tfw_sock_srv_disconnect(srv_conn); @@ -477,28 +477,28 @@ tfw_sock_srv_disconnect_srv(TfwServer *srv) * TfwServer creation/deletion helpers. * ------------------------------------------------------------------------ * - * This section of code is responsible for allocating TfwSrvConnection objects + * This section of code is responsible for allocating TfwSrvConn{} objects * and linking them with a TfwServer object. * - * All server connections (TfwSrvConnection objects) are pre-allocated when a - * TfwServer is created. That happens when at the configuration parsing stage. + * All server connections (TfwSrvConn{} objects) are pre-allocated when + * TfwServer{} is created. That happens at the configuration parsing stage. * - * Later on, when Tempesta FW is started, these TfwSrvConnection objects are - * used to establish connections. These connection objects are re-used (but not - * re-allocated) when connections are re-established. + * Later on, when Tempesta FW is started, these TfwSrvConn{} objects are + * used to establish connections. These connection objects are re-used + * (but not re-allocated) when connections are re-established. 
*/ static struct kmem_cache *tfw_srv_conn_cache; -static TfwSrvConnection * +static TfwSrvConn * tfw_srv_conn_alloc(void) { - TfwSrvConnection *srv_conn; + TfwSrvConn *srv_conn; if (!(srv_conn = kmem_cache_alloc(tfw_srv_conn_cache, GFP_ATOMIC))) return NULL; - tfw_connection_init((TfwConnection *)srv_conn); + tfw_connection_init((TfwConn *)srv_conn); INIT_LIST_HEAD(&srv_conn->fwd_queue); INIT_LIST_HEAD(&srv_conn->nip_queue); spin_lock_init(&srv_conn->fwd_qlock); @@ -510,12 +510,12 @@ tfw_srv_conn_alloc(void) } static void -tfw_srv_conn_free(TfwSrvConnection *srv_conn) +tfw_srv_conn_free(TfwSrvConn *srv_conn) { BUG_ON(timer_pending(&srv_conn->timer)); /* Check that all nested resources are freed. */ - tfw_connection_validate_cleanup((TfwConnection *)srv_conn); + tfw_connection_validate_cleanup((TfwConn *)srv_conn); BUG_ON(!list_empty(&srv_conn->nip_queue)); BUG_ON(ACCESS_ONCE(srv_conn->qsize)); @@ -526,12 +526,12 @@ static int tfw_sock_srv_add_conns(TfwServer *srv, int conns_n) { int i; - TfwSrvConnection *srv_conn; + TfwSrvConn *srv_conn; for (i = 0; i < conns_n; ++i) { if (!(srv_conn = tfw_srv_conn_alloc())) return -ENOMEM; - tfw_connection_link_peer((TfwConnection *)srv_conn, + tfw_connection_link_peer((TfwConn *)srv_conn, (TfwPeer *)srv); tfw_sg_add_conn(srv->sg, srv, srv_conn); } @@ -542,10 +542,10 @@ tfw_sock_srv_add_conns(TfwServer *srv, int conns_n) static int tfw_sock_srv_del_conns(TfwServer *srv) { - TfwSrvConnection *srv_conn, *tmp; + TfwSrvConn *srv_conn, *tmp; list_for_each_entry_safe(srv_conn, tmp, &srv->conn_list, list) { - tfw_connection_unlink_from_peer((TfwConnection *)srv_conn); + tfw_connection_unlink_from_peer((TfwConn *)srv_conn); tfw_srv_conn_free(srv_conn); } return 0; @@ -699,6 +699,9 @@ tfw_cfgop_set_conn_tries(TfwSrvGroup *sg, int recns) return 0; } +/* + * Common code to handle 'server' directive. 
+ */ static int tfw_cfgop_server(TfwCfgSpec *cs, TfwCfgEntry *ce, TfwSrvGroup *sg, TfwServer **arg_srv, int *arg_conns_n) @@ -854,8 +857,8 @@ tfw_cfgop_out_server(TfwCfgSpec *cs, TfwCfgEntry *ce) * ... * } * - * Basically it parses the group name and the "sched" attribute, creates a - * new TfwSrvGroup object and sets the context for parsing nested "server"s. + * Basically it parses the group name, creates a new TfwSrvGroup{} object + * and sets the context for parsing nested directives. */ static int tfw_cfgop_begin_srv_group(TfwCfgSpec *cs, TfwCfgEntry *ce) @@ -891,7 +894,7 @@ tfw_cfgop_begin_srv_group(TfwCfgSpec *cs, TfwCfgEntry *ce) /** * The callback is invoked upon exit from a "srv_group" when all nested - * "server"s are parsed, e.g.: + * directives are parsed, e.g.: * * srv_group foo { * server ...; @@ -939,6 +942,9 @@ tfw_cfgop_finish_srv_group(TfwCfgSpec *cs) return 0; } +/* + * Common code to handle 'sched' directive. + */ static int tfw_cfgop_sched(TfwCfgSpec *cs, TfwCfgEntry *ce, TfwScheduler **arg_sched) { @@ -1176,8 +1182,7 @@ tfw_sock_srv_init(void) { BUG_ON(tfw_srv_conn_cache); tfw_srv_conn_cache = kmem_cache_create("tfw_srv_conn_cache", - sizeof(TfwSrvConnection), - 0, 0, NULL); + sizeof(TfwSrvConn), 0, 0, NULL); return !tfw_srv_conn_cache ? 
-ENOMEM : 0; } diff --git a/tempesta_fw/t/unit/helpers.c b/tempesta_fw/t/unit/helpers.c index 016b0b5551..92ecaf74bf 100644 --- a/tempesta_fw/t/unit/helpers.c +++ b/tempesta_fw/t/unit/helpers.c @@ -33,7 +33,7 @@ */ #include "http_msg.h" -static TfwConnection conn_req, conn_resp; +static TfwConn conn_req, conn_resp; TfwHttpReq * test_req_alloc(size_t data_len) @@ -52,7 +52,7 @@ test_req_alloc(size_t data_len) ret = tfw_http_msg_setup(hmreq, &it, data_len); BUG_ON(ret); - memset(&conn_req, 0, sizeof(TfwConnection)); + memset(&conn_req, 0, sizeof(TfwConn)); tfw_connection_init(&conn_req); conn_req.proto.type = Conn_HttpClnt; hmreq->conn = &conn_req; @@ -83,7 +83,7 @@ test_resp_alloc(size_t data_len) ret = tfw_http_msg_setup(hmresp, &it, data_len); BUG_ON(ret); - memset(&conn_resp, 0, sizeof(TfwConnection)); + memset(&conn_resp, 0, sizeof(TfwConn)); tfw_connection_init(&conn_req); conn_resp.proto.type = Conn_HttpSrv; hmresp->conn = &conn_resp; diff --git a/tempesta_fw/t/unit/sched_helper.c b/tempesta_fw/t/unit/sched_helper.c index 119e35b99f..1c75fb0ad3 100644 --- a/tempesta_fw/t/unit/sched_helper.c +++ b/tempesta_fw/t/unit/sched_helper.c @@ -112,19 +112,19 @@ test_create_srv(const char *in_addr, TfwSrvGroup *sg) return srv; } -TfwSrvConnection * +TfwSrvConn * test_create_conn(TfwPeer *peer) { static struct sock __test_sock = { .sk_state = TCP_ESTABLISHED, }; - TfwConnection *conn; + TfwConn *conn; kernel_fpu_end(); if (!tfw_srv_conn_cache) tfw_sock_srv_init(); - conn = (TfwConnection *)tfw_srv_conn_alloc(); + conn = (TfwConn *)tfw_srv_conn_alloc(); BUG_ON(!conn); tfw_connection_link_peer(conn, peer); @@ -134,14 +134,14 @@ test_create_conn(TfwPeer *peer) kernel_fpu_begin(); - return (TfwSrvConnection *)conn; + return (TfwSrvConn *)conn; } void test_conn_release_all(TfwSrvGroup *sg) { TfwServer *srv; - TfwConnection *conn, *tmp; + TfwConn *conn, *tmp; list_for_each_entry(srv, &sg->srv_list, list) { list_for_each_entry_safe(conn, tmp, &srv->conn_list, list) { @@ -149,7 
+149,7 @@ test_conn_release_all(TfwSrvGroup *sg) tfw_connection_unlink_from_peer(conn); while (tfw_connection_live(conn)) tfw_connection_put(conn); - tfw_srv_conn_free((TfwSrvConnection *)conn); + tfw_srv_conn_free((TfwSrvConn *)conn); } } } @@ -170,7 +170,7 @@ test_sched_generic_empty_sg(struct TestSchedHelper *sched_helper) for (i = 0; i < sched_helper->conn_types; ++i) { TfwMsg *msg = sched_helper->get_sched_arg(i); - TfwSrvConnection *srv_conn = sg->sched->sched_srv(msg, sg); + TfwSrvConn *srv_conn = sg->sched->sched_srv(msg, sg); EXPECT_NULL(srv_conn); sched_helper->free_sched_arg(msg); @@ -197,7 +197,7 @@ test_sched_generic_one_srv_zero_conn(struct TestSchedHelper *sched_helper) for (i = 0; i < sched_helper->conn_types; ++i) { TfwMsg *msg = sched_helper->get_sched_arg(i); - TfwSrvConnection *srv_conn = sg->sched->sched_srv(msg, sg); + TfwSrvConn *srv_conn = sg->sched->sched_srv(msg, sg); EXPECT_NULL(srv_conn); sched_helper->free_sched_arg(msg); @@ -226,8 +226,7 @@ test_sched_generic_max_srv_zero_conn(struct TestSchedHelper *sched_helper) for (i = 0; i < sched_helper->conn_types; ++i) { for (j = 0; j < TFW_SG_MAX_SRV; ++j) { TfwMsg *msg = sched_helper->get_sched_arg(i); - TfwSrvConnection *srv_conn = - sg->sched->sched_srv(msg, sg); + TfwSrvConn *srv_conn = sg->sched->sched_srv(msg, sg); EXPECT_NULL(srv_conn); sched_helper->free_sched_arg(msg); diff --git a/tempesta_fw/t/unit/sched_helper.h b/tempesta_fw/t/unit/sched_helper.h index 2187a113f3..2f3d2ac57d 100644 --- a/tempesta_fw/t/unit/sched_helper.h +++ b/tempesta_fw/t/unit/sched_helper.h @@ -36,7 +36,7 @@ void test_sg_release_all(void); TfwServer *test_create_srv(const char *in_addr, TfwSrvGroup *sg); -TfwSrvConnection *test_create_conn(TfwPeer *peer); +TfwSrvConn *test_create_conn(TfwPeer *peer); void test_conn_release_all(TfwSrvGroup *sg); diff --git a/tempesta_fw/t/unit/test_http_sticky.c b/tempesta_fw/t/unit/test_http_sticky.c index 865ce2f4f3..ed1d558ffd 100644 --- 
a/tempesta_fw/t/unit/test_http_sticky.c +++ b/tempesta_fw/t/unit/test_http_sticky.c @@ -81,8 +81,8 @@ static struct { TfwHttpReq *req; TfwHttpResp *resp; - TfwConnection conn_req; - TfwConnection conn_resp; + TfwConn conn_req; + TfwConn conn_resp; TfwClient client; struct sock sock; } mock; @@ -137,7 +137,7 @@ tfw_http_field_value(TfwHttpMsg *hm, const TfwStr *field_name, TfwStr *value) /* custom version for testing purposes */ int -tfw_connection_send(TfwConnection *conn, TfwMsg *msg) +tfw_connection_send(TfwConn *conn, TfwMsg *msg) { struct sk_buff *skb; unsigned int data_off = 0; @@ -177,9 +177,9 @@ tfw_connection_send(TfwConnection *conn, TfwMsg *msg) } /* custom version for testing purposes */ -int tfw_cli_conn_send(TfwCliConnection *cli_conn, TfwMsg *msg) +int tfw_cli_conn_send(TfwCliConn *cli_conn, TfwMsg *msg) { - return tfw_connection_send((TfwConnection *)cli_conn, msg); + return tfw_connection_send((TfwConn *)cli_conn, msg); } /* setup/teardown helpers */ diff --git a/tempesta_fw/t/unit/test_sched_hash.c b/tempesta_fw/t/unit/test_sched_hash.c index 537e00209f..fe1f0a61db 100644 --- a/tempesta_fw/t/unit/test_sched_hash.c +++ b/tempesta_fw/t/unit/test_sched_hash.c @@ -107,18 +107,17 @@ TEST(tfw_sched_hash, one_srv_in_sg_and_max_conn) TfwServer *srv = test_create_srv("127.0.0.1", sg); for (i = 0; i < TFW_SRV_MAX_CONN; ++i) { - TfwSrvConnection *srv_conn = test_create_conn((TfwPeer *)srv); + TfwSrvConn *srv_conn = test_create_conn((TfwPeer *)srv); sg->sched->add_conn(sg, srv, srv_conn); } /* Check that every request is scheduled to the same connection. 
*/ for (i = 0; i < sched_helper_hash.conn_types; ++i) { - TfwSrvConnection *expect_conn = NULL; + TfwSrvConn *expect_conn = NULL; for (j = 0; j < TFW_SRV_MAX_CONN; ++j) { TfwMsg *msg = sched_helper_hash.get_sched_arg(i); - TfwSrvConnection *srv_conn = - sg->sched->sched_srv(msg, sg); + TfwSrvConn *srv_conn = sg->sched->sched_srv(msg, sg); EXPECT_NOT_NULL(srv_conn); if (!expect_conn) @@ -157,20 +156,18 @@ TEST(tfw_sched_hash, max_srv_in_sg_and_max_conn) TfwServer *srv = test_create_srv("127.0.0.1", sg); for (j = 0; j < TFW_SRV_MAX_CONN; ++j) { - TfwSrvConnection *srv_conn = - test_create_conn((TfwPeer *)srv); + TfwSrvConn *srv_conn = test_create_conn((TfwPeer *)srv); sg->sched->add_conn(sg, srv, srv_conn); } } /* Check that every request is scheduled to the same connection. */ for (i = 0; i < sched_helper_hash.conn_types; ++i) { - TfwSrvConnection *expect_conn = NULL; + TfwSrvConn *expect_conn = NULL; for (j = 0; j < TFW_SG_MAX_SRV * TFW_SRV_MAX_CONN; ++j) { TfwMsg *msg = sched_helper_hash.get_sched_arg(i); - TfwSrvConnection *srv_conn = - sg->sched->sched_srv(msg, sg); + TfwSrvConn *srv_conn = sg->sched->sched_srv(msg, sg); EXPECT_NOT_NULL(srv_conn); if (!expect_conn) diff --git a/tempesta_fw/t/unit/test_sched_http.c b/tempesta_fw/t/unit/test_sched_http.c index 626a881436..b20813f8c2 100644 --- a/tempesta_fw/t/unit/test_sched_http.c +++ b/tempesta_fw/t/unit/test_sched_http.c @@ -78,10 +78,10 @@ cleanup_cfg(void) } static void -test_req(char *req_str, TfwSrvConnection *expect_conn) +test_req(char *req_str, TfwSrvConn *expect_conn) { TfwScheduler *sched; - TfwSrvConnection *srv_conn; + TfwSrvConn *srv_conn; TfwHttpReq *req = test_req_alloc(req_str? 
strlen(req_str): 1); if (req_str) { @@ -126,7 +126,7 @@ TEST(tfw_sched_http, one_wildcard_rule) { TfwSrvGroup *sg; TfwServer *srv; - TfwSrvConnection *expect_conn; + TfwSrvConn *expect_conn; sg = test_create_sg("default", "round-robin"); srv = test_create_srv("127.0.0.1", sg); @@ -149,10 +149,9 @@ TEST(tfw_sched_http, some_rules) TfwServer *srv; TfwSrvGroup *sg1, *sg2, *sg3, *sg4, *sg5, *sg6, *sg7, *sg8, *sg9, *sg10; - TfwSrvConnection *expect_conn1, *expect_conn2, *expect_conn3, - *expect_conn4, *expect_conn5, *expect_conn6, - *expect_conn7, *expect_conn8, *expect_conn9, - *expect_conn10; + TfwSrvConn *expect_conn1, *expect_conn2, *expect_conn3, *expect_conn4, + *expect_conn5, *expect_conn6, *expect_conn7, *expect_conn8, + *expect_conn9, *expect_conn10; sg1 = test_create_sg("sg1", "round-robin"); srv = test_create_srv("127.0.0.1", sg1); @@ -312,7 +311,7 @@ TEST(tfw_sched_http, one_rule) { TfwSrvGroup *sg; TfwServer *srv; - TfwSrvConnection *expect_conn; + TfwSrvConn *expect_conn; sg = test_create_sg("default", "round-robin"); srv = test_create_srv("127.0.0.1", sg); diff --git a/tempesta_fw/t/unit/test_sched_rr.c b/tempesta_fw/t/unit/test_sched_rr.c index 7d982d2b31..b90bc2f13b 100644 --- a/tempesta_fw/t/unit/test_sched_rr.c +++ b/tempesta_fw/t/unit/test_sched_rr.c @@ -88,7 +88,7 @@ TEST(tfw_sched_rr, one_srv_in_sg_and_max_conn) TfwServer *srv = test_create_srv("127.0.0.1", sg); for (i = 0; i < TFW_SRV_MAX_CONN; ++i) { - TfwSrvConnection *srv_conn = test_create_conn((TfwPeer *)srv); + TfwSrvConn *srv_conn = test_create_conn((TfwPeer *)srv); sg->sched->add_conn(sg, srv, srv_conn); conn_acc ^= (long long)srv_conn; } @@ -102,8 +102,7 @@ TEST(tfw_sched_rr, one_srv_in_sg_and_max_conn) for (j = 0; j < TFW_SRV_MAX_CONN; ++j) { TfwMsg *msg = sched_helper_rr.get_sched_arg(i); - TfwSrvConnection *srv_conn = - sg->sched->sched_srv(msg, sg); + TfwSrvConn *srv_conn = sg->sched->sched_srv(msg, sg); EXPECT_NOT_NULL(srv_conn); conn_acc_check ^= (long long)srv_conn; @@ -141,8 
+140,7 @@ TEST(tfw_sched_rr, max_srv_in_sg_and_max_conn) TfwServer *srv = test_create_srv("127.0.0.1", sg); for (j = 0; j < TFW_SRV_MAX_CONN; ++j) { - TfwSrvConnection *srv_conn = - test_create_conn((TfwPeer *)srv); + TfwSrvConn *srv_conn = test_create_conn((TfwPeer *)srv); sg->sched->add_conn(sg, srv, srv_conn); conn_acc ^= (long long)srv_conn; } @@ -157,8 +155,7 @@ TEST(tfw_sched_rr, max_srv_in_sg_and_max_conn) for (j = 0; j < TFW_SG_MAX_SRV * TFW_SRV_MAX_CONN; ++j) { TfwMsg *msg = sched_helper_rr.get_sched_arg(i); - TfwSrvConnection *srv_conn = - sg->sched->sched_srv(msg, sg); + TfwSrvConn *srv_conn = sg->sched->sched_srv(msg, sg); EXPECT_NOT_NULL(srv_conn); conn_acc_check ^= (long long)srv_conn; diff --git a/tempesta_fw/tls.c b/tempesta_fw/tls.c index 46ff673ccc..2a1bed9618 100644 --- a/tempesta_fw/tls.c +++ b/tempesta_fw/tls.c @@ -97,7 +97,7 @@ static int tfw_tls_msg_process(void *conn, struct sk_buff *skb, unsigned int off) { int r; - TfwConnection *c = conn; + TfwConn *c = conn; TfwTlsContext *tls = tfw_tls_context(c); tls_dbg(c, "=>"); @@ -149,7 +149,7 @@ tfw_tls_msg_process(void *conn, struct sk_buff *skb, unsigned int off) * Send @buf of length @len using TLS context @tls. */ static inline int -tfw_tls_send_buf(TfwConnection *c, const unsigned char *buf, size_t len) +tfw_tls_send_buf(TfwConn *c, const unsigned char *buf, size_t len) { int r; TfwTlsContext *tls = tfw_tls_context(c); @@ -172,7 +172,7 @@ tfw_tls_send_buf(TfwConnection *c, const unsigned char *buf, size_t len) * Send @skb using TLS context @tls. 
*/ static inline int -tfw_tls_send_skb(TfwConnection *c, struct sk_buff *skb) +tfw_tls_send_skb(TfwConn *c, struct sk_buff *skb) { int i; @@ -200,7 +200,7 @@ tfw_tls_send_skb(TfwConnection *c, struct sk_buff *skb) static int tfw_tls_send_cb(void *conn, const unsigned char *buf, size_t len) { - TfwConnection *c = conn; + TfwConn *c = conn; TfwTlsContext *tls = tfw_tls_context(c); struct sk_buff *skb; @@ -231,7 +231,7 @@ tfw_tls_send_cb(void *conn, const unsigned char *buf, size_t len) static int tfw_tls_recv_cb(void *conn, unsigned char *buf, size_t len) { - TfwConnection *c = conn; + TfwConn *c = conn; TfwTlsContext *tls = tfw_tls_context(c); struct sk_buff *skb = ss_skb_peek_tail(&tls->rx_queue); @@ -257,16 +257,16 @@ tfw_tls_recv_cb(void *conn, unsigned char *buf, size_t len) } static void -tfw_tls_conn_dtor(TfwConnection *c) +tfw_tls_conn_dtor(TfwConn *c) { TfwTlsContext *tls = tfw_tls_context(c); mbedtls_ssl_free(&tls->ssl); - tfw_cli_conn_release((TfwCliConnection *)c); + tfw_cli_conn_release((TfwCliConn *)c); } static int -tfw_tls_conn_init(TfwConnection *c) +tfw_tls_conn_init(TfwConn *c) { int r; TfwTlsContext *tls = tfw_tls_context(c); @@ -306,7 +306,7 @@ tfw_tls_conn_init(TfwConnection *c) } static void -tfw_tls_conn_drop(TfwConnection *c) +tfw_tls_conn_drop(TfwConn *c) { TfwTlsContext *tls = tfw_tls_context(c); @@ -318,7 +318,7 @@ tfw_tls_conn_drop(TfwConnection *c) } static int -tfw_tls_conn_send(TfwConnection *c, TfwMsg *msg) +tfw_tls_conn_send(TfwConn *c, TfwMsg *msg) { struct sk_buff *skb; TfwTlsContext *tls = tfw_tls_context(c);