Bug Summary

File: daemon/memcached.c
Location: line 2460, column 33
Description: Function call argument is an uninitialized value
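
The defect class reported here is a value that reaches a function call before it has been assigned on at least one execution path. A minimal, hypothetical sketch of the pattern (not the actual code at daemon/memcached.c:2460, which lies beyond the excerpt below) is:

    #include <stdio.h>

    static void report(int value) {   /* callee reads its argument */
        printf("status = %d\n", value);
    }

    int main(int argc, char **argv) {
        int status;                   /* declared, but not assigned on every path */
        (void)argv;                   /* unused */
        if (argc > 1) {
            status = 1;               /* only assigned on this branch */
        }
        report(status);               /* with argc == 1 the argument is uninitialized */
        return 0;
    }

Initializing status at its declaration, or assigning it on every path before the call, removes the warning.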

Annotated Source Code

1/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
2/*
3 * memcached - memory caching daemon
4 *
5 * http://www.danga.com/memcached/
6 *
7 * Copyright 2003 Danga Interactive, Inc. All rights reserved.
8 *
9 * Use and distribution licensed under the BSD license. See
10 * the LICENSE file for full text.
11 *
12 * Authors:
13 * Anatoly Vorobey <mellon@pobox.com>
14 * Brad Fitzpatrick <brad@danga.com>
15 */
16#include "config.h"
17#include "memcached.h"
18#include "memcached/extension_loggers.h"
19#include "alloc_hooks.h"
20#include "utilities/engine_loader.h"
21#include "timings.h"
22
23#include <signal.h>
24#include <getopt.h>
25#include <fcntl.h>
26#include <errno.h>
27#include <stdlib.h>
28#include <stdio.h>
29#include <string.h>
30#include <time.h>
31#include <limits.h>
32#include <ctype.h>
33#include <stdarg.h>
34#include <stddef.h>
35#include <snappy-c.h>
36#include <JSON_checker.h>
37
38static bool grow_dynamic_buffer(conn *c, size_t needed);
39
40typedef union {
41 item_info info;
42    char bytes[sizeof(item_info) + ((IOV_MAX - 1) * sizeof(struct iovec))];
43} item_info_holder;
44
45static const char* get_server_version(void);
46
47static void item_set_cas(const void *cookie, item *it, uint64_t cas) {
48 settings.engine.v1->item_set_cas(settings.engine.v0, cookie, it, cas);
49}
50
51#define MAX_SASL_MECH_LEN 32
52
53/* The item must always be called "it" */
54#define SLAB_GUTS(conn, thread_stats, slab_op, thread_op) \
55    thread_stats->slab_stats[info.info.clsid].slab_op++;
56
57#define THREAD_GUTS(conn, thread_stats, slab_op, thread_op) \
58    thread_stats->thread_op++;
59
60#define THREAD_GUTS2(conn, thread_stats, slab_op, thread_op) \
61    thread_stats->slab_op++; \
62    thread_stats->thread_op++;
63
64#define SLAB_THREAD_GUTS(conn, thread_stats, slab_op, thread_op) \
65    SLAB_GUTS(conn, thread_stats, slab_op, thread_op) \
66    THREAD_GUTS(conn, thread_stats, slab_op, thread_op)
67
68#define STATS_INCR1(GUTS, conn, slab_op, thread_op, key, nkey) { \
69    struct thread_stats *thread_stats = get_thread_stats(conn); \
70    cb_mutex_enter(&thread_stats->mutex); \
71    GUTS(conn, thread_stats, slab_op, thread_op); \
72    cb_mutex_exit(&thread_stats->mutex); \
73}
74
75#define STATS_INCR(conn, op, key, nkey) \
76    STATS_INCR1(THREAD_GUTS, conn, op, op, key, nkey)
77
78#define SLAB_INCR(conn, op, key, nkey) \
79    STATS_INCR1(SLAB_GUTS, conn, op, op, key, nkey)
80
81#define STATS_TWO(conn, slab_op, thread_op, key, nkey) \
82    STATS_INCR1(THREAD_GUTS2, conn, slab_op, thread_op, key, nkey)
83
84#define SLAB_TWO(conn, slab_op, thread_op, key, nkey) \
85    STATS_INCR1(SLAB_THREAD_GUTS, conn, slab_op, thread_op, key, nkey)
86
87#define STATS_HIT(conn, op, key, nkey) \
88    SLAB_TWO(conn, op##_hits, cmd_##op, key, nkey)
89
90#define STATS_MISS(conn, op, key, nkey) \
91    STATS_TWO(conn, op##_misses, cmd_##op, key, nkey)
92
93#define STATS_NOKEY(conn, op) { \
94    struct thread_stats *thread_stats = \
95        get_thread_stats(conn); \
96    cb_mutex_enter(&thread_stats->mutex); \
97    thread_stats->op++; \
98    cb_mutex_exit(&thread_stats->mutex); \
99}
100
101#define STATS_NOKEY2(conn, op1, op2) { \
102    struct thread_stats *thread_stats = \
103        get_thread_stats(conn); \
104    cb_mutex_enter(&thread_stats->mutex); \
105    thread_stats->op1++; \
106    thread_stats->op2++; \
107    cb_mutex_exit(&thread_stats->mutex); \
108}
109
110#define STATS_ADD(conn, op, amt) { \
111    struct thread_stats *thread_stats = \
112        get_thread_stats(conn); \
113    cb_mutex_enter(&thread_stats->mutex); \
114    thread_stats->op += amt; \
115    cb_mutex_exit(&thread_stats->mutex); \
116}
117
118volatile sig_atomic_t memcached_shutdown;
119
120/* Lock for global stats */
121static cb_mutex_t stats_lock;
122
123/**
124 * Structure to save ns_server's session cas token.
125 */
126static struct session_cas {
127 uint64_t value;
128 uint64_t ctr;
129 cb_mutex_t mutex;
130} session_cas;
131
132void STATS_LOCK() {
133 cb_mutex_enter(&stats_lock);
134}
135
136void STATS_UNLOCK() {
137 cb_mutex_exit(&stats_lock);
138}
139
140#ifdef WIN32
141static int is_blocking(DWORD dw) {
142 return (dw == WSAEWOULDBLOCK);
143}
144static int is_emfile(DWORD dw) {
145 return (dw == WSAEMFILE);
146}
147static int is_closed_conn(DWORD dw) {
148 return (dw == WSAENOTCONN || WSAECONNRESET);
149}
150static int is_addrinuse(DWORD dw) {
151 return (dw == WSAEADDRINUSE);
152}
153static void set_ewouldblock(void) {
154 WSASetLastError(WSAEWOULDBLOCK);
155}
156static void set_econnreset(void) {
157 WSASetLastError(WSAECONNRESET);
158}
159#else
160static int is_blocking(int dw) {
161    return (dw == EAGAIN || dw == EWOULDBLOCK);
162}
163static int is_emfile(int dw) {
164    return (dw == EMFILE);
165}
166static int is_closed_conn(int dw) {
167    return (dw == ENOTCONN || dw == ECONNRESET);
168}
169static int is_addrinuse(int dw) {
170    return (dw == EADDRINUSE);
171}
172static void set_ewouldblock(void) {
173    errno = EWOULDBLOCK;
174}
175static void set_econnreset(void) {
176    errno = ECONNRESET;
177}
178#endif
179
180
181/*
182 * We keep the current time of day in a global variable that's updated by a
183 * timer event. This saves us a bunch of time() system calls (we really only
184 * need to get the time once a second, whereas there can be tens of thousands
185 * of requests a second) and allows us to use server-start-relative timestamps
186 * rather than absolute UNIX timestamps, a space savings on systems where
187 * sizeof(time_t) > sizeof(unsigned int).
188 */
189volatile rel_time_t current_time;
190
191/*
192 * forward declarations
193 */
194static SOCKET new_socket(struct addrinfo *ai);
195static int try_read_command(conn *c);
196static struct thread_stats* get_independent_stats(conn *c);
197static struct thread_stats* get_thread_stats(conn *c);
198static void register_callback(ENGINE_HANDLE *eh,
199 ENGINE_EVENT_TYPE type,
200 EVENT_CALLBACK cb, const void *cb_data);
201
202
203enum try_read_result {
204 READ_DATA_RECEIVED,
205 READ_NO_DATA_RECEIVED,
206    READ_ERROR, /** an error occurred (on the socket) (or client closed connection) */
207 READ_MEMORY_ERROR /** failed to allocate more memory */
208};
209
210static enum try_read_result try_read_network(conn *c);
211
212/* stats */
213static void stats_init(void);
214static void server_stats(ADD_STAT add_stats, conn *c, bool aggregate);
215static void process_stat_settings(ADD_STAT add_stats, void *c);
216
217
218/* defaults */
219static void settings_init(void);
220
221/* event handling, network IO */
222static void event_handler(evutil_socket_t fd, short which, void *arg);
223static void complete_nread(conn *c);
224static void write_and_free(conn *c, char *buf, size_t bytes);
225static int ensure_iov_space(conn *c);
226static int add_iov(conn *c, const void *buf, size_t len);
227static int add_msghdr(conn *c);
228
229
230/* time handling */
231static void set_current_time(void); /* update the global variable holding
232 global 32-bit seconds-since-start time
233 (to avoid 64 bit time_t) */
234
235/** exported globals **/
236struct stats stats;
237struct settings settings;
238static time_t process_started; /* when the process was started */
239
240/** file scope variables **/
241static conn *listen_conn = NULL;
242static struct event_base *main_base;
243static struct thread_stats *default_independent_stats;
244
245static struct engine_event_handler *engine_event_handlers[MAX_ENGINE_EVENT_TYPE + 1];
246
247enum transmit_result {
248 TRANSMIT_COMPLETE, /** All done writing. */
249 TRANSMIT_INCOMPLETE, /** More data remaining to write. */
250 TRANSMIT_SOFT_ERROR, /** Can't write any more right now. */
251 TRANSMIT_HARD_ERROR /** Can't write (c->state is set to conn_closing) */
252};
253
254static enum transmit_result transmit(conn *c);
255
256#define REALTIME_MAXDELTA 60*60*24*30
257
258/* Perform all callbacks of a given type for the given connection. */
259void perform_callbacks(ENGINE_EVENT_TYPE type,
260 const void *data,
261 const void *c) {
262 struct engine_event_handler *h;
263 for (h = engine_event_handlers[type]; h; h = h->next) {
264 h->cb(c, type, data, h->cb_data);
265 }
266}
267
268/*
269 * given time value that's either unix time or delta from current unix time,
270 * return unix time. Use the fact that delta can't exceed one month
271 * (and real time value can't be that low).
272 */
273static rel_time_t realtime(const time_t exptime) {
274 /* no. of seconds in 30 days - largest possible delta exptime */
275
276 if (exptime == 0) return 0; /* 0 means never expire */
277
278    if (exptime > REALTIME_MAXDELTA) {
279 /* if item expiration is at/before the server started, give it an
280 expiration time of 1 second after the server started.
281 (because 0 means don't expire). without this, we'd
282 underflow and wrap around to some large value way in the
283 future, effectively making items expiring in the past
284 really expiring never */
285 if (exptime <= process_started)
286 return (rel_time_t)1;
287 return (rel_time_t)(exptime - process_started);
288 } else {
289 return (rel_time_t)(exptime + current_time);
290 }
291}
292
293/**
294 * Convert the relative time to an absolute time (relative to EPOC ;) )
295 */
296static time_t abstime(const rel_time_t exptime)
297{
298 return process_started + exptime;
299}
300
301/**
302 * Return the TCP or domain socket listening_port structure that
303 * has a given port number
304 */
305static struct listening_port *get_listening_port_instance(const int port) {
306    struct listening_port *port_ins = NULL;
307 int i;
308 for (i = 0; i < settings.num_interfaces; ++i) {
309 if (stats.listening_ports[i].port == port) {
310 port_ins = &stats.listening_ports[i];
311 }
312 }
313 return port_ins;
314}
315
316static void stats_init(void) {
317 stats.daemon_conns = 0;
318 stats.rejected_conns = 0;
319 stats.curr_conns = stats.total_conns = 0;
320 stats.listening_ports = calloc(settings.num_interfaces, sizeof(struct listening_port));
321
322 stats_prefix_init();
323}
324
325static void stats_reset(const void *cookie) {
326 struct conn *conn = (struct conn*)cookie;
327 STATS_LOCK();
328 stats.rejected_conns = 0;
329 stats.total_conns = 0;
330 stats_prefix_clear();
331 STATS_UNLOCK();
332 threadlocal_stats_reset(get_independent_stats(conn));
333 settings.engine.v1->reset_stats(settings.engine.v0, cookie);
334}
335
336static int get_number_of_worker_threads(void) {
337 int ret;
338 char *override = getenv("MEMCACHED_NUM_CPUS");
339    if (override == NULL) {
340#ifdef WIN32
341 SYSTEM_INFO sysinfo;
342 GetSystemInfo(&sysinfo);
343 ret = (int)sysinfo.dwNumberOfProcessors;
344#else
345        ret = (int)sysconf(_SC_NPROCESSORS_ONLN);
346#endif
347 if (ret > 4) {
348 ret = (int)(ret * 0.75f);
349 }
350 if (ret < 4) {
351 ret = 4;
352 }
353 } else {
354 ret = atoi(override);
355 if (ret == 0) {
356 ret = 4;
357 }
358 }
359
360 return ret;
361}
362
363static void settings_init(void) {
364 static struct interface default_interface;
365 default_interface.port = 11211;
366 default_interface.maxconn = 1000;
367 default_interface.backlog = 1024;
368
369 settings.num_interfaces = 1;
370 settings.interfaces = &default_interface;
371    settings.daemonize = false;
372    settings.pid_file = NULL;
373    settings.bio_drain_buffer_sz = 8192;
374
375    settings.verbose = 0;
376    settings.num_threads = get_number_of_worker_threads();
377    settings.prefix_delimiter = ':';
378    settings.detail_enabled = 0;
379    settings.allow_detailed = true;
380    settings.reqs_per_event = DEFAULT_REQS_PER_EVENT;
381    settings.require_sasl = false;
382    settings.extensions.logger = get_stderr_logger();
383    settings.tcp_nodelay = getenv("MEMCACHED_DISABLE_TCP_NODELAY") == NULL;
384    settings.engine_module = "default_engine.so";
385    settings.engine_config = NULL;
386    settings.config = NULL;
387}
388
389/*
390 * Adds a message header to a connection.
391 *
392 * Returns 0 on success, -1 on out-of-memory.
393 */
394static int add_msghdr(conn *c)
395{
396 struct msghdr *msg;
397
398    cb_assert(c != NULL);
399
400 if (c->msgsize == c->msgused) {
401 msg = realloc(c->msglist, c->msgsize * 2 * sizeof(struct msghdr));
402 if (! msg)
403 return -1;
404 c->msglist = msg;
405 c->msgsize *= 2;
406 }
407
408 msg = c->msglist + c->msgused;
409
410 /* this wipes msg_iovlen, msg_control, msg_controllen, and
411 msg_flags, the last 3 of which aren't defined on solaris: */
412    memset(msg, 0, sizeof(struct msghdr));
413
414 msg->msg_iov = &c->iov[c->iovused];
415
416 if (c->request_addr_size > 0) {
417 msg->msg_name = &c->request_addr;
418 msg->msg_namelen = c->request_addr_size;
419 }
420
421 c->msgbytes = 0;
422 c->msgused++;
423
424 return 0;
425}
426
427struct {
428 cb_mutex_t mutex;
429    bool disabled;
430 ssize_t count;
431 uint64_t num_disable;
432} listen_state;
433
434static bool is_listen_disabled(void) {
435    bool ret;
436 cb_mutex_enter(&listen_state.mutex);
437 ret = listen_state.disabled;
438 cb_mutex_exit(&listen_state.mutex);
439 return ret;
440}
441
442static uint64_t get_listen_disabled_num(void) {
443 uint64_t ret;
444 cb_mutex_enter(&listen_state.mutex);
445 ret = listen_state.num_disable;
446 cb_mutex_exit(&listen_state.mutex);
447 return ret;
448}
449
450static void disable_listen(void) {
451 conn *next;
452 cb_mutex_enter(&listen_state.mutex);
453    listen_state.disabled = true;
454 listen_state.count = 10;
455 ++listen_state.num_disable;
456 cb_mutex_exit(&listen_state.mutex);
457
458 for (next = listen_conn; next; next = next->next) {
459 update_event(next, 0);
460 if (listen(next->sfd, 1) != 0) {
461            log_socket_error(EXTENSION_LOG_WARNING, NULL,
462 "listen() failed: %s");
463 }
464 }
465}
466
467void safe_close(SOCKET sfd) {
468    if (sfd != INVALID_SOCKET) {
469        int rval;
470        while ((rval = closesocket(sfd)) == SOCKET_ERROR &&
471               (errno == EINTR || errno == EAGAIN)) {
472            /* go ahead and retry */
473        }
474
475        if (rval == SOCKET_ERROR) {
476            char msg[80];
477            snprintf(msg, sizeof(msg), "Failed to close socket %d (%%s)!!", (int)sfd);
478            log_socket_error(EXTENSION_LOG_WARNING, NULL,
479 msg);
480 } else {
481 STATS_LOCK();
482 stats.curr_conns--;
483 STATS_UNLOCK();
484
485 if (is_listen_disabled()) {
486 notify_dispatcher();
487 }
488 }
489 }
490}
491
492/**
493 * Reset all of the dynamic buffers used by a connection back to their
494 * default sizes. The strategy for resizing the buffers is to allocate a
495 * new one of the correct size and free the old one if the allocation succeeds
496 * instead of using realloc to change the buffer size (because realloc may
497 * not shrink the buffers, and will also copy the memory). If the allocation
498 * fails the buffer will be unchanged.
499 *
500 * @param c the connection to resize the buffers for
501 * @return true if all allocations succeeded, false if one or more of the
502 * allocations failed.
503 */
504static bool conn_reset_buffersize(conn *c) {
505    bool ret = true;
506
507    if (c->rsize != DATA_BUFFER_SIZE) {
508        void *ptr = malloc(DATA_BUFFER_SIZE);
509        if (ptr != NULL) {
510            free(c->rbuf);
511            c->rbuf = ptr;
512            c->rsize = DATA_BUFFER_SIZE;
513        } else {
514            ret = false;
515        }
516    }
517
518    if (c->wsize != DATA_BUFFER_SIZE) {
519        void *ptr = malloc(DATA_BUFFER_SIZE);
520        if (ptr != NULL) {
521            free(c->wbuf);
522            c->wbuf = ptr;
523            c->wsize = DATA_BUFFER_SIZE;
524        } else {
525            ret = false;
526        }
527    }
528
529    if (c->isize != ITEM_LIST_INITIAL) {
530        void *ptr = malloc(sizeof(item *) * ITEM_LIST_INITIAL);
531        if (ptr != NULL) {
532            free(c->ilist);
533            c->ilist = ptr;
534            c->isize = ITEM_LIST_INITIAL;
535        } else {
536            ret = false;
537        }
538    }
539
540    if (c->temp_alloc_size != TEMP_ALLOC_LIST_INITIAL) {
541        void *ptr = malloc(sizeof(char *) * TEMP_ALLOC_LIST_INITIAL);
542        if (ptr != NULL) {
543            free(c->temp_alloc_list);
544            c->temp_alloc_list = ptr;
545            c->temp_alloc_size = TEMP_ALLOC_LIST_INITIAL;
546        } else {
547            ret = false;
548        }
549    }
550
551    if (c->iovsize != IOV_LIST_INITIAL) {
552        void *ptr = malloc(sizeof(struct iovec) * IOV_LIST_INITIAL);
553        if (ptr != NULL) {
554            free(c->iov);
555            c->iov = ptr;
556            c->iovsize = IOV_LIST_INITIAL;
557        } else {
558            ret = false;
559        }
560    }
561
562    if (c->msgsize != MSG_LIST_INITIAL) {
563        void *ptr = malloc(sizeof(struct msghdr) * MSG_LIST_INITIAL);
564        if (ptr != NULL) {
565            free(c->msglist);
566            c->msglist = ptr;
567            c->msgsize = MSG_LIST_INITIAL;
568        } else {
569            ret = false;
570        }
571    }
572
573    return ret;
574}
575
576/**
577 * Constructor for all memory allocations of connection objects. Initialize
578 * all members and allocate the transfer buffers.
579 *
580 * @param buffer The memory allocated by the object cache
581 * @return 0 on success, 1 if we failed to allocate memory
582 */
583static int conn_constructor(conn *c) {
584    memset(c, 0, sizeof(*c));
585 MEMCACHED_CONN_CREATE(c);
586
587 c->state = conn_immediate_close;
588    c->sfd = INVALID_SOCKET;
589 if (!conn_reset_buffersize(c)) {
590 free(c->rbuf);
591 free(c->wbuf);
592 free(c->ilist);
593 free(c->temp_alloc_list);
594 free(c->iov);
595 free(c->msglist);
596 settings.extensions.logger->log(EXTENSION_LOG_WARNING,
597                                        NULL,
598 "Failed to allocate buffers for connection\n");
599 return 1;
600 }
601
602 STATS_LOCK();
603 stats.conn_structs++;
604 STATS_UNLOCK();
605
606 return 0;
607}
608
609/**
610 * Destructor for all connection objects. Release all allocated resources.
611 *
612 * @param buffer The memory allocated by the object cache
613 */
614static void conn_destructor(conn *c) {
615 free(c->rbuf);
616 free(c->wbuf);
617 free(c->ilist);
618 free(c->temp_alloc_list);
619 free(c->iov);
620 free(c->msglist);
621
622 STATS_LOCK();
623 stats.conn_structs--;
624 STATS_UNLOCK();
625}
626
627/*
628 * Free list management for connections.
629 */
630struct connections {
631 conn* free;
632 conn** all;
633 cb_mutex_t mutex;
634 int next;
635} connections;
636
637static void initialize_connections(void)
638{
639 int preallocate;
640
641 cb_mutex_initialize(&connections.mutex);
642 connections.all = calloc(settings.maxconns, sizeof(conn*));
643    if (connections.all == NULL) {
644        settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
645                                        "Failed to allocate memory for connections");
646        exit(EX_OSERR);
647 }
648
649 preallocate = settings.maxconns / 2;
650 if (preallocate < 1000) {
651 preallocate = settings.maxconns;
652 } else if (preallocate > 5000) {
653 preallocate = 5000;
654 }
655
656 for (connections.next = 0; connections.next < preallocate; ++connections.next) {
657 connections.all[connections.next] = malloc(sizeof(conn));
658 if (conn_constructor(connections.all[connections.next]) != 0) {
659            settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
660                                            "Failed to allocate memory for connections");
661            exit(EX_OSERR);
662 }
663 connections.all[connections.next]->next = connections.free;
664 connections.free = connections.all[connections.next];
665 }
666}
667
668static void destroy_connections(void)
669{
670 int ii;
671 for (ii = 0; ii < settings.maxconns; ++ii) {
672 if (connections.all[ii]) {
673 conn *c = connections.all[ii];
674 conn_destructor(c);
675 free(c);
676 }
677 }
678
679 free(connections.all);
680}
681
682static conn *allocate_connection(void) {
683 conn *ret;
684
685 cb_mutex_enter(&connections.mutex);
686 ret = connections.free;
687    if (ret != NULL) {
688        connections.free = connections.free->next;
689        ret->next = NULL;
690    }
691    cb_mutex_exit(&connections.mutex);
692
693    if (ret == NULL) {
694        ret = malloc(sizeof(conn));
695        if (ret == NULL) {
696            settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
697                                            "Failed to allocate memory for connection");
698            return NULL;
699        }
700
701        if (conn_constructor(ret) != 0) {
702            conn_destructor(ret);
703            free(ret);
704            settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
705                                            "Failed to allocate memory for connection");
706            return NULL;
707        }
708
709        cb_mutex_enter(&connections.mutex);
710        if (connections.next == settings.maxconns) {
711            free(ret);
712            ret = NULL;
713 } else {
714 connections.all[connections.next++] = ret;
715 }
716 cb_mutex_exit(&connections.mutex);
717 }
718
719 return ret;
720}
721
722static void release_connection(conn *c) {
723    c->sfd = INVALID_SOCKET;
724 cb_mutex_enter(&connections.mutex);
725 c->next = connections.free;
726 connections.free = c;
727 cb_mutex_exit(&connections.mutex);
728}
729
730static const char *substate_text(enum bin_substates state) {
731 switch (state) {
732 case bin_no_state: return "bin_no_state";
733 case bin_reading_set_header: return "bin_reading_set_header";
734 case bin_reading_cas_header: return "bin_reading_cas_header";
735 case bin_read_set_value: return "bin_read_set_value";
736 case bin_reading_sasl_auth: return "bin_reading_sasl_auth";
737 case bin_reading_sasl_auth_data: return "bin_reading_sasl_auth_data";
738 case bin_reading_packet: return "bin_reading_packet";
739 default:
740 return "illegal";
741 }
742}
743
744static void add_connection_stats(ADD_STAT add_stats, conn *d, conn *c) {
745 append_stat("conn", add_stats, d, "%p", c);
746    if (c->sfd == INVALID_SOCKET) {
747 append_stat("socket", add_stats, d, "disconnected");
748 } else {
749 append_stat("socket", add_stats, d, "%lu", (long)c->sfd);
750 append_stat("protocol", add_stats, d, "%s", "binary");
751 append_stat("transport", add_stats, d, "TCP");
752 append_stat("nevents", add_stats, d, "%u", c->nevents);
753        if (c->sasl_conn != NULL) {
754 append_stat("sasl_conn", add_stats, d, "%p", c->sasl_conn);
755 }
756 append_stat("state", add_stats, d, "%s", state_text(c->state));
757 append_stat("substate", add_stats, d, "%s", substate_text(c->substate));
758 append_stat("registered_in_libevent", add_stats, d, "%d",
759 (int)c->registered_in_libevent);
760 append_stat("ev_flags", add_stats, d, "%x", c->ev_flags);
761 append_stat("which", add_stats, d, "%x", c->which);
762 append_stat("rbuf", add_stats, d, "%p", c->rbuf);
763 append_stat("rcurr", add_stats, d, "%p", c->rcurr);
764 append_stat("rsize", add_stats, d, "%u", c->rsize);
765 append_stat("rbytes", add_stats, d, "%u", c->rbytes);
766 append_stat("wbuf", add_stats, d, "%p", c->wbuf);
767 append_stat("wcurr", add_stats, d, "%p", c->wcurr);
768 append_stat("wsize", add_stats, d, "%u", c->wsize);
769 append_stat("wbytes", add_stats, d, "%u", c->wbytes);
770 append_stat("write_and_go", add_stats, d, "%p", c->write_and_go);
771 append_stat("write_and_free", add_stats, d, "%p", c->write_and_free);
772 append_stat("ritem", add_stats, d, "%p", c->ritem);
773 append_stat("rlbytes", add_stats, d, "%u", c->rlbytes);
774 append_stat("item", add_stats, d, "%p", c->item);
775 append_stat("store_op", add_stats, d, "%u", c->store_op);
776 append_stat("sbytes", add_stats, d, "%u", c->sbytes);
777 append_stat("iov", add_stats, d, "%p", c->iov);
778 append_stat("iovsize", add_stats, d, "%u", c->iovsize);
779 append_stat("iovused", add_stats, d, "%u", c->iovused);
780 append_stat("msglist", add_stats, d, "%p", c->msglist);
781 append_stat("msgsize", add_stats, d, "%u", c->msgsize);
782 append_stat("msgused", add_stats, d, "%u", c->msgused);
783 append_stat("msgcurr", add_stats, d, "%u", c->msgcurr);
784 append_stat("msgbytes", add_stats, d, "%u", c->msgbytes);
785 append_stat("ilist", add_stats, d, "%p", c->ilist);
786 append_stat("isize", add_stats, d, "%u", c->isize);
787 append_stat("icurr", add_stats, d, "%p", c->icurr);
788 append_stat("ileft", add_stats, d, "%u", c->ileft);
789 append_stat("temp_alloc_list", add_stats, d, "%p", c->temp_alloc_list);
790 append_stat("temp_alloc_size", add_stats, d, "%u", c->temp_alloc_size);
791 append_stat("temp_alloc_curr", add_stats, d, "%p", c->temp_alloc_curr);
792 append_stat("temp_alloc_left", add_stats, d, "%u", c->temp_alloc_left);
793
794 append_stat("noreply", add_stats, d, "%d", c->noreply);
795 append_stat("refcount", add_stats, d, "%u", (int)c->refcount);
796 append_stat("dynamic_buffer.buffer", add_stats, d, "%p",
797 c->dynamic_buffer.buffer);
798 append_stat("dynamic_buffer.size", add_stats, d, "%zu",
799 c->dynamic_buffer.size);
800 append_stat("dynamic_buffer.offset", add_stats, d, "%zu",
801 c->dynamic_buffer.offset);
802 append_stat("engine_storage", add_stats, d, "%p", c->engine_storage);
803 /* @todo we should decode the binary header */
804 append_stat("cas", add_stats, d, "%"PRIu64"ll" "u", c->cas);
805 append_stat("cmd", add_stats, d, "%u", c->cmd);
806 append_stat("opaque", add_stats, d, "%u", c->opaque);
807 append_stat("keylen", add_stats, d, "%u", c->keylen);
808 append_stat("list_state", add_stats, d, "%u", c->list_state);
809 append_stat("next", add_stats, d, "%p", c->next);
810 append_stat("thread", add_stats, d, "%p", c->thread);
811 append_stat("aiostat", add_stats, d, "%u", c->aiostat);
812 append_stat("ewouldblock", add_stats, d, "%u", c->ewouldblock);
813 append_stat("tap_iterator", add_stats, d, "%p", c->tap_iterator);
814 }
815}
816
817/**
818 * Do a full stats of all of the connections.
819 * Do _NOT_ try to follow _ANY_ of the pointers in the conn structure
820 * because we read all of the values _DIRTY_. We preallocated the array
821 * of all of the connection pointers during startup, so we _KNOW_ that
822 * we can iterate through all of them. All of the conn structs will
823 * only appear in the connections.all array when we've allocated them,
824 * and we don't release them so it's safe to look at them.
825 */
826static void connection_stats(ADD_STAT add_stats, conn *c) {
827 int ii;
828 for (ii = 0; ii < settings.maxconns && connections.all[ii]; ++ii) {
829 add_connection_stats(add_stats, c, connections.all[ii]);
830 }
831}
832
833conn *conn_new(const SOCKET sfd, in_port_t parent_port,
834 STATE_FUNC init_state, int event_flags,
835 unsigned int read_buffer_size, struct event_base *base,
836 struct timeval *timeout) {
837 conn *c = allocate_connection();
838    if (c == NULL) {
839        return NULL;
840 }
841
842    cb_assert(c->thread == NULL);
843
844 if (c->rsize < read_buffer_size) {
845 void *mem = malloc(read_buffer_size);
846 if (mem) {
847 c->rsize = read_buffer_size;
848 free(c->rbuf);
849 c->rbuf = mem;
850 } else {
851            cb_assert(c->thread == NULL);
852 release_connection(c);
853            return NULL;
854 }
855 }
856
857    memset(&c->ssl, 0, sizeof(c->ssl));
858 if (init_state != conn_listening) {
859 int ii;
860 for (ii = 0; ii < settings.num_interfaces; ++ii) {
861 if (parent_port == settings.interfaces[ii].port) {
862                if (settings.interfaces[ii].ssl.cert != NULL) {
863 const char *cert = settings.interfaces[ii].ssl.cert;
864 const char *pkey = settings.interfaces[ii].ssl.key;
865
866 c->ssl.ctx = SSL_CTX_new(SSLv23_server_method());
867
868 /* @todo don't read files, but use in-memory-copies */
869 if (!SSL_CTX_use_certificate_chain_file(c->ssl.ctx, cert) ||
870                        !SSL_CTX_use_PrivateKey_file(c->ssl.ctx, pkey, SSL_FILETYPE_PEM)) {
871                        release_connection(c);
872                        return NULL;
873 }
874
875                    c->ssl.enabled = true;
876                    c->ssl.error = false;
877                    c->ssl.client = NULL;
878
879 c->ssl.in.buffer = malloc(settings.bio_drain_buffer_sz);
880 c->ssl.out.buffer = malloc(settings.bio_drain_buffer_sz);
881
882                    if (c->ssl.in.buffer == NULL || c->ssl.out.buffer == NULL) {
883                        release_connection(c);
884                        return NULL;
885 }
886
887 c->ssl.in.buffsz = settings.bio_drain_buffer_sz;
888 c->ssl.out.buffsz = settings.bio_drain_buffer_sz;
889 BIO_new_bio_pair(&c->ssl.application,
890 settings.bio_drain_buffer_sz,
891 &c->ssl.network,
892 settings.bio_drain_buffer_sz);
893
894 c->ssl.client = SSL_new(c->ssl.ctx);
895 SSL_set_bio(c->ssl.client,
896 c->ssl.application,
897 c->ssl.application);
898 }
899 }
900 }
901 }
902
903 c->request_addr_size = 0;
904
905 if (settings.verbose > 1) {
906 if (init_state == conn_listening) {
907 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
908 "<%d server listening", sfd);
909 } else {
910 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
911 "<%d new client connection", sfd);
912 }
913 }
914
915 c->sfd = sfd;
916 c->parent_port = parent_port;
917 c->state = init_state;
918 c->rlbytes = 0;
919 c->cmd = -1;
920 c->rbytes = c->wbytes = 0;
921 c->wcurr = c->wbuf;
922 c->rcurr = c->rbuf;
923 c->ritem = 0;
924 c->icurr = c->ilist;
925 c->temp_alloc_curr = c->temp_alloc_list;
926 c->ileft = 0;
927 c->temp_alloc_left = 0;
928 c->iovused = 0;
929 c->msgcurr = 0;
930 c->msgused = 0;
931    c->next = NULL;
932 c->list_state = 0;
933
934 c->write_and_go = init_state;
935 c->write_and_free = 0;
936 c->item = 0;
937    c->supports_datatype = false;
938    c->noreply = false;
939
940 event_set(&c->event, sfd, event_flags, event_handler, (void *)c);
941 event_base_set(base, &c->event);
942 c->ev_flags = event_flags;
943
944 if (!register_event(c, timeout)) {
945        cb_assert(c->thread == NULL);
946 release_connection(c);
947        return NULL;
948 }
949
950 STATS_LOCK();
951 stats.total_conns++;
952 STATS_UNLOCK();
953
954 c->aiostat = ENGINE_SUCCESS;
955    c->ewouldblock = false;
956 c->refcount = 1;
957
958 MEMCACHED_CONN_ALLOCATE(c->sfd);
959
960    perform_callbacks(ON_CONNECT, NULL, c);
961
962 return c;
963}
964
965static void conn_cleanup(conn *c) {
966    cb_assert(c != NULL);
967
968 if (c->item) {
969 settings.engine.v1->release(settings.engine.v0, c, c->item);
970 c->item = 0;
971 }
972
973 if (c->ileft != 0) {
974 for (; c->ileft > 0; c->ileft--,c->icurr++) {
975 settings.engine.v1->release(settings.engine.v0, c, *(c->icurr));
976 }
977 }
978
979 if (c->temp_alloc_left != 0) {
980 for (; c->temp_alloc_left > 0; c->temp_alloc_left--, c->temp_alloc_curr++) {
981 free(*(c->temp_alloc_curr));
982 }
983 }
984
985 if (c->write_and_free) {
986 free(c->write_and_free);
987 c->write_and_free = 0;
988 }
989
990 if (c->sasl_conn) {
991 cbsasl_dispose(&c->sasl_conn);
992        c->sasl_conn = NULL;
993 }
994
995    c->engine_storage = NULL;
996    c->tap_iterator = NULL;
997    c->thread = NULL;
998    cb_assert(c->next == NULL);
999    c->sfd = INVALID_SOCKET;
1000 c->upr = 0;
1001 c->start = 0;
1002 if (c->ssl.enabled) {
1003 BIO_free_all(c->ssl.network);
1004 SSL_free(c->ssl.client);
1005        c->ssl.enabled = false;
1006        c->ssl.error = false;
1007        free(c->ssl.in.buffer);
1008        free(c->ssl.out.buffer);
1009        memset(&c->ssl, 0, sizeof(c->ssl));
1010 }
1011}
1012
1013void conn_close(conn *c) {
1014    cb_assert(c != NULL);
1015    cb_assert(c->sfd == INVALID_SOCKET);
1016    cb_assert(c->state == conn_immediate_close);
1017
1018    cb_assert(c->thread);
1019 /* remove from pending-io list */
1020 if (settings.verbose > 1 && list_contains(c->thread->pending_io, c)) {
1021 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
1022 "Current connection was in the pending-io list.. Nuking it\n");
1023 }
1024 c->thread->pending_io = list_remove(c->thread->pending_io, c);
1025
1026 conn_cleanup(c);
1027
1028 /*
1029 * The contract with the object cache is that we should return the
1030 * object in a constructed state. Reset the buffers to the default
1031 * size
1032 */
1033 conn_reset_buffersize(c);
1034    cb_assert(c->thread == NULL);
1035 release_connection(c);
1036}
1037
1038/*
1039 * Shrinks a connection's buffers if they're too big. This prevents
1040 * periodic large "get" requests from permanently chewing lots of server
1041 * memory.
1042 *
1043 * This should only be called in between requests since it can wipe output
1044 * buffers!
1045 */
1046static void conn_shrink(conn *c) {
1047    cb_assert(c != NULL);
1048
1049    if (c->rsize > READ_BUFFER_HIGHWAT && c->rbytes < DATA_BUFFER_SIZE) {
1050        char *newbuf;
1051
1052        if (c->rcurr != c->rbuf)
1053            memmove(c->rbuf, c->rcurr, (size_t)c->rbytes);
1054
1055        newbuf = (char *)realloc((void *)c->rbuf, DATA_BUFFER_SIZE);
1056
1057        if (newbuf) {
1058            c->rbuf = newbuf;
1059            c->rsize = DATA_BUFFER_SIZE;
1060 }
1061 /* TODO check other branch... */
1062 c->rcurr = c->rbuf;
1063 }
1064
1065    if (c->isize > ITEM_LIST_HIGHWAT) {
1066        item **newbuf = (item**) realloc((void *)c->ilist, ITEM_LIST_INITIAL * sizeof(c->ilist[0]));
1067        if (newbuf) {
1068            c->ilist = newbuf;
1069            c->isize = ITEM_LIST_INITIAL;
1070 }
1071 /* TODO check error condition? */
1072 }
1073
1074    if (c->msgsize > MSG_LIST_HIGHWAT) {
1075        struct msghdr *newbuf = (struct msghdr *) realloc((void *)c->msglist, MSG_LIST_INITIAL * sizeof(c->msglist[0]));
1076        if (newbuf) {
1077            c->msglist = newbuf;
1078            c->msgsize = MSG_LIST_INITIAL;
1079 }
1080 /* TODO check error condition? */
1081 }
1082
1083    if (c->iovsize > IOV_LIST_HIGHWAT) {
1084        struct iovec *newbuf = (struct iovec *) realloc((void *)c->iov, IOV_LIST_INITIAL * sizeof(c->iov[0]));
1085        if (newbuf) {
1086            c->iov = newbuf;
1087            c->iovsize = IOV_LIST_INITIAL;
1088 }
1089 /* TODO check return value */
1090 }
1091}
1092
1093/**
1094 * Convert a state name to a human readable form.
1095 */
1096const char *state_text(STATE_FUNC state) {
1097 if (state == conn_listening) {
1098 return "conn_listening";
1099 } else if (state == conn_new_cmd) {
1100 return "conn_new_cmd";
1101 } else if (state == conn_waiting) {
1102 return "conn_waiting";
1103 } else if (state == conn_read) {
1104 return "conn_read";
1105 } else if (state == conn_parse_cmd) {
1106 return "conn_parse_cmd";
1107 } else if (state == conn_write) {
1108 return "conn_write";
1109 } else if (state == conn_nread) {
1110 return "conn_nread";
1111 } else if (state == conn_swallow) {
1112 return "conn_swallow";
1113 } else if (state == conn_closing) {
1114 return "conn_closing";
1115 } else if (state == conn_mwrite) {
1116 return "conn_mwrite";
1117 } else if (state == conn_ship_log) {
1118 return "conn_ship_log";
1119 } else if (state == conn_setup_tap_stream) {
1120 return "conn_setup_tap_stream";
1121 } else if (state == conn_pending_close) {
1122 return "conn_pending_close";
1123 } else if (state == conn_immediate_close) {
1124 return "conn_immediate_close";
1125 } else if (state == conn_refresh_cbsasl) {
1126 return "conn_refresh_cbsasl";
1127 } else if (state == conn_refresh_ssl_certs) {
1128 return "conn_refresh_ssl_cert";
1129 } else {
1130 return "Unknown";
1131 }
1132}
1133
1134/*
1135 * Sets a connection's current state in the state machine. Any special
1136 * processing that needs to happen on certain state transitions can
1137 * happen here.
1138 */
1139void conn_set_state(conn *c, STATE_FUNC state) {
1140    cb_assert(c != NULL);
1141
1142 if (state != c->state) {
1143 /*
1144         * The connections in the "tap thread" behave differently than
1145         * normal connections because they operate in a full duplex mode.
1146         * New messages may appear from both sides, so we can't block on
1147         * read from the network / engine
1148 */
1149        if (c->tap_iterator != NULL || c->upr) {
1150 if (state == conn_waiting) {
1151                c->which = EV_WRITE;
1152 state = conn_ship_log;
1153 }
1154 }
1155
1156 if (settings.verbose > 2 || c->state == conn_closing
1157 || c->state == conn_setup_tap_stream) {
1158 settings.extensions.logger->log(EXTENSION_LOG_DETAIL, c,
1159 "%d: going from %s to %s\n",
1160 c->sfd, state_text(c->state),
1161 state_text(state));
1162 }
1163
1164 if (state == conn_write || state == conn_mwrite) {
1165 if (c->start != 0) {
1166 collect_timing(c->cmd, gethrtime() - c->start);
1167 c->start = 0;
1168 }
1169 MEMCACHED_PROCESS_COMMAND_END(c->sfd, c->wbuf, c->wbytes);
1170 }
1171
1172 c->state = state;
1173 }
1174}
1175
1176/*
1177 * Ensures that there is room for another struct iovec in a connection's
1178 * iov list.
1179 *
1180 * Returns 0 on success, -1 on out-of-memory.
1181 */
1182static int ensure_iov_space(conn *c) {
1183    cb_assert(c != NULL);
1184
1185 if (c->iovused >= c->iovsize) {
1186 int i, iovnum;
1187 struct iovec *new_iov = (struct iovec *)realloc(c->iov,
1188 (c->iovsize * 2) * sizeof(struct iovec));
1189 if (! new_iov)
1190 return -1;
1191 c->iov = new_iov;
1192 c->iovsize *= 2;
1193
1194 /* Point all the msghdr structures at the new list. */
1195 for (i = 0, iovnum = 0; i < c->msgused; i++) {
1196 c->msglist[i].msg_iov = &c->iov[iovnum];
1197 iovnum += c->msglist[i].msg_iovlen;
1198 }
1199 }
1200
1201 return 0;
1202}
1203
1204
1205/*
1206 * Adds data to the list of pending data that will be written out to a
1207 * connection.
1208 *
1209 * Returns 0 on success, -1 on out-of-memory.
1210 */
1211
1212static int add_iov(conn *c, const void *buf, size_t len) {
1213 struct msghdr *m;
1214 size_t leftover;
1215    bool limit_to_mtu;
1216
1217    cb_assert(c != NULL);
1218
1219 if (len == 0) {
1220 return 0;
1221 }
1222
1223 do {
1224 m = &c->msglist[c->msgused - 1];
1225
1226 /*
1227 * Limit the first payloads of TCP replies, to
1228 * UDP_MAX_PAYLOAD_SIZE bytes.
1229 */
1230 limit_to_mtu = (1 == c->msgused);
1231
1232 /* We may need to start a new msghdr if this one is full. */
1233        if (m->msg_iovlen == IOV_MAX ||
1234            (limit_to_mtu && c->msgbytes >= UDP_MAX_PAYLOAD_SIZE)) {
1235 add_msghdr(c);
1236 m = &c->msglist[c->msgused - 1];
1237 }
1238
1239 if (ensure_iov_space(c) != 0)
1240 return -1;
1241
1242 /* If the fragment is too big to fit in the datagram, split it up */
1243        if (limit_to_mtu && len + c->msgbytes > UDP_MAX_PAYLOAD_SIZE) {
1244            leftover = len + c->msgbytes - UDP_MAX_PAYLOAD_SIZE;
1245 len -= leftover;
1246 } else {
1247 leftover = 0;
1248 }
1249
1250 m = &c->msglist[c->msgused - 1];
1251 m->msg_iov[m->msg_iovlen].iov_base = (void *)buf;
1252 m->msg_iov[m->msg_iovlen].iov_len = len;
1253
1254 c->msgbytes += (int)len;
1255 c->iovused++;
1256 m->msg_iovlen++;
1257
1258 buf = ((char *)buf) + len;
1259 len = leftover;
1260 } while (leftover > 0);
1261
1262 return 0;
1263}
1264
1265/**
1266 * get a pointer to the start of the request struct for the current command
1267 */
1268static void* binary_get_request(conn *c) {
1269 char *ret = c->rcurr;
1270 ret -= (sizeof(c->binary_header) + c->binary_header.request.keylen +
1271 c->binary_header.request.extlen);
1272
1273    cb_assert(ret >= c->rbuf);
1274 return ret;
1275}
1276
1277/**
1278 * get a pointer to the key in this request
1279 */
1280static char* binary_get_key(conn *c) {
1281 return c->rcurr - (c->binary_header.request.keylen);
1282}
1283
1284/**
1285 * Insert a key into a buffer, but replace all non-printable characters
1286 * with a '.'.
1287 *
1288 * @param dest where to store the output
1289 * @param destsz size of destination buffer
1290 * @param prefix string to insert before the data
1291 * @param client the client we are serving
1292 * @param from_client set to true if this data is from the client
1293 * @param key the key to add to the buffer
1294 * @param nkey the number of bytes in the key
1295 * @return number of bytes in dest if success, -1 otherwise
1296 */
1297static ssize_t key_to_printable_buffer(char *dest, size_t destsz,
1298                                       SOCKET client, bool from_client,
1299 const char *prefix,
1300 const char *key,
1301 size_t nkey)
1302{
1303 char *ptr;
1304 ssize_t ii;
1305    ssize_t nw = snprintf(dest, destsz, "%c%d %s ", from_client ? '>' : '<',
1306                          (int)client, prefix);
1307 if (nw == -1) {
1308 return -1;
1309 }
1310
1311 ptr = dest + nw;
1312 destsz -= nw;
1313 if (nkey > destsz) {
1314 nkey = destsz;
1315 }
1316
1317 for (ii = 0; ii < nkey; ++ii, ++key, ++ptr) {
1318 if (isgraph(*key)) {
1319 *ptr = *key;
1320 } else {
1321 *ptr = '.';
1322 }
1323 }
1324
1325 *ptr = '\0';
1326 return (ssize_t)(ptr - dest);
1327}
1328
1329/**
1330 * Convert a byte array to a text string
1331 *
1332 * @param dest where to store the output
1333 * @param destsz size of destination buffer
1334 * @param prefix string to insert before the data
1335 * @param client the client we are serving
1336 * @param from_client set to true if this data is from the client
1337 * @param data the data to add to the buffer
1338 * @param size the number of bytes in data to print
1339 * @return number of bytes in dest if success, -1 otherwise
1340 */
1341static ssize_t bytes_to_output_string(char *dest, size_t destsz,
1342                                      SOCKET client, bool from_client,
1343 const char *prefix,
1344 const char *data,
1345 size_t size)
1346{
1347    ssize_t nw = snprintf(dest, destsz, "%c%d %s", from_client ? '>' : '<',
1348                          (int)client, prefix);
1349 ssize_t offset = nw;
1350 ssize_t ii;
1351
1352 if (nw == -1) {
1353 return -1;
1354 }
1355
1356 for (ii = 0; ii < size; ++ii) {
1357 if (ii % 4 == 0) {
1358            if ((nw = snprintf(dest + offset, destsz - offset, "\n%c%d ",
1359                               from_client ? '>' : '<', client)) == -1) {
1360 return -1;
1361 }
1362 offset += nw;
1363 }
1364        if ((nw = snprintf(dest + offset, destsz - offset,
1365                           " 0x%02x", (unsigned char)data[ii])) == -1) {
1366 return -1;
1367 }
1368 offset += nw;
1369 }
1370
1371    if ((nw = snprintf(dest + offset, destsz - offset, "\n")) == -1) {
1372 return -1;
1373 }
1374
1375 return offset + nw;
1376}
1377
1378static int add_bin_header(conn *c,
1379 uint16_t err,
1380 uint8_t hdr_len,
1381 uint16_t key_len,
1382 uint32_t body_len,
1383 uint8_t datatype) {
1384 protocol_binary_response_header* header;
1385
1386    cb_assert(c);
1387
1388 c->msgcurr = 0;
1389 c->msgused = 0;
1390 c->iovused = 0;
1391 if (add_msghdr(c) != 0) {
1392 return -1;
1393 }
1394
1395 header = (protocol_binary_response_header *)c->wbuf;
1396
1397 header->response.magic = (uint8_t)PROTOCOL_BINARY_RES;
1398 header->response.opcode = c->binary_header.request.opcode;
1399    header->response.keylen = (uint16_t)htons(key_len);
1400
1401 header->response.extlen = (uint8_t)hdr_len;
1402 header->response.datatype = datatype;
1403    header->response.status = (uint16_t)htons(err);
1404
1405    header->response.bodylen = htonl(body_len);
1406 header->response.opaque = c->opaque;
1407 header->response.cas = htonll(c->cas);
1408
1409 if (settings.verbose > 1) {
1410 char buffer[1024];
1411        if (bytes_to_output_string(buffer, sizeof(buffer), c->sfd, false,
1412 "Writing bin response:",
1413 (const char*)header->bytes,
1414 sizeof(header->bytes)) != -1) {
1415 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
1416 "%s", buffer);
1417 }
1418 }
1419
1420 return add_iov(c, c->wbuf, sizeof(header->response));
1421}
1422
1423/**
1424 * Convert an error code generated from the storage engine to the corresponding
1425 * error code used by the protocol layer.
1426 * @param e the error code as used in the engine
1427 * @return the error code as used by the protocol layer
1428 */
1429static protocol_binary_response_status engine_error_2_protocol_error(ENGINE_ERROR_CODE e) {
1430 protocol_binary_response_status ret;
1431
1432 switch (e) {
1433 case ENGINE_SUCCESS:
1434 return PROTOCOL_BINARY_RESPONSE_SUCCESS;
1435 case ENGINE_KEY_ENOENT:
1436 return PROTOCOL_BINARY_RESPONSE_KEY_ENOENT;
1437 case ENGINE_KEY_EEXISTS:
1438 return PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS;
1439 case ENGINE_ENOMEM:
1440 return PROTOCOL_BINARY_RESPONSE_ENOMEM;
1441 case ENGINE_TMPFAIL:
1442 return PROTOCOL_BINARY_RESPONSE_ETMPFAIL;
1443 case ENGINE_NOT_STORED:
1444 return PROTOCOL_BINARY_RESPONSE_NOT_STORED;
1445 case ENGINE_EINVAL:
1446 return PROTOCOL_BINARY_RESPONSE_EINVAL;
1447 case ENGINE_ENOTSUP:
1448 return PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED;
1449 case ENGINE_E2BIG:
1450 return PROTOCOL_BINARY_RESPONSE_E2BIG;
1451 case ENGINE_NOT_MY_VBUCKET:
1452 return PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET;
1453 case ENGINE_ERANGE:
1454 return PROTOCOL_BINARY_RESPONSE_ERANGE;
1455 case ENGINE_ROLLBACK:
1456 return PROTOCOL_BINARY_RESPONSE_ROLLBACK;
1457 default:
1458 ret = PROTOCOL_BINARY_RESPONSE_EINTERNAL;
1459 }
1460
1461 return ret;
1462}
1463
1464static ENGINE_ERROR_CODE get_vb_map_cb(const void *cookie,
1465 const void *map,
1466 size_t mapsize)
1467{
1468 char *buf;
1469 conn *c = (conn*)cookie;
1470 protocol_binary_response_header header;
1471 size_t needed = mapsize+ sizeof(protocol_binary_response_header);
1472 if (!grow_dynamic_buffer(c, needed)) {
1473 if (settings.verbose > 0) {
1474 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
1475 "<%d ERROR: Failed to allocate memory for response\n",
1476 c->sfd);
1477 }
1478 return ENGINE_ENOMEM;
1479 }
1480
1481 buf = c->dynamic_buffer.buffer + c->dynamic_buffer.offset;
1482    memset(&header, 0, sizeof(header));
1483
1484 header.response.magic = (uint8_t)PROTOCOL_BINARY_RES;
1485 header.response.opcode = c->binary_header.request.opcode;
1486    header.response.status = (uint16_t)htons(PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET);
1487    header.response.bodylen = htonl((uint32_t)mapsize);
1488 header.response.opaque = c->opaque;
1489
1490    memcpy(buf, header.bytes, sizeof(header.response));
1491 buf += sizeof(header.response);
1492    memcpy(buf, map, mapsize);
1493 c->dynamic_buffer.offset += needed;
1494
1495 return ENGINE_SUCCESS;
1496}
1497
1498static void write_bin_packet(conn *c, protocol_binary_response_status err, int swallow) {
1499 if (err == PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET) {
1500 ENGINE_ERROR_CODE ret;
1501        cb_assert(swallow == 0);
1502
1503 ret = settings.engine.v1->get_engine_vb_map(settings.engine.v0, c,
1504 get_vb_map_cb);
1505 if (ret == ENGINE_SUCCESS) {
1506 write_and_free(c, c->dynamic_buffer.buffer,
1507 c->dynamic_buffer.offset);
1508            c->dynamic_buffer.buffer = NULL;
1509 } else {
1510 conn_set_state(c, conn_closing);
1511 }
1512 } else {
1513 ssize_t len = 0;
1514        const char *errtext = NULL;
1515
1516 if (err != PROTOCOL_BINARY_RESPONSE_SUCCESS) {
1517 errtext = memcached_protocol_errcode_2_text(err);
1518            if (errtext != NULL) {
1519 len = (ssize_t)strlen(errtext);
1520 }
1521 }
1522
1523 if (errtext && settings.verbose > 1) {
1524 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
1525 ">%d Writing an error: %s\n", c->sfd,
1526 errtext);
1527 }
1528
1529 add_bin_header(c, err, 0, 0, len, PROTOCOL_BINARY_RAW_BYTES);
1530 if (errtext) {
1531 add_iov(c, errtext, len);
1532 }
1533 conn_set_state(c, conn_mwrite);
1534 if (swallow > 0) {
1535 c->sbytes = swallow;
1536 c->write_and_go = conn_swallow;
1537 } else {
1538 c->write_and_go = conn_new_cmd;
1539 }
1540 }
1541}
1542
1543/* Form and send a response to a command over the binary protocol */
1544static void write_bin_response(conn *c, const void *d, int hlen, int keylen, int dlen) {
1545 if (!c->noreply || c->cmd == PROTOCOL_BINARY_CMD_GET ||
1546 c->cmd == PROTOCOL_BINARY_CMD_GETK) {
1547 if (add_bin_header(c, 0, hlen, keylen, dlen, PROTOCOL_BINARY_RAW_BYTES) == -1) {
1548 conn_set_state(c, conn_closing);
1549 return;
1550 }
1551 add_iov(c, d, dlen);
1552 conn_set_state(c, conn_mwrite);
1553 c->write_and_go = conn_new_cmd;
1554 } else {
1555 if (c->start != 0) {
1556 collect_timing(c->cmd, gethrtime() - c->start);
1557 c->start = 0;
1558 }
1559 conn_set_state(c, conn_new_cmd);
1560 }
1561}
1562
1563static void complete_update_bin(conn *c) {
1564 protocol_binary_response_status eno = PROTOCOL_BINARY_RESPONSE_EINVAL;
1565 ENGINE_ERROR_CODE ret;
1566 item *it;
1567 item_info_holder info;
1568
1569 cb_assert(c != NULL);
1570 it = c->item;
1571 memset(&info, 0, sizeof(info));
1572 info.info.nvalue = 1;
1573 if (!settings.engine.v1->get_item_info(settings.engine.v0, c, it,
1574 (void*)&info)) {
1575 settings.engine.v1->release(settings.engine.v0, c, it);
1576 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
1577 "%d: Failed to get item info",
1578 c->sfd);
1579 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINTERNAL, 0);
1580 return;
1581 }
1582
1583 ret = c->aiostat;
1584 c->aiostat = ENGINE_SUCCESS;
1585 if (ret == ENGINE_SUCCESS) {
1586 if (!c->supports_datatype) {
1587 if (checkUTF8JSON((void*)info.info.value[0].iov_base,
1588 (int)info.info.value[0].iov_len)) {
1589 info.info.datatype = PROTOCOL_BINARY_DATATYPE_JSON;
1590 if (!settings.engine.v1->set_item_info(settings.engine.v0, c,
1591 it, &info.info)) {
1592 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
1593 "%d: Failed to set item info",
1594 c->sfd);
1595 }
1596 }
1597 }
1598 ret = settings.engine.v1->store(settings.engine.v0, c,
1599 it, &c->cas, c->store_op,
1600 c->binary_header.request.vbucket);
1601 }
1602
1603#ifdef ENABLE_DTRACE
1604 switch (c->cmd) {
1605 case OPERATION_ADD:
1606 MEMCACHED_COMMAND_ADD(c->sfd, info.info.key, info.info.nkey,
1607 (ret == ENGINE_SUCCESS) ? info.info.nbytes : -1, c->cas);
1608 break;
1609 case OPERATION_REPLACE:
1610 MEMCACHED_COMMAND_REPLACE(c->sfd, info.info.key, info.info.nkey,
1611 (ret == ENGINE_SUCCESS) ? info.info.nbytes : -1, c->cas);
1612 break;
1613 case OPERATION_APPEND:
1614 MEMCACHED_COMMAND_APPEND(c->sfd, info.info.key, info.info.nkey,
1615 (ret == ENGINE_SUCCESS) ? info.info.nbytes : -1, c->cas);
1616 break;
1617 case OPERATION_PREPEND:
1618 MEMCACHED_COMMAND_PREPEND(c->sfd, info.info.key, info.info.nkey,
1619 (ret == ENGINE_SUCCESS) ? info.info.nbytes : -1, c->cas);
1620 break;
1621 case OPERATION_SET:
1622 MEMCACHED_COMMAND_SET(c->sfd, info.info.key, info.info.nkey,
1623 (ret == ENGINE_SUCCESS) ? info.info.nbytes : -1, c->cas);
1624 break;
1625 }
1626#endif
1627
1628 switch (ret) {
1629 case ENGINE_SUCCESS:
1630 /* Stored */
1631 write_bin_response(c, NULL, 0, 0, 0);
1632 break;
1633 case ENGINE_KEY_EEXISTS:
1634 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, 0);
1635 break;
1636 case ENGINE_KEY_ENOENT:
1637 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
1638 break;
1639 case ENGINE_ENOMEM:
1640 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, 0);
1641 break;
1642 case ENGINE_TMPFAIL:
1643 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ETMPFAIL, 0);
1644 break;
1645 case ENGINE_EWOULDBLOCK:
1646 c->ewouldblock = true;
1647 break;
1648 case ENGINE_DISCONNECT:
1649 c->state = conn_closing;
1650 break;
1651 case ENGINE_ENOTSUP:
1652 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
1653 break;
1654 case ENGINE_NOT_MY_VBUCKET:
1655 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET, 0);
1656 break;
1657 case ENGINE_E2BIG:
1658 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_E2BIG, 0);
1659 break;
1660 default:
1661 if (c->store_op == OPERATION_ADD) {
1662 eno = PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS;
1663 } else if(c->store_op == OPERATION_REPLACE) {
1664 eno = PROTOCOL_BINARY_RESPONSE_KEY_ENOENT;
1665 } else {
1666 eno = PROTOCOL_BINARY_RESPONSE_NOT_STORED;
1667 }
1668 write_bin_packet(c, eno, 0);
1669 }
1670
1671 if (c->store_op == OPERATION_CAS) {
1672 switch (ret) {
1673 case ENGINE_SUCCESS:
1674 SLAB_INCR(c, cas_hits, info.info.key, info.info.nkey);
1675 break;
1676 case ENGINE_KEY_EEXISTS:
1677 SLAB_INCR(c, cas_badval, info.info.key, info.info.nkey);
1678 break;
1679 case ENGINE_KEY_ENOENT:
1680 STATS_NOKEY(c, cas_misses);
1681 break;
1682 default:
1683 ;
1684 }
1685 } else {
1686 SLAB_INCR(c, cmd_set, info.info.key, info.info.nkey);
1687 }
1688
1689 if (!c->ewouldblock) {
1690 /* release the c->item reference */
1691 settings.engine.v1->release(settings.engine.v0, c, c->item);
1692 c->item = 0;
1693 }
1694}
1695
1696static void process_bin_get(conn *c) {
1697 item *it;
1698 protocol_binary_response_get* rsp = (protocol_binary_response_get*)c->wbuf;
1699 char* key = binary_get_key(c);
1700 size_t nkey = c->binary_header.request.keylen;
1701 uint16_t keylen;
1702 uint32_t bodylen;
1703 item_info_holder info;
1704 int ii;
1705 ENGINE_ERROR_CODE ret;
1706 uint8_t datatype;
1707 bool need_inflate = false;
1708
1709 memset(&info, 0, sizeof(info));
1710 if (settings.verbose > 1) {
1711 char buffer[1024];
1712 if (key_to_printable_buffer(buffer, sizeof(buffer), c->sfd, true,
1713 "GET", key, nkey) != -1) {
1714 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c, "%s\n",
1715 buffer);
1716 }
1717 }
1718
1719 ret = c->aiostat;
1720 c->aiostat = ENGINE_SUCCESS;
1721 if (ret == ENGINE_SUCCESS) {
1722 ret = settings.engine.v1->get(settings.engine.v0, c, &it, key, (int)nkey,
1723 c->binary_header.request.vbucket);
1724 }
1725
1726 info.info.nvalue = IOV_MAX;
1727 switch (ret) {
1728 case ENGINE_SUCCESS:
1729 STATS_HIT(c, get, key, nkey);
1730
1731 if (!settings.engine.v1->get_item_info(settings.engine.v0, c, it,
1732 (void*)&info)) {
1733 settings.engine.v1->release(settings.engine.v0, c, it);
1734 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
1735 "%d: Failed to get item info",
1736 c->sfd);
1737 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINTERNAL, 0);
1738 break;
1739 }
1740
1741 datatype = info.info.datatype;
1742 if (!c->supports_datatype) {
1743 if ((datatype & PROTOCOL_BINARY_DATATYPE_COMPRESSED) == PROTOCOL_BINARY_DATATYPE_COMPRESSED) {
1744 need_inflate = true;
1745 } else {
1746 datatype = PROTOCOL_BINARY_RAW_BYTES;
1747 }
1748 }
1749
1750 keylen = 0;
1751 bodylen = sizeof(rsp->message.body) + info.info.nbytes;
1752
1753 if (c->cmd == PROTOCOL_BINARY_CMD_GETK) {
1754 bodylen += (uint32_t)nkey;
1755 keylen = (uint16_t)nkey;
1756 }
1757
1758 if (need_inflate) {
1759 if (info.info.nvalue != 1) {
1760 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINTERNAL, 0);
1761 } else if (binary_response_handler(key, keylen,
1762 &info.info.flags, 4,
1763 info.info.value[0].iov_base,
1764 (uint32_t)info.info.value[0].iov_len,
1765 datatype,
1766 PROTOCOL_BINARY_RESPONSE_SUCCESS,
1767 info.info.cas, c)) {
1768 write_and_free(c, c->dynamic_buffer.buffer, c->dynamic_buffer.offset);
1769 c->dynamic_buffer.buffer = NULL;
1770 settings.engine.v1->release(settings.engine.v0, c, it);
1771 } else {
1772 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINTERNAL, 0);
1773 }
1774 } else {
1775 if (add_bin_header(c, 0, sizeof(rsp->message.body),
1776 keylen, bodylen, datatype) == -1) {
1777 conn_set_state(c, conn_closing);
1778 return;
1779 }
1780 rsp->message.header.response.cas = htonll(info.info.cas);
1781
1782 /* add the flags */
1783 rsp->message.body.flags = info.info.flags;
1784 add_iov(c, &rsp->message.body, sizeof(rsp->message.body));
1785
1786 if (c->cmd == PROTOCOL_BINARY_CMD_GETK) {
1787 add_iov(c, info.info.key, nkey);
1788 }
1789
1790 for (ii = 0; ii < info.info.nvalue; ++ii) {
1791 add_iov(c, info.info.value[ii].iov_base,
1792 info.info.value[ii].iov_len);
1793 }
1794 conn_set_state(c, conn_mwrite);
1795 /* Remember this item so we can garbage collect it later */
1796 c->item = it;
1797 }
1798 break;
1799 case ENGINE_KEY_ENOENT:
1800 STATS_MISS(c, get, key, nkey);
1801
1802 MEMCACHED_COMMAND_GET(c->sfd, key, nkey, -1, 0);
1803
1804 if (c->noreply) {
1805 conn_set_state(c, conn_new_cmd);
1806 } else {
1807 if (c->cmd == PROTOCOL_BINARY_CMD_GETK) {
1808 char *ofs = c->wbuf + sizeof(protocol_binary_response_header);
1809 if (add_bin_header(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT,
1810 0, (uint16_t)nkey,
1811 (uint32_t)nkey, PROTOCOL_BINARY_RAW_BYTES) == -1) {
1812 conn_set_state(c, conn_closing);
1813 return;
1814 }
1815 memcpy(ofs, key, nkey);
1816 add_iov(c, ofs, nkey);
1817 conn_set_state(c, conn_mwrite);
1818 } else {
1819 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
1820 }
1821 }
1822 break;
1823 case ENGINE_EWOULDBLOCK:
1824 c->ewouldblock = true;
1825 break;
1826 case ENGINE_DISCONNECT:
1827 c->state = conn_closing;
1828 break;
1829 default:
1830 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
1831 }
1832
1833 if (settings.detail_enabled && ret != ENGINE_EWOULDBLOCK) {
1834 stats_prefix_record_get(key, nkey, ret == ENGINE_SUCCESS);
1835 }
1836}
1837
1838static void append_bin_stats(const char *key, const uint16_t klen,
1839 const char *val, const uint32_t vlen,
1840 conn *c) {
1841 char *buf = c->dynamic_buffer.buffer + c->dynamic_buffer.offset;
1842 uint32_t bodylen = klen + vlen;
1843 protocol_binary_response_header header;
1844
1845 memset(&header, 0, sizeof(header));
1846 header.response.magic = (uint8_t)PROTOCOL_BINARY_RES;
1847 header.response.opcode = PROTOCOL_BINARY_CMD_STAT;
1848 header.response.keylen = (uint16_t)htons(klen);
1849 header.response.datatype = (uint8_t)PROTOCOL_BINARY_RAW_BYTES;
1850 header.response.bodylen = htonl(bodylen);
1851 header.response.opaque = c->opaque;
1852
1853 memcpy(buf, header.bytes, sizeof(header.response));
1854 buf += sizeof(header.response);
1855
1856 if (klen > 0) {
1857 memcpy(buf, key, klen);
1858 buf += klen;
1859
1860 if (vlen > 0) {
1861 memcpy(buf, val, vlen);
1862 }
1863 }
1864
1865 c->dynamic_buffer.offset += sizeof(header.response) + bodylen;
1866}
1867
1868static bool grow_dynamic_buffer(conn *c, size_t needed) {
1869 size_t nsize = c->dynamic_buffer.size;
1870 size_t available = nsize - c->dynamic_buffer.offset;
1871 bool rv = true;
1872
1873 /* Special case: No buffer -- need to allocate fresh */
1874 if (c->dynamic_buffer.buffer == NULL) {
1875 nsize = 1024;
1876 available = c->dynamic_buffer.size = c->dynamic_buffer.offset = 0;
1877 }
1878
1879 while (needed > available) {
1880 cb_assert(nsize > 0);
1881 nsize = nsize << 1;
1882 available = nsize - c->dynamic_buffer.offset;
1883 }
1884
1885 if (nsize != c->dynamic_buffer.size) {
1886 char *ptr = realloc(c->dynamic_buffer.buffer, nsize);
1887 if (ptr) {
1888 c->dynamic_buffer.buffer = ptr;
1889 c->dynamic_buffer.size = nsize;
1890 } else {
1891 rv = false;
1892 }
1893 }
1894
1895 return rv;
1896}
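grow_dynamic_buffer() doubles the buffer until the pending write fits and reallocates only when the target size actually changed. A standalone sketch of the same doubling policy, using a hypothetical dyn_buf struct rather than the conn fields above:

    #include <stdbool.h>
    #include <stdlib.h>

    struct dyn_buf { char *data; size_t size; size_t offset; };

    /* Ensure `needed` more bytes fit after offset; false on allocation failure. */
    static bool dyn_buf_reserve(struct dyn_buf *b, size_t needed) {
        size_t nsize = (b->data == NULL) ? 1024 : b->size;  /* fresh buffers start at 1KB */
        char *ptr;
        while (needed > nsize - b->offset) {
            nsize <<= 1;                                     /* double until it fits */
        }
        if (nsize != b->size) {
            ptr = realloc(b->data, nsize);
            if (ptr == NULL) {
                return false;                                /* old buffer stays valid */
            }
            b->data = ptr;
            b->size = nsize;
        }
        return true;
    }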
1897
1898static void append_stats(const char *key, const uint16_t klen,
1899 const char *val, const uint32_t vlen,
1900 const void *cookie)
1901{
1902 size_t needed;
1903 conn *c = (conn*)cookie;
1904 /* value without a key is invalid */
1905 if (klen == 0 && vlen > 0) {
1906 return ;
1907 }
1908
1909 needed = vlen + klen + sizeof(protocol_binary_response_header);
1910 if (!grow_dynamic_buffer(c, needed)) {
1911 return ;
1912 }
1913 append_bin_stats(key, klen, val, vlen, c);
1914 cb_assert(c->dynamic_buffer.offset <= c->dynamic_buffer.size);
1915}
1916
1917static void bin_read_chunk(conn *c,
1918 enum bin_substates next_substate,
1919 uint32_t chunk) {
1920 ptrdiff_t offset;
1921 cb_assert(c);
1922 c->substate = next_substate;
1923 c->rlbytes = chunk;
1924
1925 /* Ok... do we have room for everything in our buffer? */
1926 offset = c->rcurr + sizeof(protocol_binary_request_header) - c->rbuf;
1927 if (c->rlbytes > c->rsize - offset) {
1928 size_t nsize = c->rsize;
1929 size_t size = c->rlbytes + sizeof(protocol_binary_request_header);
1930
1931 while (size > nsize) {
1932 nsize *= 2;
1933 }
1934
1935 if (nsize != c->rsize) {
1936 char *newm;
1937 if (settings.verbose > 1) {
1938 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
1939 "%d: Need to grow buffer from %lu to %lu\n",
1940 c->sfd, (unsigned long)c->rsize, (unsigned long)nsize);
1941 }
1942 newm = realloc(c->rbuf, nsize);
1943 if (newm == NULL) {
1944 if (settings.verbose) {
1945 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
1946 "%d: Failed to grow buffer.. closing connection\n",
1947 c->sfd);
1948 }
1949 conn_set_state(c, conn_closing);
1950 return;
1951 }
1952
1953 c->rbuf= newm;
1954 /* rcurr should point to the same offset in the packet */
1955 c->rcurr = c->rbuf + offset - sizeof(protocol_binary_request_header);
1956 c->rsize = (int)nsize;
1957 }
1958 if (c->rbuf != c->rcurr) {
1959 memmove(c->rbuf, c->rcurr, c->rbytes);
1960 c->rcurr = c->rbuf;
1961 if (settings.verbose > 1) {
1962 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
1963 "%d: Repack input buffer\n",
1964 c->sfd);
1965 }
1966 }
1967 }
1968
1969 /* preserve the header in the buffer.. */
1970 c->ritem = c->rcurr + sizeof(protocol_binary_request_header);
1971 conn_set_state(c, conn_nread);
1972}
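Note how bin_read_chunk() re-derives c->rcurr from the saved offset after realloc rather than reusing the old pointer: realloc may move the allocation and invalidate every pointer into the old block. The same re-anchoring pattern in isolation (hypothetical helper and names, assuming the cursor points into the buffer being grown):

    #include <stddef.h>
    #include <stdlib.h>

    /* Grow a buffer while keeping a cursor into it valid across realloc. */
    static int grow_keep_cursor(char **buf, size_t *size, char **cursor, size_t newsize) {
        ptrdiff_t offset = *cursor - *buf;    /* remember the position, not the pointer */
        char *newm = realloc(*buf, newsize);
        if (newm == NULL) {
            return -1;                        /* the old buffer is still intact */
        }
        *buf = newm;
        *size = newsize;
        *cursor = newm + offset;              /* re-anchor after a possible move */
        return 0;
    }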
1973
1974static void bin_read_key(conn *c, enum bin_substates next_substate, int extra) {
1975 bin_read_chunk(c, next_substate, c->keylen + extra);
1976}
1977
1978
1979/* Just write an error message and disconnect the client */
1980static void handle_binary_protocol_error(conn *c) {
1981 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
1982 if (settings.verbose) {
1983 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
1984 "%d: Protocol error (opcode %02x), close connection\n",
1985 c->sfd, c->binary_header.request.opcode);
1986 }
1987 c->write_and_go = conn_closing;
1988}
1989
1990static void get_auth_data(const void *cookie, auth_data_t *data) {
1991 conn *c = (conn*)cookie;
1992 if (c->sasl_conn) {
1993 cbsasl_getprop(c->sasl_conn, CBSASL_USERNAME, (void*)&data->username);
1994 cbsasl_getprop(c->sasl_conn, CBSASL_CONFIG, (void*)&data->config);
1995 }
1996}
1997
1998struct sasl_tmp {
1999 int ksize;
2000 int vsize;
2001 char data[1]; /* data + ksize == value */
2002};
2003
2004static void process_bin_sasl_auth(conn *c) {
2005 int nkey;
2006 int vlen;
2007 char *key;
2008 size_t buffer_size;
2009 struct sasl_tmp *data;
2010
2011 cb_assert(c->binary_header.request.extlen == 0);
2012 nkey = c->binary_header.request.keylen;
2013 vlen = c->binary_header.request.bodylen - nkey;
2014
2015 if (nkey > MAX_SASL_MECH_LEN) {
2016 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, vlen);
2017 c->write_and_go = conn_swallow;
2018 return;
2019 }
2020
2021 key = binary_get_key(c);
2022 cb_assert(key);
2023
2024 buffer_size = sizeof(struct sasl_tmp) + nkey + vlen + 2;
2025 data = calloc(sizeof(struct sasl_tmp) + buffer_size, 1);
2026 if (!data) {
2027 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, vlen);
2028 c->write_and_go = conn_swallow;
2029 return;
2030 }
2031
2032 data->ksize = nkey;
2033 data->vsize = vlen;
2034 memcpy(data->data, key, nkey);
2035
2036 c->item = data;
2037 c->ritem = data->data + nkey;
2038 c->rlbytes = vlen;
2039 conn_set_state(c, conn_nread);
2040 c->substate = bin_reading_sasl_auth_data;
2041}
2042
2043static void process_bin_complete_sasl_auth(conn *c) {
2044 auth_data_t data;
2045 const char *out = NULL;
2046 unsigned int outlen = 0;
2047 int nkey;
2048 int vlen;
2049 struct sasl_tmp *stmp;
2050 char mech[1024];
2051 const char *challenge;
2052 int result=-1;
2053
2054 cb_assert(c->item);
2055
2056 nkey = c->binary_header.request.keylen;
2057 if (nkey > 1023) {
2058 /* too big.. */
2059 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, 0);
2060 return;
2061 }
2062 vlen = c->binary_header.request.bodylen - nkey;
2063
2064 stmp = c->item;
2065 memcpy(mech, stmp->data, nkey);
2066 mech[nkey] = 0x00;
2067
2068 if (settings.verbose) {
2069 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
2070 "%d: mech: ``%s'' with %d bytes of data\n", c->sfd, mech, vlen);
2071 }
2072
2073 challenge = vlen == 0 ? NULL : (stmp->data + nkey);
2074 switch (c->cmd) {
2075 case PROTOCOL_BINARY_CMD_SASL_AUTH:
2076 result = cbsasl_server_start(&c->sasl_conn, mech,
2077 challenge, vlen,
2078 (unsigned char **)&out, &outlen);
2079 break;
2080 case PROTOCOL_BINARY_CMD_SASL_STEP:
2081 result = cbsasl_server_step(c->sasl_conn, challenge,
2082 vlen, &out, &outlen);
2083 break;
2084 default:
2085 cb_assert(false); /* CMD should be one of the above */
2086 /* This code is pretty much impossible, but makes the compiler
2087 happier */
2088 if (settings.verbose) {
2089 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2090 "%d: Unhandled command %d with challenge %s\n",
2091 c->sfd, c->cmd, challenge);
2092 }
2093 break;
2094 }
2095
2096 free(c->item);
2097 c->item = NULL;
2098 c->ritem = NULL;
2099
2100 if (settings.verbose) {
2101 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
2102 "%d: sasl result code: %d\n",
2103 c->sfd, result);
2104 }
2105
2106 switch(result) {
2107 case SASL_OK:
2108 write_bin_response(c, "Authenticated", 0, 0, (uint32_t)strlen("Authenticated"));
2109 get_auth_data(c, &data);
2110 perform_callbacks(ON_AUTH, (const void*)&data, c);
2111 STATS_NOKEY(c, auth_cmds);
2112 break;
2113 case SASL_CONTINUE:
2114 if (add_bin_header(c, PROTOCOL_BINARY_RESPONSE_AUTH_CONTINUE, 0, 0,
2115 outlen, PROTOCOL_BINARY_RAW_BYTES) == -1) {
2116 conn_set_state(c, conn_closing);
2117 return;
2118 }
2119 add_iov(c, out, outlen);
2120 conn_set_state(c, conn_mwrite);
2121 c->write_and_go = conn_new_cmd;
2122 break;
2123 case SASL_BADPARAM:
2124 if (settings.verbose) {
2125 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
2126 "%d: Bad sasl params: %d\n",
2127 c->sfd, result);
2128 }
2129 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
2130 STATS_NOKEY2(c, auth_cmds, auth_errors);
2131 break;
2132 default:
2133 if (settings.verbose) {
2134 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
2135 "%d: Unknown sasl response: %d\n",
2136 c->sfd, result);
2137 }
2138 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, 0);
2139 STATS_NOKEY2(c, auth_cmds, auth_errors);
2140 }
2141}
2142
2143static bool authenticated(conn *c) {
2144 bool rv = false;
2145
2146 switch (c->cmd) {
2147 case PROTOCOL_BINARY_CMD_SASL_LIST_MECHS: /* FALLTHROUGH */
2148 case PROTOCOL_BINARY_CMD_SASL_AUTH: /* FALLTHROUGH */
2149 case PROTOCOL_BINARY_CMD_SASL_STEP: /* FALLTHROUGH */
2150 case PROTOCOL_BINARY_CMD_VERSION: /* FALLTHROUGH */
2151 case PROTOCOL_BINARY_CMD_HELLO:
2152 rv = true;
2153 break;
2154 default:
2155 if (c->sasl_conn) {
2156 const void *uname = NULL;
2157 cbsasl_getprop(c->sasl_conn, CBSASL_USERNAME, &uname);
2158 rv = uname != NULL;
2159 }
2160 }
2161
2162 if (settings.verbose > 1) {
2163 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
2164 "%d: authenticated() in cmd 0x%02x is %s\n",
2165 c->sfd, c->cmd, rv ? "true" : "false");
2166 }
2167
2168 return rv;
2169}
2170
2171bool binary_response_handler(const void *key, uint16_t keylen,
2172 const void *ext, uint8_t extlen,
2173 const void *body, uint32_t bodylen,
2174 uint8_t datatype, uint16_t status,
2175 uint64_t cas, const void *cookie)
2176{
2177 protocol_binary_response_header header;
2178 char *buf;
2179 conn *c = (conn*)cookie;
2180 /* Look at append_bin_stats */
2181 size_t needed;
2182 bool need_inflate = false;
2183 size_t inflated_length;
2184
2185 if (!c->supports_datatype) {
2186 if ((datatype & PROTOCOL_BINARY_DATATYPE_COMPRESSED) == PROTOCOL_BINARY_DATATYPE_COMPRESSED) {
2187 need_inflate = true;
2188 }
2189 /* We may silently drop the knowledge about a JSON item */
2190 datatype = PROTOCOL_BINARY_RAW_BYTES;
2191 }
2192
2193 needed = keylen + extlen + sizeof(protocol_binary_response_header);
2194 if (need_inflate) {
2195 if (snappy_uncompressed_length(body, bodylen,
2196 &inflated_length) != SNAPPY_OK) {
2197 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
2198 "<%d ERROR: Failed to determine inflated size",
2199 c->sfd);
2200 return false;
2201 }
2202 needed += inflated_length;
2203 } else {
2204 needed += bodylen;
2205 }
2206
2207 if (!grow_dynamic_buffer(c, needed)) {
2208 if (settings.verbose > 0) {
2209 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
2210 "<%d ERROR: Failed to allocate memory for response",
2211 c->sfd);
2212 }
2213 return false;
2214 }
2215
2216 buf = c->dynamic_buffer.buffer + c->dynamic_buffer.offset;
2217 memset(&header, 0, sizeof(header));
2218 header.response.magic = (uint8_t)PROTOCOL_BINARY_RES;
2219 header.response.opcode = c->binary_header.request.opcode;
2220 header.response.keylen = (uint16_t)htons(keylen);
2221 header.response.extlen = extlen;
2222 header.response.datatype = datatype;
2223 header.response.status = (uint16_t)htons(status);
2224 if (need_inflate) {
2225 header.response.bodylen = htonl((uint32_t)(inflated_length + keylen + extlen));
2226 } else {
2227 header.response.bodylen = htonl(bodylen + keylen + extlen);
2228 }
2229 header.response.opaque = c->opaque;
2230 header.response.cas = htonll(cas);
2231
2232 memcpy(buf, header.bytes, sizeof(header.response));
2233 buf += sizeof(header.response);
2234
2235 if (extlen > 0) {
2236 memcpy(buf, ext, extlen);
2237 buf += extlen;
2238 }
2239
2240 if (keylen > 0) {
2241 memcpy(buf, key, keylen);
2242 buf += keylen;
2243 }
2244
2245 if (bodylen > 0) {
2246 if (need_inflate) {
2247 if (snappy_uncompress(body, bodylen, buf, &inflated_length) != SNAPPY_OK) {
2248 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
2249 "<%d ERROR: Failed to inflate item", c->sfd);
2250 return false;
2251 }
2252 } else {
2253 memcpy(buf, body, bodylen);
2254 }
2255 }
2256
2257 c->dynamic_buffer.offset += needed;
2258 return true;
2259}
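binary_response_handler() sizes `needed` from snappy_uncompressed_length() before inflating directly into the dynamic buffer, and bails out if either snappy call reports an error. The two snappy-c calls pair up as in this reduced sketch (a standalone helper for illustration, not part of memcached.c):

    #include <snappy-c.h>
    #include <stdlib.h>

    /* Inflate src (srclen bytes) into a freshly malloc'ed buffer; NULL on failure. */
    static char *inflate_value(const char *src, size_t srclen, size_t *outlen) {
        char *dst;
        if (snappy_uncompressed_length(src, srclen, outlen) != SNAPPY_OK) {
            return NULL;                       /* not valid snappy data */
        }
        dst = malloc(*outlen);
        if (dst == NULL) {
            return NULL;
        }
        /* outlen is in/out: buffer capacity on entry, bytes actually written on return */
        if (snappy_uncompress(src, srclen, dst, outlen) != SNAPPY_OK) {
            free(dst);
            return NULL;
        }
        return dst;
    }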
2260
2261/**
2262 * Tap stats (these are only used by the tap thread, so they don't need
2263 * to be in the threadlocal struct right now...
2264 */
2265struct tap_cmd_stats {
2266 uint64_t connect;
2267 uint64_t mutation;
2268 uint64_t checkpoint_start;
2269 uint64_t checkpoint_end;
2270 uint64_t delete;
2271 uint64_t flush;
2272 uint64_t opaque;
2273 uint64_t vbucket_set;
2274};
2275
2276struct tap_stats {
2277 cb_mutex_t mutex;
2278 struct tap_cmd_stats sent;
2279 struct tap_cmd_stats received;
2280} tap_stats;
2281
2282static void ship_tap_log(conn *c) {
2283 bool more_data = true;
2284 bool send_data = false;
2285 bool disconnect = false;
2286 item *it;
2287 uint32_t bodylen;
2288 int ii = 0;
2289
2290 c->msgcurr = 0;
2291 c->msgused = 0;
2292 c->iovused = 0;
2293 if (add_msghdr(c) != 0) {
1
Taking false branch
2294 if (settings.verbose) {
2295 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2296 "%d: Failed to create output headers. Shutting down tap connection\n", c->sfd);
2297 }
2298 conn_set_state(c, conn_closing);
2299 return ;
2300 }
2301 /* @todo add check for buffer overflow of c->wbuf) */
2302 c->wbytes = 0;
2303 c->wcurr = c->wbuf;
2304 c->icurr = c->ilist;
2305 do {
10
Loop condition is true. Execution continues on line 2307
19
Loop condition is true. Execution continues on line 2307
24
Loop condition is true. Execution continues on line 2307
2306 /* @todo fixme! */
2307 void *engine;
2308 uint16_t nengine;
2309 uint8_t ttl;
2310 uint16_t tap_flags;
2311 uint32_t seqno;
2312 uint16_t vbucket;
2313 tap_event_t event;
2314 bool inflate = false;
2315 size_t inflated_length;
25
'inflated_length' declared without an initial value
2316
2317 union {
2318 protocol_binary_request_tap_mutation mutation;
2319 protocol_binary_request_tap_delete delete;
2320 protocol_binary_request_tap_flush flush;
2321 protocol_binary_request_tap_opaque opaque;
2322 protocol_binary_request_noop noop;
2323 } msg;
2324 item_info_holder info;
2325 memset(&info, 0, sizeof(info));
2326
2327 if (ii++ == 10) {
2
Taking false branch
11
Taking false branch
20
Taking false branch
26
Taking false branch
2328 break;
2329 }
2330
2331 event = c->tap_iterator(settings.engine.v0, c, &it,
2332 &engine, &nengine, &ttl,
2333 &tap_flags, &seqno, &vbucket);
2334 memset(&msg, 0, sizeof(msg));
2335 msg.opaque.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
2336 msg.opaque.message.header.request.opaque = htonl(seqno);
2337 msg.opaque.message.body.tap.enginespecific_length = htons(nengine);
2338 msg.opaque.message.body.tap.ttl = ttl;
2339 msg.opaque.message.body.tap.flags = htons(tap_flags);
2340 msg.opaque.message.header.request.extlen = 8;
2341 msg.opaque.message.header.request.vbucket = htons(vbucket);
2342 info.info.nvalue = IOV_MAX;
2343
2344 switch (event) {
3
Control jumps to 'case TAP_VBUCKET_SET:' at line 2536
12
Control jumps to 'case TAP_VBUCKET_SET:' at line 2536
21
Control jumps to 'case TAP_DELETION:' at line 2485
27
Control jumps to 'case TAP_CHECKPOINT_START:' at line 2358
2345 case TAP_NOOP :
2346 send_data = true;
2347 msg.noop.message.header.request.opcode = PROTOCOL_BINARY_CMD_NOOP;
2348 msg.noop.message.header.request.extlen = 0;
2349 msg.noop.message.header.request.bodylen = htonl(0);
2350 memcpy(c->wcurr, msg.noop.bytes, sizeof(msg.noop.bytes));
2351 add_iov(c, c->wcurr, sizeof(msg.noop.bytes));
2352 c->wcurr += sizeof(msg.noop.bytes);
2353 c->wbytes += sizeof(msg.noop.bytes);
2354 break;
2355 case TAP_PAUSE :
2356 more_data = false;
2357 break;
2358 case TAP_CHECKPOINT_START:
2359 case TAP_CHECKPOINT_END:
2360 case TAP_MUTATION:
2361 if (!settings.engine.v1->get_item_info(settings.engine.v0, c, it,
28
Taking false branch
2362 (void*)&info)) {
2363 settings.engine.v1->release(settings.engine.v0, c, it);
2364 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2365 "%d: Failed to get item info\n", c->sfd);
2366 break;
2367 }
2368 send_data = true;
2369 c->ilist[c->ileft++] = it;
2370
2371 if (event == TAP_CHECKPOINT_START) {
29
Taking true branch
2372 msg.mutation.message.header.request.opcode =
2373 PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_START;
2374 cb_mutex_enter(&tap_stats.mutex);
2375 tap_stats.sent.checkpoint_start++;
2376 cb_mutex_exit(&tap_stats.mutex);
2377 } else if (event == TAP_CHECKPOINT_END) {
2378 msg.mutation.message.header.request.opcode =
2379 PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_END;
2380 cb_mutex_enter(&tap_stats.mutex);
2381 tap_stats.sent.checkpoint_end++;
2382 cb_mutex_exit(&tap_stats.mutex);
2383 } else if (event == TAP_MUTATION) {
2384 msg.mutation.message.header.request.opcode = PROTOCOL_BINARY_CMD_TAP_MUTATION;
2385 cb_mutex_enter(&tap_stats.mutex);
2386 tap_stats.sent.mutation++;
2387 cb_mutex_exit(&tap_stats.mutex);
2388 }
2389
2390 msg.mutation.message.header.request.cas = htonll(info.info.cas);
2391 msg.mutation.message.header.request.keylen = htons(info.info.nkey);
2392 msg.mutation.message.header.request.extlen = 16;
2393 if (c->supports_datatype) {
30
Taking false branch
2394 msg.mutation.message.header.request.datatype = info.info.datatype;
2395 } else {
2396 switch (info.info.datatype) {
31
Control jumps to 'case PROTOCOL_BINARY_DATATYPE_COMPRESSED_JSON:' at line 2402
2397 case 0:
2398 break;
2399 case PROTOCOL_BINARY_DATATYPE_JSON:
2400 break;
2401 case PROTOCOL_BINARY_DATATYPE_COMPRESSED:
2402 case PROTOCOL_BINARY_DATATYPE_COMPRESSED_JSON:
2403 inflate = true;
2404 break;
32
Execution continues on line 2412
2405 default:
2406 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2407 "%d: shipping data with"
2408 " an invalid datatype "
2409 "(stripping info)",
2410 c->sfd);
2411 }
2412 msg.mutation.message.header.request.datatype = 0;
2413 }
2414
2415 bodylen = 16 + info.info.nkey + nengine;
2416 if ((tap_flags & TAP_FLAG_NO_VALUE) == 0) {
33
Taking false branch
2417 if (inflate) {
2418 if (snappy_uncompressed_length(info.info.value[0].iov_base,
2419 info.info.nbytes,
2420 &inflated_length) == SNAPPY_OK) {
2421 bodylen += inflated_length;
2422 } else {
2423 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
2424 "<%d ERROR: Failed to determine inflated size. Sending as compressed",
2425 c->sfd);
2426 inflate = false;
2427 bodylen += info.info.nbytes;
2428 }
2429 } else {
2430 bodylen += info.info.nbytes;
2431 }
2432 }
2433 msg.mutation.message.header.request.bodylen = htonl(bodylen);
2434
2435 if ((tap_flags & TAP_FLAG_NETWORK_BYTE_ORDER) == 0) {
34
Taking false branch
2436 msg.mutation.message.body.item.flags = htonl(info.info.flags);
2437 } else {
2438 msg.mutation.message.body.item.flags = info.info.flags;
2439 }
2440 msg.mutation.message.body.item.expiration = htonl(info.info.exptime);
2441 msg.mutation.message.body.tap.enginespecific_length = htons(nengine);
2442 msg.mutation.message.body.tap.ttl = ttl;
2443 msg.mutation.message.body.tap.flags = htons(tap_flags);
2444 memcpy(c->wcurr, msg.mutation.bytes, sizeof(msg.mutation.bytes));
2445
2446 add_iov(c, c->wcurr, sizeof(msg.mutation.bytes));
2447 c->wcurr += sizeof(msg.mutation.bytes);
2448 c->wbytes += sizeof(msg.mutation.bytes);
2449
2450 if (nengine > 0) {
35
Assuming 'nengine' is <= 0
36
Taking false branch
2451 memcpy(c->wcurr, engine, nengine);
2452 add_iov(c, c->wcurr, nengine);
2453 c->wcurr += nengine;
2454 c->wbytes += nengine;
2455 }
2456
2457 add_iov(c, info.info.key, info.info.nkey);
2458 if ((tap_flags & TAP_FLAG_NO_VALUE) == 0) {
37
Taking true branch
2459 if (inflate) {
38
Taking true branch
2460 void *buf = malloc(inflated_length);
39
Function call argument is an uninitialized value
2461 void *body = info.info.value[0].iov_base;
2462 size_t bodylen = info.info.value[0].iov_len;
2463 if (snappy_uncompress(body, bodylen,
2464 buf, &inflated_length) == SNAPPY_OK) {
2465 c->temp_alloc_list[c->temp_alloc_left++] = buf;
2466
2467 add_iov(c, buf, inflated_length);
2468 } else {
2469 free(buf);
2470 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2471 "%d: FATAL: failed to inflate object. shutitng down connection", c->sfd);
2472 conn_set_state(c, conn_closing);
2473 return;
2474 }
2475 } else {
2476 int xx;
2477 for (xx = 0; xx < info.info.nvalue; ++xx) {
2478 add_iov(c, info.info.value[xx].iov_base,
2479 info.info.value[xx].iov_len);
2480 }
2481 }
2482 }
2483
2484 break;
2485 case TAP_DELETION:
2486 /* This is a delete */
2487 if (!settings.engine.v1->get_item_info(settings.engine.v0, c, it,
22
Taking true branch
2488 (void*)&info)) {
2489 settings.engine.v1->release(settings.engine.v0, c, it);
2490 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2491 "%d: Failed to get item info\n", c->sfd);
2492 break;
23
Execution continues on line 2575
2493 }
2494 send_data = true;
2495 c->ilist[c->ileft++] = it;
2496 msg.delete.message.header.request.opcode = PROTOCOL_BINARY_CMD_TAP_DELETE;
2497 msg.delete.message.header.request.cas = htonll(info.info.cas);
2498 msg.delete.message.header.request.keylen = htons(info.info.nkey);
2499
2500 bodylen = 8 + info.info.nkey + nengine;
2501 if ((tap_flags & TAP_FLAG_NO_VALUE) == 0) {
2502 bodylen += info.info.nbytes;
2503 }
2504 msg.delete.message.header.request.bodylen = htonl(bodylen);
2505
2506 memcpy(c->wcurr, msg.delete.bytes, sizeof(msg.delete.bytes));
2507 add_iov(c, c->wcurr, sizeof(msg.delete.bytes));
2508 c->wcurr += sizeof(msg.delete.bytes);
2509 c->wbytes += sizeof(msg.delete.bytes);
2510
2511 if (nengine > 0) {
2512 memcpy(c->wcurr, engine, nengine);
2513 add_iov(c, c->wcurr, nengine);
2514 c->wcurr += nengine;
2515 c->wbytes += nengine;
2516 }
2517
2518 add_iov(c, info.info.key, info.info.nkey);
2519 if ((tap_flags & TAP_FLAG_NO_VALUE) == 0) {
2520 int xx;
2521 for (xx = 0; xx < info.info.nvalue; ++xx) {
2522 add_iov(c, info.info.value[xx].iov_base,
2523 info.info.value[xx].iov_len);
2524 }
2525 }
2526
2527 cb_mutex_enter(&tap_stats.mutex);
2528 tap_stats.sent.delete++;
2529 cb_mutex_exit(&tap_stats.mutex);
2530 break;
2531
2532 case TAP_DISCONNECT:
2533 disconnect = true;
2534 more_data = false;
2535 break;
2536 case TAP_VBUCKET_SET:
2537 case TAP_FLUSH:
2538 case TAP_OPAQUE:
2539 send_data = true;
2540
2541 if (event == TAP_OPAQUE) {
4
Taking false branch
13
Taking false branch
2542 msg.flush.message.header.request.opcode = PROTOCOL_BINARY_CMD_TAP_OPAQUE;
2543 cb_mutex_enter(&tap_stats.mutex);
2544 tap_stats.sent.opaque++;
2545 cb_mutex_exit(&tap_stats.mutex);
2546
2547 } else if (event == TAP_FLUSH) {
5
Taking false branch
14
Taking false branch
2548 msg.flush.message.header.request.opcode = PROTOCOL_BINARY_CMD_TAP_FLUSH;
2549 cb_mutex_enter(&tap_stats.mutex);
2550 tap_stats.sent.flush++;
2551 cb_mutex_exit(&tap_stats.mutex);
2552 } else if (event == TAP_VBUCKET_SET) {
6
Taking true branch
15
Taking true branch
2553 msg.flush.message.header.request.opcode = PROTOCOL_BINARY_CMD_TAP_VBUCKET_SET;
2554 msg.flush.message.body.tap.flags = htons(tap_flags);
2555 cb_mutex_enter(&tap_stats.mutex);
2556 tap_stats.sent.vbucket_set++;
2557 cb_mutex_exit(&tap_stats.mutex);
2558 }
2559
2560 msg.flush.message.header.request.bodylen = htonl(8 + nengine);
2561 memcpy(c->wcurr, msg.flush.bytes, sizeof(msg.flush.bytes));
2562 add_iov(c, c->wcurr, sizeof(msg.flush.bytes));
2563 c->wcurr += sizeof(msg.flush.bytes);
2564 c->wbytes += sizeof(msg.flush.bytes);
2565 if (nengine > 0) {
7
Assuming 'nengine' is <= 0
8
Taking false branch
16
Assuming 'nengine' is <= 0
17
Taking false branch
2566 memcpy(c->wcurr, engine, nengine);
2567 add_iov(c, c->wcurr, nengine);
2568 c->wcurr += nengine;
2569 c->wbytes += nengine;
2570 }
2571 break;
9
Execution continues on line 2575
18
Execution continues on line 2575
2572 default:
2573 abort();
2574 }
2575 } while (more_data);
2576
2577 c->ewouldblock = false;
2578 if (send_data) {
2579 conn_set_state(c, conn_mwrite);
2580 if (disconnect) {
2581 c->write_and_go = conn_closing;
2582 } else {
2583 c->write_and_go = conn_ship_log;
2584 }
2585 } else {
2586 if (disconnect) {
2587 conn_set_state(c, conn_closing);
2588 } else {
2589 /* No more items to ship to the slave at this time.. suspend.. */
2590 if (settings.verbose > 1) {
2591 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
2592 "%d: No more items in tap log.. waiting\n",
2593 c->sfd);
2594 }
2595 c->ewouldblock = true;
2596 }
2597 }
2598}
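The defect reported at line 2460 follows from the two independent TAP_FLAG_NO_VALUE checks in the mutation case: on the path shown, the block at line 2416 that would have called snappy_uncompressed_length() is skipped, yet the `if (inflate)` branch at line 2459 is still reached, so malloc() reads inflated_length before anything has written it. The smallest change would be to initialize the variable at its declaration (line 2315), e.g. `size_t inflated_length = 0;`. A more defensive sketch, offered only as a possible rewrite of the send-side branch rather than the committed fix, recomputes the length next to its use so the value can never be stale or unset:

    /* Sketch of the inflate branch around line 2458; names follow the listing above. */
    if (inflate) {
        void *body = info.info.value[0].iov_base;
        size_t body_len = info.info.value[0].iov_len;
        size_t inflated_len = 0;                 /* always written before use */
        void *buf = NULL;

        if (snappy_uncompressed_length(body, body_len, &inflated_len) != SNAPPY_OK ||
            (buf = malloc(inflated_len)) == NULL) {
            conn_set_state(c, conn_closing);
            return;
        }
        if (snappy_uncompress(body, body_len, buf, &inflated_len) != SNAPPY_OK) {
            free(buf);
            conn_set_state(c, conn_closing);
            return;
        }
        c->temp_alloc_list[c->temp_alloc_left++] = buf;
        add_iov(c, buf, inflated_len);
    }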
2599
2600static ENGINE_ERROR_CODE default_unknown_command(EXTENSION_BINARY_PROTOCOL_DESCRIPTOR *descriptor,
2601 ENGINE_HANDLE* handle,
2602 const void* cookie,
2603 protocol_binary_request_header *request,
2604 ADD_RESPONSE response)
2605{
2606 return settings.engine.v1->unknown_command(handle, cookie, request, response);
2607}
2608
2609struct request_lookup {
2610 EXTENSION_BINARY_PROTOCOL_DESCRIPTOR *descriptor;
2611 BINARY_COMMAND_CALLBACK callback;
2612};
2613
2614static struct request_lookup request_handlers[0x100];
2615
2616typedef void (*RESPONSE_HANDLER)(conn*);
2617/**
2618 * A map between the response packets op-code and the function to handle
2619 * the response message.
2620 */
2621static RESPONSE_HANDLER response_handlers[0x100];
2622
2623static void setup_binary_lookup_cmd(EXTENSION_BINARY_PROTOCOL_DESCRIPTOR *descriptor,
2624 uint8_t cmd,
2625 BINARY_COMMAND_CALLBACK new_handler) {
2626 request_handlers[cmd].descriptor = descriptor;
2627 request_handlers[cmd].callback = new_handler;
2628}
2629
2630static void process_bin_unknown_packet(conn *c) {
2631 void *packet = c->rcurr - (c->binary_header.request.bodylen +
2632 sizeof(c->binary_header));
2633 ENGINE_ERROR_CODE ret = c->aiostat;
2634 c->aiostat = ENGINE_SUCCESS;
2635 c->ewouldblock = false;
2636
2637 if (ret == ENGINE_SUCCESS) {
2638 struct request_lookup *rq = request_handlers + c->binary_header.request.opcode;
2639 ret = rq->callback(rq->descriptor, settings.engine.v0, c, packet,
2640 binary_response_handler);
2641 }
2642
2643 switch (ret) {
2644 case ENGINE_SUCCESS:
2645 if (c->dynamic_buffer.buffer != NULL) {
2646 write_and_free(c, c->dynamic_buffer.buffer, c->dynamic_buffer.offset);
2647 c->dynamic_buffer.buffer = NULL;
2648 } else {
2649 conn_set_state(c, conn_new_cmd);
2650 }
2651 break;
2652 case ENGINE_EWOULDBLOCK:
2653 c->ewouldblock = true1;
2654 break;
2655 case ENGINE_DISCONNECT:
2656 conn_set_state(c, conn_closing);
2657 break;
2658 default:
2659 /* Release the dynamic buffer.. it may be partial.. */
2660 free(c->dynamic_buffer.buffer);
2661 c->dynamic_buffer.buffer = NULL;
2662 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
2663 }
2664}
2665
2666static void cbsasl_refresh_main(void *c)
2667{
2668 int rv = cbsasl_server_refresh();
2669 if (rv == SASL_OK) {
2670 notify_io_complete(c, ENGINE_SUCCESS);
2671 } else {
2672 notify_io_complete(c, ENGINE_EINVAL);
2673 }
2674}
2675
2676static ENGINE_ERROR_CODE refresh_cbsasl(conn *c)
2677{
2678 cb_thread_t tid;
2679 int err;
2680
2681 err = cb_create_thread(&tid, cbsasl_refresh_main, c, 1);
2682 if (err != 0) {
2683 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2684 "Failed to create cbsasl db "
2685 "update thread: %s",
2686 strerror(err));
2687 return ENGINE_DISCONNECT;
2688 }
2689
2690 return ENGINE_EWOULDBLOCK;
2691}
2692
2693#if 0
2694static void ssl_certs_refresh_main(void *c)
2695{
2696 /* Update the internal certificates */
2697
2698 notify_io_complete(c, ENGINE_SUCCESS);
2699}
2700#endif
2701static ENGINE_ERROR_CODE refresh_ssl_certs(conn *c)
2702{
2703#if 0
2704 cb_thread_t tid;
2705 int err;
2706
2707 err = cb_create_thread(&tid, ssl_certs_refresh_main, c, 1);
2708 if (err != 0) {
2709 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2710 "Failed to create ssl_certificate "
2711 "update thread: %s",
2712 strerror(err));
2713 return ENGINE_DISCONNECT;
2714 }
2715
2716 return ENGINE_EWOULDBLOCK;
2717#endif
2718 return ENGINE_SUCCESS;
2719}
2720
2721static void process_bin_tap_connect(conn *c) {
2722 TAP_ITERATOR iterator;
2723 char *packet = (c->rcurr - (c->binary_header.request.bodylen +
2724 sizeof(c->binary_header)));
2725 protocol_binary_request_tap_connect *req = (void*)packet;
2726 const char *key = packet + sizeof(req->bytes);
2727 const char *data = key + c->binary_header.request.keylen;
2728 uint32_t flags = 0;
2729 size_t ndata = c->binary_header.request.bodylen -
2730 c->binary_header.request.extlen -
2731 c->binary_header.request.keylen;
2732
2733 if (c->binary_header.request.extlen == 4) {
2734 flags = ntohl(req->message.body.flags)(__builtin_constant_p(req->message.body.flags) ? ((__uint32_t
)((((__uint32_t)(req->message.body.flags) & 0xff000000
) >> 24) | (((__uint32_t)(req->message.body.flags) &
0x00ff0000) >> 8) | (((__uint32_t)(req->message.body
.flags) & 0x0000ff00) << 8) | (((__uint32_t)(req->
message.body.flags) & 0x000000ff) << 24))) : _OSSwapInt32
(req->message.body.flags))
;
2735
2736 if (flags & TAP_CONNECT_FLAG_BACKFILL0x01) {
2737 /* the userdata has to be at least 8 bytes! */
2738 if (ndata < 8) {
2739 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2740 "%d: ERROR: Invalid tap connect message\n",
2741 c->sfd);
2742 conn_set_state(c, conn_closing);
2743 return ;
2744 }
2745 }
2746 } else {
2747 data -= 4;
2748 key -= 4;
2749 }
2750
2751 if (settings.verbose && c->binary_header.request.keylen > 0) {
2752 char buffer[1024];
2753 int len = c->binary_header.request.keylen;
2754 if (len >= sizeof(buffer)) {
2755 len = sizeof(buffer) - 1;
2756 }
2757 memcpy(buffer, key, len);
2758 buffer[len] = '\0';
2759 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
2760 "%d: Trying to connect with named tap connection: <%s>\n",
2761 c->sfd, buffer);
2762 }
2763
2764 iterator = settings.engine.v1->get_tap_iterator(
2765 settings.engine.v0, c, key, c->binary_header.request.keylen,
2766 flags, data, ndata);
2767
2768 if (iterator == NULL) {
2769 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2770 "%d: FATAL: The engine does not support tap\n",
2771 c->sfd);
2772 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
2773 c->write_and_go = conn_closing;
2774 } else {
2775 c->tap_iterator = iterator;
2776 c->which = EV_WRITE;
2777 conn_set_state(c, conn_ship_log);
2778 }
2779}
2780
2781static void process_bin_tap_packet(tap_event_t event, conn *c) {
2782 char *packet;
2783 protocol_binary_request_tap_no_extras *tap;
2784 uint16_t nengine;
2785 uint16_t tap_flags;
2786 uint32_t seqno;
2787 uint8_t ttl;
2788 char *engine_specific;
2789 char *key;
2790 uint16_t nkey;
2791 char *data;
2792 uint32_t flags;
2793 uint32_t exptime;
2794 uint32_t ndata;
2795 ENGINE_ERROR_CODE ret;
2796
2797 cb_assert(c != NULL);
2798 packet = (c->rcurr - (c->binary_header.request.bodylen +
2799 sizeof(c->binary_header)));
2800 tap = (void*)packet;
2801 nengine = ntohs(tap->message.body.tap.enginespecific_length);
2802 tap_flags = ntohs(tap->message.body.tap.flags);
2803 seqno = ntohl(tap->message.header.request.opaque);
2804 ttl = tap->message.body.tap.ttl;
2805 engine_specific = packet + sizeof(tap->bytes);
2806 key = engine_specific + nengine;
2807 nkey = c->binary_header.request.keylen;
2808 data = key + nkey;
2809 flags = 0;
2810 exptime = 0;
2811 ndata = c->binary_header.request.bodylen - nengine - nkey - 8;
2812 ret = c->aiostat;
2813
2814 if (ttl == 0) {
2815 ret = ENGINE_EINVAL;
2816 } else {
2817 if (event == TAP_MUTATION || event == TAP_CHECKPOINT_START ||
2818 event == TAP_CHECKPOINT_END) {
2819 protocol_binary_request_tap_mutation *mutation = (void*)tap;
2820
2821 /* engine_specific data in protocol_binary_request_tap_mutation is */
2822 /* at a different offset than protocol_binary_request_tap_no_extras */
2823 engine_specific = packet + sizeof(mutation->bytes);
2824
2825 flags = mutation->message.body.item.flags;
2826 if ((tap_flags & TAP_FLAG_NETWORK_BYTE_ORDER) == 0) {
2827 flags = ntohl(flags);
2828 }
2829
2830 exptime = ntohl(mutation->message.body.item.expiration);
2831 key += 8;
2832 data += 8;
2833 ndata -= 8;
2834 }
2835
2836 if (ret == ENGINE_SUCCESS) {
2837 uint8_t datatype = c->binary_header.request.datatype;
2838 if (event == TAP_MUTATION && !c->supports_datatype) {
2839 if (checkUTF8JSON((void*)data, ndata)) {
2840 datatype = PROTOCOL_BINARY_DATATYPE_JSON;
2841 }
2842 }
2843
2844 ret = settings.engine.v1->tap_notify(settings.engine.v0, c,
2845 engine_specific, nengine,
2846 ttl - 1, tap_flags,
2847 event, seqno,
2848 key, nkey,
2849 flags, exptime,
2850 ntohll(tap->message.header.request.cas),
2851 datatype,
2852 data, ndata,
2853 c->binary_header.request.vbucket);
2854 }
2855 }
2856
2857 switch (ret) {
2858 case ENGINE_DISCONNECT:
2859 conn_set_state(c, conn_closing);
2860 break;
2861 case ENGINE_EWOULDBLOCK:
2862 c->ewouldblock = true1;
2863 break;
2864 default:
2865 if ((tap_flags & TAP_FLAG_ACK) || (ret != ENGINE_SUCCESS)) {
2866 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
2867 } else {
2868 conn_set_state(c, conn_new_cmd);
2869 }
2870 }
2871}
2872
2873static void process_bin_tap_ack(conn *c) {
2874 char *packet;
2875 protocol_binary_response_no_extras *rsp;
2876 uint32_t seqno;
2877 uint16_t status;
2878 char *key;
2879 ENGINE_ERROR_CODE ret = ENGINE_DISCONNECT;
2880
2881 cb_assert(c != NULL);
2882 packet = (c->rcurr - (c->binary_header.request.bodylen + sizeof(c->binary_header)));
2883 rsp = (void*)packet;
2884 seqno = ntohl(rsp->message.header.response.opaque);
2885 status = ntohs(rsp->message.header.response.status);
2886 key = packet + sizeof(rsp->bytes);
2887
2888 if (settings.engine.v1->tap_notify != NULL) {
2889 ret = settings.engine.v1->tap_notify(settings.engine.v0, c, NULL, 0, 0, status,
2890 TAP_ACK, seqno, key,
2891 c->binary_header.request.keylen, 0, 0,
2892 0, c->binary_header.request.datatype, NULL,
2893 0, 0);
2894 }
2895
2896 if (ret == ENGINE_DISCONNECT) {
2897 conn_set_state(c, conn_closing);
2898 } else {
2899 conn_set_state(c, conn_ship_log);
2900 }
2901}
2902
2903/**
2904 * We received a noop response.. just ignore it
2905 */
2906static void process_bin_noop_response(conn *c) {
2907 cb_assert(c != NULL);
2908 conn_set_state(c, conn_new_cmd);
2909}
2910
2911/*******************************************************************************
2912 ** UPR MESSAGE PRODUCERS **
2913 ******************************************************************************/
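/*
 * The producers below share a common shape: check that the packet (plus any
 * key/value/meta payload) fits in the connection's write buffer (c->wsize),
 * build the request or response header in a stack-allocated packet, byte-swap
 * the multi-byte fields to network order, copy the bytes into c->wcurr, and
 * register the region with add_iov() before advancing wcurr/wbytes.
 */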
2914static ENGINE_ERROR_CODE upr_message_get_failover_log(const void *cookie,
2915 uint32_t opaque,
2916 uint16_t vbucket)
2917{
2918 protocol_binary_request_upr_get_failover_log packet;
2919 conn *c = (void*)cookie;
2920
2921 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
2922 /* We don't have room in the buffer */
2923 return ENGINE_E2BIG;
2924 }
2925
2926 memset(packet.bytes, 0, sizeof(packet.bytes));
2927 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
2928 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_GET_FAILOVER_LOG;
2929 packet.message.header.request.opaque = opaque;
2930 packet.message.header.request.vbucket = htons(vbucket);
2931
2932 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
2933 add_iov(c, c->wcurr, sizeof(packet.bytes));
2934 c->wcurr += sizeof(packet.bytes);
2935 c->wbytes += sizeof(packet.bytes);
2936
2937 return ENGINE_SUCCESS;
2938}
2939
2940static ENGINE_ERROR_CODE upr_message_stream_req(const void *cookie,
2941 uint32_t opaque,
2942 uint16_t vbucket,
2943 uint32_t flags,
2944 uint64_t start_seqno,
2945 uint64_t end_seqno,
2946 uint64_t vbucket_uuid,
2947 uint64_t snap_start_seqno,
2948 uint64_t snap_end_seqno)
2949{
2950 protocol_binary_request_upr_stream_req packet;
2951 conn *c = (void*)cookie;
2952
2953 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
2954 /* We don't have room in the buffer */
2955 return ENGINE_E2BIG;
2956 }
2957
2958 memset(packet.bytes, 0, sizeof(packet.bytes));
2959 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
2960 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_STREAM_REQ;
2961 packet.message.header.request.extlen = 48;
2962 packet.message.header.request.bodylen = htonl(48);
2963 packet.message.header.request.opaque = opaque;
2964 packet.message.header.request.vbucket = htons(vbucket);
2965
2966 packet.message.body.flags = ntohl(flags);
2967 packet.message.body.start_seqno = ntohll(start_seqno);
2968 packet.message.body.end_seqno = ntohll(end_seqno);
2969 packet.message.body.vbucket_uuid = ntohll(vbucket_uuid);
2970 packet.message.body.snap_start_seqno = ntohll(snap_start_seqno);
2971 packet.message.body.snap_end_seqno = ntohll(snap_end_seqno);
2972
2973 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
2974 add_iov(c, c->wcurr, sizeof(packet.bytes));
2975 c->wcurr += sizeof(packet.bytes);
2976 c->wbytes += sizeof(packet.bytes);
2977
2978 return ENGINE_SUCCESS;
2979}
2980
2981static ENGINE_ERROR_CODE upr_message_add_stream_response(const void *cookie,
2982 uint32_t opaque,
2983 uint32_t dialogopaque,
2984 uint8_t status)
2985{
2986 protocol_binary_response_upr_add_stream packet;
2987 conn *c = (void*)cookie;
2988
2989 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
2990 /* We don't have room in the buffer */
2991 return ENGINE_E2BIG;
2992 }
2993
2994 memset(packet.bytes, 0, sizeof(packet.bytes));
2995 packet.message.header.response.magic = (uint8_t)PROTOCOL_BINARY_RES;
2996 packet.message.header.response.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_ADD_STREAM;
2997 packet.message.header.response.extlen = 4;
2998 packet.message.header.response.status = htons(status);
2999 packet.message.header.response.bodylen = htonl(4);
3000 packet.message.header.response.opaque = opaque;
3001 packet.message.body.opaque = ntohl(dialogopaque);
3002
3003 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3004 add_iov(c, c->wcurr, sizeof(packet.bytes));
3005 c->wcurr += sizeof(packet.bytes);
3006 c->wbytes += sizeof(packet.bytes);
3007
3008 return ENGINE_SUCCESS;
3009}
3010
3011static ENGINE_ERROR_CODE upr_message_set_vbucket_state_response(const void *cookie,
3012 uint32_t opaque,
3013 uint8_t status)
3014{
3015 protocol_binary_response_upr_set_vbucket_state packet;
3016 conn *c = (void*)cookie;
3017
3018 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
3019 /* We don't have room in the buffer */
3020 return ENGINE_E2BIG;
3021 }
3022
3023 memset(packet.bytes, 0, sizeof(packet.bytes));
3024 packet.message.header.response.magic = (uint8_t)PROTOCOL_BINARY_RES;
3025 packet.message.header.response.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_SET_VBUCKET_STATE;
3026 packet.message.header.response.extlen = 0;
3027 packet.message.header.response.status = htons(status);
3028 packet.message.header.response.bodylen = 0;
3029 packet.message.header.response.opaque = opaque;
3030
3031 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3032 add_iov(c, c->wcurr, sizeof(packet.bytes));
3033 c->wcurr += sizeof(packet.bytes);
3034 c->wbytes += sizeof(packet.bytes);
3035
3036 return ENGINE_SUCCESS;
3037}
3038
3039static ENGINE_ERROR_CODE upr_message_stream_end(const void *cookie,
3040 uint32_t opaque,
3041 uint16_t vbucket,
3042 uint32_t flags)
3043{
3044 protocol_binary_request_upr_stream_end packet;
3045 conn *c = (void*)cookie;
3046
3047 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
3048 /* We don't have room in the buffer */
3049 return ENGINE_E2BIG;
3050 }
3051
3052 memset(packet.bytes, 0, sizeof(packet.bytes));
3053 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3054 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_STREAM_END;
3055 packet.message.header.request.extlen = 4;
3056 packet.message.header.request.bodylen = htonl(4);
3057 packet.message.header.request.opaque = opaque;
3058 packet.message.header.request.vbucket = htons(vbucket);
3059 packet.message.body.flags = ntohl(flags);
3060
3061 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3062 add_iov(c, c->wcurr, sizeof(packet.bytes));
3063 c->wcurr += sizeof(packet.bytes);
3064 c->wbytes += sizeof(packet.bytes);
3065
3066 return ENGINE_SUCCESS;
3067}
3068
3069static ENGINE_ERROR_CODE upr_message_marker(const void *cookie,
3070 uint32_t opaque,
3071 uint16_t vbucket,
3072 uint64_t start_seqno,
3073 uint64_t end_seqno,
3074 uint32_t flags)
3075{
3076 protocol_binary_request_upr_snapshot_marker packet;
3077 conn *c = (void*)cookie;
3078
3079 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
3080 /* We don't have room in the buffer */
3081 return ENGINE_E2BIG;
3082 }
3083
3084 memset(packet.bytes, 0, sizeof(packet.bytes));
3085 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3086 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_SNAPSHOT_MARKER;
3087 packet.message.header.request.opaque = opaque;
3088 packet.message.header.request.vbucket = htons(vbucket);
3089 packet.message.header.request.extlen = 20;
3090 packet.message.header.request.bodylen = htonl(20);
3091 packet.message.body.start_seqno = htonll(start_seqno);
3092 packet.message.body.end_seqno = htonll(end_seqno);
3093 packet.message.body.flags = htonl(flags);
3094
3095 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3096 add_iov(c, c->wcurr, sizeof(packet.bytes));
3097 c->wcurr += sizeof(packet.bytes);
3098 c->wbytes += sizeof(packet.bytes);
3099
3100 return ENGINE_SUCCESS;
3101}
3102
3103static ENGINE_ERROR_CODE upr_message_mutation(const void* cookie,
3104 uint32_t opaque,
3105 item *it,
3106 uint16_t vbucket,
3107 uint64_t by_seqno,
3108 uint64_t rev_seqno,
3109 uint32_t lock_time,
3110 const void *meta,
3111 uint16_t nmeta,
3112 uint8_t nru)
3113{
3114 conn *c = (void*)cookie;
3115 item_info_holder info;
3116 protocol_binary_request_upr_mutation packet;
3117 int xx;
3118
3119 memset(&info, 0, sizeof(info));
3120 info.info.nvalue = IOV_MAX;
3121
3122 if (!settings.engine.v1->get_item_info(settings.engine.v0, c, it,
3123 (void*)&info)) {
3124 settings.engine.v1->release(settings.engine.v0, c, it);
3125 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
3126 "%d: Failed to get item info\n", c->sfd);
3127 return ENGINE_FAILED;
3128 }
3129
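/* Note: unlike the other producers above, there is no check here that the
 * packet header and meta will fit in the connection's write buffer
 * (c->wsize) before they are copied into c->wcurr. */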
3130 memset(packet.bytes, 0, sizeof(packet));
3131 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3132 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_MUTATION;
3133 packet.message.header.request.opaque = opaque;
3134 packet.message.header.request.vbucket = htons(vbucket);
3135 packet.message.header.request.cas = htonll(info.info.cas);
3136 packet.message.header.request.keylen = htons(info.info.nkey);
3137 packet.message.header.request.extlen = 31;
3138 packet.message.header.request.bodylen = ntohl(31 + info.info.nkey + info.info.nbytes + nmeta);
3139 packet.message.header.request.datatype = info.info.datatype;
3140 packet.message.body.by_seqno = htonll(by_seqno);
3141 packet.message.body.rev_seqno = htonll(rev_seqno);
3142 packet.message.body.lock_time = htonl(lock_time);
3143 packet.message.body.flags = info.info.flags;
3144 packet.message.body.expiration = htonl(info.info.exptime);
3145 packet.message.body.nmeta = htons(nmeta);
3146 packet.message.body.nru = nru;
3147
3148 c->ilist[c->ileft++] = it;
3149
3150 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3151 add_iov(c, c->wcurr, sizeof(packet.bytes));
3152 c->wcurr += sizeof(packet.bytes);
3153 c->wbytes += sizeof(packet.bytes);
3154 add_iov(c, info.info.key, info.info.nkey);
3155 for (xx = 0; xx < info.info.nvalue; ++xx) {
3156 add_iov(c, info.info.value[xx].iov_base, info.info.value[xx].iov_len);
3157 }
3158
3159 memcpy(c->wcurr, meta, nmeta);
3160 add_iov(c, c->wcurr, nmeta);
3161 c->wcurr += nmeta;
3162 c->wbytes += nmeta;
3163
3164 return ENGINE_SUCCESS;
3165}
3166
3167static ENGINE_ERROR_CODE upr_message_deletion(const void* cookie,
3168 uint32_t opaque,
3169 const void *key,
3170 uint16_t nkey,
3171 uint64_t cas,
3172 uint16_t vbucket,
3173 uint64_t by_seqno,
3174 uint64_t rev_seqno,
3175 const void *meta,
3176 uint16_t nmeta)
3177{
3178 conn *c = (void*)cookie;
3179 protocol_binary_request_upr_deletion packet;
3180 if (c->wbytes + sizeof(packet.bytes) + nkey + nmeta >= c->wsize) {
3181 return ENGINE_E2BIG;
3182 }
3183
3184 memset(packet.bytes, 0, sizeof(packet));
3185 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3186 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_DELETION;
3187 packet.message.header.request.opaque = opaque;
3188 packet.message.header.request.vbucket = htons(vbucket);
3189 packet.message.header.request.cas = htonll(cas);
3190 packet.message.header.request.keylen = htons(nkey);
3191 packet.message.header.request.extlen = 18;
3192 packet.message.header.request.bodylen = ntohl(18 + nkey + nmeta);
3193 packet.message.body.by_seqno = htonll(by_seqno);
3194 packet.message.body.rev_seqno = htonll(rev_seqno);
3195 packet.message.body.nmeta = htons(nmeta);
3196
3197 add_iov(c, c->wcurr, sizeof(packet.bytes) + nkey + nmeta);
3198 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3199 c->wcurr += sizeof(packet.bytes);
3200 c->wbytes += sizeof(packet.bytes);
3201 memcpy(c->wcurr, key, nkey);
3202 c->wcurr += nkey;
3203 c->wbytes += nkey;
3204 memcpy(c->wcurr, meta, nmeta);
3205 c->wcurr += nmeta;
3206 c->wbytes += nmeta;
3207
3208 return ENGINE_SUCCESS;
3209}
3210
3211static ENGINE_ERROR_CODE upr_message_expiration(const void* cookie,
3212 uint32_t opaque,
3213 const void *key,
3214 uint16_t nkey,
3215 uint64_t cas,
3216 uint16_t vbucket,
3217 uint64_t by_seqno,
3218 uint64_t rev_seqno,
3219 const void *meta,
3220 uint16_t nmeta)
3221{
3222 conn *c = (void*)cookie;
3223 protocol_binary_request_upr_deletion packet;
3224
3225 if (c->wbytes + sizeof(packet.bytes) + nkey + nmeta >= c->wsize) {
3226 return ENGINE_E2BIG;
3227 }
3228
3229 memset(packet.bytes, 0, sizeof(packet));
3230 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3231 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_EXPIRATION;
3232 packet.message.header.request.opaque = opaque;
3233 packet.message.header.request.vbucket = htons(vbucket);
3234 packet.message.header.request.cas = htonll(cas);
3235 packet.message.header.request.keylen = htons(nkey);
3236 packet.message.header.request.extlen = 18;
3237 packet.message.header.request.bodylen = ntohl(18 + nkey + nmeta);
3238 packet.message.body.by_seqno = htonll(by_seqno);
3239 packet.message.body.rev_seqno = htonll(rev_seqno);
3240 packet.message.body.nmeta = htons(nmeta);
3241
3242 add_iov(c, c->wcurr, sizeof(packet.bytes) + nkey + nmeta);
3243 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3244 c->wcurr += sizeof(packet.bytes);
3245 c->wbytes += sizeof(packet.bytes);
3246 memcpy(c->wcurr, key, nkey);
3247 c->wcurr += nkey;
3248 c->wbytes += nkey;
3249 memcpy(c->wcurr, meta, nmeta);
3250 c->wcurr += nmeta;
3251 c->wbytes += nmeta;
3252
3253 return ENGINE_SUCCESS;
3254}
3255
3256static ENGINE_ERROR_CODE upr_message_flush(const void* cookie,
3257 uint32_t opaque,
3258 uint16_t vbucket)
3259{
3260 protocol_binary_request_upr_flush packet;
3261 conn *c = (void*)cookie;
3262
3263 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
3264 /* We don't have room in the buffer */
3265 return ENGINE_E2BIG;
3266 }
3267
3268 memset(packet.bytes, 0, sizeof(packet.bytes));
3269 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3270 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_FLUSH;
3271 packet.message.header.request.opaque = opaque;
3272 packet.message.header.request.vbucket = htons(vbucket);
3273
3274 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3275 add_iov(c, c->wcurr, sizeof(packet.bytes));
3276 c->wcurr += sizeof(packet.bytes);
3277 c->wbytes += sizeof(packet.bytes);
3278
3279 return ENGINE_SUCCESS;
3280}
3281
3282static ENGINE_ERROR_CODE upr_message_set_vbucket_state(const void* cookie,
3283 uint32_t opaque,
3284 uint16_t vbucket,
3285 vbucket_state_t state)
3286{
3287 protocol_binary_request_upr_set_vbucket_state packet;
3288 conn *c = (void*)cookie;
3289
3290 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
3291 /* We don't have room in the buffer */
3292 return ENGINE_E2BIG;
3293 }
3294
3295 memset(packet.bytes, 0, sizeof(packet.bytes));
3296 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3297 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_SET_VBUCKET_STATE;
3298 packet.message.header.request.extlen = 1;
3299 packet.message.header.request.bodylen = htonl(1);
3300 packet.message.header.request.opaque = opaque;
3301 packet.message.header.request.vbucket = htons(vbucket);
3302
3303 switch (state) {
3304 case vbucket_state_active:
3305 packet.message.body.state = 0x01;
3306 break;
3307 case vbucket_state_pending:
3308 packet.message.body.state = 0x02;
3309 break;
3310 case vbucket_state_replica:
3311 packet.message.body.state = 0x03;
3312 break;
3313 case vbucket_state_dead:
3314 packet.message.body.state = 0x04;
3315 break;
3316 default:
3317 return ENGINE_EINVAL;
3318 }
3319
3320 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3321 add_iov(c, c->wcurr, sizeof(packet.bytes));
3322 c->wcurr += sizeof(packet.bytes);
3323 c->wbytes += sizeof(packet.bytes);
3324
3325 return ENGINE_SUCCESS;
3326}
3327
3328static ENGINE_ERROR_CODE upr_message_noop(const void* cookie,
3329 uint32_t opaque)
3330{
3331 protocol_binary_request_upr_noop packet;
3332 conn *c = (void*)cookie;
3333
3334 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
3335 /* We don't have room in the buffer */
3336 return ENGINE_E2BIG;
3337 }
3338
3339 memset(packet.bytes, 0, sizeof(packet.bytes));
3340 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3341 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_NOOP;
3342 packet.message.header.request.opaque = opaque;
3343
3344 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3345 add_iov(c, c->wcurr, sizeof(packet.bytes));
3346 c->wcurr += sizeof(packet.bytes);
3347 c->wbytes += sizeof(packet.bytes);
3348
3349 return ENGINE_SUCCESS;
3350}
3351
3352static ENGINE_ERROR_CODE upr_message_buffer_acknowledgement(const void* cookie,
3353 uint32_t opaque,
3354 uint16_t vbucket,
3355 uint32_t buffer_bytes)
3356{
3357 protocol_binary_request_upr_buffer_acknowledgement packet;
3358 conn *c = (void*)cookie;
3359
3360 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
3361 /* We don't have room in the buffer */
3362 return ENGINE_E2BIG;
3363 }
3364
3365 memset(packet.bytes, 0, sizeof(packet.bytes));
3366 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3367 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_BUFFER_ACKNOWLEDGEMENT;
3368 packet.message.header.request.opaque = opaque;
3369 packet.message.header.request.vbucket = htons(vbucket);
3370 packet.message.header.request.bodylen = ntohl(4);
3371 packet.message.body.buffer_bytes = ntohl(buffer_bytes);
3372
3373 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3374 add_iov(c, c->wcurr, sizeof(packet.bytes));
3375 c->wcurr += sizeof(packet.bytes);
3376 c->wbytes += sizeof(packet.bytes);
3377
3378 return ENGINE_SUCCESS;
3379}
3380
3381static ENGINE_ERROR_CODE upr_message_control(const void* cookie,
3382 uint32_t opaque,
3383 const void *key,
3384 uint16_t nkey,
3385 const void *value,
3386 uint32_t nvalue)
3387{
3388 protocol_binary_request_upr_control packet;
3389 conn *c = (void*)cookie;
3390
3391 if (c->wbytes + sizeof(packet.bytes) + nkey + nvalue >= c->wsize) {
3392 /* We don't have room in the buffer */
3393 return ENGINE_E2BIG;
3394 }
3395
3396 memset(packet.bytes, 0, sizeof(packet.bytes));
3397 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3398 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_CONTROL;
3399 packet.message.header.request.opaque = opaque;
3400 packet.message.header.request.keylen = ntohs(nkey);
3401 packet.message.header.request.bodylen = ntohl(nvalue + nkey);
3402
3403 add_iov(c, c->wcurr, sizeof(packet.bytes) + nkey + nvalue);
3404 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3405 c->wcurr += sizeof(packet.bytes);
3406 c->wbytes += sizeof(packet.bytes);
3407
3408 memcpy(c->wcurr, key, nkey);
3409 c->wcurr += nkey;
3410 c->wbytes += nkey;
3411
3412 memcpy(c->wcurr, value, nvalue);
3413 c->wcurr += nvalue;
3414 c->wbytes += nvalue;
3415
3416 return ENGINE_SUCCESS;
3417}
3418
3419static void ship_upr_log(conn *c) {
3420 static struct upr_message_producers producers = {
3421 upr_message_get_failover_log,
3422 upr_message_stream_req,
3423 upr_message_add_stream_response,
3424 upr_message_set_vbucket_state_response,
3425 upr_message_stream_end,
3426 upr_message_marker,
3427 upr_message_mutation,
3428 upr_message_deletion,
3429 upr_message_expiration,
3430 upr_message_flush,
3431 upr_message_set_vbucket_state,
3432 upr_message_noop,
3433 upr_message_buffer_acknowledgement,
3434 upr_message_control
3435 };
3436 ENGINE_ERROR_CODE ret;
3437
3438 c->msgcurr = 0;
3439 c->msgused = 0;
3440 c->iovused = 0;
3441 if (add_msghdr(c) != 0) {
3442 if (settings.verbose) {
3443 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
3444 "%d: Failed to create output headers. Shutting down UPR connection\n", c->sfd);
3445 }
3446 conn_set_state(c, conn_closing);
3447 return ;
3448 }
3449
3450 c->wbytes = 0;
3451 c->wcurr = c->wbuf;
3452 c->icurr = c->ilist;
3453
3454 c->ewouldblock = false;
3455 ret = settings.engine.v1->upr.step(settings.engine.v0, c, &producers);
3456 if (ret == ENGINE_SUCCESS) {
3457 /* the engine doesn't have more data to send at the moment */
3458 c->ewouldblock = true;
3459 } else if (ret == ENGINE_WANT_MORE) {
3460 /* The engine has more data it wants to send */
3461 ret = ENGINE_SUCCESS;
3462 }
3463
3464 if (ret == ENGINE_SUCCESS) {
3465 conn_set_state(c, conn_mwrite);
3466 c->write_and_go = conn_ship_log;
3467 } else {
3468 conn_set_state(c, conn_closing);
3469 }
3470}
3471
3472/******************************************************************************
3473 * TAP packet executors *
3474 ******************************************************************************/
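/*
 * Each TAP executor below bumps the matching counter in tap_stats under the
 * tap_stats mutex and then either forwards the packet to
 * process_bin_tap_packet() with the corresponding TAP event type or, for
 * connect, switches the connection into the tap stream setup state.
 */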
3475static void tap_connect_executor(conn *c, void *packet)
3476{
3477 cb_mutex_enter(&tap_stats.mutex);
3478 tap_stats.received.connect++;
3479 cb_mutex_exit(&tap_stats.mutex);
3480 conn_set_state(c, conn_setup_tap_stream);
3481}
3482
3483static void tap_mutation_executor(conn *c, void *packet)
3484{
3485 cb_mutex_enter(&tap_stats.mutex);
3486 tap_stats.received.mutation++;
3487 cb_mutex_exit(&tap_stats.mutex);
3488 process_bin_tap_packet(TAP_MUTATION, c);
3489}
3490
3491static void tap_delete_executor(conn *c, void *packet)
3492{
3493 cb_mutex_enter(&tap_stats.mutex);
3494 tap_stats.received.delete++;
3495 cb_mutex_exit(&tap_stats.mutex);
3496 process_bin_tap_packet(TAP_DELETION, c);
3497}
3498
3499static void tap_flush_executor(conn *c, void *packet)
3500{
3501 cb_mutex_enter(&tap_stats.mutex);
3502 tap_stats.received.flush++;
3503 cb_mutex_exit(&tap_stats.mutex);
3504 process_bin_tap_packet(TAP_FLUSH, c);
3505}
3506
3507static void tap_opaque_executor(conn *c, void *packet)
3508{
3509 cb_mutex_enter(&tap_stats.mutex);
3510 tap_stats.received.opaque++;
3511 cb_mutex_exit(&tap_stats.mutex);
3512 process_bin_tap_packet(TAP_OPAQUE, c);
3513}
3514
3515static void tap_vbucket_set_executor(conn *c, void *packet)
3516{
3517 cb_mutex_enter(&tap_stats.mutex);
3518 tap_stats.received.vbucket_set++;
3519 cb_mutex_exit(&tap_stats.mutex);
3520 process_bin_tap_packet(TAP_VBUCKET_SET, c);
3521}
3522
3523static void tap_checkpoint_start_executor(conn *c, void *packet)
3524{
3525 cb_mutex_enter(&tap_stats.mutex);
3526 tap_stats.received.checkpoint_start++;
3527 cb_mutex_exit(&tap_stats.mutex);
3528 process_bin_tap_packet(TAP_CHECKPOINT_START, c);
3529}
3530
3531static void tap_checkpoint_end_executor(conn *c, void *packet)
3532{
3533 cb_mutex_enter(&tap_stats.mutex);
3534 tap_stats.received.checkpoint_end++;
3535 cb_mutex_exit(&tap_stats.mutex);
3536 process_bin_tap_packet(TAP_CHECKPOINT_END, c);
3537}
3538
3539/*******************************************************************************
3540 * UPR packet validators *
3541 ******************************************************************************/
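/*
 * Each validator below checks the fixed-size framing of one UPR packet type
 * (magic, extlen, keylen, bodylen and datatype) and returns 0 for a
 * well-formed packet, -1 otherwise.
 */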
3542static int upr_open_validator(void *packet)
3543{
3544 protocol_binary_request_upr_open *req = packet;
3545 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3546 req->message.header.request.extlen != 8 ||
3547 req->message.header.request.keylen == 0 ||
3548 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3549 /* INCORRECT FORMAT */
3550 return -1;
3551 }
3552
3553 return 0;
3554}
3555
3556static int upr_add_stream_validator(void *packet)
3557{
3558 protocol_binary_request_upr_add_stream *req = packet;
3559 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3560 req->message.header.request.extlen != 4 ||
3561 req->message.header.request.keylen != 0 ||
3562 req->message.header.request.bodylen != htonl(4) ||
3563 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3564 /* INCORRECT FORMAT */
3565 return -1;
3566 }
3567
3568 return 0;
3569}
3570
3571static int upr_close_stream_validator(void *packet)
3572{
3573 protocol_binary_request_upr_close_stream *req = packet;
3574 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3575 req->message.header.request.extlen != 0 ||
3576 req->message.header.request.keylen != 0 ||
3577 req->message.header.request.bodylen != 0 ||
3578 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3579 /* INCORRECT FORMAT */
3580 return -1;
3581 }
3582
3583 return 0;
3584}
3585
3586static int upr_get_failover_log_validator(void *packet)
3587{
3588 protocol_binary_request_upr_get_failover_log *req = packet;
3589 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3590 req->message.header.request.extlen != 0 ||
3591 req->message.header.request.keylen != 0 ||
3592 req->message.header.request.bodylen != 0 ||
3593 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3594 return -1;
3595 }
3596
3597 return 0;
3598}
3599
3600static int upr_stream_req_validator(void *packet)
3601{
3602 protocol_binary_request_upr_stream_req *req = packet;
3603 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3604 req->message.header.request.extlen != 5*sizeof(uint64_t) + 2*sizeof(uint32_t) ||
3605 req->message.header.request.keylen != 0 ||
3606 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3607 /* INCORRECT FORMAT */
3608 return -1;
3609 }
3610 return 0;
3611}
3612
3613static int upr_stream_end_validator(void *packet)
3614{
3615 protocol_binary_request_upr_stream_end *req = packet;
3616 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3617 req->message.header.request.extlen != 4 ||
3618 req->message.header.request.keylen != 0 ||
3619 req->message.header.request.bodylen != htonl(4) ||
3620 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3621 return -1;
3622 }
3623
3624 return 0;
3625}
3626
3627static int upr_snapshot_marker_validator(void *packet)
3628{
3629 protocol_binary_request_upr_snapshot_marker *req = packet;
3630 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3631 req->message.header.request.extlen != 20 ||
3632 req->message.header.request.keylen != 0 ||
3633 req->message.header.request.bodylen != htonl(20) ||
3634 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3635 abort();
3636 return -1;
3637 }
3638
3639 return 0;
3640}
3641
3642static int upr_mutation_validator(void *packet)
3643{
3644 protocol_binary_request_upr_mutation *req = packet;
3645 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3646 req->message.header.request.extlen != (2*sizeof(uint64_t) + 3 * sizeof(uint32_t) + sizeof(uint16_t)) + sizeof(uint8_t) ||
3647 req->message.header.request.keylen == 0 ||
3648 req->message.header.request.bodylen == 0) {
3649 return -1;
3650 }
3651
3652 return 0;
3653}
3654
3655static int upr_deletion_validator(void *packet)
3656{
3657 protocol_binary_request_upr_deletion *req = packet;
3658 uint16_t klen = ntohs(req->message.header.request.keylen);
3659 uint32_t bodylen = ntohl(req->message.header.request.bodylen) - klen;
3660 bodylen -= req->message.header.request.extlen;
3661
3662 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3663 req->message.header.request.extlen != (2*sizeof(uint64_t) + sizeof(uint16_t)) ||
3664 req->message.header.request.keylen == 0 ||
3665 bodylen != 0) {
3666 return -1;
3667 }
3668
3669 return 0;
3670}
3671
3672static int upr_expiration_validator(void *packet)
3673{
3674 protocol_binary_request_upr_deletion *req = packet;
3675 uint16_t klen = ntohs(req->message.header.request.keylen);
3676 uint32_t bodylen = ntohl(req->message.header.request.bodylen) - klen;
3677 bodylen -= req->message.header.request.extlen;
3678 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3679 req->message.header.request.extlen != (2*sizeof(uint64_t) + sizeof(uint16_t)) ||
3680 req->message.header.request.keylen == 0 ||
3681 bodylen != 0) {
3682 return -1;
3683 }
3684
3685 return 0;
3686}
3687
3688static int upr_flush_validator(void *packet)
3689{
3690 protocol_binary_request_upr_flush *req = packet;
3691 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3692 req->message.header.request.extlen != 0 ||
3693 req->message.header.request.keylen != 0 ||
3694 req->message.header.request.bodylen != 0 ||
3695 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3696 return -1;
3697 }
3698
3699 return 0;
3700}
3701
3702static int upr_set_vbucket_state_validator(void *packet)
3703{
3704 protocol_binary_request_upr_set_vbucket_state *req = packet;
3705 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3706 req->message.header.request.extlen != 1 ||
3707 req->message.header.request.keylen != 0 ||
3708 ntohl(req->message.header.request.bodylen) != 1 ||
3709 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3710 return -1;
3711 }
3712
3713 if (req->message.body.state < 1 || req->message.body.state > 4) {
3714 return -1;
3715 }
3716
3717 return 0;
3718}
3719
3720static int upr_noop_validator(void *packet)
3721{
3722 protocol_binary_request_upr_noop *req = packet;
3723 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3724 req->message.header.request.extlen != 0 ||
3725 req->message.header.request.keylen != 0 ||
3726 req->message.header.request.bodylen != 0 ||
3727 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3728 return -1;
3729 }
3730
3731 return 0;
3732}
3733
3734static int upr_buffer_acknowledgement_validator(void *packet)
3735{
3736 protocol_binary_request_upr_buffer_acknowledgement *req = packet;
3737 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3738 req->message.header.request.extlen != 0 ||
3739 req->message.header.request.keylen != 0 ||
3740 req->message.header.request.bodylen != ntohl(4) ||
3741 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3742 return -1;
3743 }
3744
3745 return 0;
3746}
3747
3748static int upr_control_validator(void *packet)
3749{
3750 protocol_binary_request_upr_control *req = packet;
3751 uint16_t nkey = ntohs(req->message.header.request.keylen);
3752 uint32_t nval = ntohl(req->message.header.request.bodylen) - nkey;
3753
3754 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3755 req->message.header.request.extlen != 0 || nkey == 0 || nval == 0 ||
3756 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3757 return -1;
3758 }
3759
3760 return 0;
3761}
3762
3763static int isasl_refresh_validator(void *packet)
3764{
3765 protocol_binary_request_no_extras *req = packet;
3766 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3767 req->message.header.request.extlen != 0 ||
3768 req->message.header.request.keylen != 0 ||
3769 req->message.header.request.bodylen != 0 ||
3770 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3771 return -1;
3772 }
3773
3774 return 0;
3775}
3776
3777static int ssl_certs_refresh_validator(void *packet)
3778{
3779 protocol_binary_request_no_extras *req = packet;
3780 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3781 req->message.header.request.extlen != 0 ||
3782 req->message.header.request.keylen != 0 ||
3783 req->message.header.request.bodylen != 0 ||
3784 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3785 return -1;
3786 }
3787
3788 return 0;
3789}
3790
3791static int verbosity_validator(void *packet)
3792{
3793 protocol_binary_request_no_extras *req = packet;
3794 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3795 req->message.header.request.extlen != 4 ||
3796 req->message.header.request.keylen != 0 ||
3797 ntohl(req->message.header.request.bodylen) != 4 ||
3798 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3799 return -1;
3800 }
3801
3802 return 0;
3803}
3804
3805static int hello_validator(void *packet)
3806{
3807 protocol_binary_request_no_extras *req = packet;
3808 uint32_t len = ntohl(req->message.header.request.bodylen);
3809 len -= ntohs(req->message.header.request.keylen);
3810
3811 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3812 req->message.header.request.extlen != 0 || (len % 2) != 0 ||
3813 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3814 return -1;
3815 }
3816
3817 return 0;
3818}
3819
3820static int version_validator(void *packet)
3821{
3822 protocol_binary_request_no_extras *req = packet;
3823
3824 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3825 req->message.header.request.extlen != 0 ||
3826 req->message.header.request.keylen != 0 ||
3827 req->message.header.request.bodylen != 0 ||
3828 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3829 return -1;
3830 }
3831
3832 return 0;
3833}
3834
3835static int quit_validator(void *packet)
3836{
3837 protocol_binary_request_no_extras *req = packet;
3838
3839 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3840 req->message.header.request.extlen != 0 ||
3841 req->message.header.request.keylen != 0 ||
3842 req->message.header.request.bodylen != 0 ||
3843 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3844 return -1;
3845 }
3846
3847 return 0;
3848}
3849
3850static int sasl_list_mech_validator(void *packet)
3851{
3852 protocol_binary_request_no_extras *req = packet;
3853
3854 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3855 req->message.header.request.extlen != 0 ||
3856 req->message.header.request.keylen != 0 ||
3857 req->message.header.request.bodylen != 0 ||
3858 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3859 return -1;
3860 }
3861
3862 return 0;
3863}
3864
3865static int noop_validator(void *packet)
3866{
3867 protocol_binary_request_no_extras *req = packet;
3868
3869 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3870 req->message.header.request.extlen != 0 ||
3871 req->message.header.request.keylen != 0 ||
3872 req->message.header.request.bodylen != 0 ||
3873 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3874 return -1;
3875 }
3876
3877 return 0;
3878}
3879
3880static int flush_validator(void *packet)
3881{
3882 protocol_binary_request_no_extras *req = packet;
3883 uint8_t extlen = req->message.header.request.extlen;
3884 uint32_t bodylen = ntohl(req->message.header.request.bodylen);
3885
3886 if (extlen != 0 && extlen != 4) {
3887 return -1;
3888 }
3889
3890 if (bodylen != extlen) {
3891 return -1;
3892 }
3893
3894 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3895 req->message.header.request.keylen != 0 ||
3896 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3897 return -1;
3898 }
3899
3900 return 0;
3901}
3902
3903static int get_validator(void *packet)
3904{
3905 protocol_binary_request_no_extras *req = packet;
3906 uint16_t klen = ntohs(req->message.header.request.keylen);
3907 uint32_t blen = ntohl(req->message.header.request.bodylen);
3908
3909 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3910 req->message.header.request.extlen != 0 ||
3911 klen == 0 || klen != blen ||
3912 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3913 return -1;
3914 }
3915
3916 return 0;
3917}
3918
3919static int delete_validator(void *packet)
3920{
3921 protocol_binary_request_no_extras *req = packet;
3922 uint16_t klen = ntohs(req->message.header.request.keylen);
3923 uint32_t blen = ntohl(req->message.header.request.bodylen);
3924
3925 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3926 req->message.header.request.extlen != 0 ||
3927 klen == 0 || klen != blen ||
3928 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3929 return -1;
3930 }
3931
3932 return 0;
3933}
3934
3935static int stat_validator(void *packet)
3936{
3937 protocol_binary_request_no_extras *req = packet;
3938 uint16_t klen = ntohs(req->message.header.request.keylen);
3939 uint32_t blen = ntohl(req->message.header.request.bodylen);
3940
3941 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3942 req->message.header.request.extlen != 0 || klen != blen ||
3943 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3944 return -1;
3945 }
3946
3947 return 0;
3948}
3949
3950static int arithmetic_validator(void *packet)
3951{
3952 protocol_binary_request_no_extras *req = packet;
3953 uint16_t klen = ntohs(req->message.header.request.keylen);
3954 uint32_t blen = ntohl(req->message.header.request.bodylen);
3955 uint8_t extlen = req->message.header.request.extlen;
3956
3957 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3958 extlen != 20 || klen == 0 || (klen + extlen) != blen ||
3959 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3960 return -1;
3961 }
3962
3963 return 0;
3964}
3965
3966static int get_cmd_timer_validator(void *packet)
3967{
3968 protocol_binary_request_no_extras *req = packet;
3969 uint16_t klen = ntohs(req->message.header.request.keylen);
3970 uint32_t blen = ntohl(req->message.header.request.bodylen);
3971 uint8_t extlen = req->message.header.request.extlen;
3972
3973 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3974 extlen != 1 || klen != 0 || (klen + extlen) != blen ||
3975 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3976 return -1;
3977 }
3978
3979 return 0;
3980}
3981
3982static int set_ctrl_token_validator(void *packet)
3983{
3984 protocol_binary_request_no_extras *req = packet;
3985 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3986 req->message.header.request.extlen != sizeof(uint64_t) ||
3987 req->message.header.request.keylen != 0 ||
3988 ntohl(req->message.header.request.bodylen) != sizeof(uint64_t) ||
3989 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3990 return -1;
3991 }
3992
3993 return 0;
3994}
3995
3996static int get_ctrl_token_validator(void *packet)
3997{
3998 protocol_binary_request_no_extras *req = packet;
3999 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
4000 req->message.header.request.extlen != 0 ||
4001 req->message.header.request.keylen != 0 ||
4002 req->message.header.request.bodylen != 0 ||
4003 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
4004 return -1;
4005 }
4006
4007 return 0;
4008}
4009
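Every packet validator in this file has the same shape: cast the raw packet to protocol_binary_request_no_extras, pull keylen and bodylen out of network byte order, and reject the request (return -1) unless the magic byte, extlen, key length, body length and datatype match what that command allows; 0 means the packet is well formed. The sketch below restates that shape for a hypothetical command carrying 4 bytes of extras, a key and a value. It is an editor's illustration, not a function from memcached.c.

/* Editor's sketch (not in the original source): the shared validator shape,
 * shown for a hypothetical command with 4 bytes of extras, a key and a value,
 * so bodylen only has to be at least extlen + keylen. */
static int extras_key_value_validator_sketch(void *packet)
{
    protocol_binary_request_no_extras *req = packet;
    uint16_t klen = ntohs(req->message.header.request.keylen);
    uint32_t blen = ntohl(req->message.header.request.bodylen);
    uint8_t extlen = req->message.header.request.extlen;

    if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
        extlen != 4 || klen == 0 || blen < (uint32_t)(klen + extlen) ||
        req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
        return -1;   /* malformed for this (hypothetical) command */
    }

    return 0;
}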
4010/*******************************************************************************
4011 * UPR packet executors *
4012 ******************************************************************************/
4013static void upr_open_executor(conn *c, void *packet)
4014{
4015 protocol_binary_request_upr_open *req = (void*)packet;
4016
4017 if (settings.engine.v1->upr.open == NULL((void*)0)) {
4018 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4019 } else {
4020 ENGINE_ERROR_CODE ret = c->aiostat;
4021 c->aiostat = ENGINE_SUCCESS;
4022 c->ewouldblock = false0;
4023 c->supports_datatype = true1;
4024
4025 if (ret == ENGINE_SUCCESS) {
4026 ret = settings.engine.v1->upr.open(settings.engine.v0, c,
4027 req->message.header.request.opaque,
4028 ntohl(req->message.body.seqno),
4029 ntohl(req->message.body.flags),
4030 (void*)(req->bytes + sizeof(req->bytes)),
4031 ntohs(req->message.header.request.keylen));
4032 }
4033
4034 switch (ret) {
4035 case ENGINE_SUCCESS:
4036 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4037 break;
4038
4039 case ENGINE_DISCONNECT:
4040 conn_set_state(c, conn_closing);
4041 break;
4042
4043 case ENGINE_EWOULDBLOCK:
4044 c->ewouldblock = true1;
4045 break;
4046
4047 default:
4048 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4049 }
4050 }
4051}
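upr_open_executor is the first of a long run of executors that all follow one skeleton: if the engine does not implement the hook, answer NOT_SUPPORTED; otherwise consume any pending asynchronous status from c->aiostat, clear ewouldblock, call the hook with arguments converted to host byte order, and finally map the ENGINE_ERROR_CODE onto a binary response or a connection-state change. The sketch below distills that skeleton using upr.noop (a hook that only needs the opaque) as the stand-in; it is an editor's illustration, not code from memcached.c, and the other executors differ only in the decoded arguments they pass and in how success is reported.

/* Editor's sketch (not in the original source): the skeleton shared by the
 * UPR executors, shown with the opaque-only upr.noop hook as a stand-in. */
static void upr_executor_skeleton_sketch(conn *c, void *packet)
{
    if (settings.engine.v1->upr.noop == NULL) {
        write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
    } else {
        ENGINE_ERROR_CODE ret = c->aiostat;   /* result of an earlier blocked attempt, if any */
        c->aiostat = ENGINE_SUCCESS;
        c->ewouldblock = false;

        if (ret == ENGINE_SUCCESS) {
            ret = settings.engine.v1->upr.noop(settings.engine.v0, c,
                                               c->binary_header.request.opaque);
        }

        switch (ret) {
        case ENGINE_SUCCESS:
            write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
            break;
        case ENGINE_DISCONNECT:
            conn_set_state(c, conn_closing);
            break;
        case ENGINE_EWOULDBLOCK:
            c->ewouldblock = true;            /* the engine will tell us when to retry */
            break;
        default:
            write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
        }
    }
}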
4052
4053static void upr_add_stream_executor(conn *c, void *packet)
4054{
4055 protocol_binary_request_upr_add_stream *req = (void*)packet;
4056
4057 if (settings.engine.v1->upr.add_stream == NULL((void*)0)) {
4058 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4059 } else {
4060 ENGINE_ERROR_CODE ret = c->aiostat;
4061 c->aiostat = ENGINE_SUCCESS;
4062 c->ewouldblock = false0;
4063
4064 if (ret == ENGINE_SUCCESS) {
4065 ret = settings.engine.v1->upr.add_stream(settings.engine.v0, c,
4066 req->message.header.request.opaque,
4067 ntohs(req->message.header.request.vbucket),
4068 ntohl(req->message.body.flags));
4069 }
4070
4071 switch (ret) {
4072 case ENGINE_SUCCESS:
4073 c->upr = 1;
4074 conn_set_state(c, conn_ship_log);
4075 break;
4076 case ENGINE_DISCONNECT:
4077 conn_set_state(c, conn_closing);
4078 break;
4079
4080 case ENGINE_EWOULDBLOCK:
4081 c->ewouldblock = true1;
4082 break;
4083
4084 default:
4085 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4086 }
4087 }
4088}
4089
4090static void upr_close_stream_executor(conn *c, void *packet)
4091{
4092 protocol_binary_request_upr_close_stream *req = (void*)packet;
4093
4094 if (settings.engine.v1->upr.close_stream == NULL((void*)0)) {
4095 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4096 } else {
4097 ENGINE_ERROR_CODE ret = c->aiostat;
4098 c->aiostat = ENGINE_SUCCESS;
4099 c->ewouldblock = false0;
4100
4101 if (ret == ENGINE_SUCCESS) {
4102 uint16_t vbucket = ntohs(req->message.header.request.vbucket);
4103 uint32_t opaque = ntohl(req->message.header.request.opaque);
4104 ret = settings.engine.v1->upr.close_stream(settings.engine.v0, c,
4105 opaque, vbucket);
4106 }
4107
4108 switch (ret) {
4109 case ENGINE_SUCCESS:
4110 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4111 break;
4112
4113 case ENGINE_DISCONNECT:
4114 conn_set_state(c, conn_closing);
4115 break;
4116
4117 case ENGINE_EWOULDBLOCK:
4118 c->ewouldblock = true1;
4119 break;
4120
4121 default:
4122 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4123 }
4124 }
4125}
4126
4127/** Callback from the engine adding the response */
4128static ENGINE_ERROR_CODE add_failover_log(vbucket_failover_t*entries,
4129 size_t nentries,
4130 const void *cookie)
4131{
4132 ENGINE_ERROR_CODE ret;
4133 size_t ii;
4134 for (ii = 0; ii < nentries; ++ii) {
4135 entries[ii].uuid = htonll(entries[ii].uuid);
4136 entries[ii].seqno = htonll(entries[ii].seqno);
4137 }
4138
4139 if (binary_response_handler(NULL((void*)0), 0, NULL((void*)0), 0, entries,
4140 (uint32_t)(nentries * sizeof(vbucket_failover_t)), 0,
4141 PROTOCOL_BINARY_RESPONSE_SUCCESS, 0,
4142 (void*)cookie)) {
4143 ret = ENGINE_SUCCESS;
4144 } else {
4145 ret = ENGINE_ENOMEM;
4146 }
4147
4148 for (ii = 0; ii < nentries; ++ii) {
4149 entries[ii].uuid = htonll(entries[ii].uuid);
4150 entries[ii].seqno = htonll(entries[ii].seqno);
4151 }
4152
4153 return ret;
4154}
4155
4156static void upr_get_failover_log_executor(conn *c, void *packet) {
4157 protocol_binary_request_upr_get_failover_log *req = (void*)packet;
4158
4159 if (settings.engine.v1->upr.get_failover_log == NULL((void*)0)) {
4160 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4161 } else {
4162 ENGINE_ERROR_CODE ret = c->aiostat;
4163 c->aiostat = ENGINE_SUCCESS;
4164 c->ewouldblock = false0;
4165
4166 if (ret == ENGINE_SUCCESS) {
4167 ret = settings.engine.v1->upr.get_failover_log(settings.engine.v0, c,
4168 req->message.header.request.opaque,
4169 ntohs(req->message.header.request.vbucket),
4170 add_failover_log);
4171 }
4172
4173 switch (ret) {
4174 case ENGINE_SUCCESS:
4175 if (c->dynamic_buffer.buffer != NULL((void*)0)) {
4176 write_and_free(c, c->dynamic_buffer.buffer,
4177 c->dynamic_buffer.offset);
4178 c->dynamic_buffer.buffer = NULL((void*)0);
4179 } else {
4180 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4181 }
4182 break;
4183
4184 case ENGINE_DISCONNECT:
4185 conn_set_state(c, conn_closing);
4186 break;
4187
4188 case ENGINE_EWOULDBLOCK:
4189 c->ewouldblock = true1;
4190 break;
4191
4192 default:
4193 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4194 }
4195 }
4196}
4197
4198static void upr_stream_req_executor(conn *c, void *packet)
4199{
4200 protocol_binary_request_upr_stream_req *req = (void*)packet;
4201
4202 if (settings.engine.v1->upr.stream_req == NULL((void*)0)) {
4203 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4204 } else {
4205 uint32_t flags = ntohl(req->message.body.flags);
4206 uint64_t start_seqno = ntohll(req->message.body.start_seqno);
4207 uint64_t end_seqno = ntohll(req->message.body.end_seqno);
4208 uint64_t vbucket_uuid = ntohll(req->message.body.vbucket_uuid);
4209 uint64_t snap_start_seqno = ntohll(req->message.body.snap_start_seqno);
4210 uint64_t snap_end_seqno = ntohll(req->message.body.snap_end_seqno);
4211 uint64_t rollback_seqno;
4212
4213 ENGINE_ERROR_CODE ret = c->aiostat;
4214 c->aiostat = ENGINE_SUCCESS;
4215 c->ewouldblock = false0;
4216
4217 if (ret == ENGINE_SUCCESS) {
4218 ret = settings.engine.v1->upr.stream_req(settings.engine.v0, c,
4219 flags,
4220 c->binary_header.request.opaque,
4221 c->binary_header.request.vbucket,
4222 start_seqno, end_seqno,
4223 vbucket_uuid,
4224 snap_start_seqno,
4225 snap_end_seqno,
4226 &rollback_seqno,
4227 add_failover_log);
4228 }
4229
4230 switch (ret) {
4231 case ENGINE_SUCCESS:
4232 c->upr = 1;
4233 if (c->dynamic_buffer.buffer != NULL((void*)0)) {
4234 write_and_free(c, c->dynamic_buffer.buffer,
4235 c->dynamic_buffer.offset);
4236 c->dynamic_buffer.buffer = NULL((void*)0);
4237 } else {
4238 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4239 }
4240 break;
4241
4242 case ENGINE_ROLLBACK:
4243 rollback_seqno = htonll(rollback_seqno);
4244 if (binary_response_handler(NULL((void*)0), 0, NULL((void*)0), 0, &rollback_seqno,
4245 sizeof(rollback_seqno), 0,
4246 PROTOCOL_BINARY_RESPONSE_ROLLBACK, 0,
4247 c)) {
4248 write_and_free(c, c->dynamic_buffer.buffer,
4249 c->dynamic_buffer.offset);
4250 c->dynamic_buffer.buffer = NULL((void*)0);
4251 } else {
4252 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, 0);
4253 }
4254 break;
4255
4256 case ENGINE_DISCONNECT:
4257 conn_set_state(c, conn_closing);
4258 break;
4259
4260 case ENGINE_EWOULDBLOCK:
4261 c->ewouldblock = true1;
4262 break;
4263
4264 default:
4265 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4266 }
4267 }
4268}
4269
4270static void upr_stream_end_executor(conn *c, void *packet)
4271{
4272 protocol_binary_request_upr_stream_end *req = (void*)packet;
4273
4274 if (settings.engine.v1->upr.stream_end == NULL((void*)0)) {
4275 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4276 } else {
4277 ENGINE_ERROR_CODE ret = c->aiostat;
4278 c->aiostat = ENGINE_SUCCESS;
4279 c->ewouldblock = false0;
4280
4281 if (ret == ENGINE_SUCCESS) {
4282 ret = settings.engine.v1->upr.stream_end(settings.engine.v0, c,
4283 req->message.header.request.opaque,
4284 ntohs(req->message.header.request.vbucket),
4285 ntohl(req->message.body.flags));
4286 }
4287
4288 switch (ret) {
4289 case ENGINE_SUCCESS:
4290 if (c->dynamic_buffer.buffer != NULL((void*)0)) {
4291 write_and_free(c, c->dynamic_buffer.buffer,
4292 c->dynamic_buffer.offset);
4293 c->dynamic_buffer.buffer = NULL((void*)0);
4294 } else {
4295 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4296 }
4297 break;
4298
4299 case ENGINE_DISCONNECT:
4300 conn_set_state(c, conn_closing);
4301 break;
4302
4303 case ENGINE_EWOULDBLOCK:
4304 c->ewouldblock = true1;
4305 break;
4306
4307 default:
4308 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4309 }
4310 }
4311}
4312
4313static void upr_snapshot_marker_executor(conn *c, void *packet)
4314{
4315 protocol_binary_request_upr_snapshot_marker *req = (void*)packet;
4316
4317 if (settings.engine.v1->upr.snapshot_marker == NULL((void*)0)) {
4318 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4319 } else {
4320 uint16_t vbucket = ntohs(req->message.header.request.vbucket);
4321 uint32_t opaque = req->message.header.request.opaque;
4322 uint32_t flags = ntohl(req->message.body.flags);
4323 uint64_t start_seqno = ntohll(req->message.body.start_seqno);
4324 uint64_t end_seqno = ntohll(req->message.body.end_seqno);
4325
4326 ENGINE_ERROR_CODE ret = c->aiostat;
4327 c->aiostat = ENGINE_SUCCESS;
4328 c->ewouldblock = false0;
4329
4330 if (ret == ENGINE_SUCCESS) {
4331 ret = settings.engine.v1->upr.snapshot_marker(settings.engine.v0, c,
4332 opaque, vbucket,
4333 start_seqno,
4334 end_seqno, flags);
4335 }
4336
4337 switch (ret) {
4338 case ENGINE_SUCCESS:
4339 if (c->dynamic_buffer.buffer != NULL((void*)0)) {
4340 write_and_free(c, c->dynamic_buffer.buffer,
4341 c->dynamic_buffer.offset);
4342 c->dynamic_buffer.buffer = NULL((void*)0);
4343 } else {
4344 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4345 }
4346 break;
4347
4348 case ENGINE_DISCONNECT:
4349 conn_set_state(c, conn_closing);
4350 break;
4351
4352 case ENGINE_EWOULDBLOCK:
4353 c->ewouldblock = true1;
4354 break;
4355
4356 default:
4357 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4358 }
4359 }
4360}
4361
4362static void upr_mutation_executor(conn *c, void *packet)
4363{
4364 protocol_binary_request_upr_mutation *req = (void*)packet;
4365
4366 if (settings.engine.v1->upr.mutation == NULL((void*)0)) {
4367 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4368 } else {
4369 ENGINE_ERROR_CODE ret = c->aiostat;
4370 c->aiostat = ENGINE_SUCCESS;
4371 c->ewouldblock = false0;
4372
4373 if (ret == ENGINE_SUCCESS) {
4374 char *key = (char*)packet + sizeof(req->bytes);
4375 uint16_t nkey = ntohs(req->message.header.request.keylen);
4376 void *value = key + nkey;
4377 uint64_t cas = ntohll(req->message.header.request.cas);
4378 uint16_t vbucket = ntohs(req->message.header.request.vbucket);
4379 uint32_t flags = ntohl(req->message.body.flags);
4380 uint8_t datatype = req->message.header.request.datatype;
4381 uint64_t by_seqno = ntohll(req->message.body.by_seqno);
4382 uint64_t rev_seqno = ntohll(req->message.body.rev_seqno);
4383 uint32_t expiration = ntohl(req->message.body.expiration);
4384 uint32_t lock_time = ntohl(req->message.body.lock_time);
4385 uint16_t nmeta = ntohs(req->message.body.nmeta);
4386 uint32_t nvalue = ntohl(req->message.header.request.bodylen) - nkey
4387 - req->message.header.request.extlen - nmeta;
4388
4389 ret = settings.engine.v1->upr.mutation(settings.engine.v0, c,
4390 req->message.header.request.opaque,
4391 key, nkey, value, nvalue, cas, vbucket,
4392 flags, datatype, by_seqno, rev_seqno,
4393 expiration, lock_time,
4394 (char*)value + nvalue, nmeta,
4395 req->message.body.nru);
4396 }
4397
4398 switch (ret) {
4399 case ENGINE_SUCCESS:
4400 if (c->dynamic_buffer.buffer != NULL((void*)0)) {
4401 write_and_free(c, c->dynamic_buffer.buffer,
4402 c->dynamic_buffer.offset);
4403 c->dynamic_buffer.buffer = NULL((void*)0);
4404 } else {
4405 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4406 }
4407 break;
4408
4409 case ENGINE_DISCONNECT:
4410 conn_set_state(c, conn_closing);
4411 break;
4412
4413 case ENGINE_EWOULDBLOCK:
4414 c->ewouldblock = true1;
4415 break;
4416
4417 default:
4418 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4419 }
4420 }
4421}
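The mutation executor recovers every section of the request body purely from the lengths in the header and extras: the key starts right after the fixed header plus extras (sizeof(req->bytes)), the value follows the key, and nmeta bytes of engine-private metadata trail the value, so the value length is bodylen - nkey - extlen - nmeta. The helper below restates that arithmetic in one place; it is an editor's sketch under those assumptions, not a function from memcached.c.

/* Editor's sketch (not in the original source): splitting a UPR_MUTATION body
 * laid out as [extras][key][value][meta]. */
static void split_upr_mutation_sketch(const protocol_binary_request_upr_mutation *req,
                                      const char **key, uint16_t *nkey,
                                      const char **value, uint32_t *nvalue,
                                      const char **meta, uint16_t *nmeta)
{
    uint32_t bodylen = ntohl(req->message.header.request.bodylen);
    uint8_t extlen = req->message.header.request.extlen;

    *nkey = ntohs(req->message.header.request.keylen);
    *nmeta = ntohs(req->message.body.nmeta);
    *key = (const char*)req->bytes + sizeof(req->bytes); /* past fixed header + extras */
    *value = *key + *nkey;                                /* value follows the key      */
    *nvalue = bodylen - *nkey - extlen - *nmeta;          /* what remains is the value  */
    *meta = *value + *nvalue;                             /* engine meta trails the value */
}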
4422
4423static void upr_deletion_executor(conn *c, void *packet)
4424{
4425 protocol_binary_request_upr_deletion *req = (void*)packet;
4426
4427 if (settings.engine.v1->upr.deletion == NULL((void*)0)) {
4428 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4429 } else {
4430 ENGINE_ERROR_CODE ret = c->aiostat;
4431 c->aiostat = ENGINE_SUCCESS;
4432 c->ewouldblock = false0;
4433
4434 if (ret == ENGINE_SUCCESS) {
4435 char *key = (char*)packet + sizeof(req->bytes);
4436 uint16_t nkey = ntohs(req->message.header.request.keylen);
4437 uint64_t cas = ntohll(req->message.header.request.cas);
4438 uint16_t vbucket = ntohs(req->message.header.request.vbucket);
4439 uint64_t by_seqno = ntohll(req->message.body.by_seqno);
4440 uint64_t rev_seqno = ntohll(req->message.body.rev_seqno);
4441 uint16_t nmeta = ntohs(req->message.body.nmeta);
4442
4443 ret = settings.engine.v1->upr.deletion(settings.engine.v0, c,
4444 req->message.header.request.opaque,
4445 key, nkey, cas, vbucket,
4446 by_seqno, rev_seqno, key + nkey, nmeta);
4447 }
4448
4449 switch (ret) {
4450 case ENGINE_SUCCESS:
4451 if (c->dynamic_buffer.buffer != NULL((void*)0)) {
4452 write_and_free(c, c->dynamic_buffer.buffer,
4453 c->dynamic_buffer.offset);
4454 c->dynamic_buffer.buffer = NULL((void*)0);
4455 } else {
4456 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4457 }
4458 break;
4459
4460 case ENGINE_DISCONNECT:
4461 conn_set_state(c, conn_closing);
4462 break;
4463
4464 case ENGINE_EWOULDBLOCK:
4465 c->ewouldblock = true1;
4466 break;
4467
4468 default:
4469 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4470 }
4471 }
4472}
4473
4474static void upr_expiration_executor(conn *c, void *packet)
4475{
4476 protocol_binary_request_upr_expiration *req = (void*)packet;
4477
4478 if (settings.engine.v1->upr.expiration == NULL((void*)0)) {
4479 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4480 } else {
4481 ENGINE_ERROR_CODE ret = c->aiostat;
4482 c->aiostat = ENGINE_SUCCESS;
4483 c->ewouldblock = false0;
4484
4485 if (ret == ENGINE_SUCCESS) {
4486 char *key = (char*)packet + sizeof(req->bytes);
4487 uint16_t nkey = ntohs(req->message.header.request.keylen);
4488 uint64_t cas = ntohll(req->message.header.request.cas);
4489 uint16_t vbucket = ntohs(req->message.header.request.vbucket);
4490 uint64_t by_seqno = ntohll(req->message.body.by_seqno);
4491 uint64_t rev_seqno = ntohll(req->message.body.rev_seqno);
4492 uint16_t nmeta = ntohs(req->message.body.nmeta);
4493
4494 ret = settings.engine.v1->upr.expiration(settings.engine.v0, c,
4495 req->message.header.request.opaque,
4496 key, nkey, cas, vbucket,
4497 by_seqno, rev_seqno, key + nkey, nmeta);
4498 }
4499
4500 switch (ret) {
4501 case ENGINE_SUCCESS:
4502 if (c->dynamic_buffer.buffer != NULL((void*)0)) {
4503 write_and_free(c, c->dynamic_buffer.buffer,
4504 c->dynamic_buffer.offset);
4505 c->dynamic_buffer.buffer = NULL((void*)0);
4506 } else {
4507 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4508 }
4509 break;
4510
4511 case ENGINE_DISCONNECT:
4512 conn_set_state(c, conn_closing);
4513 break;
4514
4515 case ENGINE_EWOULDBLOCK:
4516 c->ewouldblock = true1;
4517 break;
4518
4519 default:
4520 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4521 }
4522 }
4523}
4524
4525static void upr_flush_executor(conn *c, void *packet)
4526{
4527 protocol_binary_request_upr_flush *req = (void*)packet;
4528
4529 if (settings.engine.v1->upr.flush == NULL((void*)0)) {
4530 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4531 } else {
4532 ENGINE_ERROR_CODE ret = c->aiostat;
4533 c->aiostat = ENGINE_SUCCESS;
4534 c->ewouldblock = false0;
4535
4536 if (ret == ENGINE_SUCCESS) {
4537 ret = settings.engine.v1->upr.flush(settings.engine.v0, c,
4538 req->message.header.request.opaque,
4539 ntohs(req->message.header.request.vbucket));
4540 }
4541
4542 switch (ret) {
4543 case ENGINE_SUCCESS:
4544 if (c->dynamic_buffer.buffer != NULL((void*)0)) {
4545 write_and_free(c, c->dynamic_buffer.buffer,
4546 c->dynamic_buffer.offset);
4547 c->dynamic_buffer.buffer = NULL((void*)0);
4548 } else {
4549 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4550 }
4551 break;
4552
4553 case ENGINE_DISCONNECT:
4554 conn_set_state(c, conn_closing);
4555 break;
4556
4557 case ENGINE_EWOULDBLOCK:
4558 c->ewouldblock = true1;
4559 break;
4560
4561 default:
4562 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4563 }
4564 }
4565}
4566
4567static void upr_set_vbucket_state_executor(conn *c, void *packet)
4568{
4569 protocol_binary_request_upr_set_vbucket_state *req = (void*)packet;
4570
4571 if (settings.engine.v1->upr.set_vbucket_state== NULL((void*)0)) {
4572 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4573 } else {
4574 ENGINE_ERROR_CODE ret = c->aiostat;
4575 c->aiostat = ENGINE_SUCCESS;
4576 c->ewouldblock = false0;
4577
4578 if (ret == ENGINE_SUCCESS) {
4579 vbucket_state_t state = (vbucket_state_t)req->message.body.state;
4580 ret = settings.engine.v1->upr.set_vbucket_state(settings.engine.v0, c,
4581 c->binary_header.request.opaque,
4582 c->binary_header.request.vbucket,
4583 state);
4584 }
4585
4586 switch (ret) {
4587 case ENGINE_SUCCESS:
4588 conn_set_state(c, conn_ship_log);
4589 break;
4590 case ENGINE_DISCONNECT:
4591 conn_set_state(c, conn_closing);
4592 break;
4593
4594 case ENGINE_EWOULDBLOCK:
4595 c->ewouldblock = true1;
4596 break;
4597
4598 default:
4599 conn_set_state(c, conn_closing);
4600 break;
4601 }
4602 }
4603}
4604
4605static void upr_noop_executor(conn *c, void *packet)
4606{
4607 if (settings.engine.v1->upr.noop == NULL((void*)0)) {
4608 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4609 } else {
4610 ENGINE_ERROR_CODE ret = c->aiostat;
4611 c->aiostat = ENGINE_SUCCESS;
4612 c->ewouldblock = false0;
4613
4614 if (ret == ENGINE_SUCCESS) {
4615 ret = settings.engine.v1->upr.noop(settings.engine.v0, c,
4616 c->binary_header.request.opaque);
4617 }
4618
4619 switch (ret) {
4620 case ENGINE_SUCCESS:
4621 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4622 break;
4623
4624 case ENGINE_DISCONNECT:
4625 conn_set_state(c, conn_closing);
4626 break;
4627
4628 case ENGINE_EWOULDBLOCK:
4629 c->ewouldblock = true1;
4630 break;
4631
4632 default:
4633 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4634 }
4635 }
4636}
4637
4638static void upr_buffer_acknowledgement_executor(conn *c, void *packet)
4639{
4640 protocol_binary_request_upr_buffer_acknowledgement *req = (void*)packet;
4641
4642 if (settings.engine.v1->upr.buffer_acknowledgement == NULL((void*)0)) {
4643 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4644 } else {
4645 ENGINE_ERROR_CODE ret = c->aiostat;
4646 c->aiostat = ENGINE_SUCCESS;
4647 c->ewouldblock = false0;
4648
4649 if (ret == ENGINE_SUCCESS) {
4650 uint32_t bbytes;
4651 memcpy(&bbytes, &req->message.body.buffer_bytes, 4);
4652 ret = settings.engine.v1->upr.buffer_acknowledgement(settings.engine.v0, c,
4653 c->binary_header.request.opaque,
4654 c->binary_header.request.vbucket,
4655 ntohl(bbytes));
4656 }
4657
4658 switch (ret) {
4659 case ENGINE_SUCCESS:
4660 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4661 break;
4662
4663 case ENGINE_DISCONNECT:
4664 conn_set_state(c, conn_closing);
4665 break;
4666
4667 case ENGINE_EWOULDBLOCK:
4668 c->ewouldblock = true1;
4669 break;
4670
4671 default:
4672 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4673 }
4674 }
4675}
4676
4677static void upr_control_executor(conn *c, void *packet)
4678{
4679 if (settings.engine.v1->upr.control == NULL((void*)0)) {
4680 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4681 } else {
4682 ENGINE_ERROR_CODE ret = c->aiostat;
4683 c->aiostat = ENGINE_SUCCESS;
4684 c->ewouldblock = false0;
4685
4686 if (ret == ENGINE_SUCCESS) {
4687 protocol_binary_request_upr_control *req = (void*)packet;
4688 const uint8_t *key = req->bytes + sizeof(req->bytes);
4689 uint16_t nkey = ntohs(req->message.header.request.keylen);
4690 const uint8_t *value = key + nkey;
4691 uint32_t nvalue = ntohl(req->message.header.request.bodylen) - nkey;
4692 ret = settings.engine.v1->upr.control(settings.engine.v0, c,
4693 c->binary_header.request.opaque,
4694 key, nkey, value, nvalue);
4695 }
4696
4697 switch (ret) {
4698 case ENGINE_SUCCESS:
4699 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4700 break;
4701
4702 case ENGINE_DISCONNECT:
4703 conn_set_state(c, conn_closing);
4704 break;
4705
4706 case ENGINE_EWOULDBLOCK:
4707 c->ewouldblock = true1;
4708 break;
4709
4710 default:
4711 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4712 }
4713 }
4714}
4715
4716static void isasl_refresh_executor(conn *c, void *packet)
4717{
4718 ENGINE_ERROR_CODE ret = c->aiostat;
4719 c->aiostat = ENGINE_SUCCESS;
4720 c->ewouldblock = false0;
4721
4722 if (ret == ENGINE_SUCCESS) {
4723 ret = refresh_cbsasl(c);
4724 }
4725
4726 switch (ret) {
4727 case ENGINE_SUCCESS:
4728 write_bin_response(c, NULL((void*)0), 0, 0, 0);
4729 break;
4730 case ENGINE_EWOULDBLOCK:
4731 c->ewouldblock = true1;
4732 conn_set_state(c, conn_refresh_cbsasl);
4733 break;
4734 case ENGINE_DISCONNECT:
4735 conn_set_state(c, conn_closing);
4736 break;
4737 default:
4738 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4739 }
4740}
4741
4742static void ssl_certs_refresh_executor(conn *c, void *packet)
4743{
4744 ENGINE_ERROR_CODE ret = c->aiostat;
4745 c->aiostat = ENGINE_SUCCESS;
4746 c->ewouldblock = false0;
4747
4748 if (ret == ENGINE_SUCCESS) {
4749 ret = refresh_ssl_certs(c);
4750 }
4751
4752 switch (ret) {
4753 case ENGINE_SUCCESS:
4754 write_bin_response(c, NULL((void*)0), 0, 0, 0);
4755 break;
4756 case ENGINE_EWOULDBLOCK:
4757 c->ewouldblock = true1;
4758 conn_set_state(c, conn_refresh_ssl_certs);
4759 break;
4760 case ENGINE_DISCONNECT:
4761 conn_set_state(c, conn_closing);
4762 break;
4763 default:
4764 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4765 }
4766}
4767
4768static void verbosity_executor(conn *c, void *packet)
4769{
4770 protocol_binary_request_verbosity *req = packet;
4771 uint32_t level = (uint32_t)ntohl(req->message.body.level);
4772 if (level > MAX_VERBOSITY_LEVEL2) {
4773 level = MAX_VERBOSITY_LEVEL2;
4774 }
4775 settings.verbose = (int)level;
4776 perform_callbacks(ON_LOG_LEVEL, NULL((void*)0), NULL((void*)0));
4777 write_bin_response(c, NULL((void*)0), 0, 0, 0);
4778}
4779
4780static void process_hello_packet_executor(conn *c, void *packet) {
4781 protocol_binary_request_hello *req = packet;
4782 char log_buffer[512];
4783 int offset = snprintf(log_buffer, sizeof(log_buffer), "HELO ");
4784 char *key = (char*)packet + sizeof(*req);
4785 uint16_t klen = ntohs(req->message.header.request.keylen);
4786 uint32_t total = (ntohl(req->message.header.request.bodylen) - klen) / 2;
4787 uint32_t ii;
4788 char *curr = key + klen;
4789 uint16_t out[2]; /* We're currently only supporting two features */
4790 int jj = 0;
4791#if 0
4792 int added_tls = 0;
4793#endif
4794 memset((char*)out, 0, sizeof(out));
4795
4796 /*
4797 * Disable all features the hello packet may enable, so that
4798 * the client can toggle features on/off during a connection
4799 */
4800 c->supports_datatype = false0;
4801
4802 if (klen) {
4803 if (klen > 256) {
4804 klen = 256;
4805 }
4806 log_buffer[offset++] = '[';
4807 memcpy(log_buffer + offset, key, klen);
4808 offset += klen;
4809 log_buffer[offset++] = ']';
4810 log_buffer[offset++] = ' ';
4811 }
4812
4813 for (ii = 0; ii < total; ++ii) {
4814 uint16_t in;
4815 /* to avoid alignment */
4816 memcpy(&in, curr, 2);
4817 curr += 2;
4818 switch (ntohs(in)) {
4819 case PROTOCOL_BINARY_FEATURE_TLS:
4820#if 0
4821 /* Not implemented */
4822 if (added_tls == 0) {
4823 out[jj++] = htons(PROTOCOL_BINARY_FEATURE_TLS);
4824 added_tls++;
4825 }
4826 break;
4827#endif
4828 case PROTOCOL_BINARY_FEATURE_DATATYPE:
4829 if (!c->supports_datatype) {
4830 offset += snprintf(log_buffer + offset,
4831 sizeof(log_buffer) - offset,
4832 "datatype ");
4833 out[jj++] = htons(PROTOCOL_BINARY_FEATURE_DATATYPE);
4834 c->supports_datatype = true1;
4835 }
4836 break;
4837 }
4838 }
4839
4840 if (jj == 0) {
4841 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4842 } else {
4843 binary_response_handler(NULL((void*)0), 0, NULL((void*)0), 0, out, 2 * jj,
4844 PROTOCOL_BINARY_RAW_BYTES,
4845 PROTOCOL_BINARY_RESPONSE_SUCCESS,
4846 0, c);
4847 write_and_free(c, c->dynamic_buffer.buffer,
4848 c->dynamic_buffer.offset);
4849 c->dynamic_buffer.buffer = NULL((void*)0);
4850 }
4851
4852 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
4853 "%d: %s", c->sfd, log_buffer);
4854}
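As the loop above implies, the HELLO body is an optional client name of keylen bytes followed by a packed list of 16-bit feature codes in network byte order, so the number of requested features is (bodylen - keylen) / 2, and the response body echoes back only the features the server actually enabled in the same 2-byte format. The fragment below composes such a body from the client side; the buffer handling and client name are an editor's illustration, not code from memcached.c.

/* Editor's sketch (not in the original source): building a HELLO body in the
 * layout the loop above parses -- client name first, then 2-byte feature codes
 * in network order. Returns the number of body bytes written, or 0 if the
 * caller's buffer is too small. */
static size_t build_hello_body_sketch(char *body, size_t size)
{
    const char name[] = "example-client";            /* hypothetical client name */
    uint16_t feature = htons(PROTOCOL_BINARY_FEATURE_DATATYPE);
    size_t klen = sizeof(name) - 1;

    if (size < klen + sizeof(feature)) {
        return 0;
    }
    memcpy(body, name, klen);                        /* keylen bytes of name     */
    memcpy(body + klen, &feature, sizeof(feature));  /* one requested feature    */
    /* the request header would then carry keylen = klen, extlen = 0 and
     * bodylen = klen + 2, giving (bodylen - keylen) / 2 == 1 feature. */
    return klen + sizeof(feature);
}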
4855
4856static void version_executor(conn *c, void *packet)
4857{
4858 write_bin_response(c, get_server_version(), 0, 0,
4859 (uint32_t)strlen(get_server_version()));
4860}
4861
4862static void quit_executor(conn *c, void *packet)
4863{
4864 write_bin_response(c, NULL((void*)0), 0, 0, 0);
4865 c->write_and_go = conn_closing;
4866}
4867
4868static void quitq_executor(conn *c, void *packet)
4869{
4870 conn_set_state(c, conn_closing);
4871}
4872
4873static void sasl_list_mech_executor(conn *c, void *packet)
4874{
4875 const char *result_string = NULL((void*)0);
4876 unsigned int string_length = 0;
4877
4878 if (cbsasl_list_mechs(&result_string, &string_length) != SASL_OK) {
4879 /* Perhaps there's a better error for this... */
4880 if (settings.verbose) {
4881 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
4882 "%d: Failed to list SASL mechanisms.\n",
4883 c->sfd);
4884 }
4885 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, 0);
4886 return;
4887 }
4888 write_bin_response(c, (char*)result_string, 0, 0, string_length);
4889}
4890
4891static void noop_executor(conn *c, void *packet)
4892{
4893 write_bin_response(c, NULL((void*)0), 0, 0, 0);
4894}
4895
4896static void flush_executor(conn *c, void *packet)
4897{
4898 ENGINE_ERROR_CODE ret;
4899 time_t exptime = 0;
4900 protocol_binary_request_flush* req = packet;
4901
4902 if (c->cmd == PROTOCOL_BINARY_CMD_FLUSHQ) {
4903 c->noreply = true1;
4904 }
4905
4906 if (c->binary_header.request.extlen == sizeof(req->message.body)) {
4907 exptime = ntohl(req->message.body.expiration);
4908 }
4909
4910 if (settings.verbose > 1) {
4911 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
4912 "%d: flush %ld", c->sfd,
4913 (long)exptime);
4914 }
4915
4916 ret = settings.engine.v1->flush(settings.engine.v0, c, exptime);
4917
4918 if (ret == ENGINE_SUCCESS) {
4919 write_bin_response(c, NULL((void*)0), 0, 0, 0);
4920 } else if (ret == ENGINE_ENOTSUP) {
4921 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4922 } else {
4923 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
4924 }
4925 STATS_NOKEY(c, cmd_flush);
4926}
4927
4928static void get_executor(conn *c, void *packet)
4929{
4930 switch (c->cmd) {
4931 case PROTOCOL_BINARY_CMD_GETQ:
4932 c->cmd = PROTOCOL_BINARY_CMD_GET;
4933 c->noreply = true1;
4934 break;
4935 case PROTOCOL_BINARY_CMD_GET:
4936 c->noreply = false0;
4937 break;
4938 case PROTOCOL_BINARY_CMD_GETKQ:
4939 c->cmd = PROTOCOL_BINARY_CMD_GETK;
4940 c->noreply = true1;
4941 break;
4942 case PROTOCOL_BINARY_CMD_GETK:
4943 c->noreply = false0;
4944 break;
4945 default:
4946 abort();
4947 }
4948
4949 process_bin_get(c);
4950}
4951
4952static void process_bin_delete(conn *c);
4953static void delete_executor(conn *c, void *packet)
4954{
4955 if (c->cmd == PROTOCOL_BINARY_CMD_DELETEQ) {
4956 c->noreply = true1;
4957 }
4958
4959 process_bin_delete(c);
4960}
4961
4962static void stat_executor(conn *c, void *packet)
4963{
4964 char *subcommand = binary_get_key(c);
4965 size_t nkey = c->binary_header.request.keylen;
4966 ENGINE_ERROR_CODE ret;
4967
4968 if (settings.verbose > 1) {
4969 char buffer[1024];
4970 if (key_to_printable_buffer(buffer, sizeof(buffer), c->sfd, true1,
4971 "STATS", subcommand, nkey) != -1) {
4972 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c, "%s\n",
4973 buffer);
4974 }
4975 }
4976
4977 ret = c->aiostat;
4978 c->aiostat = ENGINE_SUCCESS;
4979 c->ewouldblock = false0;
4980
4981 if (ret == ENGINE_SUCCESS) {
4982 if (nkey == 0) {
4983 /* request all statistics */
4984 ret = settings.engine.v1->get_stats(settings.engine.v0, c, NULL((void*)0), 0, append_stats);
4985 if (ret == ENGINE_SUCCESS) {
4986 server_stats(&append_stats, c, false0);
4987 }
4988 } else if (strncmp(subcommand, "reset", 5) == 0) {
4989 stats_reset(c);
4990 settings.engine.v1->reset_stats(settings.engine.v0, c);
4991 } else if (strncmp(subcommand, "settings", 8) == 0) {
4992 process_stat_settings(&append_stats, c);
4993 } else if (strncmp(subcommand, "cachedump", 9) == 0) {
4994 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4995 return;
4996 } else if (strncmp(subcommand, "detail", 6) == 0) {
4997 char *subcmd_pos = subcommand + 6;
4998 if (settings.allow_detailed) {
4999 if (strncmp(subcmd_pos, " dump", 5) == 0) {
5000 int len;
5001 char *dump_buf = stats_prefix_dump(&len);
5002 if (dump_buf == NULL((void*)0) || len <= 0) {
5003 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, 0);
5004 return ;
5005 } else {
5006 append_stats("detailed", (uint16_t)strlen("detailed"), dump_buf, len, c);
5007 free(dump_buf);
5008 }
5009 } else if (strncmp(subcmd_pos, " on", 3) == 0) {
5010 settings.detail_enabled = 1;
5011 } else if (strncmp(subcmd_pos, " off", 4) == 0) {
5012 settings.detail_enabled = 0;
5013 } else {
5014 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
5015 return;
5016 }
5017 } else {
5018 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, 0);
5019 return;
5020 }
5021 } else if (strncmp(subcommand, "aggregate", 9) == 0) {
5022 server_stats(&append_stats, c, true1);
5023 } else if (strncmp(subcommand, "connections", 11) == 0) {
5024 connection_stats(&append_stats, c);
5025 } else {
5026 ret = settings.engine.v1->get_stats(settings.engine.v0, c,
5027 subcommand, (int)nkey,
5028 append_stats);
5029 }
5030 }
5031
5032 switch (ret) {
5033 case ENGINE_SUCCESS:
5034 append_stats(NULL((void*)0), 0, NULL((void*)0), 0, c);
5035 write_and_free(c, c->dynamic_buffer.buffer, c->dynamic_buffer.offset);
5036 c->dynamic_buffer.buffer = NULL((void*)0);
5037 break;
5038 case ENGINE_ENOMEM:
5039 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, 0);
5040 break;
5041 case ENGINE_TMPFAIL:
5042 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ETMPFAIL, 0);
5043 break;
5044 case ENGINE_KEY_ENOENT:
5045 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
5046 break;
5047 case ENGINE_NOT_MY_VBUCKET:
5048 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET, 0);
5049 break;
5050 case ENGINE_DISCONNECT:
5051 c->state = conn_closing;
5052 break;
5053 case ENGINE_ENOTSUP:
5054 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
5055 break;
5056 case ENGINE_EWOULDBLOCK:
5057 c->ewouldblock = true1;
5058 break;
5059 default:
5060 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
5061 }
5062}
5063
5064static void arithmetic_executor(conn *c, void *packet)
5065{
5066 protocol_binary_response_incr* rsp = (protocol_binary_response_incr*)c->wbuf;
5067 protocol_binary_request_incr* req = binary_get_request(c);
5068 ENGINE_ERROR_CODE ret;
5069 uint64_t delta;
5070 uint64_t initial;
5071 rel_time_t expiration;
5072 char *key;
5073 size_t nkey;
5074 bool_Bool incr;
5075
5076 cb_assert(c != NULL);
5077 cb_assert(c->wsize >= sizeof(*rsp));
5078
5079
5080 switch (c->cmd) {
5081 case PROTOCOL_BINARY_CMD_INCREMENTQ:
5082 c->cmd = PROTOCOL_BINARY_CMD_INCREMENT;
5083 c->noreply = true1;
5084 break;
5085 case PROTOCOL_BINARY_CMD_INCREMENT:
5086 c->noreply = false0;
5087 break;
5088 case PROTOCOL_BINARY_CMD_DECREMENTQ:
5089 c->cmd = PROTOCOL_BINARY_CMD_DECREMENT;
5090 c->noreply = true1;
5091 break;
5092 case PROTOCOL_BINARY_CMD_DECREMENT:
5093 c->noreply = false0;
5094 break;
5095 default:
5096 abort();
5097 }
5098
5099 if (req->message.header.request.cas != 0) {
5100 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
5101 return;
5102 }
5103
5104 /* fix byteorder in the request */
5105 delta = ntohll(req->message.body.delta);
5106 initial = ntohll(req->message.body.initial);
5107 expiration = ntohl(req->message.body.expiration);
5108 key = binary_get_key(c);
5109 nkey = c->binary_header.request.keylen;
5110 incr = (c->cmd == PROTOCOL_BINARY_CMD_INCREMENT ||
5111 c->cmd == PROTOCOL_BINARY_CMD_INCREMENTQ);
5112
5113 if (settings.verbose > 1) {
5114 char buffer[1024];
5115 ssize_t nw;
5116 nw = key_to_printable_buffer(buffer, sizeof(buffer), c->sfd, true1,
5117 incr ? "INCR" : "DECR", key, nkey);
5118 if (nw != -1) {
5119 if (snprintf(buffer + nw, sizeof(buffer) - nw,
5120 " %" PRIu64 ", %" PRIu64 ", %" PRIu64 "\n",
5121 delta, initial, (uint64_t)expiration) != -1) {
5122 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c, "%s",
5123 buffer);
5124 }
5125 }
5126 }
5127
5128 ret = c->aiostat;
5129 c->aiostat = ENGINE_SUCCESS;
5130 if (ret == ENGINE_SUCCESS) {
5131 ret = settings.engine.v1->arithmetic(settings.engine.v0,
5132 c, key, (int)nkey, incr,
5133 req->message.body.expiration != 0xffffffff,
5134 delta, initial, expiration,
5135 &c->cas,
5136 c->binary_header.request.datatype,
5137 &rsp->message.body.value,
5138 c->binary_header.request.vbucket);
5139 }
5140
5141 switch (ret) {
5142 case ENGINE_SUCCESS:
5143 rsp->message.body.value = htonll(rsp->message.body.value);
5144 write_bin_response(c, &rsp->message.body, 0, 0,
5145 sizeof (rsp->message.body.value));
5146 if (incr) {
5147 STATS_INCR(c, incr_hits, key, nkey);
5148 } else {
5149 STATS_INCR(c, decr_hits, key, nkey);
5150 }
5151 break;
5152 case ENGINE_KEY_EEXISTS:
5153 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, 0);
5154 break;
5155 case ENGINE_KEY_ENOENT:
5156 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
5157 if (c->cmd == PROTOCOL_BINARY_CMD_INCREMENT) {
5158 STATS_INCR(c, incr_misses, key, nkey);
5159 } else {
5160 STATS_INCR(c, decr_misses, key, nkey);
5161 }
5162 break;
5163 case ENGINE_ENOMEM:
5164 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, 0);
5165 break;
5166 case ENGINE_TMPFAIL:
5167 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ETMPFAIL, 0);
5168 break;
5169 case ENGINE_EINVAL:
5170 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_DELTA_BADVAL, 0);
5171 break;
5172 case ENGINE_NOT_STORED:
5173 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_STORED, 0);
5174 break;
5175 case ENGINE_DISCONNECT:
5176 c->state = conn_closing;
5177 break;
5178 case ENGINE_ENOTSUP:
5179 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
5180 break;
5181 case ENGINE_NOT_MY_VBUCKET:
5182 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET, 0);
5183 break;
5184 case ENGINE_EWOULDBLOCK:
5185 c->ewouldblock = true1;
5186 break;
5187 default:
5188 abort();
5189 }
5190}
5191
5192static void get_cmd_timer_executor(conn *c, void *packet)
5193{
5194 protocol_binary_request_get_cmd_timer *req = packet;
5195
5196 generate_timings(req->message.body.opcode, c);
5197 write_and_free(c, c->dynamic_buffer.buffer, c->dynamic_buffer.offset);
5198 c->dynamic_buffer.buffer = NULL((void*)0);
5199}
5200
5201static void set_ctrl_token_executor(conn *c, void *packet)
5202{
5203 protocol_binary_request_set_ctrl_token *req = packet;
5204
5205 uint64_t old_cas = ntohll(req->message.header.request.cas);
5206
5207 uint16_t ret = PROTOCOL_BINARY_RESPONSE_SUCCESS;
5208 cb_mutex_enter(&(session_cas.mutex));
5209 if (session_cas.ctr > 0) {
5210 ret = PROTOCOL_BINARY_RESPONSE_EBUSY;
5211 } else {
5212 if (old_cas == session_cas.value) {
5213 session_cas.value = ntohll(req->message.body.new_cas);
5214 } else {
5215 ret = PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS;
5216 }
5217 }
5218
5219 binary_response_handler(NULL((void*)0), 0, NULL((void*)0), 0, NULL((void*)0), 0,
5220 PROTOCOL_BINARY_RAW_BYTES,
5221 ret, session_cas.value, c);
5222 cb_mutex_exit(&(session_cas.mutex));
5223
5224 write_and_free(c, c->dynamic_buffer.buffer, c->dynamic_buffer.offset);
5225 c->dynamic_buffer.buffer = NULL((void*)0);
5226}
5227
5228static void get_ctrl_token_executor(conn *c, void *packet)
5229{
5230 cb_mutex_enter(&(session_cas.mutex));
5231 binary_response_handler(NULL((void*)0), 0, NULL((void*)0), 0, NULL((void*)0), 0,
5232 PROTOCOL_BINARY_RAW_BYTES,
5233 PROTOCOL_BINARY_RESPONSE_SUCCESS,
5234 session_cas.value, c);
5235 cb_mutex_exit(&(session_cas.mutex));
5236 write_and_free(c, c->dynamic_buffer.buffer, c->dynamic_buffer.offset);
5237 c->dynamic_buffer.buffer = NULL((void*)0);
5238}
5239
5240static void not_supported_executor(conn *c, void *packet)
5241{
5242 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
5243}
5244
5245
5246typedef int (*bin_package_validate)(void *packet);
5247typedef void (*bin_package_execute)(conn *c, void *packet);
5248
5249bin_package_validate validators[0xff];
5250bin_package_execute executors[0xff];
5251
5252static void setup_bin_packet_handlers(void) {
5253 validators[PROTOCOL_BINARY_CMD_UPR_OPEN] = upr_open_validator;
5254 validators[PROTOCOL_BINARY_CMD_UPR_ADD_STREAM] = upr_add_stream_validator;
5255 validators[PROTOCOL_BINARY_CMD_UPR_CLOSE_STREAM] = upr_close_stream_validator;
5256 validators[PROTOCOL_BINARY_CMD_UPR_SNAPSHOT_MARKER] = upr_snapshot_marker_validator;
5257 validators[PROTOCOL_BINARY_CMD_UPR_DELETION] = upr_deletion_validator;
5258 validators[PROTOCOL_BINARY_CMD_UPR_EXPIRATION] = upr_expiration_validator;
5259 validators[PROTOCOL_BINARY_CMD_UPR_FLUSH] = upr_flush_validator;
5260 validators[PROTOCOL_BINARY_CMD_UPR_GET_FAILOVER_LOG] = upr_get_failover_log_validator;
5261 validators[PROTOCOL_BINARY_CMD_UPR_MUTATION] = upr_mutation_validator;
5262 validators[PROTOCOL_BINARY_CMD_UPR_SET_VBUCKET_STATE] = upr_set_vbucket_state_validator;
5263 validators[PROTOCOL_BINARY_CMD_UPR_NOOP] = upr_noop_validator;
5264 validators[PROTOCOL_BINARY_CMD_UPR_BUFFER_ACKNOWLEDGEMENT] = upr_buffer_acknowledgement_validator;
5265 validators[PROTOCOL_BINARY_CMD_UPR_CONTROL] = upr_control_validator;
5266 validators[PROTOCOL_BINARY_CMD_UPR_STREAM_END] = upr_stream_end_validator;
5267 validators[PROTOCOL_BINARY_CMD_UPR_STREAM_REQ] = upr_stream_req_validator;
5268 validators[PROTOCOL_BINARY_CMD_ISASL_REFRESH] = isasl_refresh_validator;
5269 validators[PROTOCOL_BINARY_CMD_SSL_CERTS_REFRESH] = ssl_certs_refresh_validator;
5270 validators[PROTOCOL_BINARY_CMD_VERBOSITY] = verbosity_validator;
5271 validators[PROTOCOL_BINARY_CMD_HELLO] = hello_validator;
5272 validators[PROTOCOL_BINARY_CMD_VERSION] = version_validator;
5273 validators[PROTOCOL_BINARY_CMD_QUIT] = quit_validator;
5274 validators[PROTOCOL_BINARY_CMD_QUITQ] = quit_validator;
5275 validators[PROTOCOL_BINARY_CMD_SASL_LIST_MECHS] = sasl_list_mech_validator;
5276 validators[PROTOCOL_BINARY_CMD_NOOP] = noop_validator;
5277 validators[PROTOCOL_BINARY_CMD_FLUSH] = flush_validator;
5278 validators[PROTOCOL_BINARY_CMD_FLUSHQ] = flush_validator;
5279 validators[PROTOCOL_BINARY_CMD_GET] = get_validator;
5280 validators[PROTOCOL_BINARY_CMD_GETQ] = get_validator;
5281 validators[PROTOCOL_BINARY_CMD_GETK] = get_validator;
5282 validators[PROTOCOL_BINARY_CMD_GETKQ] = get_validator;
5283 validators[PROTOCOL_BINARY_CMD_DELETE] = delete_validator;
5284 validators[PROTOCOL_BINARY_CMD_DELETEQ] = delete_validator;
5285 validators[PROTOCOL_BINARY_CMD_STAT] = stat_validator;
5286 validators[PROTOCOL_BINARY_CMD_INCREMENT] = arithmetic_validator;
5287 validators[PROTOCOL_BINARY_CMD_INCREMENTQ] = arithmetic_validator;
5288 validators[PROTOCOL_BINARY_CMD_DECREMENT] = arithmetic_validator;
5289 validators[PROTOCOL_BINARY_CMD_DECREMENTQ] = arithmetic_validator;
5290 validators[PROTOCOL_BINARY_CMD_GET_CMD_TIMER] = get_cmd_timer_validator;
5291 validators[PROTOCOL_BINARY_CMD_SET_CTRL_TOKEN] = set_ctrl_token_validator;
5292 validators[PROTOCOL_BINARY_CMD_GET_CTRL_TOKEN] = get_ctrl_token_validator;
5293
5294 executors[PROTOCOL_BINARY_CMD_UPR_OPEN] = upr_open_executor;
5295 executors[PROTOCOL_BINARY_CMD_UPR_ADD_STREAM] = upr_add_stream_executor;
5296 executors[PROTOCOL_BINARY_CMD_UPR_CLOSE_STREAM] = upr_close_stream_executor;
5297 executors[PROTOCOL_BINARY_CMD_UPR_SNAPSHOT_MARKER] = upr_snapshot_marker_executor;
5298 executors[PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_END] = tap_checkpoint_end_executor;
5299 executors[PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_START] = tap_checkpoint_start_executor;
5300 executors[PROTOCOL_BINARY_CMD_TAP_CONNECT] = tap_connect_executor;
5301 executors[PROTOCOL_BINARY_CMD_TAP_DELETE] = tap_delete_executor;
5302 executors[PROTOCOL_BINARY_CMD_TAP_FLUSH] = tap_flush_executor;
5303 executors[PROTOCOL_BINARY_CMD_TAP_MUTATION] = tap_mutation_executor;
5304 executors[PROTOCOL_BINARY_CMD_TAP_OPAQUE] = tap_opaque_executor;
5305 executors[PROTOCOL_BINARY_CMD_TAP_VBUCKET_SET] = tap_vbucket_set_executor;
5306 executors[PROTOCOL_BINARY_CMD_UPR_DELETION] = upr_deletion_executor;
5307 executors[PROTOCOL_BINARY_CMD_UPR_EXPIRATION] = upr_expiration_executor;
5308 executors[PROTOCOL_BINARY_CMD_UPR_FLUSH] = upr_flush_executor;
5309 executors[PROTOCOL_BINARY_CMD_UPR_GET_FAILOVER_LOG] = upr_get_failover_log_executor;
5310 executors[PROTOCOL_BINARY_CMD_UPR_MUTATION] = upr_mutation_executor;
5311 executors[PROTOCOL_BINARY_CMD_UPR_SET_VBUCKET_STATE] = upr_set_vbucket_state_executor;
5312 executors[PROTOCOL_BINARY_CMD_UPR_NOOP] = upr_noop_executor;
5313 executors[PROTOCOL_BINARY_CMD_UPR_BUFFER_ACKNOWLEDGEMENT] = upr_buffer_acknowledgement_executor;
5314 executors[PROTOCOL_BINARY_CMD_UPR_CONTROL] = upr_control_executor;
5315 executors[PROTOCOL_BINARY_CMD_UPR_STREAM_END] = upr_stream_end_executor;
5316 executors[PROTOCOL_BINARY_CMD_UPR_STREAM_REQ] = upr_stream_req_executor;
5317 executors[PROTOCOL_BINARY_CMD_ISASL_REFRESH] = isasl_refresh_executor;
5318 executors[PROTOCOL_BINARY_CMD_SSL_CERTS_REFRESH] = ssl_certs_refresh_executor;
5319 executors[PROTOCOL_BINARY_CMD_VERBOSITY] = verbosity_executor;
5320 executors[PROTOCOL_BINARY_CMD_HELLO] = process_hello_packet_executor;
5321 executors[PROTOCOL_BINARY_CMD_VERSION] = version_executor;
5322 executors[PROTOCOL_BINARY_CMD_QUIT] = quit_executor;
5323 executors[PROTOCOL_BINARY_CMD_QUITQ] = quitq_executor;
5324 executors[PROTOCOL_BINARY_CMD_SASL_LIST_MECHS] = sasl_list_mech_executor;
5325 executors[PROTOCOL_BINARY_CMD_NOOP] = noop_executor;
5326 executors[PROTOCOL_BINARY_CMD_FLUSH] = flush_executor;
5327 executors[PROTOCOL_BINARY_CMD_FLUSHQ] = flush_executor;
5328 executors[PROTOCOL_BINARY_CMD_GET] = get_executor;
5329 executors[PROTOCOL_BINARY_CMD_GETQ] = get_executor;
5330 executors[PROTOCOL_BINARY_CMD_GETK] = get_executor;
5331 executors[PROTOCOL_BINARY_CMD_GETKQ] = get_executor;
5332 executors[PROTOCOL_BINARY_CMD_DELETE] = delete_executor;
5333 executors[PROTOCOL_BINARY_CMD_DELETEQ] = delete_executor;
5334 executors[PROTOCOL_BINARY_CMD_STAT] = stat_executor;
5335 executors[PROTOCOL_BINARY_CMD_INCREMENT] = arithmetic_executor;
5336 executors[PROTOCOL_BINARY_CMD_INCREMENTQ] = arithmetic_executor;
5337 executors[PROTOCOL_BINARY_CMD_DECREMENT] = arithmetic_executor;
5338 executors[PROTOCOL_BINARY_CMD_DECREMENTQ] = arithmetic_executor;
5339 executors[PROTOCOL_BINARY_CMD_GET_CMD_TIMER] = get_cmd_timer_executor;
5340 executors[PROTOCOL_BINARY_CMD_SET_CTRL_TOKEN] = set_ctrl_token_executor;
5341 executors[PROTOCOL_BINARY_CMD_GET_CTRL_TOKEN] = get_ctrl_token_executor;
5342}
5343
5344static void setup_not_supported_handlers(void) {
5346 if (settings.engine.v1->get_tap_iterator == NULL) {
5346 executors[PROTOCOL_BINARY_CMD_TAP_CONNECT] = not_supported_executor;
5347 }
5348
5349 if (settings.engine.v1->tap_notify == NULL) {
5350 executors[PROTOCOL_BINARY_CMD_TAP_MUTATION] = not_supported_executor;
5351 executors[PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_START] = not_supported_executor;
5352 executors[PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_END] = not_supported_executor;
5353 executors[PROTOCOL_BINARY_CMD_TAP_DELETE] = not_supported_executor;
5354 executors[PROTOCOL_BINARY_CMD_TAP_FLUSH] = not_supported_executor;
5355 executors[PROTOCOL_BINARY_CMD_TAP_OPAQUE] = not_supported_executor;
5356 executors[PROTOCOL_BINARY_CMD_TAP_VBUCKET_SET] = not_supported_executor;
5357 }
5358}
5359
5360static int invalid_datatype(conn *c) {
5361 switch (c->binary_header.request.datatype) {
5362 case PROTOCOL_BINARY_RAW_BYTES:
5363 return 0;
5364
5365 case PROTOCOL_BINARY_DATATYPE_JSON:
5366 case PROTOCOL_BINARY_DATATYPE_COMPRESSED:
5367 case PROTOCOL_BINARY_DATATYPE_COMPRESSED_JSON:
5368 if (c->supports_datatype) {
5369 return 0;
5370 }
5371 /* FALLTHROUGH */
5372 default:
5373 return 1;
5374 }
5375}
5376
5377static void process_bin_packet(conn *c) {
5378
5379 char *packet = (c->rcurr - (c->binary_header.request.bodylen +
5380 sizeof(c->binary_header)));
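 /* By the time the packet is complete, c->rcurr points just past the request
  * body, so stepping back bodylen + header bytes yields a pointer to the start
  * of the full packet (header included). */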
5381
5382 uint8_t opcode = c->binary_header.request.opcode;
5383
5384 bin_package_validate validator = validators[opcode];
5385 bin_package_execute executor = executors[opcode];
5386
5387 if (validator != NULL && validator(packet) != 0) {
5388 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
5389 } else if (executor != NULL) {
5390 executor(c, packet);
5391 } else {
5392 process_bin_unknown_packet(c);
5393 }
5394}
5395
5396static void dispatch_bin_command(conn *c) {
5397 int protocol_error = 0;
5398
5399 int extlen = c->binary_header.request.extlen;
5400 uint16_t keylen = c->binary_header.request.keylen;
5401 uint32_t bodylen = c->binary_header.request.bodylen;
5402
5403 if (settings.require_sasl && !authenticated(c)) {
5404 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, 0);
5405 c->write_and_go = conn_closing;
5406 return;
5407 }
5408
5409 if (invalid_datatype(c)) {
5410 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
5411 c->write_and_go = conn_closing;
5412 return;
5413 }
5414
5415 if (c->start == 0) {
5416 c->start = gethrtime();
5417 }
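 /* Record when we started processing this request (gethrtime()); presumably
  * consumed by the command-duration timings exposed via GET_CMD_TIMER. */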
5418
5419 MEMCACHED_PROCESS_COMMAND_START(c->sfd, c->rcurr, c->rbytes);
5420
5421 /* binprot supports 16bit keys, but internals are still 8bit */
5422 if (keylen > KEY_MAX_LENGTH) {
5423 handle_binary_protocol_error(c);
5424 return;
5425 }
5426
5427 if (executors[c->cmd] != NULL) {
5428 c->noreply = false;
5429 bin_read_chunk(c, bin_reading_packet, c->binary_header.request.bodylen);
5430 return;
5431 }
5432
5433 c->noreply = true;
5434
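 /* Quiet opcodes are rewritten to their regular counterparts below; noreply
  * stays set so the success response is suppressed for the quiet variants. */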
5435 switch (c->cmd) {
5436 case PROTOCOL_BINARY_CMD_SETQ:
5437 c->cmd = PROTOCOL_BINARY_CMD_SET;
5438 break;
5439 case PROTOCOL_BINARY_CMD_ADDQ:
5440 c->cmd = PROTOCOL_BINARY_CMD_ADD;
5441 break;
5442 case PROTOCOL_BINARY_CMD_REPLACEQ:
5443 c->cmd = PROTOCOL_BINARY_CMD_REPLACE;
5444 break;
5445 case PROTOCOL_BINARY_CMD_APPENDQ:
5446 c->cmd = PROTOCOL_BINARY_CMD_APPEND;
5447 break;
5448 case PROTOCOL_BINARY_CMD_PREPENDQ:
5449 c->cmd = PROTOCOL_BINARY_CMD_PREPEND;
5450 break;
5451 default:
5452 c->noreply = false;
5453 }
5454
5455 switch (c->cmd) {
5456 case PROTOCOL_BINARY_CMD_SET: /* FALLTHROUGH */
5457 case PROTOCOL_BINARY_CMD_ADD: /* FALLTHROUGH */
5458 case PROTOCOL_BINARY_CMD_REPLACE:
5459 if (extlen == 8 && keylen != 0 && bodylen >= (uint32_t)(keylen + 8)) {
5460 bin_read_key(c, bin_reading_set_header, 8);
5461 } else {
5462 protocol_error = 1;
5463 }
5464
5465 break;
5466 case PROTOCOL_BINARY_CMD_APPEND:
5467 case PROTOCOL_BINARY_CMD_PREPEND:
5468 if (keylen > 0 && extlen == 0) {
5469 bin_read_key(c, bin_reading_set_header, 0);
5470 } else {
5471 protocol_error = 1;
5472 }
5473 break;
5474
5475 case PROTOCOL_BINARY_CMD_SASL_AUTH:
5476 case PROTOCOL_BINARY_CMD_SASL_STEP:
5477 if (extlen == 0 && keylen != 0) {
5478 bin_read_key(c, bin_reading_sasl_auth, 0);
5479 } else {
5480 protocol_error = 1;
5481 }
5482 break;
5483
5484 default:
5485 if (settings.engine.v1->unknown_command == NULL) {
5486 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND,
5487 bodylen);
5488 } else {
5489 bin_read_chunk(c, bin_reading_packet, c->binary_header.request.bodylen);
5490 }
5491 }
5492
5493 if (protocol_error) {
5494 handle_binary_protocol_error(c);
5495 }
5496}
5497
5498static void process_bin_update(conn *c) {
5499 char *key;
5500 uint16_t nkey;
5501 uint32_t vlen;
5502 item *it;
5503 protocol_binary_request_set* req = binary_get_request(c);
5504 ENGINE_ERROR_CODE ret;
5505 item_info_holder info;
5506 rel_time_t expiration;
5507
5508 cb_assert(c != NULL);
5509 memset(&info, 0, sizeof(info));
5510 info.info.nvalue = 1;
5511 key = binary_get_key(c);
5512 nkey = c->binary_header.request.keylen;
5513
5514 /* fix byteorder in the request */
5515 req->message.body.flags = req->message.body.flags;
5516 expiration = ntohl(req->message.body.expiration);
5517
5518 vlen = c->binary_header.request.bodylen - (nkey + c->binary_header.request.extlen);
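 /* The value length is whatever remains of the body once the key and the
  * extras (flags + expiration) have been accounted for. */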
5519
5520 if (settings.verbose > 1) {
5521 size_t nw;
5522 char buffer[1024];
5523 const char *prefix;
5524 if (c->cmd == PROTOCOL_BINARY_CMD_ADD) {
5525 prefix = "ADD";
5526 } else if (c->cmd == PROTOCOL_BINARY_CMD_SET) {
5527 prefix = "SET";
5528 } else {
5529 prefix = "REPLACE";
5530 }
5531
5532 nw = key_to_printable_buffer(buffer, sizeof(buffer), c->sfd, true,
5533 prefix, key, nkey);
5534
5535 if (nw != -1) {
5536 if (snprintf(buffer + nw, sizeof(buffer) - nw,
5537 " Value len is %d\n", vlen)) {
5538 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c, "%s",
5539 buffer);
5540 }
5541 }
5542 }
5543
5544 if (settings.detail_enabled) {
5545 stats_prefix_record_set(key, nkey);
5546 }
5547
5548 ret = c->aiostat;
5549 c->aiostat = ENGINE_SUCCESS;
5550 c->ewouldblock = false;
5551
5552 if (ret == ENGINE_SUCCESS) {
5553 ret = settings.engine.v1->allocate(settings.engine.v0, c,
5554 &it, key, nkey,
5555 vlen,
5556 req->message.body.flags,
5557 expiration,
5558 c->binary_header.request.datatype);
5559 if (ret == ENGINE_SUCCESS && !settings.engine.v1->get_item_info(settings.engine.v0,
5560 c, it,
5561 (void*)&info)) {
5562 settings.engine.v1->release(settings.engine.v0, c, it);
5563 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINTERNAL, 0);
5564 return;
5565 }
5566 }
5567
5568 switch (ret) {
5569 case ENGINE_SUCCESS:
5570 item_set_cas(c, it, c->binary_header.request.cas);
5571
5572 switch (c->cmd) {
5573 case PROTOCOL_BINARY_CMD_ADD:
5574 c->store_op = OPERATION_ADD;
5575 break;
5576 case PROTOCOL_BINARY_CMD_SET:
5577 if (c->binary_header.request.cas != 0) {
5578 c->store_op = OPERATION_CAS;
5579 } else {
5580 c->store_op = OPERATION_SET;
5581 }
5582 break;
5583 case PROTOCOL_BINARY_CMD_REPLACE:
5584 if (c->binary_header.request.cas != 0) {
5585 c->store_op = OPERATION_CAS;
5586 } else {
5587 c->store_op = OPERATION_REPLACE;
5588 }
5589 break;
5590 default:
5591 cb_assert(0);
5592 }
5593
5594 c->item = it;
5595 c->ritem = info.info.value[0].iov_base;
5596 c->rlbytes = vlen;
5597 conn_set_state(c, conn_nread);
5598 c->substate = bin_read_set_value;
5599 break;
5600 case ENGINE_EWOULDBLOCK:
5601 c->ewouldblock = true;
5602 break;
5603 case ENGINE_DISCONNECT:
5604 c->state = conn_closing;
5605 break;
5606 default:
5607 if (ret == ENGINE_E2BIG) {
5608 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_E2BIG, vlen);
5609 } else {
5610 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, vlen);
5611 }
5612
5613 /* swallow the data line */
5614 c->write_and_go = conn_swallow;
5615 }
5616}
5617
5618static void process_bin_append_prepend(conn *c) {
5619 ENGINE_ERROR_CODE ret;
5620 char *key;
5621 int nkey;
5622 int vlen;
5623 item *it;
5624 item_info_holder info;
5625 memset(&info, 0, sizeof(info));
5626 info.info.nvalue = 1;
5627
5628 cb_assert(c != NULL);
5629
5630 key = binary_get_key(c);
5631 nkey = c->binary_header.request.keylen;
5632 vlen = c->binary_header.request.bodylen - nkey;
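 /* Append/prepend carry no extras, so the value is simply body minus key. */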
5633
5634 if (settings.verbose > 1) {
5635 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
5636 "Value len is %d\n", vlen);
5637 }
5638
5639 if (settings.detail_enabled) {
5640 stats_prefix_record_set(key, nkey);
5641 }
5642
5643 ret = c->aiostat;
5644 c->aiostat = ENGINE_SUCCESS;
5645 c->ewouldblock = false;
5646
5647 if (ret == ENGINE_SUCCESS) {
5648 ret = settings.engine.v1->allocate(settings.engine.v0, c,
5649 &it, key, nkey,
5650 vlen, 0, 0,
5651 c->binary_header.request.datatype);
5652 if (ret == ENGINE_SUCCESS && !settings.engine.v1->get_item_info(settings.engine.v0,
5653 c, it,
5654 (void*)&info)) {
5655 settings.engine.v1->release(settings.engine.v0, c, it);
5656 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINTERNAL, 0);
5657 return;
5658 }
5659 }
5660
5661 switch (ret) {
5662 case ENGINE_SUCCESS:
5663 item_set_cas(c, it, c->binary_header.request.cas);
5664
5665 switch (c->cmd) {
5666 case PROTOCOL_BINARY_CMD_APPEND:
5667 c->store_op = OPERATION_APPEND;
5668 break;
5669 case PROTOCOL_BINARY_CMD_PREPEND:
5670 c->store_op = OPERATION_PREPEND;
5671 break;
5672 default:
5673 cb_assert(0);
5674 }
5675
5676 c->item = it;
5677 c->ritem = info.info.value[0].iov_base;
5678 c->rlbytes = vlen;
5679 conn_set_state(c, conn_nread);
5680 c->substate = bin_read_set_value;
5681 break;
5682 case ENGINE_EWOULDBLOCK:
5683 c->ewouldblock = true;
5684 break;
5685 case ENGINE_DISCONNECT:
5686 c->state = conn_closing;
5687 break;
5688 default:
5689 if (ret == ENGINE_E2BIG) {
5690 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_E2BIG, vlen);
5691 } else {
5692 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, vlen);
5693 }
5694 /* swallow the data line */
5695 c->write_and_go = conn_swallow;
5696 }
5697}
5698
5699static void process_bin_delete(conn *c) {
5700 ENGINE_ERROR_CODE ret;
5701 protocol_binary_request_delete* req = binary_get_request(c);
5702 char* key = binary_get_key(c);
5703 size_t nkey = c->binary_header.request.keylen;
5704 uint64_t cas = ntohll(req->message.header.request.cas);
5705 item_info_holder info;
5706 memset(&info, 0, sizeof(info));
5707
5708 info.info.nvalue = 1;
5709
5710 cb_assert(c != NULL);
5711
5712 if (settings.verbose > 1) {
5713 char buffer[1024];
5714 if (key_to_printable_buffer(buffer, sizeof(buffer), c->sfd, true,
5715 "DELETE", key, nkey) != -1) {
5716 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c, "%s\n",
5717 buffer);
5718 }
5719 }
5720
5721 ret = c->aiostat;
5722 c->aiostat = ENGINE_SUCCESS;
5723 c->ewouldblock = false;
5724
5725 if (ret == ENGINE_SUCCESS) {
5726 if (settings.detail_enabled) {
5727 stats_prefix_record_delete(key, nkey);
5728 }
5729 ret = settings.engine.v1->remove(settings.engine.v0, c, key, nkey,
5730 &cas, c->binary_header.request.vbucket);
5731 }
5732
5733 /* For some reason the SLAB_INCR tries to access this... */
5734 switch (ret) {
5735 case ENGINE_SUCCESS:
5736 c->cas = cas;
5737 write_bin_response(c, NULL, 0, 0, 0);
5738 SLAB_INCR(c, delete_hits, key, nkey);
5739 break;
5740 case ENGINE_KEY_EEXISTS:
5741 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, 0);
5742 break;
5743 case ENGINE_KEY_ENOENT:
5744 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
5745 STATS_INCR(c, delete_misses, key, nkey);
5746 break;
5747 case ENGINE_NOT_MY_VBUCKET:
5748 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET, 0);
5749 break;
5750 case ENGINE_TMPFAIL:
5751 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ETMPFAIL, 0);
5752 break;
5753 case ENGINE_EWOULDBLOCK:
5754 c->ewouldblock = true;
5755 break;
5756 default:
5757 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
5758 }
5759}
5760
5761static void complete_nread(conn *c) {
5762 cb_assert(c != NULL);
5763 cb_assert(c->cmd >= 0);
5764
5765 switch(c->substate) {
5766 case bin_reading_set_header:
5767 if (c->cmd == PROTOCOL_BINARY_CMD_APPEND ||
5768 c->cmd == PROTOCOL_BINARY_CMD_PREPEND) {
5769 process_bin_append_prepend(c);
5770 } else {
5771 process_bin_update(c);
5772 }
5773 break;
5774 case bin_read_set_value:
5775 complete_update_bin(c);
5776 break;
5777 case bin_reading_sasl_auth:
5778 process_bin_sasl_auth(c);
5779 break;
5780 case bin_reading_sasl_auth_data:
5781 process_bin_complete_sasl_auth(c);
5782 break;
5783 case bin_reading_packet:
5784 if (c->binary_header.request.magic == PROTOCOL_BINARY_RES) {
5785 RESPONSE_HANDLER handler;
5786 handler = response_handlers[c->binary_header.request.opcode];
5787 if (handler) {
5788 handler(c);
5789 } else {
5790 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
5791 "%d: ERROR: Unsupported response packet received: %u\n",
5792 c->sfd, (unsigned int)c->binary_header.request.opcode);
5793 conn_set_state(c, conn_closing);
5794 }
5795 } else {
5796 process_bin_packet(c);
5797 }
5798 break;
5799 default:
5800 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
5801 "Not handling substate %d\n", c->substate);
5802 abort();
5803 }
5804}
5805
5806static void reset_cmd_handler(conn *c) {
5807 c->sbytes = 0;
5808 c->cmd = -1;
5809 c->substate = bin_no_state;
5810 if (c->item != NULL) {
5811 settings.engine.v1->release(settings.engine.v0, c, c->item);
5812 c->item = NULL;
5813 }
5814 conn_shrink(c);
5815 if (c->rbytes > 0) {
5816 conn_set_state(c, conn_parse_cmd);
5817 } else {
5818 conn_set_state(c, conn_waiting);
5819 }
5820}
5821
5822/* set up a connection to write a buffer then free it, used for stats */
5823static void write_and_free(conn *c, char *buf, size_t bytes) {
5824 if (buf) {
5825 c->write_and_free = buf;
5826 c->wcurr = buf;
5827 c->wbytes = (uint32_t)bytes;
5828 conn_set_state(c, conn_write);
5829 c->write_and_go = conn_new_cmd;
5830 } else {
5831 conn_set_state(c, conn_closing);
5832 }
5833}
5834
5835void append_stat(const char *name, ADD_STAT add_stats, conn *c,
5836 const char *fmt, ...) {
5837 char val_str[STAT_VAL_LEN];
5838 int vlen;
5839 va_list ap;
5840
5841 cb_assert(name);
5842 cb_assert(add_stats);
5843 cb_assert(c);
5844 cb_assert(fmt);
5845
5846 va_start(ap, fmt);
5847 vlen = vsnprintf(val_str, sizeof(val_str) - 1, fmt, ap);
5848 va_end(ap);
5849
5850 add_stats(name, (uint16_t)strlen(name), val_str, vlen, c);
5851}
5852
5853static void aggregate_callback(void *in, void *out) {
5854 threadlocal_stats_aggregate(in, out);
5855}
5856
5857/* return server specific stats only */
5858 static void server_stats(ADD_STAT add_stats, conn *c, bool aggregate) {
5859#ifdef WIN32
5860 long pid = GetCurrentProcessId();
5861#else
5862 struct rusage usage;
5863 long pid = (long)getpid();
5864#endif
5865 struct slab_stats slab_stats;
5866 char stat_key[1024];
5867 int i;
5868 struct tap_stats ts;
5869 rel_time_t now = current_time;
5870
5871 struct thread_stats thread_stats;
5872 threadlocal_stats_clear(&thread_stats);
5873
5874 if (aggregate && settings.engine.v1->aggregate_stats != NULL) {
5875 settings.engine.v1->aggregate_stats(settings.engine.v0,
5876 (const void *)c,
5877 aggregate_callback,
5878 &thread_stats);
5879 } else {
5880 threadlocal_stats_aggregate(get_independent_stats(c),
5881 &thread_stats);
5882 }
5883
5884 slab_stats_aggregate(&thread_stats, &slab_stats);
5885
5886#ifndef WIN32
5887 getrusage(RUSAGE_SELF, &usage);
5888#endif
5889
5890 STATS_LOCK();
5891
5892 APPEND_STAT("pid", "%lu", pid)append_stat("pid", add_stats, c, "%lu", pid);;
5893 APPEND_STAT("uptime", "%u", now)append_stat("uptime", add_stats, c, "%u", now);;
5894 APPEND_STAT("time", "%ld", now + (long)process_started)append_stat("time", add_stats, c, "%ld", now + (long)process_started
);
;
5895 APPEND_STAT("version", "%s", get_server_version())append_stat("version", add_stats, c, "%s", get_server_version
());
;
5896 APPEND_STAT("memcached_version", "%s", MEMCACHED_VERSION)append_stat("memcached_version", add_stats, c, "%s", "2.0.1-macosx-198-ga92301a"
);
;
5897 APPEND_STAT("libevent", "%s", event_get_version())append_stat("libevent", add_stats, c, "%s", event_get_version
());
;
5898 APPEND_STAT("pointer_size", "%d", (int)(8 * sizeof(void *)))append_stat("pointer_size", add_stats, c, "%d", (int)(8 * sizeof
(void *)));
;
5899
5900#ifndef WIN32
5901 append_stat("rusage_user", add_stats, c, "%ld.%06ld",
5902 (long)usage.ru_utime.tv_sec,
5903 (long)usage.ru_utime.tv_usec);
5904 append_stat("rusage_system", add_stats, c, "%ld.%06ld",
5905 (long)usage.ru_stime.tv_sec,
5906 (long)usage.ru_stime.tv_usec);
5907#endif
5908
5909 APPEND_STAT("daemon_connections", "%u", stats.daemon_conns)append_stat("daemon_connections", add_stats, c, "%u", stats.daemon_conns
);
;
5910 APPEND_STAT("curr_connections", "%u", stats.curr_conns)append_stat("curr_connections", add_stats, c, "%u", stats.curr_conns
);
;
5911 for (i = 0; i < settings.num_interfaces; ++i) {
5912 sprintf(stat_key, "%s", "max_conns_on_port_")__builtin___sprintf_chk (stat_key, 0, __builtin_object_size (
stat_key, 2 > 1 ? 1 : 0), "%s", "max_conns_on_port_")
;
5913 sprintf(stat_key + strlen(stat_key), "%d", stats.listening_ports[i].port)__builtin___sprintf_chk (stat_key + strlen(stat_key), 0, __builtin_object_size
(stat_key + strlen(stat_key), 2 > 1 ? 1 : 0), "%d", stats
.listening_ports[i].port)
;
5914 APPEND_STAT(stat_key, "%d", stats.listening_ports[i].maxconns)append_stat(stat_key, add_stats, c, "%d", stats.listening_ports
[i].maxconns);
;
5915 sprintf(stat_key, "%s", "curr_conns_on_port_")__builtin___sprintf_chk (stat_key, 0, __builtin_object_size (
stat_key, 2 > 1 ? 1 : 0), "%s", "curr_conns_on_port_")
;
5916 sprintf(stat_key + strlen(stat_key), "%d", stats.listening_ports[i].port)__builtin___sprintf_chk (stat_key + strlen(stat_key), 0, __builtin_object_size
(stat_key + strlen(stat_key), 2 > 1 ? 1 : 0), "%d", stats
.listening_ports[i].port)
;
5917 APPEND_STAT(stat_key, "%d", stats.listening_ports[i].curr_conns)append_stat(stat_key, add_stats, c, "%d", stats.listening_ports
[i].curr_conns);
;
5918 }
5919 APPEND_STAT("total_connections", "%u", stats.total_conns)append_stat("total_connections", add_stats, c, "%u", stats.total_conns
);
;
5920 APPEND_STAT("connection_structures", "%u", stats.conn_structs)append_stat("connection_structures", add_stats, c, "%u", stats
.conn_structs);
;
5921 APPEND_STAT("cmd_get", "%"PRIu64, thread_stats.cmd_get)append_stat("cmd_get", add_stats, c, "%""ll" "u", thread_stats
.cmd_get);
;
5922 APPEND_STAT("cmd_set", "%"PRIu64, slab_stats.cmd_set)append_stat("cmd_set", add_stats, c, "%""ll" "u", slab_stats.
cmd_set);
;
5923 APPEND_STAT("cmd_flush", "%"PRIu64, thread_stats.cmd_flush)append_stat("cmd_flush", add_stats, c, "%""ll" "u", thread_stats
.cmd_flush);
;
5924 APPEND_STAT("auth_cmds", "%"PRIu64, thread_stats.auth_cmds)append_stat("auth_cmds", add_stats, c, "%""ll" "u", thread_stats
.auth_cmds);
;
5925 APPEND_STAT("auth_errors", "%"PRIu64, thread_stats.auth_errors)append_stat("auth_errors", add_stats, c, "%""ll" "u", thread_stats
.auth_errors);
;
5926 APPEND_STAT("get_hits", "%"PRIu64, slab_stats.get_hits)append_stat("get_hits", add_stats, c, "%""ll" "u", slab_stats
.get_hits);
;
5927 APPEND_STAT("get_misses", "%"PRIu64, thread_stats.get_misses)append_stat("get_misses", add_stats, c, "%""ll" "u", thread_stats
.get_misses);
;
5928 APPEND_STAT("delete_misses", "%"PRIu64, thread_stats.delete_misses)append_stat("delete_misses", add_stats, c, "%""ll" "u", thread_stats
.delete_misses);
;
5929 APPEND_STAT("delete_hits", "%"PRIu64, slab_stats.delete_hits)append_stat("delete_hits", add_stats, c, "%""ll" "u", slab_stats
.delete_hits);
;
5930 APPEND_STAT("incr_misses", "%"PRIu64, thread_stats.incr_misses)append_stat("incr_misses", add_stats, c, "%""ll" "u", thread_stats
.incr_misses);
;
5931 APPEND_STAT("incr_hits", "%"PRIu64, thread_stats.incr_hits)append_stat("incr_hits", add_stats, c, "%""ll" "u", thread_stats
.incr_hits);
;
5932 APPEND_STAT("decr_misses", "%"PRIu64, thread_stats.decr_misses)append_stat("decr_misses", add_stats, c, "%""ll" "u", thread_stats
.decr_misses);
;
5933 APPEND_STAT("decr_hits", "%"PRIu64, thread_stats.decr_hits)append_stat("decr_hits", add_stats, c, "%""ll" "u", thread_stats
.decr_hits);
;
5934 APPEND_STAT("cas_misses", "%"PRIu64, thread_stats.cas_misses)append_stat("cas_misses", add_stats, c, "%""ll" "u", thread_stats
.cas_misses);
;
5935 APPEND_STAT("cas_hits", "%"PRIu64, slab_stats.cas_hits)append_stat("cas_hits", add_stats, c, "%""ll" "u", slab_stats
.cas_hits);
;
5936 APPEND_STAT("cas_badval", "%"PRIu64, slab_stats.cas_badval)append_stat("cas_badval", add_stats, c, "%""ll" "u", slab_stats
.cas_badval);
;
5937 APPEND_STAT("bytes_read", "%"PRIu64, thread_stats.bytes_read)append_stat("bytes_read", add_stats, c, "%""ll" "u", thread_stats
.bytes_read);
;
5938 APPEND_STAT("bytes_written", "%"PRIu64, thread_stats.bytes_written)append_stat("bytes_written", add_stats, c, "%""ll" "u", thread_stats
.bytes_written);
;
5939 APPEND_STAT("accepting_conns", "%u", is_listen_disabled() ? 0 : 1)append_stat("accepting_conns", add_stats, c, "%u", is_listen_disabled
() ? 0 : 1);
;
5940 APPEND_STAT("listen_disabled_num", "%"PRIu64, get_listen_disabled_num())append_stat("listen_disabled_num", add_stats, c, "%""ll" "u",
get_listen_disabled_num());
;
5941 APPEND_STAT("rejected_conns", "%" PRIu64, (uint64_t)stats.rejected_conns)append_stat("rejected_conns", add_stats, c, "%" "ll" "u", (uint64_t
)stats.rejected_conns);
;
5942 APPEND_STAT("threads", "%d", settings.num_threads)append_stat("threads", add_stats, c, "%d", settings.num_threads
);
;
5943 APPEND_STAT("conn_yields", "%" PRIu64, (uint64_t)thread_stats.conn_yields)append_stat("conn_yields", add_stats, c, "%" "ll" "u", (uint64_t
)thread_stats.conn_yields);
;
5944 STATS_UNLOCK();
5945
5946 /*
5947 * Add tap stats (only if non-zero)
5948 */
5949 cb_mutex_enter(&tap_stats.mutex);
5950 ts = tap_stats;
5951 cb_mutex_exit(&tap_stats.mutex);
5952
5953 if (ts.sent.connect) {
5954 APPEND_STAT("tap_connect_sent", "%"PRIu64, ts.sent.connect)append_stat("tap_connect_sent", add_stats, c, "%""ll" "u", ts
.sent.connect);
;
5955 }
5956 if (ts.sent.mutation) {
5957 APPEND_STAT("tap_mutation_sent", "%"PRIu64, ts.sent.mutation)append_stat("tap_mutation_sent", add_stats, c, "%""ll" "u", ts
.sent.mutation);
;
5958 }
5959 if (ts.sent.checkpoint_start) {
5960 APPEND_STAT("tap_checkpoint_start_sent", "%"PRIu64, ts.sent.checkpoint_start)append_stat("tap_checkpoint_start_sent", add_stats, c, "%""ll"
"u", ts.sent.checkpoint_start);
;
5961 }
5962 if (ts.sent.checkpoint_end) {
5963 APPEND_STAT("tap_checkpoint_end_sent", "%"PRIu64, ts.sent.checkpoint_end)append_stat("tap_checkpoint_end_sent", add_stats, c, "%""ll" "u"
, ts.sent.checkpoint_end);
;
5964 }
5965 if (ts.sent.delete) {
5966 APPEND_STAT("tap_delete_sent", "%"PRIu64, ts.sent.delete)append_stat("tap_delete_sent", add_stats, c, "%""ll" "u", ts.
sent.delete);
;
5967 }
5968 if (ts.sent.flush) {
5969 APPEND_STAT("tap_flush_sent", "%"PRIu64, ts.sent.flush)append_stat("tap_flush_sent", add_stats, c, "%""ll" "u", ts.sent
.flush);
;
5970 }
5971 if (ts.sent.opaque) {
5972 APPEND_STAT("tap_opaque_sent", "%"PRIu64, ts.sent.opaque)append_stat("tap_opaque_sent", add_stats, c, "%""ll" "u", ts.
sent.opaque);
;
5973 }
5974 if (ts.sent.vbucket_set) {
5975 APPEND_STAT("tap_vbucket_set_sent", "%"PRIu64,append_stat("tap_vbucket_set_sent", add_stats, c, "%""ll" "u"
, ts.sent.vbucket_set);
5976 ts.sent.vbucket_set)append_stat("tap_vbucket_set_sent", add_stats, c, "%""ll" "u"
, ts.sent.vbucket_set);
;
5977 }
5978 if (ts.received.connect) {
5979 APPEND_STAT("tap_connect_received", "%"PRIu64, ts.received.connect)append_stat("tap_connect_received", add_stats, c, "%""ll" "u"
, ts.received.connect);
;
5980 }
5981 if (ts.received.mutation) {
5982 APPEND_STAT("tap_mutation_received", "%"PRIu64, ts.received.mutation)append_stat("tap_mutation_received", add_stats, c, "%""ll" "u"
, ts.received.mutation);
;
5983 }
5984 if (ts.received.checkpoint_start) {
5985 APPEND_STAT("tap_checkpoint_start_received", "%"PRIu64, ts.received.checkpoint_start)append_stat("tap_checkpoint_start_received", add_stats, c, "%"
"ll" "u", ts.received.checkpoint_start);
;
5986 }
5987 if (ts.received.checkpoint_end) {
5988 APPEND_STAT("tap_checkpoint_end_received", "%"PRIu64, ts.received.checkpoint_end)append_stat("tap_checkpoint_end_received", add_stats, c, "%""ll"
"u", ts.received.checkpoint_end);
;
5989 }
5990 if (ts.received.delete) {
5991 APPEND_STAT("tap_delete_received", "%"PRIu64, ts.received.delete)append_stat("tap_delete_received", add_stats, c, "%""ll" "u",
ts.received.delete);
;
5992 }
5993 if (ts.received.flush) {
5994 APPEND_STAT("tap_flush_received", "%"PRIu64, ts.received.flush)append_stat("tap_flush_received", add_stats, c, "%""ll" "u", ts
.received.flush);
;
5995 }
5996 if (ts.received.opaque) {
5997 APPEND_STAT("tap_opaque_received", "%"PRIu64, ts.received.opaque)append_stat("tap_opaque_received", add_stats, c, "%""ll" "u",
ts.received.opaque);
;
5998 }
5999 if (ts.received.vbucket_set) {
6000 APPEND_STAT("tap_vbucket_set_received", "%"PRIu64,append_stat("tap_vbucket_set_received", add_stats, c, "%""ll"
"u", ts.received.vbucket_set);
6001 ts.received.vbucket_set)append_stat("tap_vbucket_set_received", add_stats, c, "%""ll"
"u", ts.received.vbucket_set);
;
6002 }
6003}
6004
6005static void process_stat_settings(ADD_STAT add_stats, void *c) {
6006 int ii;
6007 cb_assert(add_stats);
6008 APPEND_STAT("maxconns", "%d", settings.maxconns);
6009
6010 for (ii = 0; ii < settings.num_interfaces; ++ii) {
6011 char interface[1024];
6012 int offset;
6013 if (settings.interfaces[ii].host == NULL) {
6014 offset = sprintf(interface, "interface-*:%u", settings.interfaces[ii].port);
6015 } else {
6016 offset = snprintf(interface, sizeof(interface), "interface-%s:%u",
6017 settings.interfaces[ii].host,
6018 settings.interfaces[ii].port);
6019 }
6020
6021 snprintf(interface + offset, sizeof(interface) - offset, "-maxconn");
6022 APPEND_STAT(interface, "%u", settings.interfaces[ii].maxconn);
6023 snprintf(interface + offset, sizeof(interface) - offset, "-backlog");
6024 APPEND_STAT(interface, "%u", settings.interfaces[ii].backlog);
6025 snprintf(interface + offset, sizeof(interface) - offset, "-ipv4");
6026 APPEND_STAT(interface, "%s", settings.interfaces[ii].ipv4 ?
6027 "true" : "false");
6028 snprintf(interface + offset, sizeof(interface) - offset, "-ipv6");
6029 APPEND_STAT(interface, "%s", settings.interfaces[ii].ipv6 ?
6030 "true" : "false");
6031
6032 snprintf(interface + offset, sizeof(interface) - offset,
6033 "-tcp_nodelay");
6034 APPEND_STAT(interface, "%s", settings.interfaces[ii].tcp_nodelay ?
6035 "true" : "false");
6036
6037 if (settings.interfaces[ii].ssl.key) {
6038 snprintf(interface + offset, sizeof(interface) - offset,
6039 "-ssl-pkey");
6040 APPEND_STAT(interface, "%s", settings.interfaces[ii].ssl.key);
6041 snprintf(interface + offset, sizeof(interface) - offset,
6042 "-ssl-cert");
6043 APPEND_STAT(interface, "%s", settings.interfaces[ii].ssl.cert);
6044 } else {
6045 snprintf(interface + offset, sizeof(interface) - offset,
6046 "-ssl");
6047 APPEND_STAT(interface, "%s", "false");
6048 }
6049 }
6050
6051 APPEND_STAT("verbosity", "%d", settings.verbose)append_stat("verbosity", add_stats, c, "%d", settings.verbose
);
;
6052 APPEND_STAT("num_threads", "%d", settings.num_threads)append_stat("num_threads", add_stats, c, "%d", settings.num_threads
);
;
6053 APPEND_STAT("stat_key_prefix", "%c", settings.prefix_delimiter)append_stat("stat_key_prefix", add_stats, c, "%c", settings.prefix_delimiter
);
;
6054 APPEND_STAT("detail_enabled", "%s",append_stat("detail_enabled", add_stats, c, "%s", settings.detail_enabled
? "yes" : "no");
6055 settings.detail_enabled ? "yes" : "no")append_stat("detail_enabled", add_stats, c, "%s", settings.detail_enabled
? "yes" : "no");
;
6056 APPEND_STAT("allow_detailed", "%s",append_stat("allow_detailed", add_stats, c, "%s", settings.allow_detailed
? "yes" : "no");
6057 settings.allow_detailed ? "yes" : "no")append_stat("allow_detailed", add_stats, c, "%s", settings.allow_detailed
? "yes" : "no");
;
6058 APPEND_STAT("reqs_per_event", "%d", settings.reqs_per_event)append_stat("reqs_per_event", add_stats, c, "%d", settings.reqs_per_event
);
;
6059 APPEND_STAT("reqs_per_tap_event", "%d", settings.reqs_per_tap_event)append_stat("reqs_per_tap_event", add_stats, c, "%d", settings
.reqs_per_tap_event);
;
6060 APPEND_STAT("auth_enabled_sasl", "%s", "yes")append_stat("auth_enabled_sasl", add_stats, c, "%s", "yes");;
6061
6062 APPEND_STAT("auth_sasl_engine", "%s", "cbsasl")append_stat("auth_sasl_engine", add_stats, c, "%s", "cbsasl")
;
;
6063 APPEND_STAT("auth_required_sasl", "%s", settings.require_sasl ? "yes" : "no")append_stat("auth_required_sasl", add_stats, c, "%s", settings
.require_sasl ? "yes" : "no");
;
6064 {
6065 EXTENSION_DAEMON_DESCRIPTOR *ptr;
6066 for (ptr = settings.extensions.daemons; ptr != NULL; ptr = ptr->next) {
6067 APPEND_STAT("extension", "%s", ptr->get_name());
6068 }
6069 }
6070
6071 APPEND_STAT("logger", "%s", settings.extensions.logger->get_name())append_stat("logger", add_stats, c, "%s", settings.extensions
.logger->get_name());
;
6072 {
6073 EXTENSION_BINARY_PROTOCOL_DESCRIPTOR *ptr;
6074 for (ptr = settings.extensions.binary; ptr != NULL; ptr = ptr->next) {
6075 APPEND_STAT("binary_extension", "%s", ptr->get_name());
6076 }
6077 }
6078
6079 if (settings.config) {
6080 add_stats("config", (uint16_t)strlen("config"),
6081 settings.config, strlen(settings.config), c);
6082 }
6083}
6084
6085/*
6086 * if we have a complete line in the buffer, process it.
6087 */
6088static int try_read_command(conn *c) {
6089 cb_assert(c != NULL);
6090 cb_assert(c->rcurr <= (c->rbuf + c->rsize));
6091 cb_assert(c->rbytes > 0);
6092
6093 /* Do we have the complete packet header? */
6094 if (c->rbytes < sizeof(c->binary_header)) {
6095 /* need more data! */
6096 return 0;
6097 } else {
6098#ifdef NEED_ALIGN
6099 if (((long)(c->rcurr)) % 8 != 0) {
6100 /* must realign input buffer */
6101 memmove(c->rbuf, c->rcurr, c->rbytes);
6102 c->rcurr = c->rbuf;
6103 if (settings.verbose > 1) {
6104 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
6105 "%d: Realign input buffer\n", c->sfd);
6106 }
6107 }
6108#endif
6109 protocol_binary_request_header* req;
6110 req = (protocol_binary_request_header*)c->rcurr;
6111
6112 if (settings.verbose > 1) {
6113 /* Dump the packet before we convert it to host order */
6114 char buffer[1024];
6115 ssize_t nw;
6116 nw = bytes_to_output_string(buffer, sizeof(buffer), c->sfd,
6117 true, "Read binary protocol data:",
6118 (const char*)req->bytes,
6119 sizeof(req->bytes));
6120 if (nw != -1) {
6121 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
6122 "%s", buffer);
6123 }
6124 }
6125
6126 c->binary_header = *req;
6127 c->binary_header.request.keylen = ntohs(req->request.keylen);
6128 c->binary_header.request.bodylen = ntohl(req->request.bodylen);
6129 c->binary_header.request.vbucket = ntohs(req->request.vbucket);
6130 c->binary_header.request.cas = ntohll(req->request.cas);
6131
6132 if (c->binary_header.request.magic != PROTOCOL_BINARY_REQ &&
6133 !(c->binary_header.request.magic == PROTOCOL_BINARY_RES &&
6134 response_handlers[c->binary_header.request.opcode])) {
6135 if (settings.verbose) {
6136 if (c->binary_header.request.magic != PROTOCOL_BINARY_RES) {
6137 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
6138 "%d: Invalid magic: %x\n",
6139 c->sfd,
6140 c->binary_header.request.magic);
6141 } else {
6142 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
6143 "%d: ERROR: Unsupported response packet received: %u\n",
6144 c->sfd, (unsigned int)c->binary_header.request.opcode);
6145
6146 }
6147 }
6148 conn_set_state(c, conn_closing);
6149 return -1;
6150 }
6151
6152 c->msgcurr = 0;
6153 c->msgused = 0;
6154 c->iovused = 0;
6155 if (add_msghdr(c) != 0) {
6156 conn_set_state(c, conn_closing);
6157 return -1;
6158 }
6159
6160 c->cmd = c->binary_header.request.opcode;
6161 c->keylen = c->binary_header.request.keylen;
6162 c->opaque = c->binary_header.request.opaque;
6163 /* clear the returned cas value */
6164 c->cas = 0;
6165
6166 dispatch_bin_command(c);
6167
6168 c->rbytes -= sizeof(c->binary_header);
6169 c->rcurr += sizeof(c->binary_header);
6170 }
6171
6172 return 1;
6173}
6174
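 /* SSL connections are plumbed through memory BIOs: drain_bio_send_pipe()
  * moves bytes OpenSSL has queued in the network BIO out to the socket, and
  * drain_bio_recv_pipe() feeds bytes read from the socket back into it. */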
6175static void drain_bio_send_pipe(conn *c) {
6176 int n;
6177 bool stop = false;
6178
6179 do {
6180 if (c->ssl.out.current < c->ssl.out.total) {
6181#ifdef WIN32
6182 DWORD error;
6183#else
6184 int error;
6185#endif
6186 n = send(c->sfd, c->ssl.out.buffer + c->ssl.out.current,
6187 c->ssl.out.total - c->ssl.out.current, 0);
6188 if (n > 0) {
6189 c->ssl.out.current += n;
6190 if (c->ssl.out.current == c->ssl.out.total) {
6191 c->ssl.out.current = c->ssl.out.total = 0;
6192 }
6193 } else {
6194 if (n == -1) {
6195#ifdef WIN32
6196 error = WSAGetLastError();
6197#else
6198 error = errno;
6199#endif
6200 if (!is_blocking(error)) {
6201 c->ssl.error = true;
6202 }
6203 }
6204 return ;
6205 }
6206 }
6207
6208 if (c->ssl.out.total == 0) {
6209 n = BIO_read(c->ssl.network, c->ssl.out.buffer, c->ssl.out.buffsz);
6210 if (n > 0) {
6211 c->ssl.out.total = n;
6212 } else {
6213 stop = true;
6214 }
6215 }
6216 } while (!stop);
6217}
6218
6219static void drain_bio_recv_pipe(conn *c) {
6220 int n;
6221 bool stop = false;
6222
6223 stop = false;
6224 do {
6225 if (c->ssl.in.current < c->ssl.in.total) {
6226 n = BIO_write(c->ssl.network, c->ssl.in.buffer + c->ssl.in.current,
6227 c->ssl.in.total - c->ssl.in.current);
6228 if (n > 0) {
6229 c->ssl.in.current += n;
6230 if (c->ssl.in.current == c->ssl.in.total) {
6231 c->ssl.in.current = c->ssl.in.total = 0;
6232 }
6233 } else {
6234 /* Our input BIO is full, no need to grab more data from
6235 * the network at this time..
6236 */
6237 return ;
6238 }
6239 }
6240
6241 if (c->ssl.in.total < c->ssl.in.buffsz) {
6242#ifdef WIN32
6243 DWORD error;
6244#else
6245 int error;
6246#endif
6247 n = recv(c->sfd, c->ssl.in.buffer + c->ssl.in.total,
6248 c->ssl.in.buffsz - c->ssl.in.total, 0);
6249 if (n > 0) {
6250 c->ssl.in.total += n;
6251 } else {
6252 stop = true;
6253 if (n == 0) {
6254 c->ssl.error = true; /* read end shutdown */
6255 } else {
6256#ifdef WIN32
6257 error = WSAGetLastError();
6258#else
6259 error = errno;
6260#endif
6261 if (!is_blocking(error)) {
6262 c->ssl.error = true;
6263 }
6264 }
6265 }
6266 }
6267 } while (!stop);
6268}
6269
6270static int do_ssl_pre_connection(conn *c) {
6271 int r = SSL_accept(c->ssl.client);
6272 if (r == 1) {
6273 drain_bio_send_pipe(c);
6274 c->ssl.connected = true;
6275 } else {
6276 if (SSL_get_error(c->ssl.client, r) == SSL_ERROR_WANT_READ) {
6277 drain_bio_send_pipe(c);
6278 set_ewouldblock();
6279 return -1;
6280 } else {
6281 char *errmsg = malloc(8*1024);
6282 if (errmsg) {
6283 int offset = sprintf(errmsg,
6284 "SSL_accept() returned %d with error %d\n",
6285 r, SSL_get_error(c->ssl.client, r));
6286
6287 ERR_error_string_n(ERR_get_error(), errmsg + offset,
6288 8192 - offset);
6289
6290 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
6291 "%d: ERROR: %s",
6292 c->sfd, errmsg);
6293 free(errmsg);
6294 }
6295 set_econnreset();
6296 return -1;
6297 }
6298 }
6299
6300 return 0;
6301}
6302
6303static int do_ssl_read(conn *c, char *dest, size_t nbytes) {
6304 int ret = 0;
6305
6306 while (ret < nbytes) {
6307 int n;
6308 drain_bio_recv_pipe(c);
6309 if (c->ssl.error) {
6310 set_econnreset();
6311 return -1;
6312 }
6313 n = SSL_read(c->ssl.client, dest + ret, nbytes - ret);
6314 if (n > 0) {
6315 ret += n;
6316 } else {
6317 if (ret > 0) {
6318 /* I've gotten some data, let the user have that */
6319 return ret;
6320 }
6321
6322 if (n < 0) {
6323 int error = SSL_get_error(c->ssl.client, n);
6324 switch (error) {
6325 case SSL_ERROR_WANT_READ:
6326 /*
6327 * Drain the buffers and retry if we've got data in
6328 * our input buffers
6329 */
6330 if (c->ssl.in.current >= c->ssl.in.total) {
6331 set_ewouldblock();
6332 return -1;
6333 }
6334 break;
6335
6336 default:
6337 /*
6338 * @todo I don't know how to gracefully recover from this
6339 * let's just shut down the connection
6340 */
6341 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
6342 "%d: ERROR: SSL_read returned -1 with error %d",
6343 c->sfd, error);
6344 set_econnreset();
6345 return -1;
6346 }
6347 }
6348 }
6349 }
6350
6351 return ret;
6352}
6353
6354static int do_data_recv(conn *c, void *dest, size_t nbytes) {
6355 int res;
6356 if (c->ssl.enabled) {
6357 drain_bio_recv_pipe(c);
6358
6359 if (!c->ssl.connected) {
6360 res = do_ssl_pre_connection(c);
6361 if (res == -1) {
6362 return -1;
6363 }
6364 }
6365
6366 /* The SSL negotiation might be complete at this time */
6367 if (c->ssl.connected) {
6368 res = do_ssl_read(c, dest, nbytes);
6369 }
6370 } else {
6371 res = recv(c->sfd, dest, nbytes, 0);
6372 }
6373
6374 return res;
6375}
6376
6377static int do_ssl_write(conn *c, char *dest, size_t nbytes) {
6378 int ret = 0;
6379
6380 int chunksize = settings.bio_drain_buffer_sz;
6381
6382 while (ret < nbytes) {
6383 int n;
6384 int chunk;
6385
6386 drain_bio_send_pipe(c);
6387 if (c->ssl.error) {
6388 set_econnreset();
6389 return -1;
6390 }
6391
6392 chunk = nbytes - ret;
6393 if (chunk > chunksize) {
6394 chunk = chunksize;
6395 }
6396
6397 n = SSL_write(c->ssl.client, dest + ret, chunk);
6398 if (n > 0) {
6399 ret += n;
6400 } else {
6401 if (ret > 0) {
6402 /* We've sent some data.. let the caller have them */
6403 return ret;
6404 }
6405
6406 if (n < 0) {
6407 int error = SSL_get_error(c->ssl.client, n);
6408 switch (error) {
6409 case SSL_ERROR_WANT_WRITE:
6410 set_ewouldblock();
6411 return -1;
6412
6413 default:
6414 /*
6415 * @todo I don't know how to gracefully recover from this
6416 * let's just shut down the connection
6417 */
6418 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
6419 "%d: ERROR: SSL_write returned -1 with error %d",
6420 c->sfd, error);
6421 set_econnreset();
6422 return -1;
6423 }
6424 }
6425 }
6426 }
6427
6428 return ret;
6429}
6430
6431
6432static int do_data_sendmsg(conn *c, struct msghdr *m) {
6433 int res;
6434 if (c->ssl.enabled) {
6435 int ii;
6436 res = 0;
6437 for (ii = 0; ii < m->msg_iovlen; ++ii) {
6438 int n = do_ssl_write(c,
6439 m->msg_iov[ii].iov_base,
6440 m->msg_iov[ii].iov_len);
6441 if (n > 0) {
6442 res += n;
6443 } else {
6444 return res > 0 ? res : -1;
6445 }
6446 }
6447
6448 /* @todo figure out how to drain the rest of the data if we
6449 * failed to send all of it...
6450 */
6451 drain_bio_send_pipe(c);
6452 return res;
6453 } else {
6454 res = sendmsg(c->sfd, m, 0);
6455 }
6456
6457 return res;
6458}
6459
6460/*
6461 * read from network as much as we can, handle buffer overflow and connection
6462 * close.
6463 * before reading, move the remaining incomplete fragment of a command
6464 * (if any) to the beginning of the buffer.
6465 *
6466 * To protect us from someone flooding a connection with bogus data causing
6467 * the connection to eat up all available memory, break out and start looking
6468 * at the data I've got after a number of reallocs...
6469 *
6470 * @return enum try_read_result
6471 */
6472static enum try_read_result try_read_network(conn *c) {
6473 enum try_read_result gotdata = READ_NO_DATA_RECEIVED;
6474 int res;
6475 int num_allocs = 0;
6476 cb_assert(c != NULL);
6477
6478 if (c->rcurr != c->rbuf) {
6479 if (c->rbytes != 0) /* otherwise there's nothing to copy */
6480 memmove(c->rbuf, c->rcurr, c->rbytes);
6481 c->rcurr = c->rbuf;
6482 }
6483
6484 while (1) {
6485 int avail;
6486#ifdef WIN32
6487 DWORD error;
6488#else
6489 int error;
6490#endif
6491
6492 if (c->rbytes >= c->rsize) {
6493 char *new_rbuf;
6494
6495 if (num_allocs == 4) {
6496 return gotdata;
6497 }
6498 ++num_allocs;
6499 new_rbuf = realloc(c->rbuf, c->rsize * 2);
6500 if (!new_rbuf) {
6501 if (settings.verbose > 0) {
6502 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
6503 "Couldn't realloc input buffer\n");
6504 }
6505 c->rbytes = 0; /* ignore what we read */
6506 conn_set_state(c, conn_closing);
6507 return READ_MEMORY_ERROR;
6508 }
6509 c->rcurr = c->rbuf = new_rbuf;
6510 c->rsize *= 2;
6511 }
6512
6513 avail = c->rsize - c->rbytes;
6514 res = do_data_recv(c, c->rbuf + c->rbytes, avail);
6515 if (res > 0) {
6516 STATS_ADD(c, bytes_read, res);
6517 gotdata = READ_DATA_RECEIVED;
6518 c->rbytes += res;
6519 if (res == avail) {
6520 continue;
6521 } else {
6522 break;
6523 }
6524 }
6525 if (res == 0) {
6526 return READ_ERROR;
6527 }
6528 if (res == -1) {
6529#ifdef WIN32
6530 error = WSAGetLastError();
6531#else
6532 error = errno;
6533#endif
6534
6535 if (is_blocking(error)) {
6536 break;
6537 }
6538 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
6539 "%d Closing connection due to read error: %s",
6540 c->sfd,
6541 strerror(errno));
6542 return READ_ERROR;
6543 }
6544 }
6545 return gotdata;
6546}
6547
6548 bool register_event(conn *c, struct timeval *timeout) {
6549 cb_assert(!c->registered_in_libevent);
6550 cb_assert(c->sfd != INVALID_SOCKET);
6551
6552 if (event_add(&c->event, timeout) == -1) {
6553 log_system_error(EXTENSION_LOG_WARNING,
6554 NULL,
6555 "Failed to add connection to libevent: %s");
6556 return false;
6557 }
6558
6559 c->registered_in_libevent = true;
6560
6561 return true;
6562}
6563
6564 bool unregister_event(conn *c) {
6565 cb_assert(c->registered_in_libevent);
6566 cb_assert(c->sfd != INVALID_SOCKET);
6567
6568 if (event_del(&c->event) == -1) {
6569 return false;
6570 }
6571
6572 c->registered_in_libevent = false;
6573
6574 return true;
6575}
6576
6577 bool update_event(conn *c, const int new_flags) {
6578 struct event_base *base;
6579
6580 cb_assert(c != NULL);
6581 base = c->event.ev_base;
6582
6583 if (c->ssl.enabled && c->ssl.connected && (new_flags & EV_READ)) {
6584 /*
6585 * If we want more data and we have SSL, that data might be inside
6586 * SSL's internal buffers rather than inside the socket buffer. In
6587 * that case signal an EV_READ event without actually polling the
6588 * socket.
6589 */
6590 char dummy;
6591 /* SSL_pending() will not work here despite the name */
6592 int rv = SSL_peek(c->ssl.client, &dummy, 1);
6593 if (rv > 0) {
6594 /* signal a call to the handler */
6595 event_active(&c->event, EV_READ, 0);
6596 return true;
6597 }
6598 }
6599
6600 if (c->ev_flags == new_flags) {
6601 return true;
6602 }
6603
6604 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL,
6605 "Updated event for %d to read=%s, write=%s\n",
6606 c->sfd, (new_flags & EV_READ ? "yes" : "no"),
6607 (new_flags & EV_WRITE ? "yes" : "no"));
6608
6609 if (!unregister_event(c)) {
6610 return false;
6611 }
6612
6613 event_set(&c->event, c->sfd, new_flags, event_handler, (void *)c);
6614 event_base_set(base, &c->event);
6615 c->ev_flags = new_flags;
6616
6617 return register_event(c, NULL);
6618}
6619
6620/*
6621 * Transmit the next chunk of data from our list of msgbuf structures.
6622 *
6623 * Returns:
6624 * TRANSMIT_COMPLETE All done writing.
6625 * TRANSMIT_INCOMPLETE More data remaining to write.
6626 * TRANSMIT_SOFT_ERROR Can't write any more right now.
6627 * TRANSMIT_HARD_ERROR Can't write (c->state is set to conn_closing)
6628 */
6629static enum transmit_result transmit(conn *c) {
6630 cb_assert(c != NULL)(__builtin_expect(!(c != ((void*)0)), 0) ? __assert_rtn(__func__
, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 6630, "c != ((void*)0)") : (void)0)
;
6631
6632 while (c->msgcurr < c->msgused &&
6633 c->msglist[c->msgcurr].msg_iovlen == 0) {
6634 /* Finished writing the current msg; advance to the next. */
6635 c->msgcurr++;
6636 }
6637
6638 if (c->msgcurr < c->msgused) {
6639#ifdef WIN32
6640 DWORD error;
6641#else
6642 int error;
6643#endif
6644 ssize_t res;
6645 struct msghdr *m = &c->msglist[c->msgcurr];
6646
6647 res = do_data_sendmsg(c, m);
6648#ifdef WIN32
6649 error = WSAGetLastError();
6650#else
6651 error = errno(*__error());
6652#endif
6653 if (res > 0) {
6654 STATS_ADD(c, bytes_written, res){ struct thread_stats *thread_stats = get_thread_stats(c); cb_mutex_enter
(&thread_stats->mutex); thread_stats->bytes_written
+= res; cb_mutex_exit(&thread_stats->mutex); }
;
6655
6656 /* We've written some of the data. Remove the completed
6657 iovec entries from the list of pending writes. */
6658 while (m->msg_iovlen > 0 && res >= m->msg_iov->iov_len) {
6659 res -= (ssize_t)m->msg_iov->iov_len;
6660 m->msg_iovlen--;
6661 m->msg_iov++;
6662 }
6663
6664 /* Might have written just part of the last iovec entry;
6665 adjust it so the next write will do the rest. */
6666 if (res > 0) {
6667 m->msg_iov->iov_base = (void*)((unsigned char*)m->msg_iov->iov_base + res);
6668 m->msg_iov->iov_len -= res;
6669 }
6670 return TRANSMIT_INCOMPLETE;
6671 }
6672
6673 if (res == -1 && is_blocking(error)) {
6674 if (!update_event(c, EV_WRITE0x04 | EV_PERSIST0x10)) {
6675 if (settings.verbose > 0) {
6676 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
6677 "Couldn't update event\n");
6678 }
6679 conn_set_state(c, conn_closing);
6680 return TRANSMIT_HARD_ERROR;
6681 }
6682 return TRANSMIT_SOFT_ERROR;
6683 }
6684 /* if res == 0 or res == -1 and error is not EAGAIN or EWOULDBLOCK,
6685 we have a real error, on which we close the connection */
6686 if (settings.verbose > 0) {
6687 if (res == -1) {
6688 log_socket_error(EXTENSION_LOG_WARNING, c,
6689 "Failed to write, and not due to blocking: %s");
6690 } else {
6691 int ii;
6692 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
6693 "%d - sendmsg returned 0\n",
6694 c->sfd);
6695 for (ii = 0; ii < m->msg_iovlen; ++ii) {
6696 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
6697 "\t%d - %zu\n",
6698 c->sfd, m->msg_iov[ii].iov_len);
6699 }
6700
6701 }
6702 }
6703
6704 conn_set_state(c, conn_closing);
6705 return TRANSMIT_HARD_ERROR;
6706 } else {
6707 if (c->ssl.enabled) {
6708 drain_bio_send_pipe(c);
6709 if (c->ssl.out.total) {
6710 if (!update_event(c, EV_WRITE0x04 | EV_PERSIST0x10)) {
6711 if (settings.verbose > 0) {
6712 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
6713 "Couldn't update event");
6714 }
6715 conn_set_state(c, conn_closing);
6716 return TRANSMIT_HARD_ERROR;
6717 }
6718 return TRANSMIT_SOFT_ERROR;
6719 }
6720 }
6721
6722 return TRANSMIT_COMPLETE;
6723 }
6724}
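
For illustration, here is a standalone version of the partial-write bookkeeping transmit() performs after a short sendmsg(): fully written iovec entries are dropped, and the first remaining entry is advanced by the leftover byte count. The helper name is hypothetical.

#include <stddef.h>
#include <sys/uio.h>

static void advance_iov(struct iovec **iov, size_t *iovcnt, size_t sent) {
    /* drop iovec entries that were written in full */
    while (*iovcnt > 0 && sent >= (*iov)->iov_len) {
        sent -= (*iov)->iov_len;
        (*iov)++;
        (*iovcnt)--;
    }
    /* a partially written entry keeps its tail for the next write */
    if (*iovcnt > 0 && sent > 0) {
        (*iov)->iov_base = (char *)(*iov)->iov_base + sent;
        (*iov)->iov_len -= sent;
    }
}
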
6725
6726bool_Bool conn_listening(conn *c)
6727{
6728 SOCKETint sfd;
6729 struct sockaddr_storage addr;
6730 socklen_t addrlen = sizeof(addr);
6731 int curr_conns;
6732 int port_conns;
6733 struct listening_port *port_instance;
6734
6735 if ((sfd = accept(c->sfd, (struct sockaddr *)&addr, &addrlen)) == -1) {
6736#ifdef WIN32
6737 DWORD error = WSAGetLastError();
6738#else
6739 int error = errno(*__error());
6740#endif
6741
6742 if (is_emfile(error)) {
6743 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
6744 "Too many open files\n");
6745 disable_listen();
6746 } else if (!is_blocking(error)) {
6747 log_socket_error(EXTENSION_LOG_WARNING, c,
6748 "Failed to accept new client: %s");
6749 }
6750
6751 return false0;
6752 }
6753
6754 STATS_LOCK();
6755 curr_conns = ++stats.curr_conns;
6756 port_instance = get_listening_port_instance(c->parent_port);
6757 cb_assert(port_instance)(__builtin_expect(!(port_instance), 0) ? __assert_rtn(__func__
, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 6757, "port_instance") : (void)0)
;
6758 port_conns = ++port_instance->curr_conns;
6759 STATS_UNLOCK();
6760
6761 if (curr_conns >= settings.maxconns || port_conns >= port_instance->maxconns) {
6762 STATS_LOCK();
6763 ++stats.rejected_conns;
6764 --port_instance->curr_conns;
6765 STATS_UNLOCK();
6766
6767 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
6768 "Too many open connections\n");
6769
6770 safe_close(sfd);
6771 return false0;
6772 }
6773
6774 if (evutil_make_socket_nonblocking(sfd) == -1) {
6775 STATS_LOCK();
6776 --port_instance->curr_conns;
6777 STATS_UNLOCK();
6778 safe_close(sfd);
6779 return false0;
6780 }
6781
6782 dispatch_conn_new(sfd, c->parent_port, conn_new_cmd, EV_READ0x02 | EV_PERSIST0x10,
6783 DATA_BUFFER_SIZE2048);
6784
6785 return false0;
6786}
6787
6788/**
 6789 * Ship the tap log to the other end. This state differs from all other states
 6790 * in that it supports a full duplex dialog. We're listening to both read
 6791 * and write events from libevent most of the time. If a read event occurs we
 6792 * switch to the conn_read state to read and execute the input message (that would
 6793 * be an ack message from the other side). If a write event occurs we continue to
 6794 * send the tap log to the other end.
6795 * @param c the tap connection to drive
6796 * @return true if we should continue to process work for this connection, false
6797 * if we should start processing events for other connections.
6798 */
6799bool_Bool conn_ship_log(conn *c) {
6800 bool_Bool cont = false0;
6801 short mask = EV_READ0x02 | EV_PERSIST0x10 | EV_WRITE0x04;
6802
6803 if (c->sfd == INVALID_SOCKET-1) {
6804 return false0;
6805 }
6806
6807 if (c->which & EV_READ0x02 || c->rbytes > 0) {
6808 if (c->rbytes > 0) {
6809 if (try_read_command(c) == 0) {
6810 conn_set_state(c, conn_read);
6811 }
6812 } else {
6813 conn_set_state(c, conn_read);
6814 }
6815
6816 /* we're going to process something.. let's proceed */
6817 cont = true1;
6818
 6819 /* We have a finite number of messages in the input queue, */
 6820 /* so let's process all of them instead of backing off after */
 6821 /* reading only a subset of them. */
 6822 /* Why? Every time we call ship_tap_log we try to send a */
 6823 /* chunk of items. This means that if we end up in a */
 6824 /* situation where we're receiving a burst of nack messages, */
 6825 /* we'd only process a subset of the messages in our input */
 6826 /* queue, and it would slowly grow.. */
6827 c->nevents = settings.reqs_per_tap_event;
6828 } else if (c->which & EV_WRITE0x04) {
6829 --c->nevents;
6830 if (c->nevents >= 0) {
6831 c->ewouldblock = false0;
6832 if (c->upr) {
6833 ship_upr_log(c);
6834 } else {
6835 ship_tap_log(c);
6836 }
6837 if (c->ewouldblock) {
6838 mask = EV_READ0x02 | EV_PERSIST0x10;
6839 } else {
6840 cont = true1;
6841 }
6842 }
6843 }
6844
6845 if (!update_event(c, mask)) {
6846 if (settings.verbose > 0) {
6847 settings.extensions.logger->log(EXTENSION_LOG_INFO,
6848 c, "Couldn't update event\n");
6849 }
6850 conn_set_state(c, conn_closing);
6851 }
6852
6853 return cont;
6854}
6855
6856bool_Bool conn_waiting(conn *c) {
6857 if (!update_event(c, EV_READ0x02 | EV_PERSIST0x10)) {
6858 if (settings.verbose > 0) {
6859 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
6860 "Couldn't update event\n");
6861 }
6862 conn_set_state(c, conn_closing);
6863 return true1;
6864 }
6865 conn_set_state(c, conn_read);
6866 return false0;
6867}
6868
6869bool_Bool conn_read(conn *c) {
6870 int res = try_read_network(c);
6871 switch (res) {
6872 case READ_NO_DATA_RECEIVED:
6873 conn_set_state(c, conn_waiting);
6874 break;
6875 case READ_DATA_RECEIVED:
6876 conn_set_state(c, conn_parse_cmd);
6877 break;
6878 case READ_ERROR:
6879 conn_set_state(c, conn_closing);
6880 break;
6881 case READ_MEMORY_ERROR: /* Failed to allocate more memory */
6882 /* State already set by try_read_network */
6883 break;
6884 }
6885
6886 return true1;
6887}
6888
6889bool_Bool conn_parse_cmd(conn *c) {
6890 if (try_read_command(c) == 0) {
 6891 /* we need more data! */
6892 conn_set_state(c, conn_waiting);
6893 }
6894
6895 return !c->ewouldblock;
6896}
6897
6898bool_Bool conn_new_cmd(conn *c) {
6899 /* Only process nreqs at a time to avoid starving other connections */
6900 c->start = 0;
6901 --c->nevents;
6902 if (c->nevents >= 0) {
6903 reset_cmd_handler(c);
6904 } else {
6905 STATS_NOKEY(c, conn_yields){ struct thread_stats *thread_stats = get_thread_stats(c); cb_mutex_enter
(&thread_stats->mutex); thread_stats->conn_yields++
; cb_mutex_exit(&thread_stats->mutex); }
;
6906 if (c->rbytes > 0) {
 6907 /* We have already read data into the input buffer,
 6908 so libevent will most likely not signal read events
 6909 on the socket (unless more data is available). As a
 6910 hack we should just put in a request to write data,
 6911 because that should be possible ;-)
 6912 */
6913 if (!update_event(c, EV_WRITE0x04 | EV_PERSIST0x10)) {
6914 if (settings.verbose > 0) {
6915 settings.extensions.logger->log(EXTENSION_LOG_INFO,
6916 c, "Couldn't update event\n");
6917 }
6918 conn_set_state(c, conn_closing);
6919 return true1;
6920 }
6921 }
6922 return false0;
6923 }
6924
6925 return true1;
6926}
6927
6928bool_Bool conn_swallow(conn *c) {
6929 ssize_t res;
6930#ifdef WIN32
6931 DWORD error;
6932#else
6933 int error;
6934#endif
6935 /* we are reading sbytes and throwing them away */
6936 if (c->sbytes == 0) {
6937 conn_set_state(c, conn_new_cmd);
6938 return true1;
6939 }
6940
6941 /* first check if we have leftovers in the conn_read buffer */
6942 if (c->rbytes > 0) {
6943 uint32_t tocopy = c->rbytes > c->sbytes ? c->sbytes : c->rbytes;
6944 c->sbytes -= tocopy;
6945 c->rcurr += tocopy;
6946 c->rbytes -= tocopy;
6947 return true1;
6948 }
6949
6950 /* now try reading from the socket */
6951 res = do_data_recv(c, c->rbuf, c->rsize > c->sbytes ? c->sbytes : c->rsize);
6952#ifdef WIN32
6953 error = WSAGetLastError();
6954#else
6955 error = errno(*__error());
6956#endif
6957 if (res > 0) {
6958 STATS_ADD(c, bytes_read, res){ struct thread_stats *thread_stats = get_thread_stats(c); cb_mutex_enter
(&thread_stats->mutex); thread_stats->bytes_read +=
res; cb_mutex_exit(&thread_stats->mutex); }
;
6959 c->sbytes -= res;
6960 return true1;
6961 }
6962 if (res == 0) { /* end of stream */
6963 conn_set_state(c, conn_closing);
6964 return true1;
6965 }
6966 if (res == -1 && is_blocking(error)) {
6967 if (!update_event(c, EV_READ0x02 | EV_PERSIST0x10)) {
6968 if (settings.verbose > 0) {
6969 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
6970 "Couldn't update event\n");
6971 }
6972 conn_set_state(c, conn_closing);
6973 return true1;
6974 }
6975 return false0;
6976 }
6977
6978 /* otherwise we have a real error, on which we close the connection */
6979 if (!is_closed_conn(error)) {
6980 char msg[80];
6981 snprintf(msg, sizeof(msg),__builtin___snprintf_chk (msg, sizeof(msg), 0, __builtin_object_size
(msg, 2 > 1 ? 1 : 0), "%d Failed to read, and not due to blocking (%%s)"
, (int)c->sfd)
6982 "%d Failed to read, and not due to blocking (%%s)",__builtin___snprintf_chk (msg, sizeof(msg), 0, __builtin_object_size
(msg, 2 > 1 ? 1 : 0), "%d Failed to read, and not due to blocking (%%s)"
, (int)c->sfd)
6983 (int)c->sfd)__builtin___snprintf_chk (msg, sizeof(msg), 0, __builtin_object_size
(msg, 2 > 1 ? 1 : 0), "%d Failed to read, and not due to blocking (%%s)"
, (int)c->sfd)
;
6984
6985 log_socket_error(EXTENSION_LOG_INFO, c, msg);
6986 }
6987
6988 conn_set_state(c, conn_closing);
6989
6990 return true1;
6991}
6992
6993bool_Bool conn_nread(conn *c) {
6994 ssize_t res;
6995#ifdef WIN32
6996 DWORD error;
6997#else
6998 int error;
6999#endif
7000
7001 if (c->rlbytes == 0) {
7002 bool_Bool block = c->ewouldblock = false0;
7003 complete_nread(c);
7004 if (c->ewouldblock) {
7005 unregister_event(c);
7006 block = true1;
7007 }
7008 return !block;
7009 }
7010 /* first check if we have leftovers in the conn_read buffer */
7011 if (c->rbytes > 0) {
7012 uint32_t tocopy = c->rbytes > c->rlbytes ? c->rlbytes : c->rbytes;
7013 if (c->ritem != c->rcurr) {
7014 memmove(c->ritem, c->rcurr, tocopy)__builtin___memmove_chk (c->ritem, c->rcurr, tocopy, __builtin_object_size
(c->ritem, 0))
;
7015 }
7016 c->ritem += tocopy;
7017 c->rlbytes -= tocopy;
7018 c->rcurr += tocopy;
7019 c->rbytes -= tocopy;
7020 if (c->rlbytes == 0) {
7021 return true1;
7022 }
7023 }
7024
7025 /* now try reading from the socket */
7026 res = do_data_recv(c, c->ritem, c->rlbytes);
7027#ifdef WIN32
7028 error = WSAGetLastError();
7029#else
7030 error = errno(*__error());
7031#endif
7032 if (res > 0) {
7033 STATS_ADD(c, bytes_read, res){ struct thread_stats *thread_stats = get_thread_stats(c); cb_mutex_enter
(&thread_stats->mutex); thread_stats->bytes_read +=
res; cb_mutex_exit(&thread_stats->mutex); }
;
7034 if (c->rcurr == c->ritem) {
7035 c->rcurr += res;
7036 }
7037 c->ritem += res;
7038 c->rlbytes -= res;
7039 return true1;
7040 }
7041 if (res == 0) { /* end of stream */
7042 conn_set_state(c, conn_closing);
7043 return true1;
7044 }
7045
7046 if (res == -1 && is_blocking(error)) {
7047 if (!update_event(c, EV_READ0x02 | EV_PERSIST0x10)) {
7048 if (settings.verbose > 0) {
7049 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
7050 "Couldn't update event\n");
7051 }
7052 conn_set_state(c, conn_closing);
7053 return true1;
7054 }
7055 return false0;
7056 }
7057
7058 /* otherwise we have a real error, on which we close the connection */
7059 if (!is_closed_conn(error)) {
7060 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
7061 "%d Failed to read, and not due to blocking:\n"
7062 "errno: %d %s \n"
7063 "rcurr=%lx ritem=%lx rbuf=%lx rlbytes=%d rsize=%d\n",
7064 c->sfd, errno(*__error()), strerror(errno(*__error())),
7065 (long)c->rcurr, (long)c->ritem, (long)c->rbuf,
7066 (int)c->rlbytes, (int)c->rsize);
7067 }
7068 conn_set_state(c, conn_closing);
7069 return true1;
7070}
7071
7072bool_Bool conn_write(conn *c) {
7073 /*
7074 * We want to write out a simple response. If we haven't already,
7075 * assemble it into a msgbuf list (this will be a single-entry
7076 * list for TCP).
7077 */
7078 if (c->iovused == 0) {
7079 if (add_iov(c, c->wcurr, c->wbytes) != 0) {
7080 if (settings.verbose > 0) {
7081 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
7082 "Couldn't build response\n");
7083 }
7084 conn_set_state(c, conn_closing);
7085 return true1;
7086 }
7087 }
7088
7089 return conn_mwrite(c);
7090}
7091
7092bool_Bool conn_mwrite(conn *c) {
7093 switch (transmit(c)) {
7094 case TRANSMIT_COMPLETE:
7095 if (c->state == conn_mwrite) {
7096 while (c->ileft > 0) {
7097 item *it = *(c->icurr);
7098 settings.engine.v1->release(settings.engine.v0, c, it);
7099 c->icurr++;
7100 c->ileft--;
7101 }
7102 while (c->temp_alloc_left > 0) {
7103 char *temp_alloc_ = *(c->temp_alloc_curr);
7104 free(temp_alloc_);
7105 c->temp_alloc_curr++;
7106 c->temp_alloc_left--;
7107 }
7108 /* XXX: I don't know why this wasn't the general case */
7109 conn_set_state(c, c->write_and_go);
7110 } else if (c->state == conn_write) {
7111 if (c->write_and_free) {
7112 free(c->write_and_free);
7113 c->write_and_free = 0;
7114 }
7115 conn_set_state(c, c->write_and_go);
7116 } else {
7117 if (settings.verbose > 0) {
7118 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
7119 "Unexpected state %d\n", c->state);
7120 }
7121 conn_set_state(c, conn_closing);
7122 }
7123 break;
7124
7125 case TRANSMIT_INCOMPLETE:
7126 case TRANSMIT_HARD_ERROR:
7127 break; /* Continue in state machine. */
7128
7129 case TRANSMIT_SOFT_ERROR:
7130 return false0;
7131 }
7132
7133 return true1;
7134}
7135
7136bool_Bool conn_pending_close(conn *c) {
7137 cb_assert(c->sfd == INVALID_SOCKET)(__builtin_expect(!(c->sfd == -1), 0) ? __assert_rtn(__func__
, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7137, "c->sfd == -1") : (void)0)
;
7138 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
7139 "Awaiting clients to release the cookie (pending close for %p)",
7140 (void*)c);
7141 /*
7142 * tell the tap connection that we're disconnecting it now,
7143 * but give it a grace period
7144 */
7145 perform_callbacks(ON_DISCONNECT, NULL((void*)0), c);
7146
7147 if (c->refcount > 1) {
7148 return false0;
7149 }
7150
7151 conn_set_state(c, conn_immediate_close);
7152 return true1;
7153}
7154
7155bool_Bool conn_immediate_close(conn *c) {
7156 struct listening_port *port_instance;
7157 cb_assert(c->sfd == INVALID_SOCKET)(__builtin_expect(!(c->sfd == -1), 0) ? __assert_rtn(__func__
, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7157, "c->sfd == -1") : (void)0)
;
7158 settings.extensions.logger->log(EXTENSION_LOG_DETAIL, c,
7159 "Releasing connection %p",
7160 c);
7161
7162 STATS_LOCK();
7163 port_instance = get_listening_port_instance(c->parent_port);
7164 cb_assert(port_instance)(__builtin_expect(!(port_instance), 0) ? __assert_rtn(__func__
, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7164, "port_instance") : (void)0)
;
7165 --port_instance->curr_conns;
7166 STATS_UNLOCK();
7167
7168 perform_callbacks(ON_DISCONNECT, NULL((void*)0), c);
7169 conn_close(c);
7170
7171 return false0;
7172}
7173
7174bool_Bool conn_closing(conn *c) {
7175 /* We don't want any network notifications anymore.. */
7176 unregister_event(c);
7177 safe_close(c->sfd);
7178 c->sfd = INVALID_SOCKET-1;
7179
7180 if (c->refcount > 1 || c->ewouldblock) {
7181 conn_set_state(c, conn_pending_close);
7182 } else {
7183 conn_set_state(c, conn_immediate_close);
7184 }
7185 return true1;
7186}
7187
7188bool_Bool conn_setup_tap_stream(conn *c) {
7189 process_bin_tap_connect(c);
7190 return true1;
7191}
7192
7193bool_Bool conn_refresh_cbsasl(conn *c) {
7194 ENGINE_ERROR_CODE ret = c->aiostat;
7195 c->aiostat = ENGINE_SUCCESS;
7196 c->ewouldblock = false0;
7197
7198 cb_assert(ret != ENGINE_EWOULDBLOCK)(__builtin_expect(!(ret != ENGINE_EWOULDBLOCK), 0) ? __assert_rtn
(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7198, "ret != ENGINE_EWOULDBLOCK") : (void)0)
;
7199
7200 switch (ret) {
7201 case ENGINE_SUCCESS:
7202 write_bin_response(c, NULL((void*)0), 0, 0, 0);
7203 break;
7204 case ENGINE_DISCONNECT:
7205 conn_set_state(c, conn_closing);
7206 break;
7207 default:
7208 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
7209 }
7210
7211 return true1;
7212}
7213
7214bool_Bool conn_refresh_ssl_certs(conn *c) {
7215 ENGINE_ERROR_CODE ret = c->aiostat;
7216 c->aiostat = ENGINE_SUCCESS;
7217 c->ewouldblock = false0;
7218
7219 cb_assert(ret != ENGINE_EWOULDBLOCK)(__builtin_expect(!(ret != ENGINE_EWOULDBLOCK), 0) ? __assert_rtn
(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7219, "ret != ENGINE_EWOULDBLOCK") : (void)0)
;
7220
7221 switch (ret) {
7222 case ENGINE_SUCCESS:
7223 write_bin_response(c, NULL((void*)0), 0, 0, 0);
7224 break;
7225 case ENGINE_DISCONNECT:
7226 conn_set_state(c, conn_closing);
7227 break;
7228 default:
7229 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
7230 }
7231
7232 return true1;
7233}
7234
7235void event_handler(evutil_socket_tint fd, short which, void *arg) {
7236 conn *c = arg;
7237 LIBEVENT_THREAD *thr;
7238
7239 cb_assert(c != NULL)(__builtin_expect(!(c != ((void*)0)), 0) ? __assert_rtn(__func__
, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7239, "c != ((void*)0)") : (void)0)
;
7240
7241 if (memcached_shutdown) {
7242 event_base_loopbreak(c->event.ev_base);
7243 return ;
7244 }
7245
7246 thr = c->thread;
7247 if (!is_listen_thread()) {
7248 cb_assert(thr)(__builtin_expect(!(thr), 0) ? __assert_rtn(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7248, "thr") : (void)0)
;
7249 LOCK_THREAD(thr)cb_mutex_enter(&thr->mutex); (__builtin_expect(!(thr->
is_locked == 0), 0) ? __assert_rtn(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7249, "thr->is_locked == 0") : (void)0); thr->is_locked
= 1;
;
 7250 /*
 7251 * Remove the connection from the list of pending io's (in case the
 7252 * object was scheduled to run in the dispatcher before the
 7253 * callback for the worker thread is executed).
 7254 */
7255 c->thread->pending_io = list_remove(c->thread->pending_io, c);
7256 }
7257
7258 c->which = which;
7259
7260 /* sanity */
7261 cb_assert(fd == c->sfd)(__builtin_expect(!(fd == c->sfd), 0) ? __assert_rtn(__func__
, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7261, "fd == c->sfd") : (void)0)
;
7262 perform_callbacks(ON_SWITCH_CONN, c, c);
7263
7264
7265 c->nevents = settings.reqs_per_event;
7266 if (c->state == conn_ship_log) {
7267 c->nevents = settings.reqs_per_tap_event;
7268 }
7269
7270 do {
7271 if (settings.verbose) {
7272 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
7273 "%d - Running task: (%s)\n",
7274 c->sfd, state_text(c->state));
7275 }
7276 } while (c->state(c));
7277
7278 if (thr) {
7279 UNLOCK_THREAD(thr)(__builtin_expect(!(thr->is_locked == 1), 0) ? __assert_rtn
(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7279, "thr->is_locked == 1") : (void)0); thr->is_locked
= 0; cb_mutex_exit(&thr->mutex);
;
7280 }
7281}
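
The do/while loop above drives a function-pointer state machine: each state handler returns true to keep running states for this connection and false to yield back to libevent. A toy, self-contained version of the same pattern (all names hypothetical):

#include <stdbool.h>

struct fsm;
typedef bool (*state_fn)(struct fsm *);

struct fsm {
    state_fn state;
    int budget;               /* plays the role of c->nevents */
};

static bool st_idle(struct fsm *f) { (void)f; return false; }

static bool st_work(struct fsm *f) {
    if (--f->budget < 0) {
        f->state = st_idle;   /* conn_set_state() equivalent */
        return false;         /* yield; other connections get a turn */
    }
    return true;              /* keep processing this connection */
}

static void drive(struct fsm *f) {
    while (f->state(f)) {
        /* mirrors: do { ... } while (c->state(c)); */
    }
}
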
7282
7283static void dispatch_event_handler(evutil_socket_tint fd, short which, void *arg) {
7284 char buffer[80];
7285 ssize_t nr = recv(fd, buffer, sizeof(buffer), 0);
7286
7287 if (nr != -1 && is_listen_disabled()) {
7288 bool_Bool enable = false0;
7289 cb_mutex_enter(&listen_state.mutex);
7290 listen_state.count -= nr;
7291 if (listen_state.count <= 0) {
7292 enable = true1;
7293 listen_state.disabled = false0;
7294 }
7295 cb_mutex_exit(&listen_state.mutex);
7296 if (enable) {
7297 conn *next;
7298 for (next = listen_conn; next; next = next->next) {
7299 int backlog = 1024;
7300 int ii;
7301 update_event(next, EV_READ0x02 | EV_PERSIST0x10);
7302 for (ii = 0; ii < settings.num_interfaces; ++ii) {
7303 if (next->parent_port == settings.interfaces[ii].port) {
7304 backlog = settings.interfaces[ii].backlog;
7305 break;
7306 }
7307 }
7308
7309 if (listen(next->sfd, backlog) != 0) {
7310 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7311 "listen() failed",
7312 strerror(errno(*__error())));
7313 }
7314 }
7315 }
7316 }
7317}
7318
7319/*
7320 * Sets a socket's send buffer size to the maximum allowed by the system.
7321 */
7322static void maximize_sndbuf(const SOCKETint sfd) {
7323 socklen_t intsize = sizeof(int);
7324 int last_good = 0;
7325 int min, max, avg;
7326 int old_size;
7327
7328 /* Start with the default size. */
7329 if (getsockopt(sfd, SOL_SOCKET0xffff, SO_SNDBUF0x1001, (void *)&old_size, &intsize) != 0) {
7330 if (settings.verbose > 0) {
7331 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7332 "getsockopt(SO_SNDBUF): %s",
7333 strerror(errno(*__error())));
7334 }
7335
7336 return;
7337 }
7338
7339 /* Binary-search for the real maximum. */
7340 min = old_size;
7341 max = MAX_SENDBUF_SIZE(256 * 1024 * 1024);
7342
7343 while (min <= max) {
7344 avg = ((unsigned int)(min + max)) / 2;
7345 if (setsockopt(sfd, SOL_SOCKET0xffff, SO_SNDBUF0x1001, (void *)&avg, intsize) == 0) {
7346 last_good = avg;
7347 min = avg + 1;
7348 } else {
7349 max = avg - 1;
7350 }
7351 }
7352
7353 if (settings.verbose > 1) {
7354 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL((void*)0),
7355 "<%d send buffer was %d, now %d\n", sfd, old_size, last_good);
7356 }
7357}
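
The loop above is a plain binary search for the largest buffer size the kernel will accept. The general shape, extracted as a self-contained sketch with a hypothetical acceptance predicate:

static int largest_accepted(int lo, int hi, int (*accepts)(int)) {
    int best = lo;
    while (lo <= hi) {
        int mid = lo + (hi - lo) / 2;
        if (accepts(mid)) {
            best = mid;       /* mid works; try something larger */
            lo = mid + 1;
        } else {
            hi = mid - 1;     /* mid rejected; try something smaller */
        }
    }
    return best;
}

For SO_SNDBUF, lo is the socket's default size, hi is MAX_SENDBUF_SIZE, and the predicate is a setsockopt() call, so the search converges in roughly log2(hi - lo) calls.
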
7358
7359static SOCKETint new_socket(struct addrinfo *ai) {
7360 SOCKETint sfd;
7361
7362 sfd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
7363 if (sfd == INVALID_SOCKET-1) {
7364 return INVALID_SOCKET-1;
7365 }
7366
7367 if (evutil_make_socket_nonblocking(sfd) == -1) {
7368 safe_close(sfd);
7369 return INVALID_SOCKET-1;
7370 }
7371
7372 maximize_sndbuf(sfd);
7373
7374 return sfd;
7375}
7376
7377/**
7378 * Create a socket and bind it to a specific port number
 7379 * @param interf the interface to bind to
7380 * @param port the port number to bind to
7381 * @param portnumber_file A filepointer to write the port numbers to
7382 * when they are successfully added to the list of ports we
7383 * listen on.
7384 */
7385static int server_socket(struct interface *interf, FILE *portnumber_file) {
7386 SOCKETint sfd;
7387 struct linger ling = {0, 0};
7388 struct addrinfo *ai;
7389 struct addrinfo *next;
7390 struct addrinfo hints;
7391 char port_buf[NI_MAXSERV32];
7392 int error;
7393 int success = 0;
7394 int flags =1;
7395 char *host = NULL((void*)0);
7396
7397 memset(&hints, 0, sizeof(hints))__builtin___memset_chk (&hints, 0, sizeof(hints), __builtin_object_size
(&hints, 0))
;
7398 hints.ai_flags = AI_PASSIVE0x00000001;
7399 hints.ai_protocol = IPPROTO_TCP6;
7400 hints.ai_socktype = SOCK_STREAM1;
7401
7402 if (interf->ipv4 && interf->ipv6) {
7403 hints.ai_family = AF_UNSPEC0;
7404 } else if (interf->ipv4) {
7405 hints.ai_family = AF_INET2;
7406 } else if (interf->ipv6) {
7407 hints.ai_family = AF_INET630;
7408 }
7409
7410 snprintf(port_buf, sizeof(port_buf), "%u", (unsigned int)interf->port)__builtin___snprintf_chk (port_buf, sizeof(port_buf), 0, __builtin_object_size
(port_buf, 2 > 1 ? 1 : 0), "%u", (unsigned int)interf->
port)
;
7411
7412 if (interf->host) {
7413 if (strlen(interf->host) > 0 && strcmp(interf->host, "*") != 0) {
7414 host = interf->host;
7415 }
7416 }
7417 error = getaddrinfo(host, port_buf, &hints, &ai);
7418 if (error != 0) {
7419#ifdef WIN32
7420 log_errcode_error(EXTENSION_LOG_WARNING, NULL((void*)0),
7421 "getaddrinfo(): %s", error);
7422#else
7423 if (error != EAI_SYSTEM11) {
7424 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7425 "getaddrinfo(): %s", gai_strerror(error));
7426 } else {
7427 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7428 "getaddrinfo(): %s", strerror(error));
7429 }
7430#endif
7431 return 1;
7432 }
7433
7434 for (next= ai; next; next= next->ai_next) {
7435 struct listening_port *port_instance;
7436 conn *listen_conn_add;
7437 if ((sfd = new_socket(next)) == INVALID_SOCKET-1) {
7438 /* getaddrinfo can return "junk" addresses,
7439 * we make sure at least one works before erroring.
7440 */
7441 continue;
7442 }
7443
7444#ifdef IPV6_V6ONLY27
7445 if (next->ai_family == AF_INET630) {
7446 error = setsockopt(sfd, IPPROTO_IPV641, IPV6_V6ONLY27, (char *) &flags, sizeof(flags));
7447 if (error != 0) {
7448 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7449 "setsockopt(IPV6_V6ONLY): %s",
7450 strerror(errno(*__error())));
7451 safe_close(sfd);
7452 continue;
7453 }
7454 }
7455#endif
7456
7457 setsockopt(sfd, SOL_SOCKET0xffff, SO_REUSEADDR0x0004, (void *)&flags, sizeof(flags));
7458 error = setsockopt(sfd, SOL_SOCKET0xffff, SO_KEEPALIVE0x0008, (void *)&flags, sizeof(flags));
7459 if (error != 0) {
7460 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7461 "setsockopt(SO_KEEPALIVE): %s",
7462 strerror(errno(*__error())));
7463 }
7464
7465 error = setsockopt(sfd, SOL_SOCKET0xffff, SO_LINGER0x0080, (void *)&ling, sizeof(ling));
7466 if (error != 0) {
7467 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7468 "setsockopt(SO_LINGER): %s",
7469 strerror(errno(*__error())));
7470 }
7471
7472 if (interf->tcp_nodelay) {
7473 error = setsockopt(sfd, IPPROTO_TCP6,
7474 TCP_NODELAY0x01, (void *)&flags, sizeof(flags));
7475 if (error != 0) {
7476 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7477 "setsockopt(TCP_NODELAY): %s",
7478 strerror(errno(*__error())));
7479 }
7480 }
7481
7482 if (bind(sfd, next->ai_addr, (socklen_t)next->ai_addrlen) == SOCKET_ERROR-1) {
7483#ifdef WIN32
7484 DWORD error = WSAGetLastError();
7485#else
7486 int error = errno(*__error());
7487#endif
7488 if (!is_addrinuse(error)) {
7489 log_errcode_error(EXTENSION_LOG_WARNING, NULL((void*)0),
7490 "bind(): %s", error);
7491 safe_close(sfd);
7492 freeaddrinfo(ai);
7493 return 1;
7494 }
7495 safe_close(sfd);
7496 continue;
7497 } else {
7498 success++;
7499 if (listen(sfd, interf->backlog) == SOCKET_ERROR-1) {
7500 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7501 "listen(): %s",
7502 strerror(errno(*__error())));
7503 safe_close(sfd);
7504 freeaddrinfo(ai);
7505 return 1;
7506 }
7507 if (portnumber_file != NULL((void*)0) &&
7508 (next->ai_addr->sa_family == AF_INET2 ||
7509 next->ai_addr->sa_family == AF_INET630)) {
7510 union {
7511 struct sockaddr_in in;
7512 struct sockaddr_in6 in6;
7513 } my_sockaddr;
7514 socklen_t len = sizeof(my_sockaddr);
7515 if (getsockname(sfd, (struct sockaddr*)&my_sockaddr, &len)==0) {
7516 if (next->ai_addr->sa_family == AF_INET2) {
7517 fprintf(portnumber_file, "%s INET: %u\n", "TCP",
7518 ntohs(my_sockaddr.in.sin_port)((__uint16_t)(__builtin_constant_p(my_sockaddr.in.sin_port) ?
((__uint16_t)((((__uint16_t)(my_sockaddr.in.sin_port) & 0xff00
) >> 8) | (((__uint16_t)(my_sockaddr.in.sin_port) &
0x00ff) << 8))) : _OSSwapInt16(my_sockaddr.in.sin_port
)))
);
7519 } else {
7520 fprintf(portnumber_file, "%s INET6: %u\n", "TCP",
7521 ntohs(my_sockaddr.in6.sin6_port)((__uint16_t)(__builtin_constant_p(my_sockaddr.in6.sin6_port)
? ((__uint16_t)((((__uint16_t)(my_sockaddr.in6.sin6_port) &
0xff00) >> 8) | (((__uint16_t)(my_sockaddr.in6.sin6_port
) & 0x00ff) << 8))) : _OSSwapInt16(my_sockaddr.in6.
sin6_port)))
);
7522 }
7523 }
7524 }
7525 }
7526
7527 if (!(listen_conn_add = conn_new(sfd, interf->port, conn_listening,
7528 EV_READ0x02 | EV_PERSIST0x10, 1,
7529 main_base, NULL((void*)0)))) {
7530 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7531 "failed to create listening connection\n");
7532 exit(EXIT_FAILURE1);
7533 }
7534 listen_conn_add->next = listen_conn;
7535 listen_conn = listen_conn_add;
7536 STATS_LOCK();
7537 ++stats.curr_conns;
7538 ++stats.daemon_conns;
7539 port_instance = get_listening_port_instance(interf->port);
7540 cb_assert(port_instance)(__builtin_expect(!(port_instance), 0) ? __assert_rtn(__func__
, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7540, "port_instance") : (void)0)
;
7541 ++port_instance->curr_conns;
7542 STATS_UNLOCK();
7543 }
7544
7545 freeaddrinfo(ai);
7546
7547 /* Return zero iff we detected no errors in starting up connections */
7548 return success == 0;
7549}
7550
7551static int server_sockets(FILE *portnumber_file) {
7552 int ret = 0;
7553 int ii = 0;
7554
7555 for (ii = 0; ii < settings.num_interfaces; ++ii) {
7556 stats.listening_ports[ii].port = settings.interfaces[ii].port;
7557 stats.listening_ports[ii].maxconns = settings.interfaces[ii].maxconn;
7558 ret |= server_socket(settings.interfaces + ii, portnumber_file);
7559 }
7560
7561 return ret;
7562}
7563
7564static struct event clockevent;
7565
 7566 /* time-sensitive callers can call it by hand with this, outside the normal every-1-second timer */
7567static void set_current_time(void) {
7568 struct timeval timer;
7569
7570 gettimeofday(&timer, NULL((void*)0));
7571 current_time = (rel_time_t) (timer.tv_sec - process_started);
7572}
7573
7574static void clock_handler(evutil_socket_tint fd, short which, void *arg) {
7575 struct timeval t;
7576 static bool_Bool initialized = false0;
7577
7578 t.tv_sec = 1;
7579 t.tv_usec = 0;
7580
7581 if (memcached_shutdown) {
7582 event_base_loopbreak(main_base);
7583 return ;
7584 }
7585
7586 if (initialized) {
7587 /* only delete the event if it's actually there. */
7588 evtimer_del(&clockevent)event_del(&clockevent);
7589 } else {
7590 initialized = true1;
7591 }
7592
7593 evtimer_set(&clockevent, clock_handler, 0)event_set((&clockevent), -1, 0, (clock_handler), (0));
7594 event_base_set(main_base, &clockevent);
7595 evtimer_add(&clockevent, &t)event_add((&clockevent), (&t));
7596
7597 set_current_time();
7598}
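
clock_handler() re-arms itself every second rather than using a persistent timer. A minimal self-contained sketch of the same self-rescheduling evtimer pattern (names hypothetical; assumes the classic libevent evtimer_set()/evtimer_add() API used above):

#include <event.h>
#include <stddef.h>

static struct event tick_event;
static struct event_base *tick_base;

static void tick(int fd, short which, void *arg) {
    struct timeval one_second = { 1, 0 };
    (void)fd; (void)which; (void)arg;

    /* re-arm ourselves one second from now */
    evtimer_set(&tick_event, tick, NULL);
    event_base_set(tick_base, &tick_event);
    evtimer_add(&tick_event, &one_second);

    /* ... refresh the cached current_time here ... */
}
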
7599
7600static void usage(void) {
7601 printf("memcached %s\n", get_server_version());
7602 printf("-C file Read configuration from file\n");
7603 printf("-h print this help and exit\n");
7604 printf("\nEnvironment variables:\n");
7605 printf("MEMCACHED_PORT_FILENAME File to write port information to\n");
7606 printf("MEMCACHED_REQS_TAP_EVENT Similar to -R but for tap_ship_log\n");
7607}
7608
7609#ifndef WIN32
7610static void save_pid(const char *pid_file) {
7611 FILE *fp;
7612
7613 if (access(pid_file, F_OK0) == 0) {
7614 if ((fp = fopen(pid_file, "r")) != NULL((void*)0)) {
7615 char buffer[1024];
7616 if (fgets(buffer, sizeof(buffer), fp) != NULL((void*)0)) {
7617 unsigned int pid;
7618 if (safe_strtoul(buffer, &pid) && kill((pid_t)pid, 0) == 0) {
7619 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7620 "WARNING: The pid file contained the following (running) pid: %u\n", pid);
7621 }
7622 }
7623 fclose(fp);
7624 }
7625 }
7626
7627 if ((fp = fopen(pid_file, "w")) == NULL((void*)0)) {
7628 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7629 "Could not open the pid file %s for writing: %s\n",
7630 pid_file, strerror(errno(*__error())));
7631 return;
7632 }
7633
7634 fprintf(fp,"%ld\n", (long)getpid());
7635 if (fclose(fp) == -1) {
7636 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7637 "Could not close the pid file %s: %s\n",
7638 pid_file, strerror(errno(*__error())));
7639 }
7640}
7641
7642static void remove_pidfile(const char *pid_file) {
7643 if (pid_file != NULL((void*)0)) {
7644 if (unlink(pid_file) != 0) {
7645 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7646 "Could not remove the pid file %s: %s\n",
7647 pid_file, strerror(errno(*__error())));
7648 }
7649 }
7650}
7651#endif
7652
7653#ifndef WIN32
7654
7655#ifndef HAVE_SIGIGNORE1
7656static int sigignore(int sig) {
7657 struct sigaction sa;
7658 memset(&sa, 0, sizeof(sa))__builtin___memset_chk (&sa, 0, sizeof(sa), __builtin_object_size
(&sa, 0))
;
7659 sa.sa_handler__sigaction_u.__sa_handler = SIG_IGN(void (*)(int))1;
7660
7661 if (sigemptyset(&sa.sa_mask)(*(&sa.sa_mask) = 0, 0) == -1 || sigaction(sig, &sa, 0) == -1) {
7662 return -1;
7663 }
7664 return 0;
7665}
7666#endif /* !HAVE_SIGIGNORE */
7667
7668static void sigterm_handler(int sig) {
7669 cb_assert(sig == SIGTERM || sig == SIGINT)(__builtin_expect(!(sig == 15 || sig == 2), 0) ? __assert_rtn
(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7669, "sig == 15 || sig == 2") : (void)0)
;
7670 memcached_shutdown = 1;
7671}
7672#endif
7673
7674static int install_sigterm_handler(void) {
7675#ifndef WIN32
7676 struct sigaction sa;
7677 memset(&sa, 0, sizeof(sa))__builtin___memset_chk (&sa, 0, sizeof(sa), __builtin_object_size
(&sa, 0))
;
7678 sa.sa_handler__sigaction_u.__sa_handler = sigterm_handler;
7679
7680 if (sigemptyset(&sa.sa_mask)(*(&sa.sa_mask) = 0, 0) == -1 || sigaction(SIGTERM15, &sa, 0) == -1 ||
7681 sigaction(SIGINT2, &sa, 0) == -1) {
7682 return -1;
7683 }
7684#endif
7685
7686 return 0;
7687}
7688
7689static const char* get_server_version(void) {
7690 if (strlen(PRODUCT_VERSION"") == 0) {
7691 return "unknown";
7692 } else {
7693 return PRODUCT_VERSION"";
7694 }
7695}
7696
7697static void store_engine_specific(const void *cookie,
7698 void *engine_data) {
7699 conn *c = (conn*)cookie;
7700 c->engine_storage = engine_data;
7701}
7702
7703static void *get_engine_specific(const void *cookie) {
7704 conn *c = (conn*)cookie;
7705 return c->engine_storage;
7706}
7707
7708static bool_Bool is_datatype_supported(const void *cookie) {
7709 conn *c = (conn*)cookie;
7710 return c->supports_datatype;
7711}
7712
7713static uint8_t get_opcode_if_ewouldblock_set(const void *cookie) {
7714 conn *c = (conn*)cookie;
7715 uint8_t opcode = PROTOCOL_BINARY_CMD_INVALID;
7716 if (c->ewouldblock) {
7717 opcode = c->binary_header.request.opcode;
7718 }
7719 return opcode;
7720}
7721
7722static bool_Bool validate_session_cas(const uint64_t cas) {
7723 bool_Bool ret = true1;
7724 cb_mutex_enter(&(session_cas.mutex));
7725 if (cas != 0) {
7726 if (session_cas.value != cas) {
7727 ret = false0;
7728 } else {
7729 session_cas.ctr++;
7730 }
7731 } else {
7732 session_cas.ctr++;
7733 }
7734 cb_mutex_exit(&(session_cas.mutex));
7735 return ret;
7736}
7737
7738static void decrement_session_ctr() {
7739 cb_mutex_enter(&(session_cas.mutex));
7740 cb_assert(session_cas.ctr != 0)(__builtin_expect(!(session_cas.ctr != 0), 0) ? __assert_rtn(
__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7740, "session_cas.ctr != 0") : (void)0)
;
7741 session_cas.ctr--;
7742 cb_mutex_exit(&(session_cas.mutex));
7743}
7744
7745static SOCKETint get_socket_fd(const void *cookie) {
7746 conn *c = (conn *)cookie;
7747 return c->sfd;
7748}
7749
7750static ENGINE_ERROR_CODE reserve_cookie(const void *cookie) {
7751 conn *c = (conn *)cookie;
7752 ++c->refcount;
7753 return ENGINE_SUCCESS;
7754}
7755
7756static ENGINE_ERROR_CODE release_cookie(const void *cookie) {
7757 conn *c = (conn *)cookie;
7758 int notify;
7759 LIBEVENT_THREAD *thr;
7760
7761 cb_assert(c)(__builtin_expect(!(c), 0) ? __assert_rtn(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7761, "c") : (void)0)
;
7762 thr = c->thread;
7763 cb_assert(thr)(__builtin_expect(!(thr), 0) ? __assert_rtn(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7763, "thr") : (void)0)
;
7764 LOCK_THREAD(thr)cb_mutex_enter(&thr->mutex); (__builtin_expect(!(thr->
is_locked == 0), 0) ? __assert_rtn(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7764, "thr->is_locked == 0") : (void)0); thr->is_locked
= 1;
;
7765 --c->refcount;
7766
 7767 /* Releasing the reference to the object may cause it to change
 7768 * state. (NOTE: the release call shall never be called from the
 7769 * worker threads.) We should put the connection in the pool of
 7770 * pending IO and have the system retry the operation for the
 7771 * connection.
 7772 */
7773 notify = add_conn_to_pending_io_list(c);
7774 UNLOCK_THREAD(thr)(__builtin_expect(!(thr->is_locked == 1), 0) ? __assert_rtn
(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7774, "thr->is_locked == 1") : (void)0); thr->is_locked
= 0; cb_mutex_exit(&thr->mutex);
;
7775
7776 /* kick the thread in the butt */
7777 if (notify) {
7778 notify_thread(thr);
7779 }
7780
7781 return ENGINE_SUCCESS;
7782}
7783
7784static int num_independent_stats(void) {
7785 return settings.num_threads + 1;
7786}
7787
7788static void *new_independent_stats(void) {
7789 int nrecords = num_independent_stats();
7790 struct thread_stats *ts = calloc(nrecords, sizeof(struct thread_stats));
7791 int ii;
7792 for (ii = 0; ii < nrecords; ii++) {
7793 cb_mutex_initialize(&ts[ii].mutex);
7794 }
7795 return ts;
7796}
7797
7798static void release_independent_stats(void *stats) {
7799 int nrecords = num_independent_stats();
7800 struct thread_stats *ts = stats;
7801 int ii;
7802 for (ii = 0; ii < nrecords; ii++) {
7803 cb_mutex_destroy(&ts[ii].mutex);
7804 }
7805 free(ts);
7806}
7807
7808static struct thread_stats* get_independent_stats(conn *c) {
7809 struct thread_stats *independent_stats;
7810 if (settings.engine.v1->get_stats_struct != NULL((void*)0)) {
7811 independent_stats = settings.engine.v1->get_stats_struct(settings.engine.v0, (const void *)c);
7812 if (independent_stats == NULL((void*)0)) {
7813 independent_stats = default_independent_stats;
7814 }
7815 } else {
7816 independent_stats = default_independent_stats;
7817 }
7818 return independent_stats;
7819}
7820
7821static struct thread_stats *get_thread_stats(conn *c) {
7822 struct thread_stats *independent_stats;
7823 cb_assert(c->thread->index < num_independent_stats())(__builtin_expect(!(c->thread->index < num_independent_stats
()), 0) ? __assert_rtn(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7823, "c->thread->index < num_independent_stats()"
) : (void)0)
;
7824 independent_stats = get_independent_stats(c);
7825 return &independent_stats[c->thread->index];
7826}
7827
7828static void register_callback(ENGINE_HANDLE *eh,
7829 ENGINE_EVENT_TYPE type,
7830 EVENT_CALLBACK cb, const void *cb_data) {
7831 struct engine_event_handler *h =
7832 calloc(sizeof(struct engine_event_handler), 1);
7833
7834 cb_assert(h)(__builtin_expect(!(h), 0) ? __assert_rtn(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7834, "h") : (void)0)
;
7835 h->cb = cb;
7836 h->cb_data = cb_data;
7837 h->next = engine_event_handlers[type];
7838 engine_event_handlers[type] = h;
7839}
7840
7841static rel_time_t get_current_time(void)
7842{
7843 return current_time;
7844}
7845
7846static void count_eviction(const void *cookie, const void *key, const int nkey) {
7847 (void)cookie;
7848 (void)key;
7849 (void)nkey;
7850}
7851
 7852/**
 7853 * To make it easy for engine implementors who don't want to care about
 7854 * writing their own incr/decr code, they can simply set the arithmetic function
 7855 * to NULL and use this implementation. It is not efficient, because it
 7856 * makes multiple calls through the interface (a get followed by a cas store).
 7857 * If you don't care, feel free to use it..
 7858 */
7859static ENGINE_ERROR_CODE internal_arithmetic(ENGINE_HANDLE* handle,
7860 const void* cookie,
7861 const void* key,
7862 const int nkey,
7863 const bool_Bool increment,
7864 const bool_Bool create,
7865 const uint64_t delta,
7866 const uint64_t initial,
7867 const rel_time_t exptime,
7868 uint64_t *cas,
7869 uint8_t datatype,
7870 uint64_t *result,
7871 uint16_t vbucket)
7872{
7873 ENGINE_HANDLE_V1 *e = (ENGINE_HANDLE_V1*)handle;
7874 item *it = NULL((void*)0);
7875 ENGINE_ERROR_CODE ret;
7876
7877 ret = e->get(handle, cookie, &it, key, nkey, vbucket);
7878
7879 if (ret == ENGINE_SUCCESS) {
7880 size_t nb;
7881 item *nit;
7882 char value[80];
7883 uint64_t val;
7884 item_info_holder info;
7885 item_info_holder i2;
7886 memset(&info, 0, sizeof(info))__builtin___memset_chk (&info, 0, sizeof(info), __builtin_object_size
(&info, 0))
;
7887 memset(&i2, 0, sizeof(i2))__builtin___memset_chk (&i2, 0, sizeof(i2), __builtin_object_size
(&i2, 0))
;
7888
7889 info.info.nvalue = 1;
7890
7891 if (!e->get_item_info(handle, cookie, it, (void*)&info)) {
7892 e->release(handle, cookie, it);
7893 return ENGINE_FAILED;
7894 }
7895
7896 if (info.info.value[0].iov_len > (sizeof(value) - 1)) {
7897 e->release(handle, cookie, it);
7898 return ENGINE_EINVAL;
7899 }
7900
7901 memcpy(value, info.info.value[0].iov_base, info.info.value[0].iov_len)__builtin___memcpy_chk (value, info.info.value[0].iov_base, info
.info.value[0].iov_len, __builtin_object_size (value, 0))
;
7902 value[info.info.value[0].iov_len] = '\0';
7903
7904 if (!safe_strtoull(value, &val)) {
7905 e->release(handle, cookie, it);
7906 return ENGINE_EINVAL;
7907 }
7908
7909 if (increment) {
7910 val += delta;
7911 } else {
7912 if (delta > val) {
7913 val = 0;
7914 } else {
7915 val -= delta;
7916 }
7917 }
7918
7919 nb = snprintf(value, sizeof(value), "%"PRIu64, val)__builtin___snprintf_chk (value, sizeof(value), 0, __builtin_object_size
(value, 2 > 1 ? 1 : 0), "%""ll" "u", val)
;
7920 *result = val;
7921 nit = NULL((void*)0);
7922 if (e->allocate(handle, cookie, &nit, key,
7923 nkey, nb, info.info.flags, info.info.exptime,
7924 datatype) != ENGINE_SUCCESS) {
7925 e->release(handle, cookie, it);
7926 return ENGINE_ENOMEM;
7927 }
7928
7929 i2.info.nvalue = 1;
7930 if (!e->get_item_info(handle, cookie, nit, (void*)&i2)) {
7931 e->release(handle, cookie, it);
7932 e->release(handle, cookie, nit);
7933 return ENGINE_FAILED;
7934 }
7935
7936 memcpy(i2.info.value[0].iov_base, value, nb)__builtin___memcpy_chk (i2.info.value[0].iov_base, value, nb,
__builtin_object_size (i2.info.value[0].iov_base, 0))
;
7937 e->item_set_cas(handle, cookie, nit, info.info.cas);
7938 ret = e->store(handle, cookie, nit, cas, OPERATION_CAS, vbucket);
7939 e->release(handle, cookie, it);
7940 e->release(handle, cookie, nit);
7941 } else if (ret == ENGINE_KEY_ENOENT && create) {
7942 char value[80];
7943 size_t nb = snprintf(value, sizeof(value), "%"PRIu64"\r\n", initial)__builtin___snprintf_chk (value, sizeof(value), 0, __builtin_object_size
(value, 2 > 1 ? 1 : 0), "%""ll" "u""\r\n", initial)
;
7944 item_info_holder info;
7945 memset(&info, 0, sizeof(info))__builtin___memset_chk (&info, 0, sizeof(info), __builtin_object_size
(&info, 0))
;
7946 info.info.nvalue = 1;
7947
7948 *result = initial;
7949 if (e->allocate(handle, cookie, &it, key, nkey, nb, 0, exptime,
7950 datatype) != ENGINE_SUCCESS) {
7951 e->release(handle, cookie, it);
7952 return ENGINE_ENOMEM;
7953 }
7954
7955 if (!e->get_item_info(handle, cookie, it, (void*)&info)) {
7956 e->release(handle, cookie, it);
7957 return ENGINE_FAILED;
7958 }
7959
7960 memcpy(info.info.value[0].iov_base, value, nb)__builtin___memcpy_chk (info.info.value[0].iov_base, value, nb
, __builtin_object_size (info.info.value[0].iov_base, 0))
;
7961 ret = e->store(handle, cookie, it, cas, OPERATION_CAS, vbucket);
7962 e->release(handle, cookie, it);
7963 }
7964
 7965 /* We hit a race condition.. just call ourselves recursively to retry */
7966 if (ret == ENGINE_KEY_EEXISTS) {
7967 return internal_arithmetic(handle, cookie, key, nkey, increment, create, delta,
7968 initial, exptime, cas, datatype, result, vbucket);
7969 }
7970
7971 return ret;
7972}
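
Worth noting: the decrement path above saturates at zero instead of wrapping (decrementing a counter of 3 by 10 yields 0), while increment is plain unsigned addition. The rule in isolation, as a small hypothetical helper:

#include <stdint.h>
#include <stdbool.h>

static uint64_t apply_delta(uint64_t val, uint64_t delta, bool increment) {
    if (increment) {
        return val + delta;                     /* wraps on overflow, as above */
    }
    return (delta > val) ? 0 : val - delta;     /* clamp at zero */
}
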
7973
7974/**
7975 * Register an extension if it's not already registered
7976 *
7977 * @param type the type of the extension to register
7978 * @param extension the extension to register
7979 * @return true if success, false otherwise
7980 */
7981static bool_Bool register_extension(extension_type_t type, void *extension)
7982{
7983 if (extension == NULL((void*)0)) {
7984 return false0;
7985 }
7986
7987 switch (type) {
7988 case EXTENSION_DAEMON:
7989 {
7990 EXTENSION_DAEMON_DESCRIPTOR *ptr;
7991 for (ptr = settings.extensions.daemons; ptr != NULL((void*)0); ptr = ptr->next) {
7992 if (ptr == extension) {
7993 return false0;
7994 }
7995 }
7996 ((EXTENSION_DAEMON_DESCRIPTOR *)(extension))->next = settings.extensions.daemons;
7997 settings.extensions.daemons = extension;
7998 }
7999 return true1;
8000 case EXTENSION_LOGGER:
8001 settings.extensions.logger = extension;
8002 return true1;
8003
8004 case EXTENSION_BINARY_PROTOCOL:
8005 if (settings.extensions.binary != NULL((void*)0)) {
8006 EXTENSION_BINARY_PROTOCOL_DESCRIPTOR *last;
8007 for (last = settings.extensions.binary; last->next != NULL((void*)0);
8008 last = last->next) {
8009 if (last == extension) {
8010 return false0;
8011 }
8012 }
8013 if (last == extension) {
8014 return false0;
8015 }
8016 last->next = extension;
8017 last->next->next = NULL((void*)0);
8018 } else {
8019 settings.extensions.binary = extension;
8020 settings.extensions.binary->next = NULL((void*)0);
8021 }
8022
8023 ((EXTENSION_BINARY_PROTOCOL_DESCRIPTOR*)extension)->setup(setup_binary_lookup_cmd);
8024 return true1;
8025
8026 default:
8027 return false0;
8028 }
8029}
8030
8031/**
8032 * Unregister an extension
8033 *
8034 * @param type the type of the extension to remove
8035 * @param extension the extension to remove
8036 */
8037static void unregister_extension(extension_type_t type, void *extension)
8038{
8039 switch (type) {
8040 case EXTENSION_DAEMON:
8041 {
8042 EXTENSION_DAEMON_DESCRIPTOR *prev = NULL((void*)0);
8043 EXTENSION_DAEMON_DESCRIPTOR *ptr = settings.extensions.daemons;
8044
8045 while (ptr != NULL((void*)0) && ptr != extension) {
8046 prev = ptr;
8047 ptr = ptr->next;
8048 }
8049
8050 if (ptr != NULL((void*)0) && prev != NULL((void*)0)) {
8051 prev->next = ptr->next;
8052 }
8053
8054 if (settings.extensions.daemons == ptr) {
8055 settings.extensions.daemons = ptr->next;
8056 }
8057 }
8058 break;
8059 case EXTENSION_LOGGER:
8060 if (settings.extensions.logger == extension) {
8061 if (get_stderr_logger() == extension) {
8062 settings.extensions.logger = get_null_logger();
8063 } else {
8064 settings.extensions.logger = get_stderr_logger();
8065 }
8066 }
8067 break;
8068 case EXTENSION_BINARY_PROTOCOL:
8069 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
8070 "You can't unregister a binary command handler!");
8071 abort();
8072 break;
8073
8074 default:
8075 ;
8076 }
8077
8078}
8079
8080/**
8081 * Get the named extension
8082 */
8083static void* get_extension(extension_type_t type)
8084{
8085 switch (type) {
8086 case EXTENSION_DAEMON:
8087 return settings.extensions.daemons;
8088
8089 case EXTENSION_LOGGER:
8090 return settings.extensions.logger;
8091
8092 case EXTENSION_BINARY_PROTOCOL:
8093 return settings.extensions.binary;
8094
8095 default:
8096 return NULL((void*)0);
8097 }
8098}
8099
8100static void shutdown_server(void) {
8101 memcached_shutdown = 1;
8102}
8103
8104static EXTENSION_LOGGER_DESCRIPTOR* get_logger(void)
8105{
8106 return settings.extensions.logger;
8107}
8108
8109static EXTENSION_LOG_LEVEL get_log_level(void)
8110{
8111 EXTENSION_LOG_LEVEL ret;
8112 switch (settings.verbose) {
8113 case 0: ret = EXTENSION_LOG_WARNING; break;
8114 case 1: ret = EXTENSION_LOG_INFO; break;
8115 case 2: ret = EXTENSION_LOG_DEBUG; break;
8116 default:
8117 ret = EXTENSION_LOG_DETAIL;
8118 }
8119 return ret;
8120}
8121
8122static void set_log_level(EXTENSION_LOG_LEVEL severity)
8123{
8124 switch (severity) {
8125 case EXTENSION_LOG_WARNING: settings.verbose = 0; break;
8126 case EXTENSION_LOG_INFO: settings.verbose = 1; break;
8127 case EXTENSION_LOG_DEBUG: settings.verbose = 2; break;
8128 default:
8129 settings.verbose = 3;
8130 }
8131}
8132
8133static void get_config_append_stats(const char *key, const uint16_t klen,
8134 const char *val, const uint32_t vlen,
8135 const void *cookie)
8136{
8137 char *pos;
8138 size_t nbytes;
8139
8140 if (klen == 0 || vlen == 0) {
8141 return ;
8142 }
8143
8144 pos = (char*)cookie;
8145 nbytes = strlen(pos);
8146
8147 if ((nbytes + klen + vlen + 3) > 1024) {
8148 /* Not enough size in the buffer.. */
8149 return;
8150 }
8151
8152 memcpy(pos + nbytes, key, klen)__builtin___memcpy_chk (pos + nbytes, key, klen, __builtin_object_size
(pos + nbytes, 0))
;
8153 nbytes += klen;
8154 pos[nbytes] = '=';
8155 ++nbytes;
8156 memcpy(pos + nbytes, val, vlen)__builtin___memcpy_chk (pos + nbytes, val, vlen, __builtin_object_size
(pos + nbytes, 0))
;
8157 nbytes += vlen;
8158 memcpy(pos + nbytes, ";", 2)__builtin___memcpy_chk (pos + nbytes, ";", 2, __builtin_object_size
(pos + nbytes, 0))
;
8159}
8160
8161static bool_Bool get_config(struct config_item items[]) {
8162 char config[1024];
8163 int rval;
8164
8165 config[0] = '\0';
8166 process_stat_settings(get_config_append_stats, config);
8167 rval = parse_config(config, items, NULL((void*)0));
8168 return rval >= 0;
8169}
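
get_config_append_stats() flattens the settings into a single "key=value;key=value;..." string (capped at 1024 bytes) which parse_config() then re-parses. A self-contained sketch of that append step, with a hypothetical helper name and an explicit buffer size:

#include <stdio.h>
#include <string.h>

static void append_setting(char *buf, size_t bufsz,
                           const char *key, const char *val) {
    size_t used = strlen(buf);
    /* mirror the size check above: key + '=' + value + ';' + NUL */
    if (used + strlen(key) + strlen(val) + 3 > bufsz) {
        return;   /* not enough room; silently skip, as above */
    }
    snprintf(buf + used, bufsz - used, "%s=%s;", key, val);
}

For example, appending ("maxconns", "1000") and then ("verbosity", "1") to an empty 1024-byte buffer yields "maxconns=1000;verbosity=1;".
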
8170
8171/**
8172 * Callback the engines may call to get the public server interface
8173 * @return pointer to a structure containing the interface. The client should
8174 * know the layout and perform the proper casts.
8175 */
8176static SERVER_HANDLE_V1 *get_server_api(void)
8177{
8178 static int init;
8179 static SERVER_CORE_API core_api;
8180 static SERVER_COOKIE_API server_cookie_api;
8181 static SERVER_STAT_API server_stat_api;
8182 static SERVER_LOG_API server_log_api;
8183 static SERVER_EXTENSION_API extension_api;
8184 static SERVER_CALLBACK_API callback_api;
8185 static ALLOCATOR_HOOKS_API hooks_api;
8186 static SERVER_HANDLE_V1 rv;
8187
8188 if (!init) {
8189 init = 1;
8190 core_api.server_version = get_server_version;
8191 core_api.hash = hash;
8192 core_api.realtime = realtime;
8193 core_api.abstime = abstime;
8194 core_api.get_current_time = get_current_time;
8195 core_api.parse_config = parse_config;
8196 core_api.shutdown = shutdown_server;
8197 core_api.get_config = get_config;
8198
8199 server_cookie_api.get_auth_data = get_auth_data;
8200 server_cookie_api.store_engine_specific = store_engine_specific;
8201 server_cookie_api.get_engine_specific = get_engine_specific;
8202 server_cookie_api.is_datatype_supported = is_datatype_supported;
8203 server_cookie_api.get_opcode_if_ewouldblock_set = get_opcode_if_ewouldblock_set;
8204 server_cookie_api.validate_session_cas = validate_session_cas;
8205 server_cookie_api.decrement_session_ctr = decrement_session_ctr;
8206 server_cookie_api.get_socket_fd = get_socket_fd;
8207 server_cookie_api.notify_io_complete = notify_io_complete;
8208 server_cookie_api.reserve = reserve_cookie;
8209 server_cookie_api.release = release_cookie;
8210
8211 server_stat_api.new_stats = new_independent_stats;
8212 server_stat_api.release_stats = release_independent_stats;
8213 server_stat_api.evicting = count_eviction;
8214
8215 server_log_api.get_logger = get_logger;
8216 server_log_api.get_level = get_log_level;
8217 server_log_api.set_level = set_log_level;
8218
8219 extension_api.register_extension = register_extension;
8220 extension_api.unregister_extension = unregister_extension;
8221 extension_api.get_extension = get_extension;
8222
8223 callback_api.register_callback = register_callback;
8224 callback_api.perform_callbacks = perform_callbacks;
8225
8226 hooks_api.add_new_hook = mc_add_new_hook;
8227 hooks_api.remove_new_hook = mc_remove_new_hook;
8228 hooks_api.add_delete_hook = mc_add_delete_hook;
8229 hooks_api.remove_delete_hook = mc_remove_delete_hook;
8230 hooks_api.get_extra_stats_size = mc_get_extra_stats_size;
8231 hooks_api.get_allocator_stats = mc_get_allocator_stats;
8232 hooks_api.get_allocation_size = mc_get_allocation_size;
8233 hooks_api.get_detailed_stats = mc_get_detailed_stats;
8234
8235 rv.interface = 1;
8236 rv.core = &core_api;
8237 rv.stat = &server_stat_api;
8238 rv.extension = &extension_api;
8239 rv.callback = &callback_api;
8240 rv.log = &server_log_api;
8241 rv.cookie = &server_cookie_api;
8242 rv.alloc_hooks = &hooks_api;
8243 }
8244
8245 if (rv.engine == NULL((void*)0)) {
8246 rv.engine = settings.engine.v0;
8247 }
8248
8249 return &rv;
8250}
8251
8252static void process_bin_upr_response(conn *c) {
8253 char *packet;
8254 ENGINE_ERROR_CODE ret = ENGINE_DISCONNECT;
8255
8256 c->supports_datatype = true1;
8257 packet = (c->rcurr - (c->binary_header.request.bodylen + sizeof(c->binary_header)));
8258 if (settings.engine.v1->upr.response_handler != NULL((void*)0)) {
8259 ret = settings.engine.v1->upr.response_handler(settings.engine.v0, c,
8260 (void*)packet);
8261 }
8262
8263 if (ret == ENGINE_DISCONNECT) {
8264 conn_set_state(c, conn_closing);
8265 } else {
8266 conn_set_state(c, conn_ship_log);
8267 }
8268}
8269
8270
8271static void initialize_binary_lookup_map(void) {
8272 int ii;
8273 for (ii = 0; ii < 0x100; ++ii) {
8274 request_handlers[ii].descriptor = NULL((void*)0);
8275 request_handlers[ii].callback = default_unknown_command;
8276 }
8277
8278 response_handlers[PROTOCOL_BINARY_CMD_NOOP] = process_bin_noop_response;
8279 response_handlers[PROTOCOL_BINARY_CMD_TAP_MUTATION] = process_bin_tap_ack;
8280 response_handlers[PROTOCOL_BINARY_CMD_TAP_DELETE] = process_bin_tap_ack;
8281 response_handlers[PROTOCOL_BINARY_CMD_TAP_FLUSH] = process_bin_tap_ack;
8282 response_handlers[PROTOCOL_BINARY_CMD_TAP_OPAQUE] = process_bin_tap_ack;
8283 response_handlers[PROTOCOL_BINARY_CMD_TAP_VBUCKET_SET] = process_bin_tap_ack;
8284 response_handlers[PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_START] = process_bin_tap_ack;
8285 response_handlers[PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_END] = process_bin_tap_ack;
8286
8287 response_handlers[PROTOCOL_BINARY_CMD_UPR_OPEN] = process_bin_upr_response;
8288 response_handlers[PROTOCOL_BINARY_CMD_UPR_ADD_STREAM] = process_bin_upr_response;
8289 response_handlers[PROTOCOL_BINARY_CMD_UPR_CLOSE_STREAM] = process_bin_upr_response;
8290 response_handlers[PROTOCOL_BINARY_CMD_UPR_STREAM_REQ] = process_bin_upr_response;
8291 response_handlers[PROTOCOL_BINARY_CMD_UPR_GET_FAILOVER_LOG] = process_bin_upr_response;
8292 response_handlers[PROTOCOL_BINARY_CMD_UPR_STREAM_END] = process_bin_upr_response;
8293 response_handlers[PROTOCOL_BINARY_CMD_UPR_SNAPSHOT_MARKER] = process_bin_upr_response;
8294 response_handlers[PROTOCOL_BINARY_CMD_UPR_MUTATION] = process_bin_upr_response;
8295 response_handlers[PROTOCOL_BINARY_CMD_UPR_DELETION] = process_bin_upr_response;
8296 response_handlers[PROTOCOL_BINARY_CMD_UPR_EXPIRATION] = process_bin_upr_response;
8297 response_handlers[PROTOCOL_BINARY_CMD_UPR_FLUSH] = process_bin_upr_response;
8298 response_handlers[PROTOCOL_BINARY_CMD_UPR_SET_VBUCKET_STATE] = process_bin_upr_response;
8299 response_handlers[PROTOCOL_BINARY_CMD_UPR_NOOP] = process_bin_upr_response;
8300 response_handlers[PROTOCOL_BINARY_CMD_UPR_BUFFER_ACKNOWLEDGEMENT] = process_bin_upr_response;
8301 response_handlers[PROTOCOL_BINARY_CMD_UPR_CONTROL] = process_bin_upr_response;
8302 response_handlers[PROTOCOL_BINARY_CMD_UPR_RESERVED4] = process_bin_upr_response;
8303}
8304
8305/**
8306 * Load a shared object and initialize all the extensions in there.
8307 *
8308 * @param soname the name of the shared object (may not be NULL)
8309 * @param config optional configuration parameters
8310 * @return true if success, false otherwise
8311 */
8312bool load_extension(const char *soname, const char *config) {
8313 cb_dlhandle_t handle;
8314 void *symbol;
8315 EXTENSION_ERROR_CODE error;
8316 union my_hack {
8317 MEMCACHED_EXTENSIONS_INITIALIZE initialize;
8318 void* voidptr;
8319 } funky;
8320 char *error_msg;
8321
8322    if (soname == NULL) {
8323        return false;
8324 }
8325
8326 handle = cb_dlopen(soname, &error_msg);
8327    if (handle == NULL) {
8328        settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
8329                "Failed to open library \"%s\": %s\n",
8330                soname, error_msg);
8331        free(error_msg);
8332        return false;
8333 }
8334
8335 symbol = cb_dlsym(handle, "memcached_extensions_initialize", &error_msg);
8336    if (symbol == NULL) {
8337        settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
8338                "Could not find symbol \"memcached_extensions_initialize\" in %s: %s\n",
8339                soname, error_msg);
8340        free(error_msg);
8341        return false;
8342 }
8343 funky.voidptr = symbol;
8344
8345 error = (*funky.initialize)(config, get_server_api);
8346 if (error != EXTENSION_SUCCESS) {
8347        settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
8348                "Failed to initialize extensions from %s. Error code: %d\n",
8349                soname, error);
8350        cb_dlclose(handle);
8351        return false;
8352 }
8353
8354 if (settings.verbose > 0) {
8355        settings.extensions.logger->log(EXTENSION_LOG_INFO, NULL,
8356 "Loaded extensions from: %s\n", soname);
8357 }
8358
8359    return true;
8360}
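The contract documented above (a required shared-object name, an optional configuration string, a boolean result) is easiest to see from the caller's side. A minimal usage sketch follows; the extension name and configuration string are hypothetical and not taken from this file.

    /* Illustrative only: load a hypothetical extension and log a warning
       if it cannot be opened or initialized. */
    if (!load_extension("example_logger.so", "cyclesize=1024")) {
        settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
                                        "Failed to load example_logger.so\n");
    }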
8361
8362/**
8363 * Do basic sanity check of the runtime environment
8364 * @return true if no errors found, false if we can't use this env
8365 */
8366static bool sanitycheck(void) {
8367 /* One of our biggest problems is old and bogus libevents */
8368 const char *ever = event_get_version();
8369    if (ever != NULL) {
8370 if (strncmp(ever, "1.", 2) == 0) {
8371 /* Require at least 1.3 (that's still a couple of years old) */
8372 if ((ever[2] == '1' || ever[2] == '2') && !isdigit(ever[3])) {
8373                settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
8374 "You are using libevent %s.\nPlease upgrade to"
8375 " a more recent version (1.3 or newer)\n",
8376 event_get_version());
8377                return false;
8378 }
8379 }
8380 }
8381
8382    return true;
8383}
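Because the version test above is easy to misread, here is how it behaves on a few sample libevent version strings; the strings are illustrative, chosen only to exercise each branch.

    /* Illustrative behaviour of sanitycheck()'s libevent version test:
     *   "1.1a"          -> rejected (older than 1.3)
     *   "1.2.3-stable"  -> rejected (older than 1.3)
     *   "1.4.13-stable" -> accepted
     *   "1.14.0"        -> accepted (the isdigit() check keeps "1.1"/"1.2"
     *                      from matching two-digit minor versions)
     *   "2.0.21-stable" -> accepted (only "1." prefixes are inspected)
     */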
8384
8385/**
8386 * Log a socket error message.
8387 *
8388 * @param severity the severity to put in the log
8389 * @param cookie cookie representing the client
8390 * @param prefix What to put as a prefix (MUST INCLUDE
8391 * the %s for where the string should go)
8392 */
8393void log_socket_error(EXTENSION_LOG_LEVEL severity,
8394 const void* cookie,
8395 const char* prefix)
8396{
8397#ifdef WIN32
8398 log_errcode_error(severity, cookie, prefix,
8399 WSAGetLastError());
8400#else
8401    log_errcode_error(severity, cookie, prefix, errno);
8402#endif
8403}
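Since the prefix must embed the %s that receives the decoded error text, a caller-side sketch may help; the connection cookie variable c is assumed to be in scope and is not part of this function.

    /* Illustrative only: the %s in the prefix is replaced with strerror(errno)
       on POSIX, or with the FormatMessage() text on Windows. */
    log_socket_error(EXTENSION_LOG_WARNING, c,
                     "Failed to read from client: %s");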
8404
8405/**
8406 * Log a system error message.
8407 *
8408 * @param severity the severity to put in the log
8409 * @param cookie cookie representing the client
8410 * @param prefix What to put as a prefix (MUST INCLUDE
8411 * the %s for where the string should go)
8412 */
8413void log_system_error(EXTENSION_LOG_LEVEL severity,
8414 const void* cookie,
8415 const char* prefix)
8416{
8417#ifdef WIN32
8418 log_errcode_error(severity, cookie, prefix,
8419 GetLastError());
8420#else
8421    log_errcode_error(severity, cookie, prefix, errno);
8422#endif
8423}
8424
8425#ifdef WIN32
8426void log_errcode_error(EXTENSION_LOG_LEVEL severity,
8427 const void* cookie,
8428 const char* prefix, DWORD err) {
8429 LPVOID error_msg;
8430
8431 if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
8432 FORMAT_MESSAGE_FROM_SYSTEM |
8433 FORMAT_MESSAGE_IGNORE_INSERTS,
8434                      NULL, err, 0,
8435                      (LPTSTR)&error_msg, 0, NULL) != 0) {
8436 settings.extensions.logger->log(severity, cookie,
8437 prefix, error_msg);
8438 LocalFree(error_msg);
8439 } else {
8440 settings.extensions.logger->log(severity, cookie,
8441 prefix, "unknown error");
8442 }
8443}
8444#else
8445void log_errcode_error(EXTENSION_LOG_LEVEL severity,
8446 const void* cookie,
8447 const char* prefix, int err) {
8448 settings.extensions.logger->log(severity,
8449 cookie,
8450 prefix,
8451 strerror(err));
8452}
8453#endif
8454
8455#ifdef WIN32
8456static void parent_monitor_thread(void *arg) {
8457 HANDLE parent = arg;
8458 WaitForSingleObject(parent, INFINITE);
8459    ExitProcess(EXIT_FAILURE);
8460}
8461
8462static void setup_parent_monitor(void) {
8463 char *env = getenv("MEMCACHED_PARENT_MONITOR");
8464    if (env != NULL) {
8465        HANDLE handle = OpenProcess(SYNCHRONIZE, FALSE, atoi(env));
8466        if (handle == INVALID_HANDLE_VALUE) {
8467            log_system_error(EXTENSION_LOG_WARNING, NULL,
8468                             "Failed to open parent process: %s");
8469            exit(EXIT_FAILURE);
8470        }
8471        cb_create_thread(NULL, parent_monitor_thread, handle, 1);
8472 }
8473}
8474
8475static void set_max_filehandles(void) {
8476 /* EMPTY */
8477}
8478
8479#else
8480static void parent_monitor_thread(void *arg) {
8481 pid_t pid = atoi(arg);
8482    while (true) {
8483        sleep(1);
8484        if (kill(pid, 0) == -1 && errno == ESRCH) {
8485 _exit(1);
8486 }
8487 }
8488}
8489
8490static void setup_parent_monitor(void) {
8491 char *env = getenv("MEMCACHED_PARENT_MONITOR");
8492    if (env != NULL) {
8493        cb_thread_t t;
8494        if (cb_create_thread(&t, parent_monitor_thread, env, 1) != 0) {
8495            log_system_error(EXTENSION_LOG_WARNING, NULL,
8496                             "Failed to open parent process: %s");
8497            exit(EXIT_FAILURE);
8498 }
8499 }
8500}
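The watchdog above only arms itself when MEMCACHED_PARENT_MONITOR carries the supervising process's PID, so the supervisor has to export it before spawning memcached. A hedged sketch of that parent side follows; the helper name and the fork()/execvp() arrangement are assumptions, not code from this file (requires <stdio.h>, <stdlib.h> and <unistd.h>).

    /* Illustrative only: export our PID, then exec memcached in a child so
       parent_monitor_thread() can poll kill(pid, 0) and exit if we die. */
    static void spawn_memcached_with_monitor(char **argv) {
        char pidbuf[16];
        snprintf(pidbuf, sizeof(pidbuf), "%ld", (long)getpid());
        setenv("MEMCACHED_PARENT_MONITOR", pidbuf, 1);
        if (fork() == 0) {
            execvp(argv[0], argv);
            _exit(EXIT_FAILURE);   /* only reached if exec fails */
        }
    }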
8501
8502static void set_max_filehandles(void) {
8503 struct rlimit rlim;
8504
8505    if (getrlimit(RLIMIT_NOFILE, &rlim) != 0) {
8506        settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
8507                "failed to getrlimit number of files\n");
8508        exit(EX_OSERR);
8509 } else {
8510 int maxfiles = settings.maxconns + (3 * (settings.num_threads + 2));
8511 int syslimit = rlim.rlim_cur;
8512 if (rlim.rlim_cur < maxfiles) {
8513 rlim.rlim_cur = maxfiles;
8514 }
8515 if (rlim.rlim_max < rlim.rlim_cur) {
8516 rlim.rlim_max = rlim.rlim_cur;
8517 }
8518        if (setrlimit(RLIMIT_NOFILE, &rlim) != 0) {
8519 const char *fmt;
8520 int req;
8521 fmt = "WARNING: maxconns cannot be set to (%d) connections due to "
8522                  "system\nresource restrictions. Increase the number of file "
8523 "descriptors allowed\nto the memcached user process or start "
8524 "memcached as root (remember\nto use the -u parameter).\n"
8525 "The maximum number of connections is set to %d.\n";
8526 req = settings.maxconns;
8527 settings.maxconns = syslimit - (3 * (settings.num_threads + 2));
8528 if (settings.maxconns < 0) {
8529                settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
8530                        "failed to set rlimit for open files. Try starting as"
8531                        " root or requesting smaller maxconns value.\n");
8532                exit(EX_OSERR);
8533            }
8534            settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
8535 fmt, req, settings.maxconns);
8536 }
8537 }
8538}
8539
8540#endif
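A small worked example of the arithmetic in the POSIX set_max_filehandles() above; the numbers are illustrative.

    /* With maxconns = 1000 and num_threads = 4:
     *   maxfiles = 1000 + 3 * (4 + 2) = 1018
     * so rlim_cur is raised to 1018 (and rlim_max bumped to match) before
     * setrlimit(RLIMIT_NOFILE, ...) is attempted; if that call fails,
     * maxconns is instead recomputed downward from the original soft limit. */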
8541
8542static cb_mutex_t *openssl_lock_cs;
8543
8544static unsigned long get_thread_id(void) {
8545 return (unsigned long)cb_thread_self();
8546}
8547
8548static void openssl_locking_callback(int mode, int type, char *file, int line)
8549{
8550    if (mode & CRYPTO_LOCK) {
8551 cb_mutex_enter(&(openssl_lock_cs[type]));
8552 } else {
8553 cb_mutex_exit(&(openssl_lock_cs[type]));
8554 }
8555}
8556
8557static void initialize_openssl(void) {
8558 int ii;
8559
8560    CRYPTO_malloc_init();
8561    SSL_library_init();
8562    SSL_load_error_strings();
8563    ERR_load_BIO_strings();
8564    OpenSSL_add_all_algorithms();
8565
8566 openssl_lock_cs = calloc(CRYPTO_num_locks(), sizeof(cb_mutex_t));
8567 for (ii = 0; ii < CRYPTO_num_locks(); ii++) {
8568 cb_mutex_initialize(&(openssl_lock_cs[ii]));
8569 }
8570
8571 CRYPTO_set_id_callback((unsigned long (*)())get_thread_id);
8572 CRYPTO_set_locking_callback((void (*)())openssl_locking_callback);
8573}
8574
8575static void calculate_maxconns(void) {
8576 int ii;
8577 settings.maxconns = 0;
8578 for (ii = 0; ii < settings.num_interfaces; ++ii) {
8579 settings.maxconns += settings.interfaces[ii].maxconn;
8580 }
8581}
8582
8583int main (int argc, char **argv) {
8584 int c;
8585    ENGINE_HANDLE *engine_handle = NULL;
8586    const char *config_file = NULL;
8587
8588 initialize_openssl();
8589 /* make the time we started always be 2 seconds before we really
8590 did, so time(0) - time.started is never zero. if so, things
8591 like 'settings.oldest_live' which act as booleans as well as
8592 values are now false in boolean context... */
8593 process_started = time(0) - 2;
8594 set_current_time();
8595
8596 initialize_timings();
8597
8598 /* Initialize global variables */
8599 cb_mutex_initialize(&listen_state.mutex);
8600 cb_mutex_initialize(&connections.mutex);
8601 cb_mutex_initialize(&tap_stats.mutex);
8602 cb_mutex_initialize(&stats_lock);
8603 cb_mutex_initialize(&session_cas.mutex);
8604
8605 session_cas.value = 0;
8606 session_cas.ctr = 0;
8607
8608 /* Initialize the socket subsystem */
8609 cb_initialize_sockets();
8610
8611 init_alloc_hooks();
8612
8613 /* init settings */
8614 settings_init();
8615
8616 initialize_binary_lookup_map();
8617
8618 setup_bin_packet_handlers();
8619
8620 if (memcached_initialize_stderr_logger(get_server_api) != EXTENSION_SUCCESS) {
8621        fprintf(stderr, "Failed to initialize log system\n");
8622        return EX_OSERR;
8623 }
8624
8625 if (!sanitycheck()) {
8626        return EX_OSERR;
8627 }
8628
8629 /* process arguments */
8630 while ((c = getopt(argc, argv,
8631 "C:" /* Read configuration file */
8632 "h" /* help */
8633 )) != -1) {
8634 switch (c) {
8635
8636 case 'h':
8637 usage();
8638            exit(EXIT_SUCCESS);
8639 case 'C':
8640 config_file = optarg;
8641 break;
8642
8643 default:
8644            settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
8645 "Illegal argument \"%c\"\n", c);
8646 return 1;
8647 }
8648 }
8649
8650 if (config_file) {
8651 read_config_file(config_file);
8652 }
8653
8654 set_max_filehandles();
8655
8656    if (getenv("MEMCACHED_REQS_TAP_EVENT") != NULL) {
8657 settings.reqs_per_tap_event = atoi(getenv("MEMCACHED_REQS_TAP_EVENT"));
8658 }
8659
8660 if (settings.reqs_per_tap_event <= 0) {
8661        settings.reqs_per_tap_event = DEFAULT_REQS_PER_TAP_EVENT;
8662 }
8663
8664 if (install_sigterm_handler() != 0) {
8665        settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
8666                "Failed to install SIGTERM handler\n");
8667        exit(EXIT_FAILURE);
8668 }
8669
8670 /* Aggregate the maximum number of connections */
8671 calculate_maxconns();
8672
8673 /* allocate the connection array */
8674 initialize_connections();
8675
8676 cbsasl_server_init();
8677
8678 /* initialize main thread libevent instance */
8679 main_base = event_base_new();
8680
8681 /* Load the storage engine */
8682 if (!load_engine(settings.engine_module,
8683 get_server_api,settings.extensions.logger,
8684 &engine_handle)) {
8685 /* Error already reported */
8686        exit(EXIT_FAILURE);
8687 }
8688
8689 if (!init_engine(engine_handle,
8690 settings.engine_config,
8691 settings.extensions.logger)) {
8692        return false;
8693 }
8694
8695 if (settings.verbose > 0) {
8696 log_engine_details(engine_handle,settings.extensions.logger);
8697 }
8698 settings.engine.v1 = (ENGINE_HANDLE_V1 *) engine_handle;
8699
8700    if (settings.engine.v1->arithmetic == NULL) {
8701 settings.engine.v1->arithmetic = internal_arithmetic;
8702 }
8703
8704 setup_not_supported_handlers();
8705
8706 /* initialize other stuff */
8707 stats_init();
8708
8709 default_independent_stats = new_independent_stats();
8710
8711#ifndef WIN32
8712 /* daemonize if requested */
8713 /* if we want to ensure our ability to dump core, don't chdir to / */
8714 if (settings.daemonize) {
8715        if (sigignore(SIGHUP) == -1) {
8716            settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
8717                    "Failed to ignore SIGHUP: ", strerror(errno));
8718        }
8719        if (daemonize(1, settings.verbose) == -1) {
8720            settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
8721                    "failed to daemon() in order to daemonize\n");
8722            exit(EXIT_FAILURE);
8723 }
8724 }
8725
8726 /*
8727 * ignore SIGPIPE signals; we can use errno == EPIPE if we
8728 * need that information
8729 */
8730    if (sigignore(SIGPIPE) == -1) {
8731        settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
8732                "failed to ignore SIGPIPE; sigaction");
8733        exit(EX_OSERR);
8734 }
8735#endif
8736
8737 /* start up worker threads if MT mode */
8738 thread_init(settings.num_threads, main_base, dispatch_event_handler);
8739
8740 /* initialise clock event */
8741 clock_handler(0, 0, 0);
8742
8743 /* create the listening socket, bind it, and init */
8744 {
8745 const char *portnumber_filename = getenv("MEMCACHED_PORT_FILENAME");
8746        char temp_portnumber_filename[PATH_MAX];
8747        FILE *portnumber_file = NULL;
8748
8749        if (portnumber_filename != NULL) {
8750            snprintf(temp_portnumber_filename,
8751                     sizeof(temp_portnumber_filename),
8752                     "%s.lck", portnumber_filename);
8753
8754 portnumber_file = fopen(temp_portnumber_filename, "a");
8755            if (portnumber_file == NULL) {
8756                settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
8757                        "Failed to open \"%s\": %s\n",
8758                        temp_portnumber_filename, strerror(errno));
8759 }
8760 }
8761
8762 if (server_sockets(portnumber_file)) {
8763            exit(EX_OSERR);
8764 }
8765
8766 if (portnumber_file) {
8767 fclose(portnumber_file);
8768 rename(temp_portnumber_filename, portnumber_filename);
8769 }
8770 }
8771
8772#ifndef WIN32
8773    if (settings.pid_file != NULL) {
8774 save_pid(settings.pid_file);
8775 }
8776#endif
8777
8778 /* Drop privileges no longer needed */
8779 drop_privileges();
8780
8781 /* Optional parent monitor */
8782 setup_parent_monitor();
8783
8784 if (!memcached_shutdown) {
8785 /* enter the event loop */
8786 event_base_loop(main_base, 0);
8787 }
8788
8789 if (settings.verbose) {
8790        settings.extensions.logger->log(EXTENSION_LOG_INFO, NULL,
8791 "Initiating shutdown\n");
8792 }
8793 threads_shutdown();
8794
8795    settings.engine.v1->destroy(settings.engine.v0, false);
8796
8797 threads_cleanup();
8798
8799 /* remove the PID file if we're a daemon */
8800#ifndef WIN32
8801 if (settings.daemonize)
8802 remove_pidfile(settings.pid_file);
8803#endif
8804
8805 /* Free the memory used by listening_port structure */
8806 if (stats.listening_ports) {
8807 free(stats.listening_ports);
8808 }
8809
8810 event_base_free(main_base);
8811 release_independent_stats(default_independent_stats);
8812 destroy_connections();
8813
8814 if (get_alloc_hooks_type() == none) {
8815 unload_engine();
8816 }
8817
8818 free(settings.config);
8819
8820    return EXIT_SUCCESS;
8821}