Bug Summary

File: memcached/daemon/memcached.c
Location: line 4242, column 30
Description: Function call argument is an uninitialized value
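
The report below flags a call at memcached.c:4242 where one of the arguments may still hold an indeterminate value on at least one execution path. As a quick orientation (a hedged, illustrative sketch only, not the flagged code itself; the names are made up), the checker fires on patterns like the following, where a local is assigned on some branches only and is then passed to a function:

    /* illustrative_uninit.c - minimal example of this defect class */
    #include <stdio.h>

    /* Writes *out only on success; callers must check the return code. */
    static int lookup(int key, int *out) {
        if (key > 0) {
            *out = key * 2;
            return 0;
        }
        return -1;             /* *out is left untouched on this path */
    }

    int main(void) {
        int value;             /* not initialized */
        lookup(-1, &value);    /* failure path taken, return code ignored */
        printf("%d\n", value); /* "Function call argument is an uninitialized value" */
        return 0;
    }

Initializing the local at its declaration, or checking the return code before using the output parameter, removes this class of warning.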

Annotated Source Code

1/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
2/*
3 * memcached - memory caching daemon
4 *
5 * http://www.danga.com/memcached/
6 *
7 * Copyright 2003 Danga Interactive, Inc. All rights reserved.
8 *
9 * Use and distribution licensed under the BSD license. See
10 * the LICENSE file for full text.
11 *
12 * Authors:
13 * Anatoly Vorobey <mellon@pobox.com>
14 * Brad Fitzpatrick <brad@danga.com>
15 */
16#include "config.h"
17#include "memcached.h"
18#include "memcached/extension_loggers.h"
19#include "alloc_hooks.h"
20#include "utilities/engine_loader.h"
21#include "timings.h"
22
23#include <signal.h>
24#include <getopt.h>
25#include <fcntl.h>
26#include <errno.h>
27#include <stdlib.h>
28#include <stdio.h>
29#include <string.h>
30#include <time.h>
31#include <limits.h>
32#include <ctype.h>
33#include <stdarg.h>
34#include <stddef.h>
35#include <snappy-c.h>
36#include <JSON_checker.h>
37
38static bool grow_dynamic_buffer(conn *c, size_t needed);
39
40typedef union {
41 item_info info;
42 char bytes[sizeof(item_info) + ((IOV_MAX - 1) * sizeof(struct iovec))];
43} item_info_holder;
44
45static const char* get_server_version(void);
46
47static void item_set_cas(const void *cookie, item *it, uint64_t cas) {
48 settings.engine.v1->item_set_cas(settings.engine.v0, cookie, it, cas);
49}
50
51#define MAX_SASL_MECH_LEN 32
52
53/* The item must always be called "it" */
54#define SLAB_GUTS(conn, thread_stats, slab_op, thread_op) \
55 thread_stats->slab_stats[info.info.clsid].slab_op++;
56
57#define THREAD_GUTS(conn, thread_stats, slab_op, thread_op) \
58 thread_stats->thread_op++;
59
60#define THREAD_GUTS2(conn, thread_stats, slab_op, thread_op) \
61 thread_stats->slab_op++; \
62 thread_stats->thread_op++;
63
64#define SLAB_THREAD_GUTS(conn, thread_stats, slab_op, thread_op) \
65 SLAB_GUTS(conn, thread_stats, slab_op, thread_op) \
66 THREAD_GUTS(conn, thread_stats, slab_op, thread_op)
67
68#define STATS_INCR1(GUTS, conn, slab_op, thread_op, key, nkey) { \
69 struct thread_stats *thread_stats = get_thread_stats(conn); \
70 cb_mutex_enter(&thread_stats->mutex); \
71 GUTS(conn, thread_stats, slab_op, thread_op); \
72 cb_mutex_exit(&thread_stats->mutex); \
73}
74
75#define STATS_INCR(conn, op, key, nkey) \
76 STATS_INCR1(THREAD_GUTS, conn, op, op, key, nkey)
77
78#define SLAB_INCR(conn, op, key, nkey) \
79 STATS_INCR1(SLAB_GUTS, conn, op, op, key, nkey)
80
81#define STATS_TWO(conn, slab_op, thread_op, key, nkey) \
82 STATS_INCR1(THREAD_GUTS2, conn, slab_op, thread_op, key, nkey)
83
84#define SLAB_TWO(conn, slab_op, thread_op, key, nkey) \
85 STATS_INCR1(SLAB_THREAD_GUTS, conn, slab_op, thread_op, key, nkey)
86
87#define STATS_HIT(conn, op, key, nkey) \
88 SLAB_TWO(conn, op##_hits, cmd_##op, key, nkey)
89
90#define STATS_MISS(conn, op, key, nkey) \
91 STATS_TWO(conn, op##_misses, cmd_##op, key, nkey)
92
93#define STATS_NOKEY(conn, op) { \
94 struct thread_stats *thread_stats = \
95 get_thread_stats(conn); \
96 cb_mutex_enter(&thread_stats->mutex); \
97 thread_stats->op++; \
98 cb_mutex_exit(&thread_stats->mutex); \
99}
100
101#define STATS_NOKEY2(conn, op1, op2) { \
102 struct thread_stats *thread_stats = \
103 get_thread_stats(conn); \
104 cb_mutex_enter(&thread_stats->mutex); \
105 thread_stats->op1++; \
106 thread_stats->op2++; \
107 cb_mutex_exit(&thread_stats->mutex); \
108}
109
110#define STATS_ADD(conn, op, amt) { \
111 struct thread_stats *thread_stats = \
112 get_thread_stats(conn); \
113 cb_mutex_enter(&thread_stats->mutex); \
114 thread_stats->op += amt; \
115 cb_mutex_exit(&thread_stats->mutex); \
116}
117
118volatile sig_atomic_t memcached_shutdown;
119
120/* Lock for global stats */
121static cb_mutex_t stats_lock;
122
123/**
124 * Structure to save ns_server's session cas token.
125 */
126static struct session_cas {
127 uint64_t value;
128 uint64_t ctr;
129 cb_mutex_t mutex;
130} session_cas;
131
132void STATS_LOCK() {
133 cb_mutex_enter(&stats_lock);
134}
135
136void STATS_UNLOCK() {
137 cb_mutex_exit(&stats_lock);
138}
139
140#ifdef WIN32
141static int is_blocking(DWORD dw) {
142 return (dw == WSAEWOULDBLOCK);
143}
144static int is_emfile(DWORD dw) {
145 return (dw == WSAEMFILE);
146}
147static int is_closed_conn(DWORD dw) {
148 return (dw == WSAENOTCONN || WSAECONNRESET);
149}
150static int is_addrinuse(DWORD dw) {
151 return (dw == WSAEADDRINUSE);
152}
153static void set_ewouldblock(void) {
154 WSASetLastError(WSAEWOULDBLOCK);
155}
156static void set_econnreset(void) {
157 WSASetLastError(WSAECONNRESET);
158}
159#else
160static int is_blocking(int dw) {
161 return (dw == EAGAIN || dw == EWOULDBLOCK);
162}
163static int is_emfile(int dw) {
164 return (dw == EMFILE);
165}
166static int is_closed_conn(int dw) {
167 return (dw == ENOTCONN || dw != ECONNRESET);
168}
169static int is_addrinuse(int dw) {
170 return (dw == EADDRINUSE);
171}
172static void set_ewouldblock(void) {
173 errno = EWOULDBLOCK;
174}
175static void set_econnreset(void) {
176 errno = ECONNRESET;
177}
178#endif
179
180
181/*
182 * We keep the current time of day in a global variable that's updated by a
183 * timer event. This saves us a bunch of time() system calls (we really only
184 * need to get the time once a second, whereas there can be tens of thousands
185 * of requests a second) and allows us to use server-start-relative timestamps
186 * rather than absolute UNIX timestamps, a space savings on systems where
187 * sizeof(time_t) > sizeof(unsigned int).
188 */
189volatile rel_time_t current_time;
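
The comment above describes the caching scheme; the set_current_time() helper it relies on is only forward-declared in this excerpt, and its definition sits further down in memcached.c (not shown here). A minimal self-contained sketch of the idea, with an assumed rel_time_t typedef and without the libevent timer plumbing:

    #include <time.h>

    typedef unsigned int rel_time_t;   /* seconds since server start */

    static volatile rel_time_t current_time;
    static time_t process_started;     /* set once at startup */

    /* Invoked from a roughly once-per-second timer callback, so request
     * handlers can read current_time instead of calling time() per request. */
    static void set_current_time(void) {
        current_time = (rel_time_t)(time(NULL) - process_started);
    }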
190
191/*
192 * forward declarations
193 */
194static SOCKET new_socket(struct addrinfo *ai);
195static int try_read_command(conn *c);
196static struct thread_stats* get_independent_stats(conn *c);
197static struct thread_stats* get_thread_stats(conn *c);
198static void register_callback(ENGINE_HANDLE *eh,
199 ENGINE_EVENT_TYPE type,
200 EVENT_CALLBACK cb, const void *cb_data);
201
202
203enum try_read_result {
204 READ_DATA_RECEIVED,
205 READ_NO_DATA_RECEIVED,
206 READ_ERROR, /** an error occurred (on the socket) (or client closed connection) */
207 READ_MEMORY_ERROR /** failed to allocate more memory */
208};
209
210static enum try_read_result try_read_network(conn *c);
211
212/* stats */
213static void stats_init(void);
214static void server_stats(ADD_STAT add_stats, conn *c, bool aggregate);
215static void process_stat_settings(ADD_STAT add_stats, void *c);
216
217
218/* defaults */
219static void settings_init(void);
220
221/* event handling, network IO */
222static void event_handler(evutil_socket_t fd, short which, void *arg);
223static void complete_nread(conn *c);
224static void write_and_free(conn *c, char *buf, size_t bytes);
225static int ensure_iov_space(conn *c);
226static int add_iov(conn *c, const void *buf, size_t len);
227static int add_msghdr(conn *c);
228
229
230/* time handling */
231static void set_current_time(void); /* update the global variable holding
232 global 32-bit seconds-since-start time
233 (to avoid 64 bit time_t) */
234
235/** exported globals **/
236struct stats stats;
237struct settings settings;
238static time_t process_started; /* when the process was started */
239
240/** file scope variables **/
241static conn *listen_conn = NULL;
242static struct event_base *main_base;
243static struct thread_stats *default_independent_stats;
244
245static struct engine_event_handler *engine_event_handlers[MAX_ENGINE_EVENT_TYPE + 1];
246
247enum transmit_result {
248 TRANSMIT_COMPLETE, /** All done writing. */
249 TRANSMIT_INCOMPLETE, /** More data remaining to write. */
250 TRANSMIT_SOFT_ERROR, /** Can't write any more right now. */
251 TRANSMIT_HARD_ERROR /** Can't write (c->state is set to conn_closing) */
252};
253
254static enum transmit_result transmit(conn *c);
255
256#define REALTIME_MAXDELTA 60*60*24*30
257
258/* Perform all callbacks of a given type for the given connection. */
259void perform_callbacks(ENGINE_EVENT_TYPE type,
260 const void *data,
261 const void *c) {
262 struct engine_event_handler *h;
263 for (h = engine_event_handlers[type]; h; h = h->next) {
264 h->cb(c, type, data, h->cb_data);
265 }
266}
267
268/*
269 * given time value that's either unix time or delta from current unix time,
270 * return unix time. Use the fact that delta can't exceed one month
271 * (and real time value can't be that low).
272 */
273static rel_time_t realtime(const time_t exptime) {
274 /* no. of seconds in 30 days - largest possible delta exptime */
275
276 if (exptime == 0) return 0; /* 0 means never expire */
277
278 if (exptime > REALTIME_MAXDELTA) {
279 /* if item expiration is at/before the server started, give it an
280 expiration time of 1 second after the server started.
281 (because 0 means don't expire). without this, we'd
282 underflow and wrap around to some large value way in the
283 future, effectively making items expiring in the past
284 really expiring never */
285 if (exptime <= process_started)
286 return (rel_time_t)1;
287 return (rel_time_t)(exptime - process_started);
288 } else {
289 return (rel_time_t)(exptime + current_time);
290 }
291}
292
293/**
294 * Convert the relative time to an absolute time (relative to EPOC ;) )
295 */
296static time_t abstime(const rel_time_t exptime)
297{
298 return process_started + exptime;
299}
300
301/**
302 * Return the TCP or domain socket listening_port structure that
303 * has a given port number
304 */
305static struct listening_port *get_listening_port_instance(const int port) {
306 struct listening_port *port_ins = NULL;
307 int i;
308 for (i = 0; i < settings.num_interfaces; ++i) {
309 if (stats.listening_ports[i].port == port) {
310 port_ins = &stats.listening_ports[i];
311 }
312 }
313 return port_ins;
314}
315
316static void stats_init(void) {
317 stats.daemon_conns = 0;
318 stats.rejected_conns = 0;
319 stats.curr_conns = stats.total_conns = 0;
320 stats.listening_ports = calloc(settings.num_interfaces, sizeof(struct listening_port));
321
322 stats_prefix_init();
323}
324
325static void stats_reset(const void *cookie) {
326 struct conn *conn = (struct conn*)cookie;
327 STATS_LOCK();
328 stats.rejected_conns = 0;
329 stats.total_conns = 0;
330 stats_prefix_clear();
331 STATS_UNLOCK();
332 threadlocal_stats_reset(get_independent_stats(conn));
333 settings.engine.v1->reset_stats(settings.engine.v0, cookie);
334}
335
336static int get_number_of_worker_threads(void) {
337 int ret;
338 char *override = getenv("MEMCACHED_NUM_CPUS");
339 if (override == NULL) {
340#ifdef WIN32
341 SYSTEM_INFO sysinfo;
342 GetSystemInfo(&sysinfo);
343 ret = (int)sysinfo.dwNumberOfProcessors;
344#else
345 ret = (int)sysconf(_SC_NPROCESSORS_ONLN);
346#endif
347 if (ret > 4) {
348 ret = (int)(ret * 0.75f);
349 }
350 if (ret < 4) {
351 ret = 4;
352 }
353 } else {
354 ret = atoi(override);
355 if (ret == 0) {
356 ret = 4;
357 }
358 }
359
360 return ret;
361}
362
363static void settings_init(void) {
364 static struct interface default_interface;
365 default_interface.port = 11211;
366 default_interface.maxconn = 1000;
367 default_interface.backlog = 1024;
368
369 settings.num_interfaces = 1;
370 settings.interfaces = &default_interface;
371 settings.daemonize = false;
372 settings.pid_file = NULL;
373 settings.bio_drain_buffer_sz = 8192;
374
375 settings.verbose = 0;
376 settings.num_threads = get_number_of_worker_threads();
377 settings.prefix_delimiter = ':';
378 settings.detail_enabled = 0;
379 settings.allow_detailed = true;
380 settings.reqs_per_event = DEFAULT_REQS_PER_EVENT;
381 settings.require_sasl = false;
382 settings.extensions.logger = get_stderr_logger();
383 settings.tcp_nodelay = getenv("MEMCACHED_DISABLE_TCP_NODELAY") == NULL;
384 settings.engine_module = "default_engine.so";
385 settings.engine_config = NULL;
386 settings.config = NULL;
387}
388
389/*
390 * Adds a message header to a connection.
391 *
392 * Returns 0 on success, -1 on out-of-memory.
393 */
394static int add_msghdr(conn *c)
395{
396 struct msghdr *msg;
397
398 cb_assert(c != NULL);
399
400 if (c->msgsize == c->msgused) {
401 msg = realloc(c->msglist, c->msgsize * 2 * sizeof(struct msghdr));
402 if (! msg)
403 return -1;
404 c->msglist = msg;
405 c->msgsize *= 2;
406 }
407
408 msg = c->msglist + c->msgused;
409
410 /* this wipes msg_iovlen, msg_control, msg_controllen, and
411 msg_flags, the last 3 of which aren't defined on solaris: */
412 memset(msg, 0, sizeof(struct msghdr));
413
414 msg->msg_iov = &c->iov[c->iovused];
415
416 if (c->request_addr_size > 0) {
417 msg->msg_name = &c->request_addr;
418 msg->msg_namelen = c->request_addr_size;
419 }
420
421 c->msgbytes = 0;
422 c->msgused++;
423
424 return 0;
425}
426
427struct {
428 cb_mutex_t mutex;
429 bool disabled;
430 ssize_t count;
431 uint64_t num_disable;
432} listen_state;
433
434static bool is_listen_disabled(void) {
435 bool ret;
436 cb_mutex_enter(&listen_state.mutex);
437 ret = listen_state.disabled;
438 cb_mutex_exit(&listen_state.mutex);
439 return ret;
440}
441
442static uint64_t get_listen_disabled_num(void) {
443 uint64_t ret;
444 cb_mutex_enter(&listen_state.mutex);
445 ret = listen_state.num_disable;
446 cb_mutex_exit(&listen_state.mutex);
447 return ret;
448}
449
450static void disable_listen(void) {
451 conn *next;
452 cb_mutex_enter(&listen_state.mutex);
453 listen_state.disabled = true;
454 listen_state.count = 10;
455 ++listen_state.num_disable;
456 cb_mutex_exit(&listen_state.mutex);
457
458 for (next = listen_conn; next; next = next->next) {
459 update_event(next, 0);
460 if (listen(next->sfd, 1) != 0) {
461 log_socket_error(EXTENSION_LOG_WARNING, NULL,
462 "listen() failed: %s");
463 }
464 }
465}
466
467void safe_close(SOCKET sfd) {
468 if (sfd != INVALID_SOCKET) {
469 int rval;
470 while ((rval = closesocket(sfd)) == SOCKET_ERROR &&
471 (errno == EINTR || errno == EAGAIN)) {
472 /* go ahead and retry */
473 }
474
475 if (rval == SOCKET_ERROR) {
476 char msg[80];
477 snprintf(msg, sizeof(msg), "Failed to close socket %d (%%s)!!", (int)sfd);
478 log_socket_error(EXTENSION_LOG_WARNING, NULL,
479 msg);
480 } else {
481 STATS_LOCK();
482 stats.curr_conns--;
483 STATS_UNLOCK();
484
485 if (is_listen_disabled()) {
486 notify_dispatcher();
487 }
488 }
489 }
490}
491
492/**
493 * Reset all of the dynamic buffers used by a connection back to their
494 * default sizes. The strategy for resizing the buffers is to allocate a
495 * new one of the correct size and free the old one if the allocation succeeds
496 * instead of using realloc to change the buffer size (because realloc may
497 * not shrink the buffers, and will also copy the memory). If the allocation
498 * fails the buffer will be unchanged.
499 *
500 * @param c the connection to resize the buffers for
501 * @return true if all allocations succeeded, false if one or more of the
502 * allocations failed.
503 */
504static bool conn_reset_buffersize(conn *c) {
505 bool ret = true;
506
507 if (c->rsize != DATA_BUFFER_SIZE) {
508 void *ptr = malloc(DATA_BUFFER_SIZE);
509 if (ptr != NULL) {
510 free(c->rbuf);
511 c->rbuf = ptr;
512 c->rsize = DATA_BUFFER_SIZE;
513 } else {
514 ret = false;
515 }
516 }
517
518 if (c->wsize != DATA_BUFFER_SIZE) {
519 void *ptr = malloc(DATA_BUFFER_SIZE);
520 if (ptr != NULL) {
521 free(c->wbuf);
522 c->wbuf = ptr;
523 c->wsize = DATA_BUFFER_SIZE;
524 } else {
525 ret = false;
526 }
527 }
528
529 if (c->isize != ITEM_LIST_INITIAL) {
530 void *ptr = malloc(sizeof(item *) * ITEM_LIST_INITIAL);
531 if (ptr != NULL) {
532 free(c->ilist);
533 c->ilist = ptr;
534 c->isize = ITEM_LIST_INITIAL;
535 } else {
536 ret = false;
537 }
538 }
539
540 if (c->temp_alloc_size != TEMP_ALLOC_LIST_INITIAL) {
541 void *ptr = malloc(sizeof(char *) * TEMP_ALLOC_LIST_INITIAL);
542 if (ptr != NULL) {
543 free(c->temp_alloc_list);
544 c->temp_alloc_list = ptr;
545 c->temp_alloc_size = TEMP_ALLOC_LIST_INITIAL;
546 } else {
547 ret = false;
548 }
549 }
550
551 if (c->iovsize != IOV_LIST_INITIAL) {
552 void *ptr = malloc(sizeof(struct iovec) * IOV_LIST_INITIAL);
553 if (ptr != NULL) {
554 free(c->iov);
555 c->iov = ptr;
556 c->iovsize = IOV_LIST_INITIAL;
557 } else {
558 ret = false;
559 }
560 }
561
562 if (c->msgsize != MSG_LIST_INITIAL) {
563 void *ptr = malloc(sizeof(struct msghdr) * MSG_LIST_INITIAL);
564 if (ptr != NULL) {
565 free(c->msglist);
566 c->msglist = ptr;
567 c->msgsize = MSG_LIST_INITIAL;
568 } else {
569 ret = false;
570 }
571 }
572
573 return ret;
574}
575
576/**
577 * Constructor for all memory allocations of connection objects. Initialize
578 * all members and allocate the transfer buffers.
579 *
580 * @param buffer The memory allocated by the object cache
581 * @return 0 on success, 1 if we failed to allocate memory
582 */
583static int conn_constructor(conn *c) {
584 memset(c, 0, sizeof(*c));
585 MEMCACHED_CONN_CREATE(c);
586
587 c->state = conn_immediate_close;
588 c->sfd = INVALID_SOCKET;
589 if (!conn_reset_buffersize(c)) {
590 free(c->rbuf);
591 free(c->wbuf);
592 free(c->ilist);
593 free(c->temp_alloc_list);
594 free(c->iov);
595 free(c->msglist);
596 settings.extensions.logger->log(EXTENSION_LOG_WARNING,
597 NULL,
598 "Failed to allocate buffers for connection\n");
599 return 1;
600 }
601
602 STATS_LOCK();
603 stats.conn_structs++;
604 STATS_UNLOCK();
605
606 return 0;
607}
608
609/**
610 * Destructor for all connection objects. Release all allocated resources.
611 *
612 * @param buffer The memory allocated by the object cache
613 */
614static void conn_destructor(conn *c) {
615 free(c->rbuf);
616 free(c->wbuf);
617 free(c->ilist);
618 free(c->temp_alloc_list);
619 free(c->iov);
620 free(c->msglist);
621
622 STATS_LOCK();
623 stats.conn_structs--;
624 STATS_UNLOCK();
625}
626
627/*
628 * Free list management for connections.
629 */
630struct connections {
631 conn* free;
632 conn** all;
633 cb_mutex_t mutex;
634 int next;
635} connections;
636
637static void initialize_connections(void)
638{
639 int preallocate;
640
641 cb_mutex_initialize(&connections.mutex);
642 connections.all = calloc(settings.maxconns, sizeof(conn*));
643 if (connections.all == NULL) {
644 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
645 "Failed to allocate memory for connections");
646 exit(EX_OSERR);
647 }
648
649 preallocate = settings.maxconns / 2;
650 if (preallocate < 1000) {
651 preallocate = settings.maxconns;
652 } else if (preallocate > 5000) {
653 preallocate = 5000;
654 }
655
656 for (connections.next = 0; connections.next < preallocate; ++connections.next) {
657 connections.all[connections.next] = malloc(sizeof(conn));
658 if (conn_constructor(connections.all[connections.next]) != 0) {
659 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
660 "Failed to allocate memory for connections");
661 exit(EX_OSERR);
662 }
663 connections.all[connections.next]->next = connections.free;
664 connections.free = connections.all[connections.next];
665 }
666}
667
668static void destroy_connections(void)
669{
670 int ii;
671 for (ii = 0; ii < settings.maxconns; ++ii) {
672 if (connections.all[ii]) {
673 conn *c = connections.all[ii];
674 conn_destructor(c);
675 free(c);
676 }
677 }
678
679 free(connections.all);
680}
681
682static conn *allocate_connection(void) {
683 conn *ret;
684
685 cb_mutex_enter(&connections.mutex);
686 ret = connections.free;
687 if (ret != NULL) {
688 connections.free = connections.free->next;
689 ret->next = NULL;
690 }
691 cb_mutex_exit(&connections.mutex);
692
693 if (ret == NULL) {
694 ret = malloc(sizeof(conn));
695 if (ret == NULL) {
696 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
697 "Failed to allocate memory for connection");
698 return NULL;
699 }
700
701 if (conn_constructor(ret) != 0) {
702 free(ret);
703 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
704 "Failed to allocate memory for connection");
705 return NULL;
706 }
707
708 cb_mutex_enter(&connections.mutex);
709 if (connections.next == settings.maxconns) {
710 free(ret);
711 ret = NULL;
712 } else {
713 connections.all[connections.next++] = ret;
714 }
715 cb_mutex_exit(&connections.mutex);
716 }
717
718 return ret;
719}
720
721static void release_connection(conn *c) {
722 c->sfd = INVALID_SOCKET;
723 cb_mutex_enter(&connections.mutex);
724 c->next = connections.free;
725 connections.free = c;
726 cb_mutex_exit(&connections.mutex);
727}
728
729static const char *substate_text(enum bin_substates state) {
730 switch (state) {
731 case bin_no_state: return "bin_no_state";
732 case bin_reading_set_header: return "bin_reading_set_header";
733 case bin_reading_cas_header: return "bin_reading_cas_header";
734 case bin_read_set_value: return "bin_read_set_value";
735 case bin_reading_sasl_auth: return "bin_reading_sasl_auth";
736 case bin_reading_sasl_auth_data: return "bin_reading_sasl_auth_data";
737 case bin_reading_packet: return "bin_reading_packet";
738 default:
739 return "illegal";
740 }
741}
742
743static void add_connection_stats(ADD_STAT add_stats, conn *d, conn *c) {
744 append_stat("conn", add_stats, d, "%p", c);
745 if (c->sfd == INVALID_SOCKET) {
746 append_stat("socket", add_stats, d, "disconnected");
747 } else {
748 append_stat("socket", add_stats, d, "%lu", (long)c->sfd);
749 append_stat("protocol", add_stats, d, "%s", "binary");
750 append_stat("transport", add_stats, d, "TCP");
751 append_stat("nevents", add_stats, d, "%u", c->nevents);
752 if (c->sasl_conn != NULL) {
753 append_stat("sasl_conn", add_stats, d, "%p", c->sasl_conn);
754 }
755 append_stat("state", add_stats, d, "%s", state_text(c->state));
756 append_stat("substate", add_stats, d, "%s", substate_text(c->substate));
757 append_stat("registered_in_libevent", add_stats, d, "%d",
758 (int)c->registered_in_libevent);
759 append_stat("ev_flags", add_stats, d, "%x", c->ev_flags);
760 append_stat("which", add_stats, d, "%x", c->which);
761 append_stat("rbuf", add_stats, d, "%p", c->rbuf);
762 append_stat("rcurr", add_stats, d, "%p", c->rcurr);
763 append_stat("rsize", add_stats, d, "%u", c->rsize);
764 append_stat("rbytes", add_stats, d, "%u", c->rbytes);
765 append_stat("wbuf", add_stats, d, "%p", c->wbuf);
766 append_stat("wcurr", add_stats, d, "%p", c->wcurr);
767 append_stat("wsize", add_stats, d, "%u", c->wsize);
768 append_stat("wbytes", add_stats, d, "%u", c->wbytes);
769 append_stat("write_and_go", add_stats, d, "%p", c->write_and_go);
770 append_stat("write_and_free", add_stats, d, "%p", c->write_and_free);
771 append_stat("ritem", add_stats, d, "%p", c->ritem);
772 append_stat("rlbytes", add_stats, d, "%u", c->rlbytes);
773 append_stat("item", add_stats, d, "%p", c->item);
774 append_stat("store_op", add_stats, d, "%u", c->store_op);
775 append_stat("sbytes", add_stats, d, "%u", c->sbytes);
776 append_stat("iov", add_stats, d, "%p", c->iov);
777 append_stat("iovsize", add_stats, d, "%u", c->iovsize);
778 append_stat("iovused", add_stats, d, "%u", c->iovused);
779 append_stat("msglist", add_stats, d, "%p", c->msglist);
780 append_stat("msgsize", add_stats, d, "%u", c->msgsize);
781 append_stat("msgused", add_stats, d, "%u", c->msgused);
782 append_stat("msgcurr", add_stats, d, "%u", c->msgcurr);
783 append_stat("msgbytes", add_stats, d, "%u", c->msgbytes);
784 append_stat("ilist", add_stats, d, "%p", c->ilist);
785 append_stat("isize", add_stats, d, "%u", c->isize);
786 append_stat("icurr", add_stats, d, "%p", c->icurr);
787 append_stat("ileft", add_stats, d, "%u", c->ileft);
788 append_stat("temp_alloc_list", add_stats, d, "%p", c->temp_alloc_list);
789 append_stat("temp_alloc_size", add_stats, d, "%u", c->temp_alloc_size);
790 append_stat("temp_alloc_curr", add_stats, d, "%p", c->temp_alloc_curr);
791 append_stat("temp_alloc_left", add_stats, d, "%u", c->temp_alloc_left);
792
793 append_stat("noreply", add_stats, d, "%d", c->noreply);
794 append_stat("refcount", add_stats, d, "%u", (int)c->refcount);
795 append_stat("dynamic_buffer.buffer", add_stats, d, "%p",
796 c->dynamic_buffer.buffer);
797 append_stat("dynamic_buffer.size", add_stats, d, "%zu",
798 c->dynamic_buffer.size);
799 append_stat("dynamic_buffer.offset", add_stats, d, "%zu",
800 c->dynamic_buffer.offset);
801 append_stat("engine_storage", add_stats, d, "%p", c->engine_storage);
802 /* @todo we should decode the binary header */
803 append_stat("cas", add_stats, d, "%"PRIu64"ll" "u", c->cas);
804 append_stat("cmd", add_stats, d, "%u", c->cmd);
805 append_stat("opaque", add_stats, d, "%u", c->opaque);
806 append_stat("keylen", add_stats, d, "%u", c->keylen);
807 append_stat("list_state", add_stats, d, "%u", c->list_state);
808 append_stat("next", add_stats, d, "%p", c->next);
809 append_stat("thread", add_stats, d, "%p", c->thread);
810 append_stat("aiostat", add_stats, d, "%u", c->aiostat);
811 append_stat("ewouldblock", add_stats, d, "%u", c->ewouldblock);
812 append_stat("tap_iterator", add_stats, d, "%p", c->tap_iterator);
813 }
814}
815
816/**
817 * Do a full stats of all of the connections.
818 * Do _NOT_ try to follow _ANY_ of the pointers in the conn structure
819 * because we read all of the values _DIRTY_. We preallocated the array
820 * of all of the connection pointers during startup, so we _KNOW_ that
821 * we can iterate through all of them. All of the conn structs will
822 * only appear in the connections.all array when we've allocated them,
823 * and we don't release them so it's safe to look at them.
824 */
825static void connection_stats(ADD_STAT add_stats, conn *c) {
826 int ii;
827 for (ii = 0; ii < settings.maxconns && connections.all[ii]; ++ii) {
828 add_connection_stats(add_stats, c, connections.all[ii]);
829 }
830}
831
832conn *conn_new(const SOCKET sfd, in_port_t parent_port,
833 STATE_FUNC init_state, int event_flags,
834 unsigned int read_buffer_size, struct event_base *base,
835 struct timeval *timeout) {
836 conn *c = allocate_connection();
837 if (c == NULL) {
838 return NULL;
839 }
840
841 cb_assert(c->thread == NULL);
842
843 if (c->rsize < read_buffer_size) {
844 void *mem = malloc(read_buffer_size);
845 if (mem) {
846 c->rsize = read_buffer_size;
847 free(c->rbuf);
848 c->rbuf = mem;
849 } else {
850 cb_assert(c->thread == NULL);
851 release_connection(c);
852 return NULL;
853 }
854 }
855
856 memset(&c->ssl, 0, sizeof(c->ssl));
857 if (init_state != conn_listening) {
858 int ii;
859 for (ii = 0; ii < settings.num_interfaces; ++ii) {
860 if (parent_port == settings.interfaces[ii].port) {
861 if (settings.interfaces[ii].ssl.cert != NULL) {
862 const char *cert = settings.interfaces[ii].ssl.cert;
863 const char *pkey = settings.interfaces[ii].ssl.key;
864
865 c->ssl.ctx = SSL_CTX_new(SSLv23_server_method());
866
867 /* @todo don't read files, but use in-memory-copies */
868 if (!SSL_CTX_use_certificate_chain_file(c->ssl.ctx, cert) ||
869 !SSL_CTX_use_PrivateKey_file(c->ssl.ctx, pkey, SSL_FILETYPE_PEM)) {
870 release_connection(c);
871 return NULL;
872 }
873
874 c->ssl.enabled = true;
875 c->ssl.error = false;
876 c->ssl.client = NULL;
877
878 c->ssl.in.buffer = malloc(settings.bio_drain_buffer_sz);
879 c->ssl.out.buffer = malloc(settings.bio_drain_buffer_sz);
880
881 if (c->ssl.in.buffer == NULL || c->ssl.out.buffer == NULL) {
882 release_connection(c);
883 return NULL;
884 }
885
886 c->ssl.in.buffsz = settings.bio_drain_buffer_sz;
887 c->ssl.out.buffsz = settings.bio_drain_buffer_sz;
888 BIO_new_bio_pair(&c->ssl.application,
889 settings.bio_drain_buffer_sz,
890 &c->ssl.network,
891 settings.bio_drain_buffer_sz);
892
893 c->ssl.client = SSL_new(c->ssl.ctx);
894 SSL_set_bio(c->ssl.client,
895 c->ssl.application,
896 c->ssl.application);
897 }
898 }
899 }
900 }
901
902 c->request_addr_size = 0;
903
904 if (settings.verbose > 1) {
905 if (init_state == conn_listening) {
906 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
907 "<%d server listening", sfd);
908 } else {
909 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
910 "<%d new client connection", sfd);
911 }
912 }
913
914 c->sfd = sfd;
915 c->parent_port = parent_port;
916 c->state = init_state;
917 c->rlbytes = 0;
918 c->cmd = -1;
919 c->rbytes = c->wbytes = 0;
920 c->wcurr = c->wbuf;
921 c->rcurr = c->rbuf;
922 c->ritem = 0;
923 c->icurr = c->ilist;
924 c->temp_alloc_curr = c->temp_alloc_list;
925 c->ileft = 0;
926 c->temp_alloc_left = 0;
927 c->iovused = 0;
928 c->msgcurr = 0;
929 c->msgused = 0;
930 c->next = NULL;
931 c->list_state = 0;
932
933 c->write_and_go = init_state;
934 c->write_and_free = 0;
935 c->item = 0;
936 c->supports_datatype = false;
937 c->noreply = false;
938
939 event_set(&c->event, sfd, event_flags, event_handler, (void *)c);
940 event_base_set(base, &c->event);
941 c->ev_flags = event_flags;
942
943 if (!register_event(c, timeout)) {
944 cb_assert(c->thread == NULL);
945 release_connection(c);
946 return NULL;
947 }
948
949 STATS_LOCK();
950 stats.total_conns++;
951 STATS_UNLOCK();
952
953 c->aiostat = ENGINE_SUCCESS;
954 c->ewouldblock = false;
955 c->refcount = 1;
956
957 MEMCACHED_CONN_ALLOCATE(c->sfd);
958
959 perform_callbacks(ON_CONNECT, NULL, c);
960
961 return c;
962}
963
964static void conn_cleanup(conn *c) {
965 cb_assert(c != NULL);
966
967 if (c->item) {
968 settings.engine.v1->release(settings.engine.v0, c, c->item);
969 c->item = 0;
970 }
971
972 if (c->ileft != 0) {
973 for (; c->ileft > 0; c->ileft--,c->icurr++) {
974 settings.engine.v1->release(settings.engine.v0, c, *(c->icurr));
975 }
976 }
977
978 if (c->temp_alloc_left != 0) {
979 for (; c->temp_alloc_left > 0; c->temp_alloc_left--, c->temp_alloc_curr++) {
980 free(*(c->temp_alloc_curr));
981 }
982 }
983
984 if (c->write_and_free) {
985 free(c->write_and_free);
986 c->write_and_free = 0;
987 }
988
989 if (c->sasl_conn) {
990 cbsasl_dispose(&c->sasl_conn);
991 c->sasl_conn = NULL;
992 }
993
994 c->engine_storage = NULL;
995 c->tap_iterator = NULL;
996 c->thread = NULL;
997 cb_assert(c->next == NULL);
998 c->sfd = INVALID_SOCKET;
999 c->upr = 0;
1000 c->start = 0;
1001 if (c->ssl.enabled) {
1002 BIO_free_all(c->ssl.network);
1003 SSL_free(c->ssl.client);
1004 c->ssl.enabled = false;
1005 c->ssl.error = false;
1006 free(c->ssl.in.buffer);
1007 free(c->ssl.out.buffer);
1008 memset(&c->ssl, 0, sizeof(c->ssl));
1009 }
1010}
1011
1012void conn_close(conn *c) {
1013 cb_assert(c != NULL);
1014 cb_assert(c->sfd == INVALID_SOCKET);
1015 cb_assert(c->state == conn_immediate_close);
1016
1017 cb_assert(c->thread);
1018 /* remove from pending-io list */
1019 if (settings.verbose > 1 && list_contains(c->thread->pending_io, c)) {
1020 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
1021 "Current connection was in the pending-io list.. Nuking it\n");
1022 }
1023 c->thread->pending_io = list_remove(c->thread->pending_io, c);
1024
1025 conn_cleanup(c);
1026
1027 /*
1028 * The contract with the object cache is that we should return the
1029 * object in a constructed state. Reset the buffers to the default
1030 * size
1031 */
1032 conn_reset_buffersize(c);
1033 cb_assert(c->thread == NULL);
1034 release_connection(c);
1035}
1036
1037/*
1038 * Shrinks a connection's buffers if they're too big. This prevents
1039 * periodic large "get" requests from permanently chewing lots of server
1040 * memory.
1041 *
1042 * This should only be called in between requests since it can wipe output
1043 * buffers!
1044 */
1045static void conn_shrink(conn *c) {
1046 cb_assert(c != NULL);
1047
1048 if (c->rsize > READ_BUFFER_HIGHWAT && c->rbytes < DATA_BUFFER_SIZE) {
1049 char *newbuf;
1050
1051 if (c->rcurr != c->rbuf)
1052 memmove(c->rbuf, c->rcurr, (size_t)c->rbytes);
1053
1054 newbuf = (char *)realloc((void *)c->rbuf, DATA_BUFFER_SIZE);
1055
1056 if (newbuf) {
1057 c->rbuf = newbuf;
1058 c->rsize = DATA_BUFFER_SIZE;
1059 }
1060 /* TODO check other branch... */
1061 c->rcurr = c->rbuf;
1062 }
1063
1064 if (c->isize > ITEM_LIST_HIGHWAT) {
1065 item **newbuf = (item**) realloc((void *)c->ilist, ITEM_LIST_INITIAL * sizeof(c->ilist[0]));
1066 if (newbuf) {
1067 c->ilist = newbuf;
1068 c->isize = ITEM_LIST_INITIAL;
1069 }
1070 /* TODO check error condition? */
1071 }
1072
1073 if (c->msgsize > MSG_LIST_HIGHWAT) {
1074 struct msghdr *newbuf = (struct msghdr *) realloc((void *)c->msglist, MSG_LIST_INITIAL * sizeof(c->msglist[0]));
1075 if (newbuf) {
1076 c->msglist = newbuf;
1077 c->msgsize = MSG_LIST_INITIAL;
1078 }
1079 /* TODO check error condition? */
1080 }
1081
1082 if (c->iovsize > IOV_LIST_HIGHWAT) {
1083 struct iovec *newbuf = (struct iovec *) realloc((void *)c->iov, IOV_LIST_INITIAL * sizeof(c->iov[0]));
1084 if (newbuf) {
1085 c->iov = newbuf;
1086 c->iovsize = IOV_LIST_INITIAL;
1087 }
1088 /* TODO check return value */
1089 }
1090}
1091
1092/**
1093 * Convert a state name to a human readable form.
1094 */
1095const char *state_text(STATE_FUNC state) {
1096 if (state == conn_listening) {
1097 return "conn_listening";
1098 } else if (state == conn_new_cmd) {
1099 return "conn_new_cmd";
1100 } else if (state == conn_waiting) {
1101 return "conn_waiting";
1102 } else if (state == conn_read) {
1103 return "conn_read";
1104 } else if (state == conn_parse_cmd) {
1105 return "conn_parse_cmd";
1106 } else if (state == conn_write) {
1107 return "conn_write";
1108 } else if (state == conn_nread) {
1109 return "conn_nread";
1110 } else if (state == conn_swallow) {
1111 return "conn_swallow";
1112 } else if (state == conn_closing) {
1113 return "conn_closing";
1114 } else if (state == conn_mwrite) {
1115 return "conn_mwrite";
1116 } else if (state == conn_ship_log) {
1117 return "conn_ship_log";
1118 } else if (state == conn_setup_tap_stream) {
1119 return "conn_setup_tap_stream";
1120 } else if (state == conn_pending_close) {
1121 return "conn_pending_close";
1122 } else if (state == conn_immediate_close) {
1123 return "conn_immediate_close";
1124 } else if (state == conn_refresh_cbsasl) {
1125 return "conn_refresh_cbsasl";
1126 } else if (state == conn_refresh_ssl_certs) {
1127 return "conn_refresh_ssl_cert";
1128 } else {
1129 return "Unknown";
1130 }
1131}
1132
1133/*
1134 * Sets a connection's current state in the state machine. Any special
1135 * processing that needs to happen on certain state transitions can
1136 * happen here.
1137 */
1138void conn_set_state(conn *c, STATE_FUNC state) {
1139 cb_assert(c != NULL);
1140
1141 if (state != c->state) {
1142 /*
1143 * The connections in the "tap thread" behaves differently than
1144 * normal connections because they operate in a full duplex mode.
1145 * New messages may appear from both sides, so we can't block on
1146 * read from the network / engine
1147 */
1148 if (c->tap_iterator != NULL || c->upr) {
1149 if (state == conn_waiting) {
1150 c->which = EV_WRITE;
1151 state = conn_ship_log;
1152 }
1153 }
1154
1155 if (settings.verbose > 2 || c->state == conn_closing
1156 || c->state == conn_setup_tap_stream) {
1157 settings.extensions.logger->log(EXTENSION_LOG_DETAIL, c,
1158 "%d: going from %s to %s\n",
1159 c->sfd, state_text(c->state),
1160 state_text(state));
1161 }
1162
1163 if (state == conn_write || state == conn_mwrite) {
1164 if (c->start != 0) {
1165 collect_timing(c->cmd, gethrtime() - c->start);
1166 c->start = 0;
1167 }
1168 MEMCACHED_PROCESS_COMMAND_END(c->sfd, c->wbuf, c->wbytes);
1169 }
1170
1171 c->state = state;
1172 }
1173}
1174
1175/*
1176 * Ensures that there is room for another struct iovec in a connection's
1177 * iov list.
1178 *
1179 * Returns 0 on success, -1 on out-of-memory.
1180 */
1181static int ensure_iov_space(conn *c) {
1182 cb_assert(c != NULL);
1183
1184 if (c->iovused >= c->iovsize) {
1185 int i, iovnum;
1186 struct iovec *new_iov = (struct iovec *)realloc(c->iov,
1187 (c->iovsize * 2) * sizeof(struct iovec));
1188 if (! new_iov)
1189 return -1;
1190 c->iov = new_iov;
1191 c->iovsize *= 2;
1192
1193 /* Point all the msghdr structures at the new list. */
1194 for (i = 0, iovnum = 0; i < c->msgused; i++) {
1195 c->msglist[i].msg_iov = &c->iov[iovnum];
1196 iovnum += c->msglist[i].msg_iovlen;
1197 }
1198 }
1199
1200 return 0;
1201}
1202
1203
1204/*
1205 * Adds data to the list of pending data that will be written out to a
1206 * connection.
1207 *
1208 * Returns 0 on success, -1 on out-of-memory.
1209 */
1210
1211static int add_iov(conn *c, const void *buf, size_t len) {
1212 struct msghdr *m;
1213 size_t leftover;
1214 bool limit_to_mtu;
1215
1216 cb_assert(c != NULL);
1217
1218 if (len == 0) {
1219 return 0;
1220 }
1221
1222 do {
1223 m = &c->msglist[c->msgused - 1];
1224
1225 /*
1226 * Limit the first payloads of TCP replies, to
1227 * UDP_MAX_PAYLOAD_SIZE bytes.
1228 */
1229 limit_to_mtu = (1 == c->msgused);
1230
1231 /* We may need to start a new msghdr if this one is full. */
1232 if (m->msg_iovlen == IOV_MAX ||
1233 (limit_to_mtu && c->msgbytes >= UDP_MAX_PAYLOAD_SIZE)) {
1234 add_msghdr(c);
1235 m = &c->msglist[c->msgused - 1];
1236 }
1237
1238 if (ensure_iov_space(c) != 0)
1239 return -1;
1240
1241 /* If the fragment is too big to fit in the datagram, split it up */
1242 if (limit_to_mtu && len + c->msgbytes > UDP_MAX_PAYLOAD_SIZE) {
1243 leftover = len + c->msgbytes - UDP_MAX_PAYLOAD_SIZE;
1244 len -= leftover;
1245 } else {
1246 leftover = 0;
1247 }
1248
1249 m = &c->msglist[c->msgused - 1];
1250 m->msg_iov[m->msg_iovlen].iov_base = (void *)buf;
1251 m->msg_iov[m->msg_iovlen].iov_len = len;
1252
1253 c->msgbytes += (int)len;
1254 c->iovused++;
1255 m->msg_iovlen++;
1256
1257 buf = ((char *)buf) + len;
1258 len = leftover;
1259 } while (leftover > 0);
1260
1261 return 0;
1262}
1263
1264/**
1265 * get a pointer to the start of the request struct for the current command
1266 */
1267static void* binary_get_request(conn *c) {
1268 char *ret = c->rcurr;
1269 ret -= (sizeof(c->binary_header) + c->binary_header.request.keylen +
1270 c->binary_header.request.extlen);
1271
1272 cb_assert(ret >= c->rbuf);
1273 return ret;
1274}
1275
1276/**
1277 * get a pointer to the key in this request
1278 */
1279static char* binary_get_key(conn *c) {
1280 return c->rcurr - (c->binary_header.request.keylen);
1281}
1282
1283/**
1284 * Insert a key into a buffer, but replace all non-printable characters
1285 * with a '.'.
1286 *
1287 * @param dest where to store the output
1288 * @param destsz size of destination buffer
1289 * @param prefix string to insert before the data
1290 * @param client the client we are serving
1291 * @param from_client set to true if this data is from the client
1292 * @param key the key to add to the buffer
1293 * @param nkey the number of bytes in the key
1294 * @return number of bytes in dest if success, -1 otherwise
1295 */
1296static ssize_t key_to_printable_buffer(char *dest, size_t destsz,
1297 SOCKET client, bool from_client,
1298 const char *prefix,
1299 const char *key,
1300 size_t nkey)
1301{
1302 char *ptr;
1303 ssize_t ii;
1304 ssize_t nw = snprintf(dest, destsz, "%c%d %s ", from_client ? '>' : '<',
1305 (int)client, prefix);
1306 if (nw == -1) {
1307 return -1;
1308 }
1309
1310 ptr = dest + nw;
1311 destsz -= nw;
1312 if (nkey > destsz) {
1313 nkey = destsz;
1314 }
1315
1316 for (ii = 0; ii < nkey; ++ii, ++key, ++ptr) {
1317 if (isgraph(*key)) {
1318 *ptr = *key;
1319 } else {
1320 *ptr = '.';
1321 }
1322 }
1323
1324 *ptr = '\0';
1325 return (ssize_t)(ptr - dest);
1326}
1327
1328/**
1329 * Convert a byte array to a text string
1330 *
1331 * @param dest where to store the output
1332 * @param destsz size of destination buffer
1333 * @param prefix string to insert before the data
1334 * @param client the client we are serving
1335 * @param from_client set to true if this data is from the client
1336 * @param data the data to add to the buffer
1337 * @param size the number of bytes in data to print
1338 * @return number of bytes in dest if success, -1 otherwise
1339 */
1340static ssize_t bytes_to_output_string(char *dest, size_t destsz,
1341 SOCKET client, bool from_client,
1342 const char *prefix,
1343 const char *data,
1344 size_t size)
1345{
1346 ssize_t nw = snprintf(dest, destsz, "%c%d %s", from_client ? '>' : '<',
1347 (int)client, prefix);
1348 ssize_t offset = nw;
1349 ssize_t ii;
1350
1351 if (nw == -1) {
1352 return -1;
1353 }
1354
1355 for (ii = 0; ii < size; ++ii) {
1356 if (ii % 4 == 0) {
1357 if ((nw = snprintf(dest + offset, destsz - offset, "\n%c%d ",
1358 from_client ? '>' : '<', client)) == -1) {
1359 return -1;
1360 }
1361 offset += nw;
1362 }
1363 if ((nw = snprintf(dest + offset, destsz - offset,
1364 " 0x%02x", (unsigned char)data[ii])) == -1) {
1365 return -1;
1366 }
1367 offset += nw;
1368 }
1369
1370 if ((nw = snprintf(dest + offset, destsz - offset, "\n")) == -1) {
1371 return -1;
1372 }
1373
1374 return offset + nw;
1375}
1376
1377static int add_bin_header(conn *c,
1378 uint16_t err,
1379 uint8_t hdr_len,
1380 uint16_t key_len,
1381 uint32_t body_len,
1382 uint8_t datatype) {
1383 protocol_binary_response_header* header;
1384
1385 cb_assert(c);
1386
1387 c->msgcurr = 0;
1388 c->msgused = 0;
1389 c->iovused = 0;
1390 if (add_msghdr(c) != 0) {
1391 return -1;
1392 }
1393
1394 header = (protocol_binary_response_header *)c->wbuf;
1395
1396 header->response.magic = (uint8_t)PROTOCOL_BINARY_RES;
1397 header->response.opcode = c->binary_header.request.opcode;
1398 header->response.keylen = (uint16_t)htons(key_len);
1399
1400 header->response.extlen = (uint8_t)hdr_len;
1401 header->response.datatype = datatype;
1402 header->response.status = (uint16_t)htons(err);
1403
1404 header->response.bodylen = htonl(body_len);
1405 header->response.opaque = c->opaque;
1406 header->response.cas = htonll(c->cas);
1407
1408 if (settings.verbose > 1) {
1409 char buffer[1024];
1410 if (bytes_to_output_string(buffer, sizeof(buffer), c->sfd, false,
1411 "Writing bin response:",
1412 (const char*)header->bytes,
1413 sizeof(header->bytes)) != -1) {
1414 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
1415 "%s", buffer);
1416 }
1417 }
1418
1419 return add_iov(c, c->wbuf, sizeof(header->response));
1420}
1421
1422/**
1423 * Convert an error code generated from the storage engine to the corresponding
1424 * error code used by the protocol layer.
1425 * @param e the error code as used in the engine
1426 * @return the error code as used by the protocol layer
1427 */
1428static protocol_binary_response_status engine_error_2_protocol_error(ENGINE_ERROR_CODE e) {
1429 protocol_binary_response_status ret;
1430
1431 switch (e) {
1432 case ENGINE_SUCCESS:
1433 return PROTOCOL_BINARY_RESPONSE_SUCCESS;
1434 case ENGINE_KEY_ENOENT:
1435 return PROTOCOL_BINARY_RESPONSE_KEY_ENOENT;
1436 case ENGINE_KEY_EEXISTS:
1437 return PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS;
1438 case ENGINE_ENOMEM:
1439 return PROTOCOL_BINARY_RESPONSE_ENOMEM;
1440 case ENGINE_TMPFAIL:
1441 return PROTOCOL_BINARY_RESPONSE_ETMPFAIL;
1442 case ENGINE_NOT_STORED:
1443 return PROTOCOL_BINARY_RESPONSE_NOT_STORED;
1444 case ENGINE_EINVAL:
1445 return PROTOCOL_BINARY_RESPONSE_EINVAL;
1446 case ENGINE_ENOTSUP:
1447 return PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED;
1448 case ENGINE_E2BIG:
1449 return PROTOCOL_BINARY_RESPONSE_E2BIG;
1450 case ENGINE_NOT_MY_VBUCKET:
1451 return PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET;
1452 case ENGINE_ERANGE:
1453 return PROTOCOL_BINARY_RESPONSE_ERANGE;
1454 case ENGINE_ROLLBACK:
1455 return PROTOCOL_BINARY_RESPONSE_ROLLBACK;
1456 default:
1457 ret = PROTOCOL_BINARY_RESPONSE_EINTERNAL;
1458 }
1459
1460 return ret;
1461}
1462
1463static ENGINE_ERROR_CODE get_vb_map_cb(const void *cookie,
1464 const void *map,
1465 size_t mapsize)
1466{
1467 char *buf;
1468 conn *c = (conn*)cookie;
1469 protocol_binary_response_header header;
1470 size_t needed = mapsize+ sizeof(protocol_binary_response_header);
1471 if (!grow_dynamic_buffer(c, needed)) {
1472 if (settings.verbose > 0) {
1473 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
1474 "<%d ERROR: Failed to allocate memory for response\n",
1475 c->sfd);
1476 }
1477 return ENGINE_ENOMEM;
1478 }
1479
1480 buf = c->dynamic_buffer.buffer + c->dynamic_buffer.offset;
1481 memset(&header, 0, sizeof(header));
1482
1483 header.response.magic = (uint8_t)PROTOCOL_BINARY_RES;
1484 header.response.opcode = c->binary_header.request.opcode;
1485 header.response.status = (uint16_t)htons(PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET);
1486 header.response.bodylen = htonl((uint32_t)mapsize);
1487 header.response.opaque = c->opaque;
1488
1489 memcpy(buf, header.bytes, sizeof(header.response));
1490 buf += sizeof(header.response);
1491 memcpy(buf, map, mapsize);
1492 c->dynamic_buffer.offset += needed;
1493
1494 return ENGINE_SUCCESS;
1495}
1496
1497static void write_bin_packet(conn *c, protocol_binary_response_status err, int swallow) {
1498 if (err == PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET) {
1499 ENGINE_ERROR_CODE ret;
1500 cb_assert(swallow == 0);
1501
1502 ret = settings.engine.v1->get_engine_vb_map(settings.engine.v0, c,
1503 get_vb_map_cb);
1504 if (ret == ENGINE_SUCCESS) {
1505 write_and_free(c, c->dynamic_buffer.buffer,
1506 c->dynamic_buffer.offset);
1507 c->dynamic_buffer.buffer = NULL;
1508 } else {
1509 conn_set_state(c, conn_closing);
1510 }
1511 } else {
1512 ssize_t len = 0;
1513 const char *errtext = NULL;
1514
1515 if (err != PROTOCOL_BINARY_RESPONSE_SUCCESS) {
1516 errtext = memcached_protocol_errcode_2_text(err);
1517 if (errtext != NULL) {
1518 len = (ssize_t)strlen(errtext);
1519 }
1520 }
1521
1522 if (errtext && settings.verbose > 1) {
1523 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
1524 ">%d Writing an error: %s\n", c->sfd,
1525 errtext);
1526 }
1527
1528 add_bin_header(c, err, 0, 0, len, PROTOCOL_BINARY_RAW_BYTES);
1529 if (errtext) {
1530 add_iov(c, errtext, len);
1531 }
1532 conn_set_state(c, conn_mwrite);
1533 if (swallow > 0) {
1534 c->sbytes = swallow;
1535 c->write_and_go = conn_swallow;
1536 } else {
1537 c->write_and_go = conn_new_cmd;
1538 }
1539 }
1540}
1541
1542/* Form and send a response to a command over the binary protocol */
1543static void write_bin_response(conn *c, const void *d, int hlen, int keylen, int dlen) {
1544 if (!c->noreply || c->cmd == PROTOCOL_BINARY_CMD_GET ||
1545 c->cmd == PROTOCOL_BINARY_CMD_GETK) {
1546 if (add_bin_header(c, 0, hlen, keylen, dlen, PROTOCOL_BINARY_RAW_BYTES) == -1) {
1547 conn_set_state(c, conn_closing);
1548 return;
1549 }
1550 add_iov(c, d, dlen);
1551 conn_set_state(c, conn_mwrite);
1552 c->write_and_go = conn_new_cmd;
1553 } else {
1554 if (c->start != 0) {
1555 collect_timing(c->cmd, gethrtime() - c->start);
1556 c->start = 0;
1557 }
1558 conn_set_state(c, conn_new_cmd);
1559 }
1560}
1561
1562static void complete_update_bin(conn *c) {
1563 protocol_binary_response_status eno = PROTOCOL_BINARY_RESPONSE_EINVAL;
1564 ENGINE_ERROR_CODE ret;
1565 item *it;
1566 item_info_holder info;
1567
1568 cb_assert(c != NULL);
1569 it = c->item;
1570 memset(&info, 0, sizeof(info));
1571 info.info.nvalue = 1;
1572 if (!settings.engine.v1->get_item_info(settings.engine.v0, c, it,
1573 (void*)&info)) {
1574 settings.engine.v1->release(settings.engine.v0, c, it);
1575 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
1576 "%d: Failed to get item info",
1577 c->sfd);
1578 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINTERNAL, 0);
1579 return;
1580 }
1581
1582 ret = c->aiostat;
1583 c->aiostat = ENGINE_SUCCESS;
1584 if (ret == ENGINE_SUCCESS) {
1585 if (!c->supports_datatype) {
1586 if (checkUTF8JSON((void*)info.info.value[0].iov_base,
1587 (int)info.info.value[0].iov_len)) {
1588 info.info.datatype = PROTOCOL_BINARY_DATATYPE_JSON;
1589 if (!settings.engine.v1->set_item_info(settings.engine.v0, c,
1590 it, &info.info)) {
1591 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
1592 "%d: Failed to set item info",
1593 c->sfd);
1594 }
1595 }
1596 }
1597 ret = settings.engine.v1->store(settings.engine.v0, c,
1598 it, &c->cas, c->store_op,
1599 c->binary_header.request.vbucket);
1600 }
1601
1602#ifdef ENABLE_DTRACE
1603 switch (c->cmd) {
1604 case OPERATION_ADD:
1605 MEMCACHED_COMMAND_ADD(c->sfd, info.info.key, info.info.nkey,
1606 (ret == ENGINE_SUCCESS) ? info.info.nbytes : -1, c->cas);
1607 break;
1608 case OPERATION_REPLACE:
1609 MEMCACHED_COMMAND_REPLACE(c->sfd, info.info.key, info.info.nkey,
1610 (ret == ENGINE_SUCCESS) ? info.info.nbytes : -1, c->cas);
1611 break;
1612 case OPERATION_APPEND:
1613 MEMCACHED_COMMAND_APPEND(c->sfd, info.info.key, info.info.nkey,
1614 (ret == ENGINE_SUCCESS) ? info.info.nbytes : -1, c->cas);
1615 break;
1616 case OPERATION_PREPEND:
1617 MEMCACHED_COMMAND_PREPEND(c->sfd, info.info.key, info.info.nkey,
1618 (ret == ENGINE_SUCCESS) ? info.info.nbytes : -1, c->cas);
1619 break;
1620 case OPERATION_SET:
1621 MEMCACHED_COMMAND_SET(c->sfd, info.info.key, info.info.nkey,
1622 (ret == ENGINE_SUCCESS) ? info.info.nbytes : -1, c->cas);
1623 break;
1624 }
1625#endif
1626
1627 switch (ret) {
1628 case ENGINE_SUCCESS:
1629 /* Stored */
1630 write_bin_response(c, NULL((void*)0), 0, 0, 0);
1631 break;
1632 case ENGINE_KEY_EEXISTS:
1633 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, 0);
1634 break;
1635 case ENGINE_KEY_ENOENT:
1636 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
1637 break;
1638 case ENGINE_ENOMEM:
1639 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, 0);
1640 break;
1641 case ENGINE_TMPFAIL:
1642 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ETMPFAIL, 0);
1643 break;
1644 case ENGINE_EWOULDBLOCK:
1645 c->ewouldblock = true1;
1646 break;
1647 case ENGINE_DISCONNECT:
1648 c->state = conn_closing;
1649 break;
1650 case ENGINE_ENOTSUP:
1651 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
1652 break;
1653 case ENGINE_NOT_MY_VBUCKET:
1654 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET, 0);
1655 break;
1656 case ENGINE_E2BIG:
1657 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_E2BIG, 0);
1658 break;
1659 default:
1660 if (c->store_op == OPERATION_ADD) {
1661 eno = PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS;
1662 } else if(c->store_op == OPERATION_REPLACE) {
1663 eno = PROTOCOL_BINARY_RESPONSE_KEY_ENOENT;
1664 } else {
1665 eno = PROTOCOL_BINARY_RESPONSE_NOT_STORED;
1666 }
1667 write_bin_packet(c, eno, 0);
1668 }
1669
1670 if (c->store_op == OPERATION_CAS) {
1671 switch (ret) {
1672 case ENGINE_SUCCESS:
1673 SLAB_INCR(c, cas_hits, info.info.key, info.info.nkey);
1674 break;
1675 case ENGINE_KEY_EEXISTS:
1676 SLAB_INCR(c, cas_badval, info.info.key, info.info.nkey);
1677 break;
1678 case ENGINE_KEY_ENOENT:
1679 STATS_NOKEY(c, cas_misses);
1680 break;
1681 default:
1682 ;
1683 }
1684 } else {
1685 SLAB_INCR(c, cmd_set, info.info.key, info.info.nkey);
1686 }
1687
1688 if (!c->ewouldblock) {
1689 /* release the c->item reference */
1690 settings.engine.v1->release(settings.engine.v0, c, c->item);
1691 c->item = 0;
1692 }
1693}
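/*
 * Store-path note: when the client has not negotiated datatype support, the
 * value is probed with checkUTF8JSON() and tagged PROTOCOL_BINARY_DATATYPE_JSON
 * before the engine store if it parses as UTF-8 JSON. On ENGINE_EWOULDBLOCK the
 * c->item reference is deliberately kept so the store can be completed once the
 * engine notifies the worker thread.
 */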
1694
1695static void process_bin_get(conn *c) {
1696 item *it;
1697 protocol_binary_response_get* rsp = (protocol_binary_response_get*)c->wbuf;
1698 char* key = binary_get_key(c);
1699 size_t nkey = c->binary_header.request.keylen;
1700 uint16_t keylen;
1701 uint32_t bodylen;
1702 item_info_holder info;
1703 int ii;
1704 ENGINE_ERROR_CODE ret;
1705 uint8_t datatype;
1706 bool_Bool need_inflate = false0;
1707
1708 memset(&info, 0, sizeof(info));
1709 if (settings.verbose > 1) {
1710 char buffer[1024];
1711 if (key_to_printable_buffer(buffer, sizeof(buffer), c->sfd, true1,
1712 "GET", key, nkey) != -1) {
1713 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c, "%s\n",
1714 buffer);
1715 }
1716 }
1717
1718 ret = c->aiostat;
1719 c->aiostat = ENGINE_SUCCESS;
1720 if (ret == ENGINE_SUCCESS) {
1721 ret = settings.engine.v1->get(settings.engine.v0, c, &it, key, (int)nkey,
1722 c->binary_header.request.vbucket);
1723 }
1724
1725 info.info.nvalue = IOV_MAX1024;
1726 switch (ret) {
1727 case ENGINE_SUCCESS:
1728 STATS_HIT(c, get, key, nkey);
1729
1730 if (!settings.engine.v1->get_item_info(settings.engine.v0, c, it,
1731 (void*)&info)) {
1732 settings.engine.v1->release(settings.engine.v0, c, it);
1733 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
1734 "%d: Failed to get item info",
1735 c->sfd);
1736 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINTERNAL, 0);
1737 break;
1738 }
1739
1740 datatype = info.info.datatype;
1741 if (!c->supports_datatype) {
1742 if ((datatype & PROTOCOL_BINARY_DATATYPE_COMPRESSED) == PROTOCOL_BINARY_DATATYPE_COMPRESSED) {
1743 need_inflate = true1;
1744 } else {
1745 datatype = PROTOCOL_BINARY_RAW_BYTES;
1746 }
1747 }
1748
1749 keylen = 0;
1750 bodylen = sizeof(rsp->message.body) + info.info.nbytes;
1751
1752 if (c->cmd == PROTOCOL_BINARY_CMD_GETK) {
1753 bodylen += (uint32_t)nkey;
1754 keylen = (uint16_t)nkey;
1755 }
1756
1757 if (need_inflate) {
1758 if (info.info.nvalue != 1) {
1759 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINTERNAL, 0);
1760 } else if (binary_response_handler(key, keylen,
1761 &info.info.flags, 4,
1762 info.info.value[0].iov_base,
1763 (uint32_t)info.info.value[0].iov_len,
1764 datatype,
1765 PROTOCOL_BINARY_RESPONSE_SUCCESS,
1766 info.info.cas, c)) {
1767 write_and_free(c, c->dynamic_buffer.buffer, c->dynamic_buffer.offset);
1768 c->dynamic_buffer.buffer = NULL((void*)0);
1769 settings.engine.v1->release(settings.engine.v0, c, it);
1770 } else {
1771 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINTERNAL, 0);
1772 }
1773 } else {
1774 if (add_bin_header(c, 0, sizeof(rsp->message.body),
1775 keylen, bodylen, datatype) == -1) {
1776 conn_set_state(c, conn_closing);
1777 return;
1778 }
1779 rsp->message.header.response.cas = htonll(info.info.cas);
1780
1781 /* add the flags */
1782 rsp->message.body.flags = info.info.flags;
1783 add_iov(c, &rsp->message.body, sizeof(rsp->message.body));
1784
1785 if (c->cmd == PROTOCOL_BINARY_CMD_GETK) {
1786 add_iov(c, info.info.key, nkey);
1787 }
1788
1789 for (ii = 0; ii < info.info.nvalue; ++ii) {
1790 add_iov(c, info.info.value[ii].iov_base,
1791 info.info.value[ii].iov_len);
1792 }
1793 conn_set_state(c, conn_mwrite);
1794 /* Remember this item so we can garbage collect it later */
1795 c->item = it;
1796 }
1797 break;
1798 case ENGINE_KEY_ENOENT:
1799 STATS_MISS(c, get, key, nkey);
1800
1801 MEMCACHED_COMMAND_GET(c->sfd, key, nkey, -1, 0);
1802
1803 if (c->noreply) {
1804 conn_set_state(c, conn_new_cmd);
1805 } else {
1806 if (c->cmd == PROTOCOL_BINARY_CMD_GETK) {
1807 char *ofs = c->wbuf + sizeof(protocol_binary_response_header);
1808 if (add_bin_header(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT,
1809 0, (uint16_t)nkey,
1810 (uint32_t)nkey, PROTOCOL_BINARY_RAW_BYTES) == -1) {
1811 conn_set_state(c, conn_closing);
1812 return;
1813 }
1814 memcpy(ofs, key, nkey);
1815 add_iov(c, ofs, nkey);
1816 conn_set_state(c, conn_mwrite);
1817 } else {
1818 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
1819 }
1820 }
1821 break;
1822 case ENGINE_EWOULDBLOCK:
1823 c->ewouldblock = true1;
1824 break;
1825 case ENGINE_DISCONNECT:
1826 c->state = conn_closing;
1827 break;
1828 default:
1829 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
1830 }
1831
1832 if (settings.detail_enabled && ret != ENGINE_EWOULDBLOCK) {
1833 stats_prefix_record_get(key, nkey, ret == ENGINE_SUCCESS);
1834 }
1835}
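/*
 * GET/GETK hit layout (sketch based on the code above):
 *
 *     [ header | 4-byte flags (rsp->message.body) | key if GETK | value iovecs ]
 *
 * A compressed value is only inflated (through binary_response_handler and the
 * dynamic buffer) for clients without datatype support; a GETK miss echoes the
 * requested key back inside the KEY_ENOENT response.
 */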
1836
1837static void append_bin_stats(const char *key, const uint16_t klen,
1838 const char *val, const uint32_t vlen,
1839 conn *c) {
1840 char *buf = c->dynamic_buffer.buffer + c->dynamic_buffer.offset;
1841 uint32_t bodylen = klen + vlen;
1842 protocol_binary_response_header header;
1843
1844 memset(&header, 0, sizeof(header));
1845 header.response.magic = (uint8_t)PROTOCOL_BINARY_RES;
1846 header.response.opcode = PROTOCOL_BINARY_CMD_STAT;
1847 header.response.keylen = (uint16_t)htons(klen);
1848 header.response.datatype = (uint8_t)PROTOCOL_BINARY_RAW_BYTES;
1849 header.response.bodylen = htonl(bodylen);
1850 header.response.opaque = c->opaque;
1851
1852 memcpy(buf, header.bytes, sizeof(header.response));
1853 buf += sizeof(header.response);
1854
1855 if (klen > 0) {
1856 memcpy(buf, key, klen);
1857 buf += klen;
1858
1859 if (vlen > 0) {
1860 memcpy(buf, val, vlen);
1861 }
1862 }
1863
1864 c->dynamic_buffer.offset += sizeof(header.response) + bodylen;
1865}
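/*
 * Each stat is appended to the connection's dynamic buffer as one complete
 * PROTOCOL_BINARY_CMD_STAT response: 24-byte header, then key, then value, so
 * the offset advances by sizeof(header.response) + klen + vlen per call.
 */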
1866
1867static bool_Bool grow_dynamic_buffer(conn *c, size_t needed) {
1868 size_t nsize = c->dynamic_buffer.size;
1869 size_t available = nsize - c->dynamic_buffer.offset;
1870 bool_Bool rv = true1;
1871
1872 /* Special case: No buffer -- need to allocate fresh */
1873 if (c->dynamic_buffer.buffer == NULL((void*)0)) {
1874 nsize = 1024;
1875 available = c->dynamic_buffer.size = c->dynamic_buffer.offset = 0;
1876 }
1877
1878 while (needed > available) {
1879 cb_assert(nsize > 0);
1880 nsize = nsize << 1;
1881 available = nsize - c->dynamic_buffer.offset;
1882 }
1883
1884 if (nsize != c->dynamic_buffer.size) {
1885 char *ptr = realloc(c->dynamic_buffer.buffer, nsize);
1886 if (ptr) {
1887 c->dynamic_buffer.buffer = ptr;
1888 c->dynamic_buffer.size = nsize;
1889 } else {
1890 rv = false0;
1891 }
1892 }
1893
1894 return rv;
1895}
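/*
 * Growth policy sketch (hypothetical numbers): the buffer starts at 1024 bytes
 * and doubles until 'needed' fits after the current offset. With size = 1024,
 * offset = 900 and needed = 300 the loop doubles once to nsize = 2048
 * (available = 1148) and realloc()s; if realloc fails the old buffer is kept
 * and false is returned so the caller can abandon the response.
 */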
1896
1897static void append_stats(const char *key, const uint16_t klen,
1898 const char *val, const uint32_t vlen,
1899 const void *cookie)
1900{
1901 size_t needed;
1902 conn *c = (conn*)cookie;
1903 /* value without a key is invalid */
1904 if (klen == 0 && vlen > 0) {
1905 return ;
1906 }
1907
1908 needed = vlen + klen + sizeof(protocol_binary_response_header);
1909 if (!grow_dynamic_buffer(c, needed)) {
1910 return ;
1911 }
1912 append_bin_stats(key, klen, val, vlen, c);
1913 cb_assert(c->dynamic_buffer.offset <= c->dynamic_buffer.size);
1914}
1915
1916static void bin_read_chunk(conn *c,
1917 enum bin_substates next_substate,
1918 uint32_t chunk) {
1919 ptrdiff_t offset;
1920 cb_assert(c);
1921 c->substate = next_substate;
1922 c->rlbytes = chunk;
1923
1924 /* Ok... do we have room for everything in our buffer? */
1925 offset = c->rcurr + sizeof(protocol_binary_request_header) - c->rbuf;
1926 if (c->rlbytes > c->rsize - offset) {
1927 size_t nsize = c->rsize;
1928 size_t size = c->rlbytes + sizeof(protocol_binary_request_header);
1929
1930 while (size > nsize) {
1931 nsize *= 2;
1932 }
1933
1934 if (nsize != c->rsize) {
1935 char *newm;
1936 if (settings.verbose > 1) {
1937 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
1938 "%d: Need to grow buffer from %lu to %lu\n",
1939 c->sfd, (unsigned long)c->rsize, (unsigned long)nsize);
1940 }
1941 newm = realloc(c->rbuf, nsize);
1942 if (newm == NULL((void*)0)) {
1943 if (settings.verbose) {
1944 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
1945 "%d: Failed to grow buffer.. closing connection\n",
1946 c->sfd);
1947 }
1948 conn_set_state(c, conn_closing);
1949 return;
1950 }
1951
1952 c->rbuf= newm;
1953 /* rcurr should point to the same offset in the packet */
1954 c->rcurr = c->rbuf + offset - sizeof(protocol_binary_request_header);
1955 c->rsize = (int)nsize;
1956 }
1957 if (c->rbuf != c->rcurr) {
1958 memmove(c->rbuf, c->rcurr, c->rbytes);
1959 c->rcurr = c->rbuf;
1960 if (settings.verbose > 1) {
1961 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
1962 "%d: Repack input buffer\n",
1963 c->sfd);
1964 }
1965 }
1966 }
1967
1968 /* preserve the header in the buffer.. */
1969 c->ritem = c->rcurr + sizeof(protocol_binary_request_header);
1970 conn_set_state(c, conn_nread);
1971}
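/*
 * Buffer note: if the chunk does not fit, c->rbuf is doubled until header +
 * chunk fit, the partially read packet is memmove()d to the front of the
 * buffer, and c->ritem ends up just past the preserved
 * protocol_binary_request_header so conn_nread can keep filling the body in
 * place.
 */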
1972
1973static void bin_read_key(conn *c, enum bin_substates next_substate, int extra) {
1974 bin_read_chunk(c, next_substate, c->keylen + extra);
1975}
1976
1977
1978/* Just write an error message and disconnect the client */
1979static void handle_binary_protocol_error(conn *c) {
1980 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
1981 if (settings.verbose) {
1982 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
1983 "%d: Protocol error (opcode %02x), close connection\n",
1984 c->sfd, c->binary_header.request.opcode);
1985 }
1986 c->write_and_go = conn_closing;
1987}
1988
1989static void get_auth_data(const void *cookie, auth_data_t *data) {
1990 conn *c = (conn*)cookie;
1991 if (c->sasl_conn) {
1992 cbsasl_getprop(c->sasl_conn, CBSASL_USERNAME, (void*)&data->username);
1993 cbsasl_getprop(c->sasl_conn, CBSASL_CONFIG, (void*)&data->config);
1994 }
1995}
1996
1997struct sasl_tmp {
1998 int ksize;
1999 int vsize;
2000 char data[1]; /* data + ksize == value */
2001};
2002
2003static void process_bin_sasl_auth(conn *c) {
2004 int nkey;
2005 int vlen;
2006 char *key;
2007 size_t buffer_size;
2008 struct sasl_tmp *data;
2009
2010 cb_assert(c->binary_header.request.extlen == 0);
2011 nkey = c->binary_header.request.keylen;
2012 vlen = c->binary_header.request.bodylen - nkey;
2013
2014 if (nkey > MAX_SASL_MECH_LEN32) {
2015 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, vlen);
2016 c->write_and_go = conn_swallow;
2017 return;
2018 }
2019
2020 key = binary_get_key(c);
2021 cb_assert(key);
2022
2023 buffer_size = sizeof(struct sasl_tmp) + nkey + vlen + 2;
2024 data = calloc(sizeof(struct sasl_tmp) + buffer_size, 1);
2025 if (!data) {
2026 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, vlen);
2027 c->write_and_go = conn_swallow;
2028 return;
2029 }
2030
2031 data->ksize = nkey;
2032 data->vsize = vlen;
2033 memcpy(data->data, key, nkey);
2034
2035 c->item = data;
2036 c->ritem = data->data + nkey;
2037 c->rlbytes = vlen;
2038 conn_set_state(c, conn_nread);
2039 c->substate = bin_reading_sasl_auth_data;
2040}
2041
2042static void process_bin_complete_sasl_auth(conn *c) {
2043 auth_data_t data;
2044 const char *out = NULL((void*)0);
2045 unsigned int outlen = 0;
2046 int nkey;
2047 int vlen;
2048 struct sasl_tmp *stmp;
2049 char mech[1024];
2050 const char *challenge;
2051 int result=-1;
2052
2053 cb_assert(c->item);
2054
2055 nkey = c->binary_header.request.keylen;
2056 if (nkey > 1023) {
2057 /* too big.. */
2058 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, 0);
2059 return;
2060 }
2061 vlen = c->binary_header.request.bodylen - nkey;
2062
2063 stmp = c->item;
2064 memcpy(mech, stmp->data, nkey);
2065 mech[nkey] = 0x00;
2066
2067 if (settings.verbose) {
2068 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
2069 "%d: mech: ``%s'' with %d bytes of data\n", c->sfd, mech, vlen);
2070 }
2071
2072 challenge = vlen == 0 ? NULL((void*)0) : (stmp->data + nkey);
2073 switch (c->cmd) {
2074 case PROTOCOL_BINARY_CMD_SASL_AUTH:
2075 result = cbsasl_server_start(&c->sasl_conn, mech,
2076 challenge, vlen,
2077 (unsigned char **)&out, &outlen);
2078 break;
2079 case PROTOCOL_BINARY_CMD_SASL_STEP:
2080 result = cbsasl_server_step(c->sasl_conn, challenge,
2081 vlen, &out, &outlen);
2082 break;
2083 default:
2084 cb_assert(false); /* CMD should be one of the above */
2085 /* This code is pretty much impossible, but makes the compiler
2086 happier */
2087 if (settings.verbose) {
2088 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2089 "%d: Unhandled command %d with challenge %s\n",
2090 c->sfd, c->cmd, challenge);
2091 }
2092 break;
2093 }
2094
2095 free(c->item);
2096 c->item = NULL((void*)0);
2097 c->ritem = NULL((void*)0);
2098
2099 if (settings.verbose) {
2100 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
2101 "%d: sasl result code: %d\n",
2102 c->sfd, result);
2103 }
2104
2105 switch(result) {
2106 case SASL_OK:
2107 write_bin_response(c, "Authenticated", 0, 0, (uint32_t)strlen("Authenticated"));
2108 get_auth_data(c, &data);
2109 perform_callbacks(ON_AUTH, (const void*)&data, c);
2110 STATS_NOKEY(c, auth_cmds);
2111 break;
2112 case SASL_CONTINUE:
2113 if (add_bin_header(c, PROTOCOL_BINARY_RESPONSE_AUTH_CONTINUE, 0, 0,
2114 outlen, PROTOCOL_BINARY_RAW_BYTES) == -1) {
2115 conn_set_state(c, conn_closing);
2116 return;
2117 }
2118 add_iov(c, out, outlen);
2119 conn_set_state(c, conn_mwrite);
2120 c->write_and_go = conn_new_cmd;
2121 break;
2122 case SASL_BADPARAM:
2123 if (settings.verbose) {
2124 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
2125 "%d: Bad sasl params: %d\n",
2126 c->sfd, result);
2127 }
2128 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
2129 STATS_NOKEY2(c, auth_cmds, auth_errors);
2130 break;
2131 default:
2132 if (settings.verbose) {
2133 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
2134 "%d: Unknown sasl response: %d\n",
2135 c->sfd, result);
2136 }
2137 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, 0);
2138 STATS_NOKEY2(c, auth_cmds, auth_errors);
2139 }
2140}
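/*
 * SASL flow in short: SASL_AUTH starts a new exchange with the client-chosen
 * mechanism (cbsasl_server_start), SASL_STEP continues one (cbsasl_server_step).
 * SASL_OK answers "Authenticated", SASL_CONTINUE returns AUTH_CONTINUE with the
 * server challenge as the body, and anything else is mapped to EINVAL or
 * AUTH_ERROR and counted in auth_cmds/auth_errors.
 */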
2141
2142static bool_Bool authenticated(conn *c) {
2143 bool_Bool rv = false0;
2144
2145 switch (c->cmd) {
2146 case PROTOCOL_BINARY_CMD_SASL_LIST_MECHS: /* FALLTHROUGH */
2147 case PROTOCOL_BINARY_CMD_SASL_AUTH: /* FALLTHROUGH */
2148 case PROTOCOL_BINARY_CMD_SASL_STEP: /* FALLTHROUGH */
2149 case PROTOCOL_BINARY_CMD_VERSION: /* FALLTHROUGH */
2150 case PROTOCOL_BINARY_CMD_HELLO:
2151 rv = true1;
2152 break;
2153 default:
2154 if (c->sasl_conn) {
2155 const void *uname = NULL((void*)0);
2156 cbsasl_getprop(c->sasl_conn, CBSASL_USERNAME, &uname);
2157 rv = uname != NULL((void*)0);
2158 }
2159 }
2160
2161 if (settings.verbose > 1) {
2162 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
2163 "%d: authenticated() in cmd 0x%02x is %s\n",
2164 c->sfd, c->cmd, rv ? "true" : "false");
2165 }
2166
2167 return rv;
2168}
2169
2170bool_Bool binary_response_handler(const void *key, uint16_t keylen,
2171 const void *ext, uint8_t extlen,
2172 const void *body, uint32_t bodylen,
2173 uint8_t datatype, uint16_t status,
2174 uint64_t cas, const void *cookie)
2175{
2176 protocol_binary_response_header header;
2177 char *buf;
2178 conn *c = (conn*)cookie;
2179 /* Look at append_bin_stats */
2180 size_t needed;
2181 bool_Bool need_inflate = false0;
2182 size_t inflated_length;
2183
2184 if (!c->supports_datatype) {
2185 if ((datatype & PROTOCOL_BINARY_DATATYPE_COMPRESSED) == PROTOCOL_BINARY_DATATYPE_COMPRESSED) {
2186 need_inflate = true1;
2187 }
2188 /* We may silently drop the knowledge about a JSON item */
2189 datatype = PROTOCOL_BINARY_RAW_BYTES;
2190 }
2191
2192 needed = keylen + extlen + sizeof(protocol_binary_response_header);
2193 if (need_inflate) {
2194 if (snappy_uncompressed_length(body, bodylen,
2195 &inflated_length) != SNAPPY_OK) {
2196 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
2197 "<%d ERROR: Failed to determine inflated size",
2198 c->sfd);
2199 return false0;
2200 }
2201 needed += inflated_length;
2202 } else {
2203 needed += bodylen;
2204 }
2205
2206 if (!grow_dynamic_buffer(c, needed)) {
2207 if (settings.verbose > 0) {
2208 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
2209 "<%d ERROR: Failed to allocate memory for response",
2210 c->sfd);
2211 }
2212 return false0;
2213 }
2214
2215 buf = c->dynamic_buffer.buffer + c->dynamic_buffer.offset;
2216 memset(&header, 0, sizeof(header));
2217 header.response.magic = (uint8_t)PROTOCOL_BINARY_RES;
2218 header.response.opcode = c->binary_header.request.opcode;
2219 header.response.keylen = (uint16_t)htons(keylen);
2220 header.response.extlen = extlen;
2221 header.response.datatype = datatype;
2222 header.response.status = (uint16_t)htons(status);
2223 if (need_inflate) {
2224 header.response.bodylen = htonl((uint32_t)(inflated_length + keylen + extlen));
2225 } else {
2226 header.response.bodylen = htonl(bodylen + keylen + extlen);
2227 }
2228 header.response.opaque = c->opaque;
2229 header.response.cas = htonll(cas);
2230
2231 memcpy(buf, header.bytes, sizeof(header.response));
2232 buf += sizeof(header.response);
2233
2234 if (extlen > 0) {
2235 memcpy(buf, ext, extlen);
2236 buf += extlen;
2237 }
2238
2239 if (keylen > 0) {
2240 memcpy(buf, key, keylen);
2241 buf += keylen;
2242 }
2243
2244 if (bodylen > 0) {
2245 if (need_inflate) {
2246 if (snappy_uncompress(body, bodylen, buf, &inflated_length) != SNAPPY_OK) {
2247 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
2248 "<%d ERROR: Failed to inflate item", c->sfd);
2249 return false0;
2250 }
2251 } else {
2252 memcpy(buf, body, bodylen);
2253 }
2254 }
2255
2256 c->dynamic_buffer.offset += needed;
2257 return true1;
2258}
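/*
 * Dynamic-buffer layout produced by binary_response_handler() (sketch):
 *
 *     [ 24-byte header | extras (extlen) | key (keylen) | body ]
 *
 * For clients without datatype support a snappy-compressed body is inflated in
 * place (the header's bodylen then counts the inflated length) and the datatype
 * field is downgraded to RAW_BYTES.
 */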
2259
2260/**
2261 * Tap stats (these are only used by the tap thread, so they don't need
2262 * to be in the threadlocal struct right now...)
2263 */
2264struct tap_cmd_stats {
2265 uint64_t connect;
2266 uint64_t mutation;
2267 uint64_t checkpoint_start;
2268 uint64_t checkpoint_end;
2269 uint64_t delete;
2270 uint64_t flush;
2271 uint64_t opaque;
2272 uint64_t vbucket_set;
2273};
2274
2275struct tap_stats {
2276 cb_mutex_t mutex;
2277 struct tap_cmd_stats sent;
2278 struct tap_cmd_stats received;
2279} tap_stats;
2280
2281static void ship_tap_log(conn *c) {
2282 bool_Bool more_data = true1;
2283 bool_Bool send_data = false0;
2284 bool_Bool disconnect = false0;
2285 item *it;
2286 uint32_t bodylen;
2287 int ii = 0;
2288
2289 c->msgcurr = 0;
2290 c->msgused = 0;
2291 c->iovused = 0;
2292 if (add_msghdr(c) != 0) {
2293 if (settings.verbose) {
2294 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2295 "%d: Failed to create output headers. Shutting down tap connection\n", c->sfd);
2296 }
2297 conn_set_state(c, conn_closing);
2298 return ;
2299 }
2300 /* @todo add check for buffer overflow of c->wbuf */
2301 c->wbytes = 0;
2302 c->wcurr = c->wbuf;
2303 c->icurr = c->ilist;
2304 do {
2305 /* @todo fixme! */
2306 void *engine;
2307 uint16_t nengine;
2308 uint8_t ttl;
2309 uint16_t tap_flags;
2310 uint32_t seqno;
2311 uint16_t vbucket;
2312 tap_event_t event;
2313 bool_Bool inflate = false0;
2314 size_t inflated_length = 0;
2315
2316 union {
2317 protocol_binary_request_tap_mutation mutation;
2318 protocol_binary_request_tap_delete delete;
2319 protocol_binary_request_tap_flush flush;
2320 protocol_binary_request_tap_opaque opaque;
2321 protocol_binary_request_noop noop;
2322 } msg;
2323 item_info_holder info;
2324 memset(&info, 0, sizeof(info));
2325
2326 if (ii++ == 10) {
2327 break;
2328 }
2329
2330 event = c->tap_iterator(settings.engine.v0, c, &it,
2331 &engine, &nengine, &ttl,
2332 &tap_flags, &seqno, &vbucket);
2333 memset(&msg, 0, sizeof(msg));
2334 msg.opaque.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
2335 msg.opaque.message.header.request.opaque = htonl(seqno);
2336 msg.opaque.message.body.tap.enginespecific_length = htons(nengine);
2337 msg.opaque.message.body.tap.ttl = ttl;
2338 msg.opaque.message.body.tap.flags = htons(tap_flags);
2339 msg.opaque.message.header.request.extlen = 8;
2340 msg.opaque.message.header.request.vbucket = htons(vbucket);
2341 info.info.nvalue = IOV_MAX1024;
2342
2343 switch (event) {
2344 case TAP_NOOP :
2345 send_data = true1;
2346 msg.noop.message.header.request.opcode = PROTOCOL_BINARY_CMD_NOOP;
2347 msg.noop.message.header.request.extlen = 0;
2348 msg.noop.message.header.request.bodylen = htonl(0);
2349 memcpy(c->wcurr, msg.noop.bytes, sizeof(msg.noop.bytes));
2350 add_iov(c, c->wcurr, sizeof(msg.noop.bytes));
2351 c->wcurr += sizeof(msg.noop.bytes);
2352 c->wbytes += sizeof(msg.noop.bytes);
2353 break;
2354 case TAP_PAUSE :
2355 more_data = false0;
2356 break;
2357 case TAP_CHECKPOINT_START:
2358 case TAP_CHECKPOINT_END:
2359 case TAP_MUTATION:
2360 if (!settings.engine.v1->get_item_info(settings.engine.v0, c, it,
2361 (void*)&info)) {
2362 settings.engine.v1->release(settings.engine.v0, c, it);
2363 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2364 "%d: Failed to get item info\n", c->sfd);
2365 break;
2366 }
2367 send_data = true1;
2368 c->ilist[c->ileft++] = it;
2369
2370 if (event == TAP_CHECKPOINT_START) {
2371 msg.mutation.message.header.request.opcode =
2372 PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_START;
2373 cb_mutex_enter(&tap_stats.mutex);
2374 tap_stats.sent.checkpoint_start++;
2375 cb_mutex_exit(&tap_stats.mutex);
2376 } else if (event == TAP_CHECKPOINT_END) {
2377 msg.mutation.message.header.request.opcode =
2378 PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_END;
2379 cb_mutex_enter(&tap_stats.mutex);
2380 tap_stats.sent.checkpoint_end++;
2381 cb_mutex_exit(&tap_stats.mutex);
2382 } else if (event == TAP_MUTATION) {
2383 msg.mutation.message.header.request.opcode = PROTOCOL_BINARY_CMD_TAP_MUTATION;
2384 cb_mutex_enter(&tap_stats.mutex);
2385 tap_stats.sent.mutation++;
2386 cb_mutex_exit(&tap_stats.mutex);
2387 }
2388
2389 msg.mutation.message.header.request.cas = htonll(info.info.cas);
2390 msg.mutation.message.header.request.keylen = htons(info.info.nkey);
2391 msg.mutation.message.header.request.extlen = 16;
2392 if (c->supports_datatype) {
2393 msg.mutation.message.header.request.datatype = info.info.datatype;
2394 } else {
2395 switch (info.info.datatype) {
2396 case 0:
2397 break;
2398 case PROTOCOL_BINARY_DATATYPE_JSON:
2399 break;
2400 case PROTOCOL_BINARY_DATATYPE_COMPRESSED:
2401 case PROTOCOL_BINARY_DATATYPE_COMPRESSED_JSON:
2402 inflate = true1;
2403 break;
2404 default:
2405 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2406 "%d: shipping data with"
2407 " an invalid datatype "
2408 "(stripping info)",
2409 c->sfd);
2410 }
2411 msg.mutation.message.header.request.datatype = 0;
2412 }
2413
2414 bodylen = 16 + info.info.nkey + nengine;
2415 if ((tap_flags & TAP_FLAG_NO_VALUE0x02) == 0) {
2416 if (inflate) {
2417 if (snappy_uncompressed_length(info.info.value[0].iov_base,
2418 info.info.nbytes,
2419 &inflated_length) == SNAPPY_OK) {
2420 bodylen += inflated_length;
2421 } else {
2422 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
2423 "<%d ERROR: Failed to determine inflated size. Sending as compressed",
2424 c->sfd);
2425 inflate = false0;
2426 bodylen += info.info.nbytes;
2427 }
2428 } else {
2429 bodylen += info.info.nbytes;
2430 }
2431 }
2432 msg.mutation.message.header.request.bodylen = htonl(bodylen);
2433
2434 if ((tap_flags & TAP_FLAG_NETWORK_BYTE_ORDER0x04) == 0) {
2435 msg.mutation.message.body.item.flags = htonl(info.info.flags);
2436 } else {
2437 msg.mutation.message.body.item.flags = info.info.flags;
2438 }
2439 msg.mutation.message.body.item.expiration = htonl(info.info.exptime);
2440 msg.mutation.message.body.tap.enginespecific_length = htons(nengine);
2441 msg.mutation.message.body.tap.ttl = ttl;
2442 msg.mutation.message.body.tap.flags = htons(tap_flags);
2443 memcpy(c->wcurr, msg.mutation.bytes, sizeof(msg.mutation.bytes));
2444
2445 add_iov(c, c->wcurr, sizeof(msg.mutation.bytes));
2446 c->wcurr += sizeof(msg.mutation.bytes);
2447 c->wbytes += sizeof(msg.mutation.bytes);
2448
2449 if (nengine > 0) {
2450 memcpy(c->wcurr, engine, nengine);
2451 add_iov(c, c->wcurr, nengine);
2452 c->wcurr += nengine;
2453 c->wbytes += nengine;
2454 }
2455
2456 add_iov(c, info.info.key, info.info.nkey);
2457 if ((tap_flags & TAP_FLAG_NO_VALUE0x02) == 0) {
2458 if (inflate) {
2459 void *buf = malloc(inflated_length);
2460 void *body = info.info.value[0].iov_base;
2461 size_t bodylen = info.info.value[0].iov_len;
2462 if (snappy_uncompress(body, bodylen,
2463 buf, &inflated_length) == SNAPPY_OK) {
2464 c->temp_alloc_list[c->temp_alloc_left++] = buf;
2465
2466 add_iov(c, buf, inflated_length);
2467 } else {
2468 free(buf);
2469 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2470 "%d: FATAL: failed to inflate object. shutting down connection", c->sfd);
2471 conn_set_state(c, conn_closing);
2472 return;
2473 }
2474 } else {
2475 int xx;
2476 for (xx = 0; xx < info.info.nvalue; ++xx) {
2477 add_iov(c, info.info.value[xx].iov_base,
2478 info.info.value[xx].iov_len);
2479 }
2480 }
2481 }
2482
2483 break;
2484 case TAP_DELETION:
2485 /* This is a delete */
2486 if (!settings.engine.v1->get_item_info(settings.engine.v0, c, it,
2487 (void*)&info)) {
2488 settings.engine.v1->release(settings.engine.v0, c, it);
2489 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2490 "%d: Failed to get item info\n", c->sfd);
2491 break;
2492 }
2493 send_data = true1;
2494 c->ilist[c->ileft++] = it;
2495 msg.delete.message.header.request.opcode = PROTOCOL_BINARY_CMD_TAP_DELETE;
2496 msg.delete.message.header.request.cas = htonll(info.info.cas);
2497 msg.delete.message.header.request.keylen = htons(info.info.nkey);
2498
2499 bodylen = 8 + info.info.nkey + nengine;
2500 if ((tap_flags & TAP_FLAG_NO_VALUE0x02) == 0) {
2501 bodylen += info.info.nbytes;
2502 }
2503 msg.delete.message.header.request.bodylen = htonl(bodylen);
2504
2505 memcpy(c->wcurr, msg.delete.bytes, sizeof(msg.delete.bytes));
2506 add_iov(c, c->wcurr, sizeof(msg.delete.bytes));
2507 c->wcurr += sizeof(msg.delete.bytes);
2508 c->wbytes += sizeof(msg.delete.bytes);
2509
2510 if (nengine > 0) {
2511 memcpy(c->wcurr, engine, nengine);
2512 add_iov(c, c->wcurr, nengine);
2513 c->wcurr += nengine;
2514 c->wbytes += nengine;
2515 }
2516
2517 add_iov(c, info.info.key, info.info.nkey);
2518 if ((tap_flags & TAP_FLAG_NO_VALUE0x02) == 0) {
2519 int xx;
2520 for (xx = 0; xx < info.info.nvalue; ++xx) {
2521 add_iov(c, info.info.value[xx].iov_base,
2522 info.info.value[xx].iov_len);
2523 }
2524 }
2525
2526 cb_mutex_enter(&tap_stats.mutex);
2527 tap_stats.sent.delete++;
2528 cb_mutex_exit(&tap_stats.mutex);
2529 break;
2530
2531 case TAP_DISCONNECT:
2532 disconnect = true1;
2533 more_data = false0;
2534 break;
2535 case TAP_VBUCKET_SET:
2536 case TAP_FLUSH:
2537 case TAP_OPAQUE:
2538 send_data = true1;
2539
2540 if (event == TAP_OPAQUE) {
2541 msg.flush.message.header.request.opcode = PROTOCOL_BINARY_CMD_TAP_OPAQUE;
2542 cb_mutex_enter(&tap_stats.mutex);
2543 tap_stats.sent.opaque++;
2544 cb_mutex_exit(&tap_stats.mutex);
2545
2546 } else if (event == TAP_FLUSH) {
2547 msg.flush.message.header.request.opcode = PROTOCOL_BINARY_CMD_TAP_FLUSH;
2548 cb_mutex_enter(&tap_stats.mutex);
2549 tap_stats.sent.flush++;
2550 cb_mutex_exit(&tap_stats.mutex);
2551 } else if (event == TAP_VBUCKET_SET) {
2552 msg.flush.message.header.request.opcode = PROTOCOL_BINARY_CMD_TAP_VBUCKET_SET;
2553 msg.flush.message.body.tap.flags = htons(tap_flags);
2554 cb_mutex_enter(&tap_stats.mutex);
2555 tap_stats.sent.vbucket_set++;
2556 cb_mutex_exit(&tap_stats.mutex);
2557 }
2558
2559 msg.flush.message.header.request.bodylen = htonl(8 + nengine);
2560 memcpy(c->wcurr, msg.flush.bytes, sizeof(msg.flush.bytes));
2561 add_iov(c, c->wcurr, sizeof(msg.flush.bytes));
2562 c->wcurr += sizeof(msg.flush.bytes);
2563 c->wbytes += sizeof(msg.flush.bytes);
2564 if (nengine > 0) {
2565 memcpy(c->wcurr, engine, nengine);
2566 add_iov(c, c->wcurr, nengine);
2567 c->wcurr += nengine;
2568 c->wbytes += nengine;
2569 }
2570 break;
2571 default:
2572 abort();
2573 }
2574 } while (more_data);
2575
2576 c->ewouldblock = false0;
2577 if (send_data) {
2578 conn_set_state(c, conn_mwrite);
2579 if (disconnect) {
2580 c->write_and_go = conn_closing;
2581 } else {
2582 c->write_and_go = conn_ship_log;
2583 }
2584 } else {
2585 if (disconnect) {
2586 conn_set_state(c, conn_closing);
2587 } else {
2588 /* No more items to ship to the slave at this time.. suspend.. */
2589 if (settings.verbose > 1) {
2590 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
2591 "%d: No more items in tap log.. waiting\n",
2592 c->sfd);
2593 }
2594 c->ewouldblock = true1;
2595 }
2596 }
2597}
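/*
 * ship_tap_log() drains at most 10 tap events per pass (the ii counter) so one
 * tap stream cannot monopolise the worker thread. Each event is framed as a
 * TAP_* binary request in msg and staged through c->wbuf / the iovec list. If
 * anything was queued, write_and_go sends it and returns to conn_ship_log (or
 * conn_closing after TAP_DISCONNECT); otherwise the connection parks with
 * ewouldblock until the engine signals more data.
 */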
2598
2599static ENGINE_ERROR_CODE default_unknown_command(EXTENSION_BINARY_PROTOCOL_DESCRIPTOR *descriptor,
2600 ENGINE_HANDLE* handle,
2601 const void* cookie,
2602 protocol_binary_request_header *request,
2603 ADD_RESPONSE response)
2604{
2605 return settings.engine.v1->unknown_command(handle, cookie, request, response);
2606}
2607
2608struct request_lookup {
2609 EXTENSION_BINARY_PROTOCOL_DESCRIPTOR *descriptor;
2610 BINARY_COMMAND_CALLBACK callback;
2611};
2612
2613static struct request_lookup request_handlers[0x100];
2614
2615typedef void (*RESPONSE_HANDLER)(conn*);
2616/**
2617 * A map between the response packets op-code and the function to handle
2618 * the response message.
2619 */
2620static RESPONSE_HANDLER response_handlers[0x100];
2621
2622static void setup_binary_lookup_cmd(EXTENSION_BINARY_PROTOCOL_DESCRIPTOR *descriptor,
2623 uint8_t cmd,
2624 BINARY_COMMAND_CALLBACK new_handler) {
2625 request_handlers[cmd].descriptor = descriptor;
2626 request_handlers[cmd].callback = new_handler;
2627}
2628
2629static void process_bin_unknown_packet(conn *c) {
2630 void *packet = c->rcurr - (c->binary_header.request.bodylen +
2631 sizeof(c->binary_header));
2632 ENGINE_ERROR_CODE ret = c->aiostat;
2633 c->aiostat = ENGINE_SUCCESS;
2634 c->ewouldblock = false0;
2635
2636 if (ret == ENGINE_SUCCESS) {
2637 struct request_lookup *rq = request_handlers + c->binary_header.request.opcode;
2638 ret = rq->callback(rq->descriptor, settings.engine.v0, c, packet,
2639 binary_response_handler);
2640 }
2641
2642 switch (ret) {
2643 case ENGINE_SUCCESS:
2644 if (c->dynamic_buffer.buffer != NULL((void*)0)) {
2645 write_and_free(c, c->dynamic_buffer.buffer, c->dynamic_buffer.offset);
2646 c->dynamic_buffer.buffer = NULL((void*)0);
2647 } else {
2648 conn_set_state(c, conn_new_cmd);
2649 }
2650 break;
2651 case ENGINE_EWOULDBLOCK:
2652 c->ewouldblock = true1;
2653 break;
2654 case ENGINE_DISCONNECT:
2655 conn_set_state(c, conn_closing);
2656 break;
2657 default:
2658 /* Release the dynamic buffer.. it may be partial.. */
2659 free(c->dynamic_buffer.buffer);
2660 c->dynamic_buffer.buffer = NULL((void*)0);
2661 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
2662 }
2663}
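/*
 * Unknown opcodes are dispatched through request_handlers[opcode]; the table is
 * presumably initialised elsewhere so that unregistered opcodes fall back to
 * default_unknown_command(), i.e. the engine's unknown_command(). A handler
 * that produced output via binary_response_handler leaves it in the dynamic
 * buffer, which is written and freed here; on any other error a partially
 * built buffer is discarded before the error packet is sent.
 */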
2664
2665static void cbsasl_refresh_main(void *c)
2666{
2667 int rv = cbsasl_server_refresh();
2668 if (rv == SASL_OK) {
2669 notify_io_complete(c, ENGINE_SUCCESS);
2670 } else {
2671 notify_io_complete(c, ENGINE_EINVAL);
2672 }
2673}
2674
2675static ENGINE_ERROR_CODE refresh_cbsasl(conn *c)
2676{
2677 cb_thread_t tid;
2678 int err;
2679
2680 err = cb_create_thread(&tid, cbsasl_refresh_main, c, 1);
2681 if (err != 0) {
2682 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2683 "Failed to create cbsasl db "
2684 "update thread: %s",
2685 strerror(err));
2686 return ENGINE_DISCONNECT;
2687 }
2688
2689 return ENGINE_EWOULDBLOCK;
2690}
2691
2692#if 0
2693static void ssl_certs_refresh_main(void *c)
2694{
2695 /* Update the internal certificates */
2696
2697 notify_io_complete(c, ENGINE_SUCCESS);
2698}
2699#endif
2700static ENGINE_ERROR_CODE refresh_ssl_certs(conn *c)
2701{
2702#if 0
2703 cb_thread_t tid;
2704 int err;
2705
2706 err = cb_create_thread(&tid, ssl_certs_refresh_main, c, 1);
2707 if (err != 0) {
2708 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2709 "Failed to create ssl_certificate "
2710 "update thread: %s",
2711 strerror(err));
2712 return ENGINE_DISCONNECT;
2713 }
2714
2715 return ENGINE_EWOULDBLOCK;
2716#endif
2717 return ENGINE_SUCCESS;
2718}
2719
2720static void process_bin_tap_connect(conn *c) {
2721 TAP_ITERATOR iterator;
2722 char *packet = (c->rcurr - (c->binary_header.request.bodylen +
2723 sizeof(c->binary_header)));
2724 protocol_binary_request_tap_connect *req = (void*)packet;
2725 const char *key = packet + sizeof(req->bytes);
2726 const char *data = key + c->binary_header.request.keylen;
2727 uint32_t flags = 0;
2728 size_t ndata = c->binary_header.request.bodylen -
2729 c->binary_header.request.extlen -
2730 c->binary_header.request.keylen;
2731
2732 if (c->binary_header.request.extlen == 4) {
2733 flags = ntohl(req->message.body.flags);
2734
2735 if (flags & TAP_CONNECT_FLAG_BACKFILL0x01) {
2736 /* the userdata has to be at least 8 bytes! */
2737 if (ndata < 8) {
2738 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2739 "%d: ERROR: Invalid tap connect message\n",
2740 c->sfd);
2741 conn_set_state(c, conn_closing);
2742 return ;
2743 }
2744 }
2745 } else {
2746 data -= 4;
2747 key -= 4;
2748 }
2749
2750 if (settings.verbose && c->binary_header.request.keylen > 0) {
2751 char buffer[1024];
2752 int len = c->binary_header.request.keylen;
2753 if (len >= sizeof(buffer)) {
2754 len = sizeof(buffer) - 1;
2755 }
2756 memcpy(buffer, key, len);
2757 buffer[len] = '\0';
2758 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
2759 "%d: Trying to connect with named tap connection: <%s>\n",
2760 c->sfd, buffer);
2761 }
2762
2763 iterator = settings.engine.v1->get_tap_iterator(
2764 settings.engine.v0, c, key, c->binary_header.request.keylen,
2765 flags, data, ndata);
2766
2767 if (iterator == NULL((void*)0)) {
2768 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
2769 "%d: FATAL: The engine does not support tap\n",
2770 c->sfd);
2771 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
2772 c->write_and_go = conn_closing;
2773 } else {
2774 c->tap_iterator = iterator;
2775 c->which = EV_WRITE0x04;
2776 conn_set_state(c, conn_ship_log);
2777 }
2778}
2779
2780static void process_bin_tap_packet(tap_event_t event, conn *c) {
2781 char *packet;
2782 protocol_binary_request_tap_no_extras *tap;
2783 uint16_t nengine;
2784 uint16_t tap_flags;
2785 uint32_t seqno;
2786 uint8_t ttl;
2787 char *engine_specific;
2788 char *key;
2789 uint16_t nkey;
2790 char *data;
2791 uint32_t flags;
2792 uint32_t exptime;
2793 uint32_t ndata;
2794 ENGINE_ERROR_CODE ret;
2795
2796 cb_assert(c != NULL);
2797 packet = (c->rcurr - (c->binary_header.request.bodylen +
2798 sizeof(c->binary_header)));
2799 tap = (void*)packet;
2800 nengine = ntohs(tap->message.body.tap.enginespecific_length);
2801 tap_flags = ntohs(tap->message.body.tap.flags);
2802 seqno = ntohl(tap->message.header.request.opaque);
2803 ttl = tap->message.body.tap.ttl;
2804 engine_specific = packet + sizeof(tap->bytes);
2805 key = engine_specific + nengine;
2806 nkey = c->binary_header.request.keylen;
2807 data = key + nkey;
2808 flags = 0;
2809 exptime = 0;
2810 ndata = c->binary_header.request.bodylen - nengine - nkey - 8;
2811 ret = c->aiostat;
2812
2813 if (ttl == 0) {
2814 ret = ENGINE_EINVAL;
2815 } else {
2816 if (event == TAP_MUTATION || event == TAP_CHECKPOINT_START ||
2817 event == TAP_CHECKPOINT_END) {
2818 protocol_binary_request_tap_mutation *mutation = (void*)tap;
2819
2820 /* engine_specific data in protocol_binary_request_tap_mutation is */
2821 /* at a different offset than protocol_binary_request_tap_no_extras */
2822 engine_specific = packet + sizeof(mutation->bytes);
2823
2824 flags = mutation->message.body.item.flags;
2825 if ((tap_flags & TAP_FLAG_NETWORK_BYTE_ORDER0x04) == 0) {
2826 flags = ntohl(flags);
2827 }
2828
2829 exptime = ntohl(mutation->message.body.item.expiration);
2830 key += 8;
2831 data += 8;
2832 ndata -= 8;
2833 }
2834
2835 if (ret == ENGINE_SUCCESS) {
2836 uint8_t datatype = c->binary_header.request.datatype;
2837 if (event == TAP_MUTATION && !c->supports_datatype) {
2838 if (checkUTF8JSON((void*)data, ndata)) {
2839 datatype = PROTOCOL_BINARY_DATATYPE_JSON;
2840 }
2841 }
2842
2843 ret = settings.engine.v1->tap_notify(settings.engine.v0, c,
2844 engine_specific, nengine,
2845 ttl - 1, tap_flags,
2846 event, seqno,
2847 key, nkey,
2848 flags, exptime,
2849 ntohll(tap->message.header.request.cas),
2850 datatype,
2851 data, ndata,
2852 c->binary_header.request.vbucket);
2853 }
2854 }
2855
2856 switch (ret) {
2857 case ENGINE_DISCONNECT:
2858 conn_set_state(c, conn_closing);
2859 break;
2860 case ENGINE_EWOULDBLOCK:
2861 c->ewouldblock = true1;
2862 break;
2863 default:
2864 if ((tap_flags & TAP_FLAG_ACK0x01) || (ret != ENGINE_SUCCESS)) {
2865 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
2866 } else {
2867 conn_set_state(c, conn_new_cmd);
2868 }
2869 }
2870}
2871
2872static void process_bin_tap_ack(conn *c) {
2873 char *packet;
2874 protocol_binary_response_no_extras *rsp;
2875 uint32_t seqno;
2876 uint16_t status;
2877 char *key;
2878 ENGINE_ERROR_CODE ret = ENGINE_DISCONNECT;
2879
2880 cb_assert(c != NULL);
2881 packet = (c->rcurr - (c->binary_header.request.bodylen + sizeof(c->binary_header)));
2882 rsp = (void*)packet;
2883 seqno = ntohl(rsp->message.header.response.opaque);
2884 status = ntohs(rsp->message.header.response.status);
2885 key = packet + sizeof(rsp->bytes);
2886
2887 if (settings.engine.v1->tap_notify != NULL((void*)0)) {
2888 ret = settings.engine.v1->tap_notify(settings.engine.v0, c, NULL((void*)0), 0, 0, status,
2889 TAP_ACK, seqno, key,
2890 c->binary_header.request.keylen, 0, 0,
2891 0, c->binary_header.request.datatype, NULL((void*)0),
2892 0, 0);
2893 }
2894
2895 if (ret == ENGINE_DISCONNECT) {
2896 conn_set_state(c, conn_closing);
2897 } else {
2898 conn_set_state(c, conn_ship_log);
2899 }
2900}
2901
2902/**
2903 * We received a noop response.. just ignore it
2904 */
2905static void process_bin_noop_response(conn *c) {
2906 cb_assert(c != NULL);
2907 conn_set_state(c, conn_new_cmd);
2908}
2909
2910/*******************************************************************************
2911 ** UPR MESSAGE PRODUCERS **
2912 ******************************************************************************/
2913static ENGINE_ERROR_CODE upr_message_get_failover_log(const void *cookie,
2914 uint32_t opaque,
2915 uint16_t vbucket)
2916{
2917 protocol_binary_request_upr_get_failover_log packet;
2918 conn *c = (void*)cookie;
2919
2920 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
2921 /* We don't have room in the buffer */
2922 return ENGINE_E2BIG;
2923 }
2924
2925 memset(packet.bytes, 0, sizeof(packet.bytes));
2926 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
2927 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_GET_FAILOVER_LOG;
2928 packet.message.header.request.opaque = opaque;
2929 packet.message.header.request.vbucket = htons(vbucket);
2930
2931 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
2932 add_iov(c, c->wcurr, sizeof(packet.bytes));
2933 c->wcurr += sizeof(packet.bytes);
2934 c->wbytes += sizeof(packet.bytes);
2935
2936 return ENGINE_SUCCESS;
2937}
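/*
 * Editor's note -- the UPR message producers in this section all follow the
 * same shape: check that the connection's write buffer still has room,
 * build a zero-initialized packet with multi-byte header fields converted
 * to network byte order, copy the packet image to the current write
 * position, register that region as an iovec, and advance wcurr/wbytes.
 * A self-contained sketch of that append step (names and sizes are
 * illustrative, not the daemon's API):
 */
#include <stdint.h>
#include <string.h>
#include <sys/uio.h>

enum { SKETCH_WBUF_SIZE = 2048, SKETCH_IOV_SLOTS = 16 };

struct sketch_writer {
    uint8_t      wbuf[SKETCH_WBUF_SIZE];
    uint8_t     *wcurr;               /* next free byte in wbuf */
    size_t       wbytes;              /* bytes queued so far */
    struct iovec iov[SKETCH_IOV_SLOTS];
    int          iovused;
};

static void sketch_writer_init(struct sketch_writer *w) {
    memset(w, 0, sizeof(*w));
    w->wcurr = w->wbuf;
}

/* Append a fixed-size packet image and record an iovec for it; returns 0 on
 * success, -1 when it does not fit (the producers map that case to
 * ENGINE_E2BIG so the engine retries on a later pass). */
static int sketch_append_packet(struct sketch_writer *w,
                                const void *pkt, size_t len) {
    if (w->wbytes + len >= sizeof(w->wbuf) || w->iovused == SKETCH_IOV_SLOTS) {
        return -1;
    }
    memcpy(w->wcurr, pkt, len);
    w->iov[w->iovused].iov_base = w->wcurr;
    w->iov[w->iovused].iov_len  = len;
    w->iovused++;
    w->wcurr  += len;
    w->wbytes += len;
    return 0;
}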
2938
2939static ENGINE_ERROR_CODE upr_message_stream_req(const void *cookie,
2940 uint32_t opaque,
2941 uint16_t vbucket,
2942 uint32_t flags,
2943 uint64_t start_seqno,
2944 uint64_t end_seqno,
2945 uint64_t vbucket_uuid,
2946 uint64_t snap_start_seqno,
2947 uint64_t snap_end_seqno)
2948{
2949 protocol_binary_request_upr_stream_req packet;
2950 conn *c = (void*)cookie;
2951
2952 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
2953 /* We don't have room in the buffer */
2954 return ENGINE_E2BIG;
2955 }
2956
2957 memset(packet.bytes, 0, sizeof(packet.bytes));
2958 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
2959 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_STREAM_REQ;
2960 packet.message.header.request.extlen = 48;
2961 packet.message.header.request.bodylen = htonl(48);
2962 packet.message.header.request.opaque = opaque;
2963 packet.message.header.request.vbucket = htons(vbucket);
2964
2965 packet.message.body.flags = ntohl(flags);
2966 packet.message.body.start_seqno = ntohll(start_seqno);
2967 packet.message.body.end_seqno = ntohll(end_seqno);
2968 packet.message.body.vbucket_uuid = ntohll(vbucket_uuid);
2969 packet.message.body.snap_start_seqno = ntohll(snap_start_seqno);
2970 packet.message.body.snap_end_seqno = ntohll(snap_end_seqno);
2971
2972 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
2973 add_iov(c, c->wcurr, sizeof(packet.bytes));
2974 c->wcurr += sizeof(packet.bytes);
2975 c->wbytes += sizeof(packet.bytes);
2976
2977 return ENGINE_SUCCESS;
2978}
2979
2980static ENGINE_ERROR_CODE upr_message_add_stream_response(const void *cookie,
2981 uint32_t opaque,
2982 uint32_t dialogopaque,
2983 uint8_t status)
2984{
2985 protocol_binary_response_upr_add_stream packet;
2986 conn *c = (void*)cookie;
2987
2988 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
2989 /* We don't have room in the buffer */
2990 return ENGINE_E2BIG;
2991 }
2992
2993 memset(packet.bytes, 0, sizeof(packet.bytes));
2994 packet.message.header.response.magic = (uint8_t)PROTOCOL_BINARY_RES;
2995 packet.message.header.response.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_ADD_STREAM;
2996 packet.message.header.response.extlen = 4;
2997 packet.message.header.response.status = htons(status);
2998 packet.message.header.response.bodylen = htonl(4);
2999 packet.message.header.response.opaque = opaque;
3000 packet.message.body.opaque = ntohl(dialogopaque);
3001
3002 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3003 add_iov(c, c->wcurr, sizeof(packet.bytes));
3004 c->wcurr += sizeof(packet.bytes);
3005 c->wbytes += sizeof(packet.bytes);
3006
3007 return ENGINE_SUCCESS;
3008}
3009
3010static ENGINE_ERROR_CODE upr_message_set_vbucket_state_response(const void *cookie,
3011 uint32_t opaque,
3012 uint8_t status)
3013{
3014 protocol_binary_response_upr_set_vbucket_state packet;
3015 conn *c = (void*)cookie;
3016
3017 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
3018 /* We don't have room in the buffer */
3019 return ENGINE_E2BIG;
3020 }
3021
3022 memset(packet.bytes, 0, sizeof(packet.bytes));
3023 packet.message.header.response.magic = (uint8_t)PROTOCOL_BINARY_RES;
3024 packet.message.header.response.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_SET_VBUCKET_STATE;
3025 packet.message.header.response.extlen = 0;
3026 packet.message.header.response.status = htons(status);
3027 packet.message.header.response.bodylen = 0;
3028 packet.message.header.response.opaque = opaque;
3029
3030 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3031 add_iov(c, c->wcurr, sizeof(packet.bytes));
3032 c->wcurr += sizeof(packet.bytes);
3033 c->wbytes += sizeof(packet.bytes);
3034
3035 return ENGINE_SUCCESS;
3036}
3037
3038static ENGINE_ERROR_CODE upr_message_stream_end(const void *cookie,
3039 uint32_t opaque,
3040 uint16_t vbucket,
3041 uint32_t flags)
3042{
3043 protocol_binary_request_upr_stream_end packet;
3044 conn *c = (void*)cookie;
3045
3046 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
3047 /* We don't have room in the buffer */
3048 return ENGINE_E2BIG;
3049 }
3050
3051 memset(packet.bytes, 0, sizeof(packet.bytes));
3052 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3053 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_STREAM_END;
3054 packet.message.header.request.extlen = 4;
3055 packet.message.header.request.bodylen = htonl(4);
3056 packet.message.header.request.opaque = opaque;
3057 packet.message.header.request.vbucket = htons(vbucket);
3058 packet.message.body.flags = ntohl(flags);
3059
3060 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3061 add_iov(c, c->wcurr, sizeof(packet.bytes));
3062 c->wcurr += sizeof(packet.bytes);
3063 c->wbytes += sizeof(packet.bytes);
3064
3065 return ENGINE_SUCCESS;
3066}
3067
3068static ENGINE_ERROR_CODE upr_message_marker(const void *cookie,
3069 uint32_t opaque,
3070 uint16_t vbucket,
3071 uint64_t start_seqno,
3072 uint64_t end_seqno,
3073 uint32_t flags)
3074{
3075 protocol_binary_request_upr_snapshot_marker packet;
3076 conn *c = (void*)cookie;
3077
3078 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
3079 /* We don't have room in the buffer */
3080 return ENGINE_E2BIG;
3081 }
3082
3083 memset(packet.bytes, 0, sizeof(packet.bytes));
3084 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3085 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_SNAPSHOT_MARKER;
3086 packet.message.header.request.opaque = opaque;
3087 packet.message.header.request.vbucket = htons(vbucket);
3088 packet.message.header.request.extlen = 20;
3089 packet.message.header.request.bodylen = htonl(20);
3090 packet.message.body.start_seqno = htonll(start_seqno);
3091 packet.message.body.end_seqno = htonll(end_seqno);
3092 packet.message.body.flags = htonl(flags);
3093
3094 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3095 add_iov(c, c->wcurr, sizeof(packet.bytes));
3096 c->wcurr += sizeof(packet.bytes);
3097 c->wbytes += sizeof(packet.bytes);
3098
3099 return ENGINE_SUCCESS;
3100}
3101
3102static ENGINE_ERROR_CODE upr_message_mutation(const void* cookie,
3103 uint32_t opaque,
3104 item *it,
3105 uint16_t vbucket,
3106 uint64_t by_seqno,
3107 uint64_t rev_seqno,
3108 uint32_t lock_time,
3109 const void *meta,
3110 uint16_t nmeta,
3111 uint8_t nru)
3112{
3113 conn *c = (void*)cookie;
3114 item_info_holder info;
3115 protocol_binary_request_upr_mutation packet;
3116 int xx;
3117
3118 memset(&info, 0, sizeof(info));
3119 info.info.nvalue = IOV_MAX;
3120
3121 if (!settings.engine.v1->get_item_info(settings.engine.v0, c, it,
3122 (void*)&info)) {
3123 settings.engine.v1->release(settings.engine.v0, c, it);
3124 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
3125 "%d: Failed to get item info\n", c->sfd);
3126 return ENGINE_FAILED;
3127 }
3128
3129 memset(packet.bytes, 0, sizeof(packet));
3130 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3131 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_MUTATION;
3132 packet.message.header.request.opaque = opaque;
3133 packet.message.header.request.vbucket = htons(vbucket);
3134 packet.message.header.request.cas = htonll(info.info.cas);
3135 packet.message.header.request.keylen = htons(info.info.nkey);
3136 packet.message.header.request.extlen = 31;
3137 packet.message.header.request.bodylen = ntohl(31 + info.info.nkey + info.info.nbytes + nmeta);
3138 packet.message.header.request.datatype = info.info.datatype;
3139 packet.message.body.by_seqno = htonll(by_seqno);
3140 packet.message.body.rev_seqno = htonll(rev_seqno);
3141 packet.message.body.lock_time = htonl(lock_time);
3142 packet.message.body.flags = info.info.flags;
3143 packet.message.body.expiration = htonl(info.info.exptime);
3144 packet.message.body.nmeta = htons(nmeta);
3145 packet.message.body.nru = nru;
3146
3147 c->ilist[c->ileft++] = it;
3148
3149 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3150 add_iov(c, c->wcurr, sizeof(packet.bytes));
3151 c->wcurr += sizeof(packet.bytes);
3152 c->wbytes += sizeof(packet.bytes);
3153 add_iov(c, info.info.key, info.info.nkey);
3154 for (xx = 0; xx < info.info.nvalue; ++xx) {
3155 add_iov(c, info.info.value[xx].iov_base, info.info.value[xx].iov_len);
3156 }
3157
3158 memcpy(c->wcurr, meta, nmeta);
3159 add_iov(c, c->wcurr, nmeta);
3160 c->wcurr += nmeta;
3161 c->wbytes += nmeta;
3162
3163 return ENGINE_SUCCESS;
3164}
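/*
 * Editor's note -- upr_message_mutation above differs from the sibling
 * producers: the key and value are not copied into the write buffer at all
 * (they are sent through iovecs that point at the engine item, which is
 * pinned in c->ilist until the write completes), and only the header and
 * the nmeta trailing bytes are memcpy'd into wbuf. Note also that, unlike
 * the other producers, there is no wbytes/wsize room check before those
 * copies. If such a guard were wanted, it would mirror the check the other
 * producers perform; purely an illustrative fragment, not in the source:
 */
    if (c->wbytes + sizeof(packet.bytes) + nmeta >= c->wsize) {
        /* no room: drop the pinned item reference and ask to be retried */
        settings.engine.v1->release(settings.engine.v0, c, it);
        return ENGINE_E2BIG;
    }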
3165
3166static ENGINE_ERROR_CODE upr_message_deletion(const void* cookie,
3167 uint32_t opaque,
3168 const void *key,
3169 uint16_t nkey,
3170 uint64_t cas,
3171 uint16_t vbucket,
3172 uint64_t by_seqno,
3173 uint64_t rev_seqno,
3174 const void *meta,
3175 uint16_t nmeta)
3176{
3177 conn *c = (void*)cookie;
3178 protocol_binary_request_upr_deletion packet;
3179 if (c->wbytes + sizeof(packet.bytes) + nkey + nmeta >= c->wsize) {
3180 return ENGINE_E2BIG;
3181 }
3182
3183 memset(packet.bytes, 0, sizeof(packet));
3184 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3185 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_DELETION;
3186 packet.message.header.request.opaque = opaque;
3187 packet.message.header.request.vbucket = htons(vbucket);
3188 packet.message.header.request.cas = htonll(cas);
3189 packet.message.header.request.keylen = htons(nkey);
3190 packet.message.header.request.extlen = 18;
3191 packet.message.header.request.bodylen = ntohl(18 + nkey + nmeta);
3192 packet.message.body.by_seqno = htonll(by_seqno);
3193 packet.message.body.rev_seqno = htonll(rev_seqno);
3194 packet.message.body.nmeta = htons(nmeta);
3195
3196 add_iov(c, c->wcurr, sizeof(packet.bytes) + nkey + nmeta);
3197 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3198 c->wcurr += sizeof(packet.bytes);
3199 c->wbytes += sizeof(packet.bytes);
3200 memcpy(c->wcurr, key, nkey);
3201 c->wcurr += nkey;
3202 c->wbytes += nkey;
3203 memcpy(c->wcurr, meta, nmeta);
3204 c->wcurr += nmeta;
3205 c->wbytes += nmeta;
3206
3207 return ENGINE_SUCCESS;
3208}
3209
3210static ENGINE_ERROR_CODE upr_message_expiration(const void* cookie,
3211 uint32_t opaque,
3212 const void *key,
3213 uint16_t nkey,
3214 uint64_t cas,
3215 uint16_t vbucket,
3216 uint64_t by_seqno,
3217 uint64_t rev_seqno,
3218 const void *meta,
3219 uint16_t nmeta)
3220{
3221 conn *c = (void*)cookie;
3222 protocol_binary_request_upr_deletion packet;
3223
3224 if (c->wbytes + sizeof(packet.bytes) + nkey + nmeta >= c->wsize) {
3225 return ENGINE_E2BIG;
3226 }
3227
3228 memset(packet.bytes, 0, sizeof(packet));
3229 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3230 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_EXPIRATION;
3231 packet.message.header.request.opaque = opaque;
3232 packet.message.header.request.vbucket = htons(vbucket);
3233 packet.message.header.request.cas = htonll(cas);
3234 packet.message.header.request.keylen = htons(nkey);
3235 packet.message.header.request.extlen = 18;
3236 packet.message.header.request.bodylen = ntohl(18 + nkey + nmeta);
3237 packet.message.body.by_seqno = htonll(by_seqno);
3238 packet.message.body.rev_seqno = htonll(rev_seqno);
3239 packet.message.body.nmeta = htons(nmeta);
3240
3241 add_iov(c, c->wcurr, sizeof(packet.bytes) + nkey + nmeta);
3242 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3243 c->wcurr += sizeof(packet.bytes);
3244 c->wbytes += sizeof(packet.bytes);
3245 memcpy(c->wcurr, key, nkey);
3246 c->wcurr += nkey;
3247 c->wbytes += nkey;
3248 memcpy(c->wcurr, meta, nmeta);
3249 c->wcurr += nmeta;
3250 c->wbytes += nmeta;
3251
3252 return ENGINE_SUCCESS;
3253}
3254
3255static ENGINE_ERROR_CODE upr_message_flush(const void* cookie,
3256 uint32_t opaque,
3257 uint16_t vbucket)
3258{
3259 protocol_binary_request_upr_flush packet;
3260 conn *c = (void*)cookie;
3261
3262 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
3263 /* We don't have room in the buffer */
3264 return ENGINE_E2BIG;
3265 }
3266
3267 memset(packet.bytes, 0, sizeof(packet.bytes));
3268 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3269 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_FLUSH;
3270 packet.message.header.request.opaque = opaque;
3271 packet.message.header.request.vbucket = htons(vbucket);
3272
3273 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3274 add_iov(c, c->wcurr, sizeof(packet.bytes));
3275 c->wcurr += sizeof(packet.bytes);
3276 c->wbytes += sizeof(packet.bytes);
3277
3278 return ENGINE_SUCCESS;
3279}
3280
3281static ENGINE_ERROR_CODE upr_message_set_vbucket_state(const void* cookie,
3282 uint32_t opaque,
3283 uint16_t vbucket,
3284 vbucket_state_t state)
3285{
3286 protocol_binary_request_upr_set_vbucket_state packet;
3287 conn *c = (void*)cookie;
3288
3289 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
3290 /* We don't have room in the buffer */
3291 return ENGINE_E2BIG;
3292 }
3293
3294 memset(packet.bytes, 0, sizeof(packet.bytes));
3295 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3296 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_SET_VBUCKET_STATE;
3297 packet.message.header.request.extlen = 1;
3298 packet.message.header.request.bodylen = htonl(1);
3299 packet.message.header.request.opaque = opaque;
3300 packet.message.header.request.vbucket = htons(vbucket);
3301
3302 switch (state) {
3303 case vbucket_state_active:
3304 packet.message.body.state = 0x01;
3305 break;
3306 case vbucket_state_pending:
3307 packet.message.body.state = 0x02;
3308 break;
3309 case vbucket_state_replica:
3310 packet.message.body.state = 0x03;
3311 break;
3312 case vbucket_state_dead:
3313 packet.message.body.state = 0x04;
3314 break;
3315 default:
3316 return ENGINE_EINVAL;
3317 }
3318
3319 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3320 add_iov(c, c->wcurr, sizeof(packet.bytes));
3321 c->wcurr += sizeof(packet.bytes);
3322 c->wbytes += sizeof(packet.bytes);
3323
3324 return ENGINE_SUCCESS;
3325}
3326
3327static ENGINE_ERROR_CODE upr_message_noop(const void* cookie,
3328 uint32_t opaque)
3329{
3330 protocol_binary_request_upr_noop packet;
3331 conn *c = (void*)cookie;
3332
3333 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
3334 /* We don't have room in the buffer */
3335 return ENGINE_E2BIG;
3336 }
3337
3338 memset(packet.bytes, 0, sizeof(packet.bytes));
3339 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3340 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_NOOP;
3341 packet.message.header.request.opaque = opaque;
3342
3343 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3344 add_iov(c, c->wcurr, sizeof(packet.bytes));
3345 c->wcurr += sizeof(packet.bytes);
3346 c->wbytes += sizeof(packet.bytes);
3347
3348 return ENGINE_SUCCESS;
3349}
3350
3351static ENGINE_ERROR_CODE upr_message_buffer_acknowledgement(const void* cookie,
3352 uint32_t opaque,
3353 uint16_t vbucket,
3354 uint32_t buffer_bytes)
3355{
3356 protocol_binary_request_upr_buffer_acknowledgement packet;
3357 conn *c = (void*)cookie;
3358
3359 if (c->wbytes + sizeof(packet.bytes) >= c->wsize) {
3360 /* We don't have room in the buffer */
3361 return ENGINE_E2BIG;
3362 }
3363
3364 memset(packet.bytes, 0, sizeof(packet.bytes));
3365 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3366 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_BUFFER_ACKNOWLEDGEMENT;
3367 packet.message.header.request.opaque = opaque;
3368 packet.message.header.request.vbucket = htons(vbucket);
3369 packet.message.header.request.bodylen = ntohl(4);
3370 packet.message.body.buffer_bytes = ntohl(buffer_bytes);
3371
3372 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3373 add_iov(c, c->wcurr, sizeof(packet.bytes));
3374 c->wcurr += sizeof(packet.bytes);
3375 c->wbytes += sizeof(packet.bytes);
3376
3377 return ENGINE_SUCCESS;
3378}
3379
3380static ENGINE_ERROR_CODE upr_message_control(const void* cookie,
3381 uint32_t opaque,
3382 const void *key,
3383 uint16_t nkey,
3384 const void *value,
3385 uint32_t nvalue)
3386{
3387 protocol_binary_request_upr_control packet;
3388 conn *c = (void*)cookie;
3389
3390 if (c->wbytes + sizeof(packet.bytes) + nkey + nvalue >= c->wsize) {
3391 /* We don't have room in the buffer */
3392 return ENGINE_E2BIG;
3393 }
3394
3395 memset(packet.bytes, 0, sizeof(packet.bytes));
3396 packet.message.header.request.magic = (uint8_t)PROTOCOL_BINARY_REQ;
3397 packet.message.header.request.opcode = (uint8_t)PROTOCOL_BINARY_CMD_UPR_CONTROL;
3398 packet.message.header.request.opaque = opaque;
3399 packet.message.header.request.keylen = ntohs(nkey);
3400 packet.message.header.request.bodylen = ntohl(nvalue + nkey);
3401
3402 add_iov(c, c->wcurr, sizeof(packet.bytes) + nkey + nvalue);
3403 memcpy(c->wcurr, packet.bytes, sizeof(packet.bytes));
3404 c->wcurr += sizeof(packet.bytes);
3405 c->wbytes += sizeof(packet.bytes);
3406
3407 memcpy(c->wcurr, key, nkey);
3408 c->wcurr += nkey;
3409 c->wbytes += nkey;
3410
3411 memcpy(c->wcurr, value, nvalue);
3412 c->wcurr += nvalue;
3413 c->wbytes += nvalue;
3414
3415 return ENGINE_SUCCESS;
3416}
3417
3418static void ship_upr_log(conn *c) {
3419 static struct upr_message_producers producers = {
3420 upr_message_get_failover_log,
3421 upr_message_stream_req,
3422 upr_message_add_stream_response,
3423 upr_message_set_vbucket_state_response,
3424 upr_message_stream_end,
3425 upr_message_marker,
3426 upr_message_mutation,
3427 upr_message_deletion,
3428 upr_message_expiration,
3429 upr_message_flush,
3430 upr_message_set_vbucket_state,
3431 upr_message_noop,
3432 upr_message_buffer_acknowledgement,
3433 upr_message_control
3434 };
3435 ENGINE_ERROR_CODE ret;
3436
3437 c->msgcurr = 0;
3438 c->msgused = 0;
3439 c->iovused = 0;
3440 if (add_msghdr(c) != 0) {
3441 if (settings.verbose) {
3442 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
3443 "%d: Failed to create output headers. Shutting down UPR connection\n", c->sfd);
3444 }
3445 conn_set_state(c, conn_closing);
3446 return ;
3447 }
3448
3449 c->wbytes = 0;
3450 c->wcurr = c->wbuf;
3451 c->icurr = c->ilist;
3452
3453 c->ewouldblock = false;
3454 ret = settings.engine.v1->upr.step(settings.engine.v0, c, &producers);
3455 if (ret == ENGINE_SUCCESS) {
3456 /* the engine don't have more data to send at this moment */
3457 c->ewouldblock = true;
3458 } else if (ret == ENGINE_WANT_MORE) {
3459 /* The engine got more data it wants to send */
3460 ret = ENGINE_SUCCESS;
3461 }
3462
3463 if (ret == ENGINE_SUCCESS) {
3464 conn_set_state(c, conn_mwrite);
3465 c->write_and_go = conn_ship_log;
3466 } else {
3467 conn_set_state(c, conn_closing);
3468 }
3469}
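/*
 * Editor's note -- ship_upr_log resets the connection's message/iovec
 * bookkeeping and then hands the engine the static table of producer
 * callbacks above; the engine's upr.step() fills the write buffer through
 * them. ENGINE_SUCCESS from step() means "nothing more to send right now",
 * so ewouldblock is set and the connection waits for the engine after the
 * write; ENGINE_WANT_MORE means the buffer was filled and another pass is
 * wanted, so it is normalized to success. Either way the connection moves
 * to conn_mwrite with write_and_go pointed back at conn_ship_log; any other
 * result closes the connection. A minimal standalone model of that decision
 * (names are illustrative):
 */
#include <stdbool.h>

typedef enum { STEP_OK, STEP_WANT_MORE, STEP_ERROR } step_rc;
typedef step_rc (*step_fn)(void *cookie);

struct ship_decision {
    bool ewouldblock;   /* park and wait for the engine after the write */
    bool close;         /* tear the connection down instead of writing  */
};

static struct ship_decision drive_once(step_fn step, void *cookie) {
    struct ship_decision d = { false, false };
    switch (step(cookie)) {
    case STEP_OK:          /* nothing more right now: write, then wait   */
        d.ewouldblock = true;
        break;
    case STEP_WANT_MORE:   /* buffer filled: write, then step again      */
        break;
    default:               /* error: close                               */
        d.close = true;
        break;
    }
    return d;
}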
3470
3471/******************************************************************************
3472 * TAP packet executors *
3473 ******************************************************************************/
3474static void tap_connect_executor(conn *c, void *packet)
3475{
3476 cb_mutex_enter(&tap_stats.mutex);
3477 tap_stats.received.connect++;
3478 cb_mutex_exit(&tap_stats.mutex);
3479 conn_set_state(c, conn_setup_tap_stream);
3480}
3481
3482static void tap_mutation_executor(conn *c, void *packet)
3483{
3484 cb_mutex_enter(&tap_stats.mutex);
3485 tap_stats.received.mutation++;
3486 cb_mutex_exit(&tap_stats.mutex);
3487 process_bin_tap_packet(TAP_MUTATION, c);
3488}
3489
3490static void tap_delete_executor(conn *c, void *packet)
3491{
3492 cb_mutex_enter(&tap_stats.mutex);
3493 tap_stats.received.delete++;
3494 cb_mutex_exit(&tap_stats.mutex);
3495 process_bin_tap_packet(TAP_DELETION, c);
3496}
3497
3498static void tap_flush_executor(conn *c, void *packet)
3499{
3500 cb_mutex_enter(&tap_stats.mutex);
3501 tap_stats.received.flush++;
3502 cb_mutex_exit(&tap_stats.mutex);
3503 process_bin_tap_packet(TAP_FLUSH, c);
3504}
3505
3506static void tap_opaque_executor(conn *c, void *packet)
3507{
3508 cb_mutex_enter(&tap_stats.mutex);
3509 tap_stats.received.opaque++;
3510 cb_mutex_exit(&tap_stats.mutex);
3511 process_bin_tap_packet(TAP_OPAQUE, c);
3512}
3513
3514static void tap_vbucket_set_executor(conn *c, void *packet)
3515{
3516 cb_mutex_enter(&tap_stats.mutex);
3517 tap_stats.received.vbucket_set++;
3518 cb_mutex_exit(&tap_stats.mutex);
3519 process_bin_tap_packet(TAP_VBUCKET_SET, c);
3520}
3521
3522static void tap_checkpoint_start_executor(conn *c, void *packet)
3523{
3524 cb_mutex_enter(&tap_stats.mutex);
3525 tap_stats.received.checkpoint_start++;
3526 cb_mutex_exit(&tap_stats.mutex);
3527 process_bin_tap_packet(TAP_CHECKPOINT_START, c);
3528}
3529
3530static void tap_checkpoint_end_executor(conn *c, void *packet)
3531{
3532 cb_mutex_enter(&tap_stats.mutex);
3533 tap_stats.received.checkpoint_end++;
3534 cb_mutex_exit(&tap_stats.mutex);
3535 process_bin_tap_packet(TAP_CHECKPOINT_END, c);
3536}
3537
3538/*******************************************************************************
3539 * UPR packet validators *
3540 ******************************************************************************/
3541static int upr_open_validator(void *packet)
3542{
3543 protocol_binary_request_upr_open *req = packet;
3544 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3545 req->message.header.request.extlen != 8 ||
3546 req->message.header.request.keylen == 0 ||
3547 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3548 /* INCORRECT FORMAT */
3549 return -1;
3550 }
3551
3552 return 0;
3553}
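/*
 * Editor's note -- the UPR packet validators in this section all check the
 * same handful of fixed-header invariants: request magic, the expected
 * extras length, the key length, the total body length (compared in network
 * byte order whenever a literal is involved), and the raw-bytes datatype.
 * The most common shape (fixed extras, no key, body carrying exactly the
 * extras) is sketched below, self-contained and using a hypothetical mirror
 * of the binary request header:
 */
#include <arpa/inet.h>
#include <stdint.h>

#define SKETCH_REQ_MAGIC 0x80   /* stand-in for PROTOCOL_BINARY_REQ       */
#define SKETCH_RAW_BYTES 0x00   /* stand-in for PROTOCOL_BINARY_RAW_BYTES */

typedef struct {
    uint8_t  magic, opcode;
    uint16_t keylen;             /* network order */
    uint8_t  extlen, datatype;
    uint16_t vbucket;            /* network order */
    uint32_t bodylen;            /* network order */
    uint32_t opaque;
    uint64_t cas;
} sketch_req_header;

/* Returns 0 when the frame is well formed for a command whose body is
 * exactly `extlen` bytes of extras and nothing else. */
static int sketch_validate_fixed(const sketch_req_header *h, uint8_t extlen) {
    if (h->magic != SKETCH_REQ_MAGIC ||
        h->extlen != extlen ||
        h->keylen != 0 ||
        h->bodylen != htonl(extlen) ||
        h->datatype != SKETCH_RAW_BYTES) {
        return -1;
    }
    return 0;
}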
3554
3555static int upr_add_stream_validator(void *packet)
3556{
3557 protocol_binary_request_upr_add_stream *req = packet;
3558 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3559 req->message.header.request.extlen != 4 ||
3560 req->message.header.request.keylen != 0 ||
3561 req->message.header.request.bodylen != htonl(4) ||
3562 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3563 /* INCORRECT FORMAT */
3564 return -1;
3565 }
3566
3567 return 0;
3568}
3569
3570static int upr_close_stream_validator(void *packet)
3571{
3572 protocol_binary_request_upr_close_stream *req = packet;
3573 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3574 req->message.header.request.extlen != 0 ||
3575 req->message.header.request.keylen != 0 ||
3576 req->message.header.request.bodylen != 0 ||
3577 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3578 /* INCORRECT FORMAT */
3579 return -1;
3580 }
3581
3582 return 0;
3583}
3584
3585static int upr_get_failover_log_validator(void *packet)
3586{
3587 protocol_binary_request_upr_get_failover_log *req = packet;
3588 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3589 req->message.header.request.extlen != 0 ||
3590 req->message.header.request.keylen != 0 ||
3591 req->message.header.request.bodylen != 0 ||
3592 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3593 return -1;
3594 }
3595
3596 return 0;
3597}
3598
3599static int upr_stream_req_validator(void *packet)
3600{
3601 protocol_binary_request_upr_stream_req *req = packet;
3602 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3603 req->message.header.request.extlen != 5*sizeof(uint64_t) + 2*sizeof(uint32_t) ||
3604 req->message.header.request.keylen != 0 ||
3605 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3606 /* INCORRECT FORMAT */
3607 return -1;
3608 }
3609 return 0;
3610}
3611
3612static int upr_stream_end_validator(void *packet)
3613{
3614 protocol_binary_request_upr_stream_end *req = packet;
3615 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3616 req->message.header.request.extlen != 4 ||
3617 req->message.header.request.keylen != 0 ||
3618 req->message.header.request.bodylen != htonl(4) ||
3619 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3620 return -1;
3621 }
3622
3623 return 0;
3624}
3625
3626static int upr_snapshot_marker_validator(void *packet)
3627{
3628 protocol_binary_request_upr_snapshot_marker *req = packet;
3629 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3630 req->message.header.request.extlen != 20 ||
3631 req->message.header.request.keylen != 0 ||
3632 req->message.header.request.bodylen != htonl(20) ||
3633 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3634 abort();
3635 return -1;
3636 }
3637
3638 return 0;
3639}
3640
3641static int upr_mutation_validator(void *packet)
3642{
3643 protocol_binary_request_upr_mutation *req = packet;
3644 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3645 req->message.header.request.extlen != (2*sizeof(uint64_t) + 3 * sizeof(uint32_t) + sizeof(uint16_t)) + sizeof(uint8_t) ||
3646 req->message.header.request.keylen == 0 ||
3647 req->message.header.request.bodylen == 0) {
3648 return -1;
3649 }
3650
3651 return 0;
3652}
3653
3654static int upr_deletion_validator(void *packet)
3655{
3656 protocol_binary_request_upr_deletion *req = packet;
3657 uint16_t klen = ntohs(req->message.header.request.keylen);
3658 uint32_t bodylen = ntohl(req->message.header.request.bodylen) - klen;
3659 bodylen -= req->message.header.request.extlen;
3660
3661 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3662 req->message.header.request.extlen != (2*sizeof(uint64_t) + sizeof(uint16_t)) ||
3663 req->message.header.request.keylen == 0 ||
3664 bodylen != 0) {
3665 return -1;
3666 }
3667
3668 return 0;
3669}
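/*
 * Editor's note -- upr_deletion_validator (and upr_expiration_validator
 * below) work backwards from the wire bodylen: after subtracting the key
 * length and the declared extras length, nothing may be left over, i.e. a
 * well-formed frame carries exactly extras + key and no trailing value or
 * meta bytes. Worked example: with the 18-byte extras (2 * uint64_t +
 * uint16_t) and a 5-byte key, the bodylen on the wire must decode to 23,
 * since 23 - 5 - 18 == 0; a frame advertising, say, bodylen 27 leaves a
 * remainder of 4 and is rejected with -1.
 */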
3670
3671static int upr_expiration_validator(void *packet)
3672{
3673 protocol_binary_request_upr_deletion *req = packet;
3674 uint16_t klen = ntohs(req->message.header.request.keylen);
3675 uint32_t bodylen = ntohl(req->message.header.request.bodylen) - klen;
3676 bodylen -= req->message.header.request.extlen;
3677 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3678 req->message.header.request.extlen != (2*sizeof(uint64_t) + sizeof(uint16_t)) ||
3679 req->message.header.request.keylen == 0 ||
3680 bodylen != 0) {
3681 return -1;
3682 }
3683
3684 return 0;
3685}
3686
3687static int upr_flush_validator(void *packet)
3688{
3689 protocol_binary_request_upr_flush *req = packet;
3690 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3691 req->message.header.request.extlen != 0 ||
3692 req->message.header.request.keylen != 0 ||
3693 req->message.header.request.bodylen != 0 ||
3694 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3695 return -1;
3696 }
3697
3698 return 0;
3699}
3700
3701static int upr_set_vbucket_state_validator(void *packet)
3702{
3703 protocol_binary_request_upr_set_vbucket_state *req = packet;
3704 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3705 req->message.header.request.extlen != 1 ||
3706 req->message.header.request.keylen != 0 ||
3707 ntohl(req->message.header.request.bodylen) != 1 ||
3708 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3709 return -1;
3710 }
3711
3712 if (req->message.body.state < 1 || req->message.body.state > 4) {
3713 return -1;
3714 }
3715
3716 return 0;
3717}
3718
3719static int upr_noop_validator(void *packet)
3720{
3721 protocol_binary_request_upr_noop *req = packet;
3722 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3723 req->message.header.request.extlen != 0 ||
3724 req->message.header.request.keylen != 0 ||
3725 req->message.header.request.bodylen != 0 ||
3726 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3727 return -1;
3728 }
3729
3730 return 0;
3731}
3732
3733static int upr_buffer_acknowledgement_validator(void *packet)
3734{
3735 protocol_binary_request_upr_buffer_acknowledgement *req = packet;
3736 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3737 req->message.header.request.extlen != 0 ||
3738 req->message.header.request.keylen != 0 ||
3739 req->message.header.request.bodylen != ntohl(4) ||
3740 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3741 return -1;
3742 }
3743
3744 return 0;
3745}
3746
3747static int upr_control_validator(void *packet)
3748{
3749 protocol_binary_request_upr_control *req = packet;
3750 uint16_t nkey = ntohs(req->message.header.request.keylen);
3751 uint32_t nval = ntohl(req->message.header.request.bodylen) - nkey;
3752
3753 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3754 req->message.header.request.extlen != 0 || nkey == 0 || nval == 0 ||
3755 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3756 return -1;
3757 }
3758
3759 return 0;
3760}
3761
3762static int isasl_refresh_validator(void *packet)
3763{
3764 protocol_binary_request_no_extras *req = packet;
3765 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3766 req->message.header.request.extlen != 0 ||
3767 req->message.header.request.keylen != 0 ||
3768 req->message.header.request.bodylen != 0 ||
3769 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3770 return -1;
3771 }
3772
3773 return 0;
3774}
3775
3776static int ssl_certs_refresh_validator(void *packet)
3777{
3778 protocol_binary_request_no_extras *req = packet;
3779 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3780 req->message.header.request.extlen != 0 ||
3781 req->message.header.request.keylen != 0 ||
3782 req->message.header.request.bodylen != 0 ||
3783 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3784 return -1;
3785 }
3786
3787 return 0;
3788}
3789
3790static int verbosity_validator(void *packet)
3791{
3792 protocol_binary_request_no_extras *req = packet;
3793 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3794 req->message.header.request.extlen != 4 ||
3795 req->message.header.request.keylen != 0 ||
3796 ntohl(req->message.header.request.bodylen) != 4 ||
3797 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3798 return -1;
3799 }
3800
3801 return 0;
3802}
3803
3804static int hello_validator(void *packet)
3805{
3806 protocol_binary_request_no_extras *req = packet;
3807 uint32_t len = ntohl(req->message.header.request.bodylen);
3808 len -= ntohs(req->message.header.request.keylen);
3809
3810 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3811 req->message.header.request.extlen != 0 || (len % 2) != 0 ||
3812 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3813 return -1;
3814 }
3815
3816 return 0;
3817}
3818
3819static int version_validator(void *packet)
3820{
3821 protocol_binary_request_no_extras *req = packet;
3822
3823 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3824 req->message.header.request.extlen != 0 ||
3825 req->message.header.request.keylen != 0 ||
3826 req->message.header.request.bodylen != 0 ||
3827 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3828 return -1;
3829 }
3830
3831 return 0;
3832}
3833
3834static int quit_validator(void *packet)
3835{
3836 protocol_binary_request_no_extras *req = packet;
3837
3838 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3839 req->message.header.request.extlen != 0 ||
3840 req->message.header.request.keylen != 0 ||
3841 req->message.header.request.bodylen != 0 ||
3842 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3843 return -1;
3844 }
3845
3846 return 0;
3847}
3848
3849static int sasl_list_mech_validator(void *packet)
3850{
3851 protocol_binary_request_no_extras *req = packet;
3852
3853 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3854 req->message.header.request.extlen != 0 ||
3855 req->message.header.request.keylen != 0 ||
3856 req->message.header.request.bodylen != 0 ||
3857 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3858 return -1;
3859 }
3860
3861 return 0;
3862}
3863
3864static int noop_validator(void *packet)
3865{
3866 protocol_binary_request_no_extras *req = packet;
3867
3868 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3869 req->message.header.request.extlen != 0 ||
3870 req->message.header.request.keylen != 0 ||
3871 req->message.header.request.bodylen != 0 ||
3872 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3873 return -1;
3874 }
3875
3876 return 0;
3877}
3878
3879static int flush_validator(void *packet)
3880{
3881 protocol_binary_request_no_extras *req = packet;
3882 uint8_t extlen = req->message.header.request.extlen;
3883 uint32_t bodylen = ntohl(req->message.header.request.bodylen);
3884
3885 if (extlen != 0 && extlen != 4) {
3886 return -1;
3887 }
3888
3889 if (bodylen != extlen) {
3890 return -1;
3891 }
3892
3893 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3894 req->message.header.request.keylen != 0 ||
3895 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3896 return -1;
3897 }
3898
3899 return 0;
3900}
3901
3902static int get_validator(void *packet)
3903{
3904 protocol_binary_request_no_extras *req = packet;
3905 uint16_t klen = ntohs(req->message.header.request.keylen);
3906 uint32_t blen = ntohl(req->message.header.request.bodylen);
3907
3908 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3909 req->message.header.request.extlen != 0 ||
3910 klen == 0 || klen != blen ||
3911 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3912 return -1;
3913 }
3914
3915 return 0;
3916}
3917
3918static int delete_validator(void *packet)
3919{
3920 protocol_binary_request_no_extras *req = packet;
3921 uint16_t klen = ntohs(req->message.header.request.keylen);
3922 uint32_t blen = ntohl(req->message.header.request.bodylen);
3923
3924 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3925 req->message.header.request.extlen != 0 ||
3926 klen == 0 || klen != blen ||
3927 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3928 return -1;
3929 }
3930
3931 return 0;
3932}
3933
3934static int stat_validator(void *packet)
3935{
3936 protocol_binary_request_no_extras *req = packet;
3937 uint16_t klen = ntohs(req->message.header.request.keylen);
3938 uint32_t blen = ntohl(req->message.header.request.bodylen);
3939
3940 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3941 req->message.header.request.extlen != 0 || klen != blen ||
3942 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3943 return -1;
3944 }
3945
3946 return 0;
3947}
3948
3949static int arithmetic_validator(void *packet)
3950{
3951 protocol_binary_request_no_extras *req = packet;
3952 uint16_t klen = ntohs(req->message.header.request.keylen);
3953 uint32_t blen = ntohl(req->message.header.request.bodylen);
3954 uint8_t extlen = req->message.header.request.extlen;
3955
3956 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3957 extlen != 20 || klen == 0 || (klen + extlen) != blen ||
3958 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3959 return -1;
3960 }
3961
3962 return 0;
3963}
3964
3965static int get_cmd_timer_validator(void *packet)
3966{
3967 protocol_binary_request_no_extras *req = packet;
3968 uint16_t klen = ntohs(req->message.header.request.keylen);
3969 uint32_t blen = ntohl(req->message.header.request.bodylen);
3970 uint8_t extlen = req->message.header.request.extlen;
3971
3972 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3973 extlen != 1 || klen != 0 || (klen + extlen) != blen ||
3974 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3975 return -1;
3976 }
3977
3978 return 0;
3979}
3980
3981static int set_ctrl_token_validator(void *packet)
3982{
3983 protocol_binary_request_no_extras *req = packet;
3984 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3985 req->message.header.request.extlen != sizeof(uint64_t) ||
3986 req->message.header.request.keylen != 0 ||
3987 ntohl(req->message.header.request.bodylen) != sizeof(uint64_t) ||
3988 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
3989 return -1;
3990 }
3991
3992 return 0;
3993}
3994
3995static int get_ctrl_token_validator(void *packet)
3996{
3997 protocol_binary_request_no_extras *req = packet;
3998 if (req->message.header.request.magic != PROTOCOL_BINARY_REQ ||
3999 req->message.header.request.extlen != 0 ||
4000 req->message.header.request.keylen != 0 ||
4001 req->message.header.request.bodylen != 0 ||
4002 req->message.header.request.datatype != PROTOCOL_BINARY_RAW_BYTES) {
4003 return -1;
4004 }
4005
4006 return 0;
4007}
4008
4009/*******************************************************************************
4010 * UPR packet executors *
4011 ******************************************************************************/
4012static void upr_open_executor(conn *c, void *packet)
4013{
4014 protocol_binary_request_upr_open *req = (void*)packet;
4015
4016 if (settings.engine.v1->upr.open == NULL) {
4017 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4018 } else {
4019 ENGINE_ERROR_CODE ret = c->aiostat;
4020 c->aiostat = ENGINE_SUCCESS;
4021 c->ewouldblock = false;
4022 c->supports_datatype = true;
4023
4024 if (ret == ENGINE_SUCCESS) {
4025 ret = settings.engine.v1->upr.open(settings.engine.v0, c,
4026 req->message.header.request.opaque,
4027 ntohl(req->message.body.seqno),
4028 ntohl(req->message.body.flags),
4029 (void*)(req->bytes + sizeof(req->bytes)),
4030 ntohs(req->message.header.request.keylen));
4031 }
4032
4033 switch (ret) {
4034 case ENGINE_SUCCESS:
4035 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4036 break;
4037
4038 case ENGINE_DISCONNECT:
4039 conn_set_state(c, conn_closing);
4040 break;
4041
4042 case ENGINE_EWOULDBLOCK:
4043 c->ewouldblock = true;
4044 break;
4045
4046 default:
4047 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4048 }
4049 }
4050}
4051
4052static void upr_add_stream_executor(conn *c, void *packet)
4053{
4054 protocol_binary_request_upr_add_stream *req = (void*)packet;
4055
4056 if (settings.engine.v1->upr.add_stream == NULL) {
4057 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4058 } else {
4059 ENGINE_ERROR_CODE ret = c->aiostat;
4060 c->aiostat = ENGINE_SUCCESS;
4061 c->ewouldblock = false;
4062
4063 if (ret == ENGINE_SUCCESS) {
4064 ret = settings.engine.v1->upr.add_stream(settings.engine.v0, c,
4065 req->message.header.request.opaque,
4066 ntohs(req->message.header.request.vbucket),
4067 ntohl(req->message.body.flags));
4068 }
4069
4070 switch (ret) {
4071 case ENGINE_SUCCESS:
4072 c->upr = 1;
4073 conn_set_state(c, conn_ship_log);
4074 break;
4075 case ENGINE_DISCONNECT:
4076 conn_set_state(c, conn_closing);
4077 break;
4078
4079 case ENGINE_EWOULDBLOCK:
4080 c->ewouldblock = true;
4081 break;
4082
4083 default:
4084 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4085 }
4086 }
4087}
4088
4089static void upr_close_stream_executor(conn *c, void *packet)
4090{
4091 protocol_binary_request_upr_close_stream *req = (void*)packet;
4092
4093 if (settings.engine.v1->upr.close_stream == NULL) {
4094 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4095 } else {
4096 ENGINE_ERROR_CODE ret = c->aiostat;
4097 c->aiostat = ENGINE_SUCCESS;
4098 c->ewouldblock = false;
4099
4100 if (ret == ENGINE_SUCCESS) {
4101 uint16_t vbucket = ntohs(req->message.header.request.vbucket);
4102 uint32_t opaque = ntohl(req->message.header.request.opaque);
4103 ret = settings.engine.v1->upr.close_stream(settings.engine.v0, c,
4104 opaque, vbucket);
4105 }
4106
4107 switch (ret) {
4108 case ENGINE_SUCCESS:
4109 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4110 break;
4111
4112 case ENGINE_DISCONNECT:
4113 conn_set_state(c, conn_closing);
4114 break;
4115
4116 case ENGINE_EWOULDBLOCK:
4117 c->ewouldblock = true;
4118 break;
4119
4120 default:
4121 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4122 }
4123 }
4124}
4125
4126/** Callback from the engine adding the response */
4127static ENGINE_ERROR_CODE add_failover_log(vbucket_failover_t*entries,
4128 size_t nentries,
4129 const void *cookie)
4130{
4131 ENGINE_ERROR_CODE ret;
4132 size_t ii;
4133 for (ii = 0; ii < nentries; ++ii) {
4134 entries[ii].uuid = htonll(entries[ii].uuid);
4135 entries[ii].seqno = htonll(entries[ii].seqno);
4136 }
4137
4138 if (binary_response_handler(NULL, 0, NULL, 0, entries,
4139 (uint32_t)(nentries * sizeof(vbucket_failover_t)), 0,
4140 PROTOCOL_BINARY_RESPONSE_SUCCESS, 0,
4141 (void*)cookie)) {
4142 ret = ENGINE_SUCCESS;
4143 } else {
4144 ret = ENGINE_ENOMEM;
4145 }
4146
4147 for (ii = 0; ii < nentries; ++ii) {
4148 entries[ii].uuid = htonll(entries[ii].uuid);
4149 entries[ii].seqno = htonll(entries[ii].seqno);
4150 }
4151
4152 return ret;
4153}
4154
4155static void upr_get_failover_log_executor(conn *c, void *packet) {
4156 protocol_binary_request_upr_get_failover_log *req = (void*)packet;
4157
4158 if (settings.engine.v1->upr.get_failover_log == NULL) {
4159 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4160 } else {
4161 ENGINE_ERROR_CODE ret = c->aiostat;
4162 c->aiostat = ENGINE_SUCCESS;
4163 c->ewouldblock = false;
4164
4165 if (ret == ENGINE_SUCCESS) {
4166 ret = settings.engine.v1->upr.get_failover_log(settings.engine.v0, c,
4167 req->message.header.request.opaque,
4168 ntohs(req->message.header.request.vbucket),
4169 add_failover_log);
4170 }
4171
4172 switch (ret) {
4173 case ENGINE_SUCCESS:
4174 if (c->dynamic_buffer.buffer != NULL) {
4175 write_and_free(c, c->dynamic_buffer.buffer,
4176 c->dynamic_buffer.offset);
4177 c->dynamic_buffer.buffer = NULL;
4178 } else {
4179 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4180 }
4181 break;
4182
4183 case ENGINE_DISCONNECT:
4184 conn_set_state(c, conn_closing);
4185 break;
4186
4187 case ENGINE_EWOULDBLOCK:
4188 c->ewouldblock = true;
4189 break;
4190
4191 default:
4192 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4193 }
4194 }
4195}
4196
4197static void upr_stream_req_executor(conn *c, void *packet)
4198{
4199 protocol_binary_request_upr_stream_req *req = (void*)packet;
4200
4201 if (settings.engine.v1->upr.stream_req == NULL) {
1. Taking false branch
4202 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4203 } else {
4204 uint32_t flags = ntohl(req->message.body.flags);
4205 uint64_t start_seqno = ntohll(req->message.body.start_seqno);
4206 uint64_t end_seqno = ntohll(req->message.body.end_seqno);
4207 uint64_t vbucket_uuid = ntohll(req->message.body.vbucket_uuid);
4208 uint64_t snap_start_seqno = ntohll(req->message.body.snap_start_seqno);
4209 uint64_t snap_end_seqno = ntohll(req->message.body.snap_end_seqno);
4210 uint64_t rollback_seqno;
2. 'rollback_seqno' declared without an initial value
4211
4212 ENGINE_ERROR_CODE ret = c->aiostat;
4213 c->aiostat = ENGINE_SUCCESS;
4214 c->ewouldblock = false0;
4215
4216 if (ret == ENGINE_SUCCESS) {
3. Assuming 'ret' is not equal to ENGINE_SUCCESS
4. Taking false branch
4217 ret = settings.engine.v1->upr.stream_req(settings.engine.v0, c,
4218 flags,
4219 c->binary_header.request.opaque,
4220 c->binary_header.request.vbucket,
4221 start_seqno, end_seqno,
4222 vbucket_uuid,
4223 snap_start_seqno,
4224 snap_end_seqno,
4225 &rollback_seqno,
4226 add_failover_log);
4227 }
4228
4229 switch (ret) {
5. Control jumps to 'case ENGINE_ROLLBACK:' at line 4241
4230 case ENGINE_SUCCESS:
4231 c->upr = 1;
4232 if (c->dynamic_buffer.buffer != NULL) {
4233 write_and_free(c, c->dynamic_buffer.buffer,
4234 c->dynamic_buffer.offset);
4235 c->dynamic_buffer.buffer = NULL;
4236 } else {
4237 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4238 }
4239 break;
4240
4241 case ENGINE_ROLLBACK:
4242 rollback_seqno = htonll(rollback_seqno);
6. Function call argument is an uninitialized value
4243 if (binary_response_handler(NULL, 0, NULL, 0, &rollback_seqno,
4244 sizeof(rollback_seqno), 0,
4245 PROTOCOL_BINARY_RESPONSE_ROLLBACK, 0,
4246 c)) {
4247 write_and_free(c, c->dynamic_buffer.buffer,
4248 c->dynamic_buffer.offset);
4249 c->dynamic_buffer.buffer = NULL;
4250 } else {
4251 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, 0);
4252 }
4253 break;
4254
4255 case ENGINE_DISCONNECT:
4256 conn_set_state(c, conn_closing);
4257 break;
4258
4259 case ENGINE_EWOULDBLOCK:
4260 c->ewouldblock = true;
4261 break;
4262
4263 default:
4264 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4265 }
4266 }
4267}
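
The path above (steps 2-6) is the defect named in the bug summary: when c->aiostat already carries a non-success status such as ENGINE_ROLLBACK, the engine's stream_req callback at line 4217 is skipped, yet the ENGINE_ROLLBACK case at line 4241 still byte-swaps rollback_seqno at line 4242 and transmits it, even though the variable was never assigned. A minimal sketch of one possible remedy (an assumption for illustration, not the upstream patch) is to give the local a defined value at its declaration so the htonll() call and the response handler never read indeterminate stack memory:

    /* Hypothetical fix sketch: initialize the local so the
     * ENGINE_ROLLBACK path is well-defined even when the engine
     * callback was skipped because ret != ENGINE_SUCCESS. */
    uint64_t rollback_seqno = 0;

An alternative would be to restrict the ENGINE_ROLLBACK handling to the case where the engine callback actually ran, but simple initialization is the smallest change that silences the uninitialized-read.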
4268
4269static void upr_stream_end_executor(conn *c, void *packet)
4270{
4271 protocol_binary_request_upr_stream_end *req = (void*)packet;
4272
4273 if (settings.engine.v1->upr.stream_end == NULL) {
4274 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4275 } else {
4276 ENGINE_ERROR_CODE ret = c->aiostat;
4277 c->aiostat = ENGINE_SUCCESS;
4278 c->ewouldblock = false;
4279
4280 if (ret == ENGINE_SUCCESS) {
4281 ret = settings.engine.v1->upr.stream_end(settings.engine.v0, c,
4282 req->message.header.request.opaque,
4283 ntohs(req->message.header.request.vbucket),
4284 ntohl(req->message.body.flags));
4285 }
4286
4287 switch (ret) {
4288 case ENGINE_SUCCESS:
4289 if (c->dynamic_buffer.buffer != NULL) {
4290 write_and_free(c, c->dynamic_buffer.buffer,
4291 c->dynamic_buffer.offset);
4292 c->dynamic_buffer.buffer = NULL;
4293 } else {
4294 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4295 }
4296 break;
4297
4298 case ENGINE_DISCONNECT:
4299 conn_set_state(c, conn_closing);
4300 break;
4301
4302 case ENGINE_EWOULDBLOCK:
4303 c->ewouldblock = true;
4304 break;
4305
4306 default:
4307 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4308 }
4309 }
4310}
4311
4312static void upr_snapshot_marker_executor(conn *c, void *packet)
4313{
4314 protocol_binary_request_upr_snapshot_marker *req = (void*)packet;
4315
4316 if (settings.engine.v1->upr.snapshot_marker == NULL) {
4317 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4318 } else {
4319 uint16_t vbucket = ntohs(req->message.header.request.vbucket);
4320 uint32_t opaque = req->message.header.request.opaque;
4321 uint32_t flags = ntohl(req->message.body.flags);
4322 uint64_t start_seqno = ntohll(req->message.body.start_seqno);
4323 uint64_t end_seqno = ntohll(req->message.body.end_seqno);
4324
4325 ENGINE_ERROR_CODE ret = c->aiostat;
4326 c->aiostat = ENGINE_SUCCESS;
4327 c->ewouldblock = false;
4328
4329 if (ret == ENGINE_SUCCESS) {
4330 ret = settings.engine.v1->upr.snapshot_marker(settings.engine.v0, c,
4331 opaque, vbucket,
4332 start_seqno,
4333 end_seqno, flags);
4334 }
4335
4336 switch (ret) {
4337 case ENGINE_SUCCESS:
4338 if (c->dynamic_buffer.buffer != NULL) {
4339 write_and_free(c, c->dynamic_buffer.buffer,
4340 c->dynamic_buffer.offset);
4341 c->dynamic_buffer.buffer = NULL;
4342 } else {
4343 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4344 }
4345 break;
4346
4347 case ENGINE_DISCONNECT:
4348 conn_set_state(c, conn_closing);
4349 break;
4350
4351 case ENGINE_EWOULDBLOCK:
4352 c->ewouldblock = true;
4353 break;
4354
4355 default:
4356 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4357 }
4358 }
4359}
4360
4361static void upr_mutation_executor(conn *c, void *packet)
4362{
4363 protocol_binary_request_upr_mutation *req = (void*)packet;
4364
4365 if (settings.engine.v1->upr.mutation == NULL) {
4366 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4367 } else {
4368 ENGINE_ERROR_CODE ret = c->aiostat;
4369 c->aiostat = ENGINE_SUCCESS;
4370 c->ewouldblock = false;
4371
4372 if (ret == ENGINE_SUCCESS) {
4373 char *key = (char*)packet + sizeof(req->bytes);
4374 uint16_t nkey = ntohs(req->message.header.request.keylen);
4375 void *value = key + nkey;
4376 uint64_t cas = ntohll(req->message.header.request.cas);
4377 uint16_t vbucket = ntohs(req->message.header.request.vbucket);
4378 uint32_t flags = ntohl(req->message.body.flags);
4379 uint8_t datatype = req->message.header.request.datatype;
4380 uint64_t by_seqno = ntohll(req->message.body.by_seqno);
4381 uint64_t rev_seqno = ntohll(req->message.body.rev_seqno);
4382 uint32_t expiration = ntohl(req->message.body.expiration);
4383 uint32_t lock_time = ntohl(req->message.body.lock_time);
4384 uint16_t nmeta = ntohs(req->message.body.nmeta);
4385 uint32_t nvalue = ntohl(req->message.header.request.bodylen) - nkey
4386 - req->message.header.request.extlen - nmeta;
4387
4388 ret = settings.engine.v1->upr.mutation(settings.engine.v0, c,
4389 req->message.header.request.opaque,
4390 key, nkey, value, nvalue, cas, vbucket,
4391 flags, datatype, by_seqno, rev_seqno,
4392 expiration, lock_time,
4393 (char*)value + nvalue, nmeta,
4394 req->message.body.nru);
4395 }
4396
4397 switch (ret) {
4398 case ENGINE_SUCCESS:
4399 if (c->dynamic_buffer.buffer != NULL) {
4400 write_and_free(c, c->dynamic_buffer.buffer,
4401 c->dynamic_buffer.offset);
4402 c->dynamic_buffer.buffer = NULL;
4403 } else {
4404 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4405 }
4406 break;
4407
4408 case ENGINE_DISCONNECT:
4409 conn_set_state(c, conn_closing);
4410 break;
4411
4412 case ENGINE_EWOULDBLOCK:
4413 c->ewouldblock = true;
4414 break;
4415
4416 default:
4417 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4418 }
4419 }
4420}
4421
4422static void upr_deletion_executor(conn *c, void *packet)
4423{
4424 protocol_binary_request_upr_deletion *req = (void*)packet;
4425
4426 if (settings.engine.v1->upr.deletion == NULL) {
4427 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4428 } else {
4429 ENGINE_ERROR_CODE ret = c->aiostat;
4430 c->aiostat = ENGINE_SUCCESS;
4431 c->ewouldblock = false;
4432
4433 if (ret == ENGINE_SUCCESS) {
4434 char *key = (char*)packet + sizeof(req->bytes);
4435 uint16_t nkey = ntohs(req->message.header.request.keylen);
4436 uint64_t cas = ntohll(req->message.header.request.cas);
4437 uint16_t vbucket = ntohs(req->message.header.request.vbucket);
4438 uint64_t by_seqno = ntohll(req->message.body.by_seqno);
4439 uint64_t rev_seqno = ntohll(req->message.body.rev_seqno);
4440 uint16_t nmeta = ntohs(req->message.body.nmeta);
4441
4442 ret = settings.engine.v1->upr.deletion(settings.engine.v0, c,
4443 req->message.header.request.opaque,
4444 key, nkey, cas, vbucket,
4445 by_seqno, rev_seqno, key + nkey, nmeta);
4446 }
4447
4448 switch (ret) {
4449 case ENGINE_SUCCESS:
4450 if (c->dynamic_buffer.buffer != NULL) {
4451 write_and_free(c, c->dynamic_buffer.buffer,
4452 c->dynamic_buffer.offset);
4453 c->dynamic_buffer.buffer = NULL;
4454 } else {
4455 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4456 }
4457 break;
4458
4459 case ENGINE_DISCONNECT:
4460 conn_set_state(c, conn_closing);
4461 break;
4462
4463 case ENGINE_EWOULDBLOCK:
4464 c->ewouldblock = true;
4465 break;
4466
4467 default:
4468 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4469 }
4470 }
4471}
4472
4473static void upr_expiration_executor(conn *c, void *packet)
4474{
4475 protocol_binary_request_upr_expiration *req = (void*)packet;
4476
4477 if (settings.engine.v1->upr.expiration == NULL) {
4478 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4479 } else {
4480 ENGINE_ERROR_CODE ret = c->aiostat;
4481 c->aiostat = ENGINE_SUCCESS;
4482 c->ewouldblock = false;
4483
4484 if (ret == ENGINE_SUCCESS) {
4485 char *key = (char*)packet + sizeof(req->bytes);
4486 uint16_t nkey = ntohs(req->message.header.request.keylen);
4487 uint64_t cas = ntohll(req->message.header.request.cas);
4488 uint16_t vbucket = ntohs(req->message.header.request.vbucket);
4489 uint64_t by_seqno = ntohll(req->message.body.by_seqno);
4490 uint64_t rev_seqno = ntohll(req->message.body.rev_seqno);
4491 uint16_t nmeta = ntohs(req->message.body.nmeta);
4492
4493 ret = settings.engine.v1->upr.expiration(settings.engine.v0, c,
4494 req->message.header.request.opaque,
4495 key, nkey, cas, vbucket,
4496 by_seqno, rev_seqno, key + nkey, nmeta);
4497 }
4498
4499 switch (ret) {
4500 case ENGINE_SUCCESS:
4501 if (c->dynamic_buffer.buffer != NULL) {
4502 write_and_free(c, c->dynamic_buffer.buffer,
4503 c->dynamic_buffer.offset);
4504 c->dynamic_buffer.buffer = NULL;
4505 } else {
4506 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4507 }
4508 break;
4509
4510 case ENGINE_DISCONNECT:
4511 conn_set_state(c, conn_closing);
4512 break;
4513
4514 case ENGINE_EWOULDBLOCK:
4515 c->ewouldblock = true;
4516 break;
4517
4518 default:
4519 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4520 }
4521 }
4522}
4523
4524static void upr_flush_executor(conn *c, void *packet)
4525{
4526 protocol_binary_request_upr_flush *req = (void*)packet;
4527
4528 if (settings.engine.v1->upr.flush == NULL) {
4529 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4530 } else {
4531 ENGINE_ERROR_CODE ret = c->aiostat;
4532 c->aiostat = ENGINE_SUCCESS;
4533 c->ewouldblock = false;
4534
4535 if (ret == ENGINE_SUCCESS) {
4536 ret = settings.engine.v1->upr.flush(settings.engine.v0, c,
4537 req->message.header.request.opaque,
4538 ntohs(req->message.header.request.vbucket));
4539 }
4540
4541 switch (ret) {
4542 case ENGINE_SUCCESS:
4543 if (c->dynamic_buffer.buffer != NULL) {
4544 write_and_free(c, c->dynamic_buffer.buffer,
4545 c->dynamic_buffer.offset);
4546 c->dynamic_buffer.buffer = NULL;
4547 } else {
4548 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4549 }
4550 break;
4551
4552 case ENGINE_DISCONNECT:
4553 conn_set_state(c, conn_closing);
4554 break;
4555
4556 case ENGINE_EWOULDBLOCK:
4557 c->ewouldblock = true;
4558 break;
4559
4560 default:
4561 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4562 }
4563 }
4564}
4565
4566static void upr_set_vbucket_state_executor(conn *c, void *packet)
4567{
4568 protocol_binary_request_upr_set_vbucket_state *req = (void*)packet;
4569
4570 if (settings.engine.v1->upr.set_vbucket_state == NULL) {
4571 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4572 } else {
4573 ENGINE_ERROR_CODE ret = c->aiostat;
4574 c->aiostat = ENGINE_SUCCESS;
4575 c->ewouldblock = false;
4576
4577 if (ret == ENGINE_SUCCESS) {
4578 vbucket_state_t state = (vbucket_state_t)req->message.body.state;
4579 ret = settings.engine.v1->upr.set_vbucket_state(settings.engine.v0, c,
4580 c->binary_header.request.opaque,
4581 c->binary_header.request.vbucket,
4582 state);
4583 }
4584
4585 switch (ret) {
4586 case ENGINE_SUCCESS:
4587 conn_set_state(c, conn_ship_log);
4588 break;
4589 case ENGINE_DISCONNECT:
4590 conn_set_state(c, conn_closing);
4591 break;
4592
4593 case ENGINE_EWOULDBLOCK:
4594 c->ewouldblock = true;
4595 break;
4596
4597 default:
4598 conn_set_state(c, conn_closing);
4599 break;
4600 }
4601 }
4602}
4603
4604static void upr_noop_executor(conn *c, void *packet)
4605{
4606 if (settings.engine.v1->upr.noop == NULL) {
4607 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4608 } else {
4609 ENGINE_ERROR_CODE ret = c->aiostat;
4610 c->aiostat = ENGINE_SUCCESS;
4611 c->ewouldblock = false;
4612
4613 if (ret == ENGINE_SUCCESS) {
4614 ret = settings.engine.v1->upr.noop(settings.engine.v0, c,
4615 c->binary_header.request.opaque);
4616 }
4617
4618 switch (ret) {
4619 case ENGINE_SUCCESS:
4620 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4621 break;
4622
4623 case ENGINE_DISCONNECT:
4624 conn_set_state(c, conn_closing);
4625 break;
4626
4627 case ENGINE_EWOULDBLOCK:
4628 c->ewouldblock = true;
4629 break;
4630
4631 default:
4632 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4633 }
4634 }
4635}
4636
4637static void upr_buffer_acknowledgement_executor(conn *c, void *packet)
4638{
4639 protocol_binary_request_upr_buffer_acknowledgement *req = (void*)packet;
4640
4641 if (settings.engine.v1->upr.buffer_acknowledgement == NULL) {
4642 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4643 } else {
4644 ENGINE_ERROR_CODE ret = c->aiostat;
4645 c->aiostat = ENGINE_SUCCESS;
4646 c->ewouldblock = false;
4647
4648 if (ret == ENGINE_SUCCESS) {
4649 uint32_t bbytes;
4650 memcpy(&bbytes, &req->message.body.buffer_bytes, 4);
4651 ret = settings.engine.v1->upr.buffer_acknowledgement(settings.engine.v0, c,
4652 c->binary_header.request.opaque,
4653 c->binary_header.request.vbucket,
4654 ntohl(bbytes));
4655 }
4656
4657 switch (ret) {
4658 case ENGINE_SUCCESS:
4659 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4660 break;
4661
4662 case ENGINE_DISCONNECT:
4663 conn_set_state(c, conn_closing);
4664 break;
4665
4666 case ENGINE_EWOULDBLOCK:
4667 c->ewouldblock = true;
4668 break;
4669
4670 default:
4671 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4672 }
4673 }
4674}
4675
4676static void upr_control_executor(conn *c, void *packet)
4677{
4678 if (settings.engine.v1->upr.control == NULL) {
4679 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4680 } else {
4681 ENGINE_ERROR_CODE ret = c->aiostat;
4682 c->aiostat = ENGINE_SUCCESS;
4683 c->ewouldblock = false;
4684
4685 if (ret == ENGINE_SUCCESS) {
4686 protocol_binary_request_upr_control *req = (void*)packet;
4687 const uint8_t *key = req->bytes + sizeof(req->bytes);
4688 uint16_t nkey = ntohs(req->message.header.request.keylen);
4689 const uint8_t *value = key + nkey;
4690 uint32_t nvalue = ntohl(req->message.header.request.bodylen) - nkey;
4691 ret = settings.engine.v1->upr.control(settings.engine.v0, c,
4692 c->binary_header.request.opaque,
4693 key, nkey, value, nvalue);
4694 }
4695
4696 switch (ret) {
4697 case ENGINE_SUCCESS:
4698 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4699 break;
4700
4701 case ENGINE_DISCONNECT:
4702 conn_set_state(c, conn_closing);
4703 break;
4704
4705 case ENGINE_EWOULDBLOCK:
4706 c->ewouldblock = true;
4707 break;
4708
4709 default:
4710 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4711 }
4712 }
4713}
4714
4715static void isasl_refresh_executor(conn *c, void *packet)
4716{
4717 ENGINE_ERROR_CODE ret = c->aiostat;
4718 c->aiostat = ENGINE_SUCCESS;
4719 c->ewouldblock = false;
4720
4721 if (ret == ENGINE_SUCCESS) {
4722 ret = refresh_cbsasl(c);
4723 }
4724
4725 switch (ret) {
4726 case ENGINE_SUCCESS:
4727 write_bin_response(c, NULL, 0, 0, 0);
4728 break;
4729 case ENGINE_EWOULDBLOCK:
4730 c->ewouldblock = true;
4731 conn_set_state(c, conn_refresh_cbsasl);
4732 break;
4733 case ENGINE_DISCONNECT:
4734 conn_set_state(c, conn_closing);
4735 break;
4736 default:
4737 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4738 }
4739}
4740
4741static void ssl_certs_refresh_executor(conn *c, void *packet)
4742{
4743 ENGINE_ERROR_CODE ret = c->aiostat;
4744 c->aiostat = ENGINE_SUCCESS;
4745 c->ewouldblock = false;
4746
4747 if (ret == ENGINE_SUCCESS) {
4748 ret = refresh_ssl_certs(c);
4749 }
4750
4751 switch (ret) {
4752 case ENGINE_SUCCESS:
4753 write_bin_response(c, NULL, 0, 0, 0);
4754 break;
4755 case ENGINE_EWOULDBLOCK:
4756 c->ewouldblock = true;
4757 conn_set_state(c, conn_refresh_ssl_certs);
4758 break;
4759 case ENGINE_DISCONNECT:
4760 conn_set_state(c, conn_closing);
4761 break;
4762 default:
4763 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
4764 }
4765}
4766
4767static void verbosity_executor(conn *c, void *packet)
4768{
4769 protocol_binary_request_verbosity *req = packet;
4770 uint32_t level = (uint32_t)ntohl(req->message.body.level);
4771 if (level > MAX_VERBOSITY_LEVEL) {
4772 level = MAX_VERBOSITY_LEVEL;
4773 }
4774 settings.verbose = (int)level;
4775 perform_callbacks(ON_LOG_LEVEL, NULL, NULL);
4776 write_bin_response(c, NULL, 0, 0, 0);
4777}
4778
4779static void process_hello_packet_executor(conn *c, void *packet) {
4780 protocol_binary_request_hello *req = packet;
4781 char log_buffer[512];
4782 int offset = snprintf(log_buffer, sizeof(log_buffer), "HELO ");
4783 char *key = (char*)packet + sizeof(*req);
4784 uint16_t klen = ntohs(req->message.header.request.keylen);
4785 uint32_t total = (ntohl(req->message.header.request.bodylen) - klen) / 2;
4786 uint32_t ii;
4787 char *curr = key + klen;
4788 uint16_t out[2]; /* We're currently only supporting two features */
4789 int jj = 0;
4790#if 0
4791 int added_tls = 0;
4792#endif
4793 memset((char*)out, 0, sizeof(out));
4794
4795 /*
4796 * Disable all features the hello packet may enable, so that
4797 * the client can toggle features on/off during a connection
4798 */
4799 c->supports_datatype = false;
4800
4801 if (klen) {
4802 if (klen > 256) {
4803 klen = 256;
4804 }
4805 log_buffer[offset++] = '[';
4806 memcpy(log_buffer + offset, key, klen);
4807 offset += klen;
4808 log_buffer[offset++] = ']';
4809 log_buffer[offset++] = ' ';
4810 }
4811
4812 for (ii = 0; ii < total; ++ii) {
4813 uint16_t in;
4814 /* to avoid alignment */
4815 memcpy(&in, curr, 2);
4816 curr += 2;
4817 switch (ntohs(in)) {
4818 case PROTOCOL_BINARY_FEATURE_TLS:
4819#if 0
4820 /* Not implemented */
4821 if (added_tls == 0) {
4822 out[jj++] = htons(PROTOCOL_BINARY_FEATURE_TLS);
4823 added_tls++;
4824 }
4825 break;
4826#endif
4827 case PROTOCOL_BINARY_FEATURE_DATATYPE:
4828 if (!c->supports_datatype) {
4829 offset += snprintf(log_buffer + offset,
4830 sizeof(log_buffer) - offset,
4831 "datatype ");
4832 out[jj++] = htons(PROTOCOL_BINARY_FEATURE_DATATYPE);
4833 c->supports_datatype = true;
4834 }
4835 break;
4836 }
4837 }
4838
4839 if (jj == 0) {
4840 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_SUCCESS, 0);
4841 } else {
4842 binary_response_handler(NULL, 0, NULL, 0, out, 2 * jj,
4843 PROTOCOL_BINARY_RAW_BYTES,
4844 PROTOCOL_BINARY_RESPONSE_SUCCESS,
4845 0, c);
4846 write_and_free(c, c->dynamic_buffer.buffer,
4847 c->dynamic_buffer.offset);
4848 c->dynamic_buffer.buffer = NULL;
4849 }
4850
4851 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
4852 "%d: %s", c->sfd, log_buffer);
4853}
4854
4855static void version_executor(conn *c, void *packet)
4856{
4857 write_bin_response(c, get_server_version(), 0, 0,
4858 (uint32_t)strlen(get_server_version()));
4859}
4860
4861static void quit_executor(conn *c, void *packet)
4862{
4863 write_bin_response(c, NULL, 0, 0, 0);
4864 c->write_and_go = conn_closing;
4865}
4866
4867static void quitq_executor(conn *c, void *packet)
4868{
4869 conn_set_state(c, conn_closing);
4870}
4871
4872static void sasl_list_mech_executor(conn *c, void *packet)
4873{
4874 const char *result_string = NULL;
4875 unsigned int string_length = 0;
4876
4877 if (cbsasl_list_mechs(&result_string, &string_length) != SASL_OK) {
4878 /* Perhaps there's a better error for this... */
4879 if (settings.verbose) {
4880 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
4881 "%d: Failed to list SASL mechanisms.\n",
4882 c->sfd);
4883 }
4884 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, 0);
4885 return;
4886 }
4887 write_bin_response(c, (char*)result_string, 0, 0, string_length);
4888}
4889
4890static void noop_executor(conn *c, void *packet)
4891{
4892 write_bin_response(c, NULL, 0, 0, 0);
4893}
4894
4895static void flush_executor(conn *c, void *packet)
4896{
4897 ENGINE_ERROR_CODE ret;
4898 time_t exptime = 0;
4899 protocol_binary_request_flush* req = packet;
4900
4901 if (c->cmd == PROTOCOL_BINARY_CMD_FLUSHQ) {
4902 c->noreply = true;
4903 }
4904
4905 if (c->binary_header.request.extlen == sizeof(req->message.body)) {
4906 exptime = ntohl(req->message.body.expiration);
4907 }
4908
4909 if (settings.verbose > 1) {
4910 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
4911 "%d: flush %ld", c->sfd,
4912 (long)exptime);
4913 }
4914
4915 ret = settings.engine.v1->flush(settings.engine.v0, c, exptime);
4916
4917 if (ret == ENGINE_SUCCESS) {
4918 write_bin_response(c, NULL, 0, 0, 0);
4919 } else if (ret == ENGINE_ENOTSUP) {
4920 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4921 } else {
4922 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
4923 }
4924 STATS_NOKEY(c, cmd_flush);
4925}
4926
4927static void get_executor(conn *c, void *packet)
4928{
4929 switch (c->cmd) {
4930 case PROTOCOL_BINARY_CMD_GETQ:
4931 c->cmd = PROTOCOL_BINARY_CMD_GET;
4932 c->noreply = true;
4933 break;
4934 case PROTOCOL_BINARY_CMD_GET:
4935 c->noreply = false;
4936 break;
4937 case PROTOCOL_BINARY_CMD_GETKQ:
4938 c->cmd = PROTOCOL_BINARY_CMD_GETK;
4939 c->noreply = true;
4940 break;
4941 case PROTOCOL_BINARY_CMD_GETK:
4942 c->noreply = false;
4943 break;
4944 default:
4945 abort();
4946 }
4947
4948 process_bin_get(c);
4949}
4950
4951static void process_bin_delete(conn *c);
4952static void delete_executor(conn *c, void *packet)
4953{
4954 if (c->cmd == PROTOCOL_BINARY_CMD_DELETEQ) {
4955 c->noreply = true;
4956 }
4957
4958 process_bin_delete(c);
4959}
4960
4961static void stat_executor(conn *c, void *packet)
4962{
4963 char *subcommand = binary_get_key(c);
4964 size_t nkey = c->binary_header.request.keylen;
4965 ENGINE_ERROR_CODE ret;
4966
4967 if (settings.verbose > 1) {
4968 char buffer[1024];
4969 if (key_to_printable_buffer(buffer, sizeof(buffer), c->sfd, true,
4970 "STATS", subcommand, nkey) != -1) {
4971 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c, "%s\n",
4972 buffer);
4973 }
4974 }
4975
4976 ret = c->aiostat;
4977 c->aiostat = ENGINE_SUCCESS;
4978 c->ewouldblock = false;
4979
4980 if (ret == ENGINE_SUCCESS) {
4981 if (nkey == 0) {
4982 /* request all statistics */
4983 ret = settings.engine.v1->get_stats(settings.engine.v0, c, NULL, 0, append_stats);
4984 if (ret == ENGINE_SUCCESS) {
4985 server_stats(&append_stats, c, false);
4986 }
4987 } else if (strncmp(subcommand, "reset", 5) == 0) {
4988 stats_reset(c);
4989 settings.engine.v1->reset_stats(settings.engine.v0, c);
4990 } else if (strncmp(subcommand, "settings", 8) == 0) {
4991 process_stat_settings(&append_stats, c);
4992 } else if (strncmp(subcommand, "cachedump", 9) == 0) {
4993 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
4994 return;
4995 } else if (strncmp(subcommand, "detail", 6) == 0) {
4996 char *subcmd_pos = subcommand + 6;
4997 if (settings.allow_detailed) {
4998 if (strncmp(subcmd_pos, " dump", 5) == 0) {
4999 int len;
5000 char *dump_buf = stats_prefix_dump(&len);
5001 if (dump_buf == NULL || len <= 0) {
5002 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, 0);
5003 return ;
5004 } else {
5005 append_stats("detailed", (uint16_t)strlen("detailed"), dump_buf, len, c);
5006 free(dump_buf);
5007 }
5008 } else if (strncmp(subcmd_pos, " on", 3) == 0) {
5009 settings.detail_enabled = 1;
5010 } else if (strncmp(subcmd_pos, " off", 4) == 0) {
5011 settings.detail_enabled = 0;
5012 } else {
5013 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
5014 return;
5015 }
5016 } else {
5017 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, 0);
5018 return;
5019 }
5020 } else if (strncmp(subcommand, "aggregate", 9) == 0) {
5021 server_stats(&append_stats, c, true);
5022 } else if (strncmp(subcommand, "connections", 11) == 0) {
5023 connection_stats(&append_stats, c);
5024 } else {
5025 ret = settings.engine.v1->get_stats(settings.engine.v0, c,
5026 subcommand, (int)nkey,
5027 append_stats);
5028 }
5029 }
5030
5031 switch (ret) {
5032 case ENGINE_SUCCESS:
5033 append_stats(NULL, 0, NULL, 0, c);
5034 write_and_free(c, c->dynamic_buffer.buffer, c->dynamic_buffer.offset);
5035 c->dynamic_buffer.buffer = NULL;
5036 break;
5037 case ENGINE_ENOMEM:
5038 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, 0);
5039 break;
5040 case ENGINE_TMPFAIL:
5041 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ETMPFAIL, 0);
5042 break;
5043 case ENGINE_KEY_ENOENT:
5044 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
5045 break;
5046 case ENGINE_NOT_MY_VBUCKET:
5047 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET, 0);
5048 break;
5049 case ENGINE_DISCONNECT:
5050 c->state = conn_closing;
5051 break;
5052 case ENGINE_ENOTSUP:
5053 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
5054 break;
5055 case ENGINE_EWOULDBLOCK:
5056 c->ewouldblock = true;
5057 break;
5058 default:
5059 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
5060 }
5061}
5062
5063static void arithmetic_executor(conn *c, void *packet)
5064{
5065 protocol_binary_response_incr* rsp = (protocol_binary_response_incr*)c->wbuf;
5066 protocol_binary_request_incr* req = binary_get_request(c);
5067 ENGINE_ERROR_CODE ret;
5068 uint64_t delta;
5069 uint64_t initial;
5070 rel_time_t expiration;
5071 char *key;
5072 size_t nkey;
5073 bool incr;
5074
5075 cb_assert(c != NULL);
5076 cb_assert(c->wsize >= sizeof(*rsp));
5077
5078
5079 switch (c->cmd) {
5080 case PROTOCOL_BINARY_CMD_INCREMENTQ:
5081 c->cmd = PROTOCOL_BINARY_CMD_INCREMENT;
5082 c->noreply = true;
5083 break;
5084 case PROTOCOL_BINARY_CMD_INCREMENT:
5085 c->noreply = false;
5086 break;
5087 case PROTOCOL_BINARY_CMD_DECREMENTQ:
5088 c->cmd = PROTOCOL_BINARY_CMD_DECREMENT;
5089 c->noreply = true;
5090 break;
5091 case PROTOCOL_BINARY_CMD_DECREMENT:
5092 c->noreply = false;
5093 break;
5094 default:
5095 abort();
5096 }
5097
5098 if (req->message.header.request.cas != 0) {
5099 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
5100 return;
5101 }
5102
5103 /* fix byteorder in the request */
5104 delta = ntohll(req->message.body.delta);
5105 initial = ntohll(req->message.body.initial);
5106 expiration = ntohl(req->message.body.expiration);
5107 key = binary_get_key(c);
5108 nkey = c->binary_header.request.keylen;
5109 incr = (c->cmd == PROTOCOL_BINARY_CMD_INCREMENT ||
5110 c->cmd == PROTOCOL_BINARY_CMD_INCREMENTQ);
5111
5112 if (settings.verbose > 1) {
5113 char buffer[1024];
5114 ssize_t nw;
5115 nw = key_to_printable_buffer(buffer, sizeof(buffer), c->sfd, true,
5116 incr ? "INCR" : "DECR", key, nkey);
5117 if (nw != -1) {
5118 if (snprintf(buffer + nw, sizeof(buffer) - nw,
5119 " %" PRIu64 ", %" PRIu64 ", %" PRIu64 "\n",
5120 delta, initial, (uint64_t)expiration) != -1) {
5121 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c, "%s",
5122 buffer);
5123 }
5124 }
5125 }
5126
5127 ret = c->aiostat;
5128 c->aiostat = ENGINE_SUCCESS;
5129 if (ret == ENGINE_SUCCESS) {
5130 ret = settings.engine.v1->arithmetic(settings.engine.v0,
5131 c, key, (int)nkey, incr,
5132 req->message.body.expiration != 0xffffffff,
5133 delta, initial, expiration,
5134 &c->cas,
5135 c->binary_header.request.datatype,
5136 &rsp->message.body.value,
5137 c->binary_header.request.vbucket);
5138 }
5139
5140 switch (ret) {
5141 case ENGINE_SUCCESS:
5142 rsp->message.body.value = htonll(rsp->message.body.value);
5143 write_bin_response(c, &rsp->message.body, 0, 0,
5144 sizeof (rsp->message.body.value));
5145 if (incr) {
5146 STATS_INCR(c, incr_hits, key, nkey);
5147 } else {
5148 STATS_INCR(c, decr_hits, key, nkey);
5149 }
5150 break;
5151 case ENGINE_KEY_EEXISTS:
5152 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, 0);
5153 break;
5154 case ENGINE_KEY_ENOENT:
5155 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
5156 if (c->cmd == PROTOCOL_BINARY_CMD_INCREMENT) {
5157 STATS_INCR(c, incr_misses, key, nkey);
5158 } else {
5159 STATS_INCR(c, decr_misses, key, nkey);
5160 }
5161 break;
5162 case ENGINE_ENOMEM:
5163 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, 0);
5164 break;
5165 case ENGINE_TMPFAIL:
5166 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ETMPFAIL, 0);
5167 break;
5168 case ENGINE_EINVAL:
5169 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_DELTA_BADVAL, 0);
5170 break;
5171 case ENGINE_NOT_STORED:
5172 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_STORED, 0);
5173 break;
5174 case ENGINE_DISCONNECT:
5175 c->state = conn_closing;
5176 break;
5177 case ENGINE_ENOTSUP:
5178 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
5179 break;
5180 case ENGINE_NOT_MY_VBUCKET:
5181 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET, 0);
5182 break;
5183 case ENGINE_EWOULDBLOCK:
5184 c->ewouldblock = true;
5185 break;
5186 default:
5187 abort();
5188 }
5189}
5190
5191static void get_cmd_timer_executor(conn *c, void *packet)
5192{
5193 protocol_binary_request_get_cmd_timer *req = packet;
5194
5195 generate_timings(req->message.body.opcode, c);
5196 write_and_free(c, c->dynamic_buffer.buffer, c->dynamic_buffer.offset);
5197 c->dynamic_buffer.buffer = NULL((void*)0);
5198}
5199
5200static void set_ctrl_token_executor(conn *c, void *packet)
5201{
5202 protocol_binary_request_set_ctrl_token *req = packet;
5203
5204 uint64_t old_cas = ntohll(req->message.header.request.cas);
5205
5206 uint16_t ret = PROTOCOL_BINARY_RESPONSE_SUCCESS;
5207 cb_mutex_enter(&(session_cas.mutex));
5208 if (session_cas.ctr > 0) {
5209 ret = PROTOCOL_BINARY_RESPONSE_EBUSY;
5210 } else {
5211 if (old_cas == session_cas.value) {
5212 session_cas.value = ntohll(req->message.body.new_cas);
5213 } else {
5214 ret = PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS;
5215 }
5216 }
5217
5218 binary_response_handler(NULL, 0, NULL, 0, NULL, 0,
5219 PROTOCOL_BINARY_RAW_BYTES,
5220 ret, session_cas.value, c);
5221 cb_mutex_exit(&(session_cas.mutex));
5222
5223 write_and_free(c, c->dynamic_buffer.buffer, c->dynamic_buffer.offset);
5224 c->dynamic_buffer.buffer = NULL;
5225}
5226
5227static void get_ctrl_token_executor(conn *c, void *packet)
5228{
5229 cb_mutex_enter(&(session_cas.mutex));
5230 binary_response_handler(NULL, 0, NULL, 0, NULL, 0,
5231 PROTOCOL_BINARY_RAW_BYTES,
5232 PROTOCOL_BINARY_RESPONSE_SUCCESS,
5233 session_cas.value, c);
5234 cb_mutex_exit(&(session_cas.mutex));
5235 write_and_free(c, c->dynamic_buffer.buffer, c->dynamic_buffer.offset);
5236 c->dynamic_buffer.buffer = NULL;
5237}
5238
5239static void not_supported_executor(conn *c, void *packet)
5240{
5241 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_SUPPORTED, 0);
5242}
5243
5244
5245typedef int (*bin_package_validate)(void *packet);
5246typedef void (*bin_package_execute)(conn *c, void *packet);
5247
5248bin_package_validate validators[0xff];
5249bin_package_execute executors[0xff];
5250
5251static void setup_bin_packet_handlers(void) {
5252 validators[PROTOCOL_BINARY_CMD_UPR_OPEN] = upr_open_validator;
5253 validators[PROTOCOL_BINARY_CMD_UPR_ADD_STREAM] = upr_add_stream_validator;
5254 validators[PROTOCOL_BINARY_CMD_UPR_CLOSE_STREAM] = upr_close_stream_validator;
5255 validators[PROTOCOL_BINARY_CMD_UPR_SNAPSHOT_MARKER] = upr_snapshot_marker_validator;
5256 validators[PROTOCOL_BINARY_CMD_UPR_DELETION] = upr_deletion_validator;
5257 validators[PROTOCOL_BINARY_CMD_UPR_EXPIRATION] = upr_expiration_validator;
5258 validators[PROTOCOL_BINARY_CMD_UPR_FLUSH] = upr_flush_validator;
5259 validators[PROTOCOL_BINARY_CMD_UPR_GET_FAILOVER_LOG] = upr_get_failover_log_validator;
5260 validators[PROTOCOL_BINARY_CMD_UPR_MUTATION] = upr_mutation_validator;
5261 validators[PROTOCOL_BINARY_CMD_UPR_SET_VBUCKET_STATE] = upr_set_vbucket_state_validator;
5262 validators[PROTOCOL_BINARY_CMD_UPR_NOOP] = upr_noop_validator;
5263 validators[PROTOCOL_BINARY_CMD_UPR_BUFFER_ACKNOWLEDGEMENT] = upr_buffer_acknowledgement_validator;
5264 validators[PROTOCOL_BINARY_CMD_UPR_CONTROL] = upr_control_validator;
5265 validators[PROTOCOL_BINARY_CMD_UPR_STREAM_END] = upr_stream_end_validator;
5266 validators[PROTOCOL_BINARY_CMD_UPR_STREAM_REQ] = upr_stream_req_validator;
5267 validators[PROTOCOL_BINARY_CMD_ISASL_REFRESH] = isasl_refresh_validator;
5268 validators[PROTOCOL_BINARY_CMD_SSL_CERTS_REFRESH] = ssl_certs_refresh_validator;
5269 validators[PROTOCOL_BINARY_CMD_VERBOSITY] = verbosity_validator;
5270 validators[PROTOCOL_BINARY_CMD_HELLO] = hello_validator;
5271 validators[PROTOCOL_BINARY_CMD_VERSION] = version_validator;
5272 validators[PROTOCOL_BINARY_CMD_QUIT] = quit_validator;
5273 validators[PROTOCOL_BINARY_CMD_QUITQ] = quit_validator;
5274 validators[PROTOCOL_BINARY_CMD_SASL_LIST_MECHS] = sasl_list_mech_validator;
5275 validators[PROTOCOL_BINARY_CMD_NOOP] = noop_validator;
5276 validators[PROTOCOL_BINARY_CMD_FLUSH] = flush_validator;
5277 validators[PROTOCOL_BINARY_CMD_FLUSHQ] = flush_validator;
5278 validators[PROTOCOL_BINARY_CMD_GET] = get_validator;
5279 validators[PROTOCOL_BINARY_CMD_GETQ] = get_validator;
5280 validators[PROTOCOL_BINARY_CMD_GETK] = get_validator;
5281 validators[PROTOCOL_BINARY_CMD_GETKQ] = get_validator;
5282 validators[PROTOCOL_BINARY_CMD_DELETE] = delete_validator;
5283 validators[PROTOCOL_BINARY_CMD_DELETEQ] = delete_validator;
5284 validators[PROTOCOL_BINARY_CMD_STAT] = stat_validator;
5285 validators[PROTOCOL_BINARY_CMD_INCREMENT] = arithmetic_validator;
5286 validators[PROTOCOL_BINARY_CMD_INCREMENTQ] = arithmetic_validator;
5287 validators[PROTOCOL_BINARY_CMD_DECREMENT] = arithmetic_validator;
5288 validators[PROTOCOL_BINARY_CMD_DECREMENTQ] = arithmetic_validator;
5289 validators[PROTOCOL_BINARY_CMD_GET_CMD_TIMER] = get_cmd_timer_validator;
5290 validators[PROTOCOL_BINARY_CMD_SET_CTRL_TOKEN] = set_ctrl_token_validator;
5291 validators[PROTOCOL_BINARY_CMD_GET_CTRL_TOKEN] = get_ctrl_token_validator;
5292
5293 executors[PROTOCOL_BINARY_CMD_UPR_OPEN] = upr_open_executor;
5294 executors[PROTOCOL_BINARY_CMD_UPR_ADD_STREAM] = upr_add_stream_executor;
5295 executors[PROTOCOL_BINARY_CMD_UPR_CLOSE_STREAM] = upr_close_stream_executor;
5296 executors[PROTOCOL_BINARY_CMD_UPR_SNAPSHOT_MARKER] = upr_snapshot_marker_executor;
5297 executors[PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_END] = tap_checkpoint_end_executor;
5298 executors[PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_START] = tap_checkpoint_start_executor;
5299 executors[PROTOCOL_BINARY_CMD_TAP_CONNECT] = tap_connect_executor;
5300 executors[PROTOCOL_BINARY_CMD_TAP_DELETE] = tap_delete_executor;
5301 executors[PROTOCOL_BINARY_CMD_TAP_FLUSH] = tap_flush_executor;
5302 executors[PROTOCOL_BINARY_CMD_TAP_MUTATION] = tap_mutation_executor;
5303 executors[PROTOCOL_BINARY_CMD_TAP_OPAQUE] = tap_opaque_executor;
5304 executors[PROTOCOL_BINARY_CMD_TAP_VBUCKET_SET] = tap_vbucket_set_executor;
5305 executors[PROTOCOL_BINARY_CMD_UPR_DELETION] = upr_deletion_executor;
5306 executors[PROTOCOL_BINARY_CMD_UPR_EXPIRATION] = upr_expiration_executor;
5307 executors[PROTOCOL_BINARY_CMD_UPR_FLUSH] = upr_flush_executor;
5308 executors[PROTOCOL_BINARY_CMD_UPR_GET_FAILOVER_LOG] = upr_get_failover_log_executor;
5309 executors[PROTOCOL_BINARY_CMD_UPR_MUTATION] = upr_mutation_executor;
5310 executors[PROTOCOL_BINARY_CMD_UPR_SET_VBUCKET_STATE] = upr_set_vbucket_state_executor;
5311 executors[PROTOCOL_BINARY_CMD_UPR_NOOP] = upr_noop_executor;
5312 executors[PROTOCOL_BINARY_CMD_UPR_BUFFER_ACKNOWLEDGEMENT] = upr_buffer_acknowledgement_executor;
5313 executors[PROTOCOL_BINARY_CMD_UPR_CONTROL] = upr_control_executor;
5314 executors[PROTOCOL_BINARY_CMD_UPR_STREAM_END] = upr_stream_end_executor;
5315 executors[PROTOCOL_BINARY_CMD_UPR_STREAM_REQ] = upr_stream_req_executor;
5316 executors[PROTOCOL_BINARY_CMD_ISASL_REFRESH] = isasl_refresh_executor;
5317 executors[PROTOCOL_BINARY_CMD_SSL_CERTS_REFRESH] = ssl_certs_refresh_executor;
5318 executors[PROTOCOL_BINARY_CMD_VERBOSITY] = verbosity_executor;
5319 executors[PROTOCOL_BINARY_CMD_HELLO] = process_hello_packet_executor;
5320 executors[PROTOCOL_BINARY_CMD_VERSION] = version_executor;
5321 executors[PROTOCOL_BINARY_CMD_QUIT] = quit_executor;
5322 executors[PROTOCOL_BINARY_CMD_QUITQ] = quitq_executor;
5323 executors[PROTOCOL_BINARY_CMD_SASL_LIST_MECHS] = sasl_list_mech_executor;
5324 executors[PROTOCOL_BINARY_CMD_NOOP] = noop_executor;
5325 executors[PROTOCOL_BINARY_CMD_FLUSH] = flush_executor;
5326 executors[PROTOCOL_BINARY_CMD_FLUSHQ] = flush_executor;
5327 executors[PROTOCOL_BINARY_CMD_GET] = get_executor;
5328 executors[PROTOCOL_BINARY_CMD_GETQ] = get_executor;
5329 executors[PROTOCOL_BINARY_CMD_GETK] = get_executor;
5330 executors[PROTOCOL_BINARY_CMD_GETKQ] = get_executor;
5331 executors[PROTOCOL_BINARY_CMD_DELETE] = delete_executor;
5332 executors[PROTOCOL_BINARY_CMD_DELETEQ] = delete_executor;
5333 executors[PROTOCOL_BINARY_CMD_STAT] = stat_executor;
5334 executors[PROTOCOL_BINARY_CMD_INCREMENT] = arithmetic_executor;
5335 executors[PROTOCOL_BINARY_CMD_INCREMENTQ] = arithmetic_executor;
5336 executors[PROTOCOL_BINARY_CMD_DECREMENT] = arithmetic_executor;
5337 executors[PROTOCOL_BINARY_CMD_DECREMENTQ] = arithmetic_executor;
5338 executors[PROTOCOL_BINARY_CMD_GET_CMD_TIMER] = get_cmd_timer_executor;
5339 executors[PROTOCOL_BINARY_CMD_SET_CTRL_TOKEN] = set_ctrl_token_executor;
5340 executors[PROTOCOL_BINARY_CMD_GET_CTRL_TOKEN] = get_ctrl_token_executor;
5341}
5342
5343static void setup_not_supported_handlers(void) {
5344 if (settings.engine.v1->get_tap_iterator == NULL) {
5345 executors[PROTOCOL_BINARY_CMD_TAP_CONNECT] = not_supported_executor;
5346 }
5347
5348 if (settings.engine.v1->tap_notify == NULL) {
5349 executors[PROTOCOL_BINARY_CMD_TAP_MUTATION] = not_supported_executor;
5350 executors[PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_START] = not_supported_executor;
5351 executors[PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_END] = not_supported_executor;
5352 executors[PROTOCOL_BINARY_CMD_TAP_DELETE] = not_supported_executor;
5353 executors[PROTOCOL_BINARY_CMD_TAP_FLUSH] = not_supported_executor;
5354 executors[PROTOCOL_BINARY_CMD_TAP_OPAQUE] = not_supported_executor;
5355 executors[PROTOCOL_BINARY_CMD_TAP_VBUCKET_SET] = not_supported_executor;
5356 }
5357}
5358
5359static int invalid_datatype(conn *c) {
5360 switch (c->binary_header.request.datatype) {
5361 case PROTOCOL_BINARY_RAW_BYTES:
5362 return 0;
5363
5364 case PROTOCOL_BINARY_DATATYPE_JSON:
5365 case PROTOCOL_BINARY_DATATYPE_COMPRESSED:
5366 case PROTOCOL_BINARY_DATATYPE_COMPRESSED_JSON:
5367 if (c->supports_datatype) {
5368 return 0;
5369 }
5370 /* FALLTHROUGH */
5371 default:
5372 return 1;
5373 }
5374}
5375
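/*
 * Binary commands are dispatched through the two tables set up above and
 * indexed by opcode: validators[] checks that the packet layout is legal
 * for the opcode (returning 0 when it is), executors[] carries out the
 * command. A failed validation is answered with EINVAL; a missing executor
 * falls through to process_bin_unknown_packet().
 *
 * A validator typically has the shape of the hypothetical sketch below
 * (illustrative only, not part of this file):
 *
 *   static int my_noop_validator(void *packet) {
 *       protocol_binary_request_no_extras *req = packet;
 *       return (req->message.header.request.extlen == 0 &&
 *               req->message.header.request.keylen == 0 &&
 *               req->message.header.request.bodylen == 0) ? 0 : -1;
 *   }
 */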
5376static void process_bin_packet(conn *c) {
5377
5378 char *packet = (c->rcurr - (c->binary_header.request.bodylen +
5379 sizeof(c->binary_header)));
5380
5381 uint8_t opcode = c->binary_header.request.opcode;
5382
5383 bin_package_validate validator = validators[opcode];
5384 bin_package_execute executor = executors[opcode];
5385
5386 if (validator != NULL && validator(packet) != 0) {
5387 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
5388 } else if (executor != NULL) {
5389 executor(c, packet);
5390 } else {
5391 process_bin_unknown_packet(c);
5392 }
5393}
5394
5395static void dispatch_bin_command(conn *c) {
5396 int protocol_error = 0;
5397
5398 int extlen = c->binary_header.request.extlen;
5399 uint16_t keylen = c->binary_header.request.keylen;
5400 uint32_t bodylen = c->binary_header.request.bodylen;
5401
5402 if (settings.require_sasl && !authenticated(c)) {
5403 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, 0);
5404 c->write_and_go = conn_closing;
5405 return;
5406 }
5407
5408 if (invalid_datatype(c)) {
5409 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
5410 c->write_and_go = conn_closing;
5411 return;
5412 }
5413
5414 if (c->start == 0) {
5415 c->start = gethrtime();
5416 }
5417
5418 MEMCACHED_PROCESS_COMMAND_START(c->sfd, c->rcurr, c->rbytes);
5419
5420 /* binprot supports 16bit keys, but internals are still 8bit */
5421 if (keylen > KEY_MAX_LENGTH) {
5422 handle_binary_protocol_error(c);
5423 return;
5424 }
5425
5426 if (executors[c->cmd] != NULL) {
5427 c->noreply = false;
5428 bin_read_chunk(c, bin_reading_packet, c->binary_header.request.bodylen);
5429 return;
5430 }
5431
5432 c->noreply = true;
5433
5434 switch (c->cmd) {
5435 case PROTOCOL_BINARY_CMD_SETQ:
5436 c->cmd = PROTOCOL_BINARY_CMD_SET;
5437 break;
5438 case PROTOCOL_BINARY_CMD_ADDQ:
5439 c->cmd = PROTOCOL_BINARY_CMD_ADD;
5440 break;
5441 case PROTOCOL_BINARY_CMD_REPLACEQ:
5442 c->cmd = PROTOCOL_BINARY_CMD_REPLACE;
5443 break;
5444 case PROTOCOL_BINARY_CMD_APPENDQ:
5445 c->cmd = PROTOCOL_BINARY_CMD_APPEND;
5446 break;
5447 case PROTOCOL_BINARY_CMD_PREPENDQ:
5448 c->cmd = PROTOCOL_BINARY_CMD_PREPEND;
5449 break;
5450 default:
5451 c->noreply = false;
5452 }
5453
5454 switch (c->cmd) {
5455 case PROTOCOL_BINARY_CMD_SET: /* FALLTHROUGH */
5456 case PROTOCOL_BINARY_CMD_ADD: /* FALLTHROUGH */
5457 case PROTOCOL_BINARY_CMD_REPLACE:
5458 if (extlen == 8 && keylen != 0 && bodylen >= (uint32_t)(keylen + 8)) {
5459 bin_read_key(c, bin_reading_set_header, 8);
5460 } else {
5461 protocol_error = 1;
5462 }
5463
5464 break;
5465 case PROTOCOL_BINARY_CMD_APPEND:
5466 case PROTOCOL_BINARY_CMD_PREPEND:
5467 if (keylen > 0 && extlen == 0) {
5468 bin_read_key(c, bin_reading_set_header, 0);
5469 } else {
5470 protocol_error = 1;
5471 }
5472 break;
5473
5474 case PROTOCOL_BINARY_CMD_SASL_AUTH:
5475 case PROTOCOL_BINARY_CMD_SASL_STEP:
5476 if (extlen == 0 && keylen != 0) {
5477 bin_read_key(c, bin_reading_sasl_auth, 0);
5478 } else {
5479 protocol_error = 1;
5480 }
5481 break;
5482
5483 default:
5484 if (settings.engine.v1->unknown_command == NULL) {
5485 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND,
5486 bodylen);
5487 } else {
5488 bin_read_chunk(c, bin_reading_packet, c->binary_header.request.bodylen);
5489 }
5490 }
5491
5492 if (protocol_error) {
5493 handle_binary_protocol_error(c);
5494 }
5495}
5496
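/*
 * process_bin_update() handles SET/ADD/REPLACE. The value has not been read
 * from the network yet: the engine is asked to allocate an item of the right
 * size, c->ritem is pointed at the item's value buffer and the connection is
 * switched to conn_nread, so the state machine reads the value straight into
 * the item. complete_update_bin() later performs the store recorded in
 * c->store_op.
 */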
5497static void process_bin_update(conn *c) {
5498 char *key;
5499 uint16_t nkey;
5500 uint32_t vlen;
5501 item *it;
5502 protocol_binary_request_set* req = binary_get_request(c);
5503 ENGINE_ERROR_CODE ret;
5504 item_info_holder info;
5505 rel_time_t expiration;
5506
5507 cb_assert(c != NULL);
5508 memset(&info, 0, sizeof(info));
5509 info.info.nvalue = 1;
5510 key = binary_get_key(c);
5511 nkey = c->binary_header.request.keylen;
5512
5513 /* fix byteorder in the request */
5514 req->message.body.flags = req->message.body.flags;
5515 expiration = ntohl(req->message.body.expiration);
5516
5517 vlen = c->binary_header.request.bodylen - (nkey + c->binary_header.request.extlen);
5518
5519 if (settings.verbose > 1) {
5520 size_t nw;
5521 char buffer[1024];
5522 const char *prefix;
5523 if (c->cmd == PROTOCOL_BINARY_CMD_ADD) {
5524 prefix = "ADD";
5525 } else if (c->cmd == PROTOCOL_BINARY_CMD_SET) {
5526 prefix = "SET";
5527 } else {
5528 prefix = "REPLACE";
5529 }
5530
5531 nw = key_to_printable_buffer(buffer, sizeof(buffer), c->sfd, true,
5532 prefix, key, nkey);
5533
5534 if (nw != -1) {
5535 if (snprintf(buffer + nw, sizeof(buffer) - nw,
5536 " Value len is %d\n", vlen)) {
5537 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c, "%s",
5538 buffer);
5539 }
5540 }
5541 }
5542
5543 if (settings.detail_enabled) {
5544 stats_prefix_record_set(key, nkey);
5545 }
5546
5547 ret = c->aiostat;
5548 c->aiostat = ENGINE_SUCCESS;
5549 c->ewouldblock = false;
5550
5551 if (ret == ENGINE_SUCCESS) {
5552 ret = settings.engine.v1->allocate(settings.engine.v0, c,
5553 &it, key, nkey,
5554 vlen,
5555 req->message.body.flags,
5556 expiration,
5557 c->binary_header.request.datatype);
5558 if (ret == ENGINE_SUCCESS && !settings.engine.v1->get_item_info(settings.engine.v0,
5559 c, it,
5560 (void*)&info)) {
5561 settings.engine.v1->release(settings.engine.v0, c, it);
5562 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINTERNAL, 0);
5563 return;
5564 }
5565 }
5566
5567 switch (ret) {
5568 case ENGINE_SUCCESS:
5569 item_set_cas(c, it, c->binary_header.request.cas);
5570
5571 switch (c->cmd) {
5572 case PROTOCOL_BINARY_CMD_ADD:
5573 c->store_op = OPERATION_ADD;
5574 break;
5575 case PROTOCOL_BINARY_CMD_SET:
5576 if (c->binary_header.request.cas != 0) {
5577 c->store_op = OPERATION_CAS;
5578 } else {
5579 c->store_op = OPERATION_SET;
5580 }
5581 break;
5582 case PROTOCOL_BINARY_CMD_REPLACE:
5583 if (c->binary_header.request.cas != 0) {
5584 c->store_op = OPERATION_CAS;
5585 } else {
5586 c->store_op = OPERATION_REPLACE;
5587 }
5588 break;
5589 default:
5590 cb_assert(0);
5591 }
5592
5593 c->item = it;
5594 c->ritem = info.info.value[0].iov_base;
5595 c->rlbytes = vlen;
5596 conn_set_state(c, conn_nread);
5597 c->substate = bin_read_set_value;
5598 break;
5599 case ENGINE_EWOULDBLOCK:
5600 c->ewouldblock = true;
5601 break;
5602 case ENGINE_DISCONNECT:
5603 c->state = conn_closing;
5604 break;
5605 default:
5606 if (ret == ENGINE_E2BIG) {
5607 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_E2BIG, vlen);
5608 } else {
5609 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, vlen);
5610 }
5611
5612 /* swallow the data line */
5613 c->write_and_go = conn_swallow;
5614 }
5615}
5616
5617static void process_bin_append_prepend(conn *c) {
5618 ENGINE_ERROR_CODE ret;
5619 char *key;
5620 int nkey;
5621 int vlen;
5622 item *it;
5623 item_info_holder info;
5624 memset(&info, 0, sizeof(info));
5625 info.info.nvalue = 1;
5626
5627 cb_assert(c != NULL);
5628
5629 key = binary_get_key(c);
5630 nkey = c->binary_header.request.keylen;
5631 vlen = c->binary_header.request.bodylen - nkey;
5632
5633 if (settings.verbose > 1) {
5634 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
5635 "Value len is %d\n", vlen);
5636 }
5637
5638 if (settings.detail_enabled) {
5639 stats_prefix_record_set(key, nkey);
5640 }
5641
5642 ret = c->aiostat;
5643 c->aiostat = ENGINE_SUCCESS;
5644 c->ewouldblock = false;
5645
5646 if (ret == ENGINE_SUCCESS) {
5647 ret = settings.engine.v1->allocate(settings.engine.v0, c,
5648 &it, key, nkey,
5649 vlen, 0, 0,
5650 c->binary_header.request.datatype);
5651 if (ret == ENGINE_SUCCESS && !settings.engine.v1->get_item_info(settings.engine.v0,
5652 c, it,
5653 (void*)&info)) {
5654 settings.engine.v1->release(settings.engine.v0, c, it);
5655 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINTERNAL, 0);
5656 return;
5657 }
5658 }
5659
5660 switch (ret) {
5661 case ENGINE_SUCCESS:
5662 item_set_cas(c, it, c->binary_header.request.cas);
5663
5664 switch (c->cmd) {
5665 case PROTOCOL_BINARY_CMD_APPEND:
5666 c->store_op = OPERATION_APPEND;
5667 break;
5668 case PROTOCOL_BINARY_CMD_PREPEND:
5669 c->store_op = OPERATION_PREPEND;
5670 break;
5671 default:
5672 cb_assert(0);
5673 }
5674
5675 c->item = it;
5676 c->ritem = info.info.value[0].iov_base;
5677 c->rlbytes = vlen;
5678 conn_set_state(c, conn_nread);
5679 c->substate = bin_read_set_value;
5680 break;
5681 case ENGINE_EWOULDBLOCK:
5682 c->ewouldblock = true;
5683 break;
5684 case ENGINE_DISCONNECT:
5685 c->state = conn_closing;
5686 break;
5687 default:
5688 if (ret == ENGINE_E2BIG) {
5689 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_E2BIG, vlen);
5690 } else {
5691 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, vlen);
5692 }
5693 /* swallow the data line */
5694 c->write_and_go = conn_swallow;
5695 }
5696}
5697
5698static void process_bin_delete(conn *c) {
5699 ENGINE_ERROR_CODE ret;
5700 protocol_binary_request_delete* req = binary_get_request(c);
5701 char* key = binary_get_key(c);
5702 size_t nkey = c->binary_header.request.keylen;
5703 uint64_t cas = ntohll(req->message.header.request.cas);
5704 item_info_holder info;
5705 memset(&info, 0, sizeof(info));
5706
5707 info.info.nvalue = 1;
5708
5709 cb_assert(c != NULL);
5710
5711 if (settings.verbose > 1) {
5712 char buffer[1024];
5713 if (key_to_printable_buffer(buffer, sizeof(buffer), c->sfd, true,
5714 "DELETE", key, nkey) != -1) {
5715 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c, "%s\n",
5716 buffer);
5717 }
5718 }
5719
5720 ret = c->aiostat;
5721 c->aiostat = ENGINE_SUCCESS;
5722 c->ewouldblock = false;
5723
5724 if (ret == ENGINE_SUCCESS) {
5725 if (settings.detail_enabled) {
5726 stats_prefix_record_delete(key, nkey);
5727 }
5728 ret = settings.engine.v1->remove(settings.engine.v0, c, key, nkey,
5729 &cas, c->binary_header.request.vbucket);
5730 }
5731
5732 /* For some reason the SLAB_INCR tries to access this... */
5733 switch (ret) {
5734 case ENGINE_SUCCESS:
5735 c->cas = cas;
5736 write_bin_response(c, NULL, 0, 0, 0);
5737 SLAB_INCR(c, delete_hits, key, nkey);
5738 break;
5739 case ENGINE_KEY_EEXISTS:
5740 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, 0);
5741 break;
5742 case ENGINE_KEY_ENOENT:
5743 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
5744 STATS_INCR(c, delete_misses, key, nkey);
5745 break;
5746 case ENGINE_NOT_MY_VBUCKET:
5747 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET, 0);
5748 break;
5749 case ENGINE_TMPFAIL:
5750 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_ETMPFAIL, 0);
5751 break;
5752 case ENGINE_EWOULDBLOCK:
5753 c->ewouldblock = true;
5754 break;
5755 default:
5756 write_bin_packet(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
5757 }
5758}
5759
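/*
 * complete_nread() runs once the byte count requested by the previous
 * bin_read_* call has arrived; c->substate selects whether those bytes are a
 * store header, an item value, SASL data or a complete packet body.
 */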
5760static void complete_nread(conn *c) {
5761 cb_assert(c != NULL);
5762 cb_assert(c->cmd >= 0);
5763
5764 switch(c->substate) {
5765 case bin_reading_set_header:
5766 if (c->cmd == PROTOCOL_BINARY_CMD_APPEND ||
5767 c->cmd == PROTOCOL_BINARY_CMD_PREPEND) {
5768 process_bin_append_prepend(c);
5769 } else {
5770 process_bin_update(c);
5771 }
5772 break;
5773 case bin_read_set_value:
5774 complete_update_bin(c);
5775 break;
5776 case bin_reading_sasl_auth:
5777 process_bin_sasl_auth(c);
5778 break;
5779 case bin_reading_sasl_auth_data:
5780 process_bin_complete_sasl_auth(c);
5781 break;
5782 case bin_reading_packet:
5783 if (c->binary_header.request.magic == PROTOCOL_BINARY_RES) {
5784 RESPONSE_HANDLER handler;
5785 handler = response_handlers[c->binary_header.request.opcode];
5786 if (handler) {
5787 handler(c);
5788 } else {
5789 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
5790 "%d: ERROR: Unsupported response packet received: %u\n",
5791 c->sfd, (unsigned int)c->binary_header.request.opcode);
5792 conn_set_state(c, conn_closing);
5793 }
5794 } else {
5795 process_bin_packet(c);
5796 }
5797 break;
5798 default:
5799 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
5800 "Not handling substate %d\n", c->substate);
5801 abort();
5802 }
5803}
5804
5805static void reset_cmd_handler(conn *c) {
5806 c->sbytes = 0;
5807 c->cmd = -1;
5808 c->substate = bin_no_state;
5809 if (c->item != NULL) {
5810 settings.engine.v1->release(settings.engine.v0, c, c->item);
5811 c->item = NULL;
5812 }
5813 conn_shrink(c);
5814 if (c->rbytes > 0) {
5815 conn_set_state(c, conn_parse_cmd);
5816 } else {
5817 conn_set_state(c, conn_waiting);
5818 }
5819}
5820
5821/* set up a connection to write a buffer then free it, used for stats */
5822static void write_and_free(conn *c, char *buf, size_t bytes) {
5823 if (buf) {
5824 c->write_and_free = buf;
5825 c->wcurr = buf;
5826 c->wbytes = (uint32_t)bytes;
5827 conn_set_state(c, conn_write);
5828 c->write_and_go = conn_new_cmd;
5829 } else {
5830 conn_set_state(c, conn_closing);
5831 }
5832}
5833
5834void append_stat(const char *name, ADD_STAT add_stats, conn *c,
5835 const char *fmt, ...) {
5836 char val_str[STAT_VAL_LEN];
5837 int vlen;
5838 va_list ap;
5839
5840 cb_assert(name);
5841 cb_assert(add_stats);
5842 cb_assert(c);
5843 cb_assert(fmt);
5844
5845 va_start(ap, fmt);
5846 vlen = vsnprintf(val_str, sizeof(val_str) - 1, fmt, ap);
5847 va_end(ap);
5848
5849 add_stats(name, (uint16_t)strlen(name), val_str, vlen, c);
5850}
5851
5852static void aggregate_callback(void *in, void *out) {
5853 threadlocal_stats_aggregate(in, out);
5854}
5855
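/*
 * Statistics are kept per worker thread. If the engine provides an
 * aggregate_stats callback it drives the aggregation (via aggregate_callback
 * above); otherwise threadlocal_stats_aggregate() folds the per-thread
 * counters together, and slab_stats_aggregate() then collapses the
 * per-slab-class counters for reporting.
 */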
5856/* return server specific stats only */
5857static void server_stats(ADD_STAT add_stats, conn *c, bool aggregate) {
5858#ifdef WIN32
5859 long pid = GetCurrentProcessId();
5860#else
5861 struct rusage usage;
5862 long pid = (long)getpid();
5863#endif
5864 struct slab_stats slab_stats;
5865 char stat_key[1024];
5866 int i;
5867 struct tap_stats ts;
5868 rel_time_t now = current_time;
5869
5870 struct thread_stats thread_stats;
5871 threadlocal_stats_clear(&thread_stats);
5872
5873 if (aggregate && settings.engine.v1->aggregate_stats != NULL) {
5874 settings.engine.v1->aggregate_stats(settings.engine.v0,
5875 (const void *)c,
5876 aggregate_callback,
5877 &thread_stats);
5878 } else {
5879 threadlocal_stats_aggregate(get_independent_stats(c),
5880 &thread_stats);
5881 }
5882
5883 slab_stats_aggregate(&thread_stats, &slab_stats);
5884
5885#ifndef WIN32
5886 getrusage(RUSAGE_SELF, &usage);
5887#endif
5888
5889 STATS_LOCK();
5890
5891 APPEND_STAT("pid", "%lu", pid);
5892 APPEND_STAT("uptime", "%u", now);
5893 APPEND_STAT("time", "%ld", now + (long)process_started);
5894 APPEND_STAT("version", "%s", get_server_version());
5895 APPEND_STAT("memcached_version", "%s", MEMCACHED_VERSION);
5896 APPEND_STAT("libevent", "%s", event_get_version());
5897 APPEND_STAT("pointer_size", "%d", (int)(8 * sizeof(void *)));
5898
5899#ifndef WIN32
5900 append_stat("rusage_user", add_stats, c, "%ld.%06ld",
5901 (long)usage.ru_utime.tv_sec,
5902 (long)usage.ru_utime.tv_usec);
5903 append_stat("rusage_system", add_stats, c, "%ld.%06ld",
5904 (long)usage.ru_stime.tv_sec,
5905 (long)usage.ru_stime.tv_usec);
5906#endif
5907
5908 APPEND_STAT("daemon_connections", "%u", stats.daemon_conns);
5909 APPEND_STAT("curr_connections", "%u", stats.curr_conns);
5910 for (i = 0; i < settings.num_interfaces; ++i) {
5911 sprintf(stat_key, "%s", "max_conns_on_port_");
5912 sprintf(stat_key + strlen(stat_key), "%d", stats.listening_ports[i].port);
5913 APPEND_STAT(stat_key, "%d", stats.listening_ports[i].maxconns);
5914 sprintf(stat_key, "%s", "curr_conns_on_port_");
5915 sprintf(stat_key + strlen(stat_key), "%d", stats.listening_ports[i].port);
5916 APPEND_STAT(stat_key, "%d", stats.listening_ports[i].curr_conns);
5917 }
5918 APPEND_STAT("total_connections", "%u", stats.total_conns);
5919 APPEND_STAT("connection_structures", "%u", stats.conn_structs);
5920 APPEND_STAT("cmd_get", "%"PRIu64, thread_stats.cmd_get);
5921 APPEND_STAT("cmd_set", "%"PRIu64, slab_stats.cmd_set);
5922 APPEND_STAT("cmd_flush", "%"PRIu64, thread_stats.cmd_flush);
5923 APPEND_STAT("auth_cmds", "%"PRIu64, thread_stats.auth_cmds);
5924 APPEND_STAT("auth_errors", "%"PRIu64, thread_stats.auth_errors);
5925 APPEND_STAT("get_hits", "%"PRIu64, slab_stats.get_hits);
5926 APPEND_STAT("get_misses", "%"PRIu64, thread_stats.get_misses);
5927 APPEND_STAT("delete_misses", "%"PRIu64, thread_stats.delete_misses);
5928 APPEND_STAT("delete_hits", "%"PRIu64, slab_stats.delete_hits);
5929 APPEND_STAT("incr_misses", "%"PRIu64, thread_stats.incr_misses);
5930 APPEND_STAT("incr_hits", "%"PRIu64, thread_stats.incr_hits);
5931 APPEND_STAT("decr_misses", "%"PRIu64, thread_stats.decr_misses);
5932 APPEND_STAT("decr_hits", "%"PRIu64, thread_stats.decr_hits);
5933 APPEND_STAT("cas_misses", "%"PRIu64, thread_stats.cas_misses);
5934 APPEND_STAT("cas_hits", "%"PRIu64, slab_stats.cas_hits);
5935 APPEND_STAT("cas_badval", "%"PRIu64, slab_stats.cas_badval);
5936 APPEND_STAT("bytes_read", "%"PRIu64, thread_stats.bytes_read);
5937 APPEND_STAT("bytes_written", "%"PRIu64, thread_stats.bytes_written);
5938 APPEND_STAT("accepting_conns", "%u", is_listen_disabled() ? 0 : 1);
5939 APPEND_STAT("listen_disabled_num", "%"PRIu64, get_listen_disabled_num());
5940 APPEND_STAT("rejected_conns", "%" PRIu64, (uint64_t)stats.rejected_conns);
5941 APPEND_STAT("threads", "%d", settings.num_threads);
5942 APPEND_STAT("conn_yields", "%" PRIu64, (uint64_t)thread_stats.conn_yields);
5943 STATS_UNLOCK();
5944
5945 /*
5946 * Add tap stats (only if non-zero)
5947 */
5948 cb_mutex_enter(&tap_stats.mutex);
5949 ts = tap_stats;
5950 cb_mutex_exit(&tap_stats.mutex);
5951
5952 if (ts.sent.connect) {
5953 APPEND_STAT("tap_connect_sent", "%"PRIu64, ts.sent.connect);
5954 }
5955 if (ts.sent.mutation) {
5956 APPEND_STAT("tap_mutation_sent", "%"PRIu64, ts.sent.mutation);
5957 }
5958 if (ts.sent.checkpoint_start) {
5959 APPEND_STAT("tap_checkpoint_start_sent", "%"PRIu64, ts.sent.checkpoint_start);
5960 }
5961 if (ts.sent.checkpoint_end) {
5962 APPEND_STAT("tap_checkpoint_end_sent", "%"PRIu64, ts.sent.checkpoint_end);
5963 }
5964 if (ts.sent.delete) {
5965 APPEND_STAT("tap_delete_sent", "%"PRIu64, ts.sent.delete);
5966 }
5967 if (ts.sent.flush) {
5968 APPEND_STAT("tap_flush_sent", "%"PRIu64, ts.sent.flush);
5969 }
5970 if (ts.sent.opaque) {
5971 APPEND_STAT("tap_opaque_sent", "%"PRIu64, ts.sent.opaque);
5972 }
5973 if (ts.sent.vbucket_set) {
5974 APPEND_STAT("tap_vbucket_set_sent", "%"PRIu64,
5975 ts.sent.vbucket_set);
5976 }
5977 if (ts.received.connect) {
5978 APPEND_STAT("tap_connect_received", "%"PRIu64, ts.received.connect);
5979 }
5980 if (ts.received.mutation) {
5981 APPEND_STAT("tap_mutation_received", "%"PRIu64, ts.received.mutation);
5982 }
5983 if (ts.received.checkpoint_start) {
5984 APPEND_STAT("tap_checkpoint_start_received", "%"PRIu64, ts.received.checkpoint_start);
5985 }
5986 if (ts.received.checkpoint_end) {
5987 APPEND_STAT("tap_checkpoint_end_received", "%"PRIu64, ts.received.checkpoint_end);
5988 }
5989 if (ts.received.delete) {
5990 APPEND_STAT("tap_delete_received", "%"PRIu64, ts.received.delete);
5991 }
5992 if (ts.received.flush) {
5993 APPEND_STAT("tap_flush_received", "%"PRIu64, ts.received.flush);
5994 }
5995 if (ts.received.opaque) {
5996 APPEND_STAT("tap_opaque_received", "%"PRIu64, ts.received.opaque);
5997 }
5998 if (ts.received.vbucket_set) {
5999 APPEND_STAT("tap_vbucket_set_received", "%"PRIu64,
6000 ts.received.vbucket_set);
6001 }
6002}
6003
6004static void process_stat_settings(ADD_STAT add_stats, void *c) {
6005 int ii;
6006 cb_assert(add_stats);
6007 APPEND_STAT("maxconns", "%d", settings.maxconns);
6008
6009 for (ii = 0; ii < settings.num_interfaces; ++ii) {
6010 char interface[1024];
6011 int offset;
6012 if (settings.interfaces[ii].host == NULL) {
6013 offset = sprintf(interface, "interface-*:%u", settings.interfaces[ii].port);
6014 } else {
6015 offset = snprintf(interface, sizeof(interface), "interface-%s:%u",
6016 settings.interfaces[ii].host,
6017 settings.interfaces[ii].port);
6018 }
6019
6020 snprintf(interface + offset, sizeof(interface) - offset, "-maxconn");
6021 APPEND_STAT(interface, "%u", settings.interfaces[ii].maxconn);
6022 snprintf(interface + offset, sizeof(interface) - offset, "-backlog");
6023 APPEND_STAT(interface, "%u", settings.interfaces[ii].backlog);
6024 snprintf(interface + offset, sizeof(interface) - offset, "-ipv4");
6025 APPEND_STAT(interface, "%s", settings.interfaces[ii].ipv4 ?
6026 "true" : "false");
6027 snprintf(interface + offset, sizeof(interface) - offset, "-ipv6");
6028 APPEND_STAT(interface, "%s", settings.interfaces[ii].ipv6 ?
6029 "true" : "false");
6030
6031 snprintf(interface + offset, sizeof(interface) - offset,
6032 "-tcp_nodelay");
6033 APPEND_STAT(interface, "%s", settings.interfaces[ii].tcp_nodelay ?
6034 "true" : "false");
6035
6036 if (settings.interfaces[ii].ssl.key) {
6037 snprintf(interface + offset, sizeof(interface) - offset,
6038 "-ssl-pkey");
6039 APPEND_STAT(interface, "%s", settings.interfaces[ii].ssl.key);
6040 snprintf(interface + offset, sizeof(interface) - offset,
6041 "-ssl-cert");
6042 APPEND_STAT(interface, "%s", settings.interfaces[ii].ssl.cert);
6043 } else {
6044 snprintf(interface + offset, sizeof(interface) - offset,
6045 "-ssl");
6046 APPEND_STAT(interface, "%s", "false");
6047 }
6048 }
6049
6050 APPEND_STAT("verbosity", "%d", settings.verbose);
6051 APPEND_STAT("num_threads", "%d", settings.num_threads);
6052 APPEND_STAT("stat_key_prefix", "%c", settings.prefix_delimiter);
6053 APPEND_STAT("detail_enabled", "%s",
6054 settings.detail_enabled ? "yes" : "no");
6055 APPEND_STAT("allow_detailed", "%s",
6056 settings.allow_detailed ? "yes" : "no");
6057 APPEND_STAT("reqs_per_event", "%d", settings.reqs_per_event);
6058 APPEND_STAT("reqs_per_tap_event", "%d", settings.reqs_per_tap_event);
6059 APPEND_STAT("auth_enabled_sasl", "%s", "yes");
6060
6061 APPEND_STAT("auth_sasl_engine", "%s", "cbsasl");
6062 APPEND_STAT("auth_required_sasl", "%s", settings.require_sasl ? "yes" : "no");
6063 {
6064 EXTENSION_DAEMON_DESCRIPTOR *ptr;
6065 for (ptr = settings.extensions.daemons; ptr != NULL; ptr = ptr->next) {
6066 APPEND_STAT("extension", "%s", ptr->get_name());
6067 }
6068 }
6069
6070 APPEND_STAT("logger", "%s", settings.extensions.logger->get_name());
6071 {
6072 EXTENSION_BINARY_PROTOCOL_DESCRIPTOR *ptr;
6073 for (ptr = settings.extensions.binary; ptr != NULL; ptr = ptr->next) {
6074 APPEND_STAT("binary_extension", "%s", ptr->get_name());
6075 }
6076 }
6077
6078 if (settings.config) {
6079 add_stats("config", (uint16_t)strlen("config"),
6080 settings.config, strlen(settings.config), c);
6081 }
6082}
6083
6084/*
6085 * if we have a complete line in the buffer, process it.
6086 */
6087static int try_read_command(conn *c) {
6088 cb_assert(c != NULL);
6089 cb_assert(c->rcurr <= (c->rbuf + c->rsize));
6090 cb_assert(c->rbytes > 0);
6091
6092 /* Do we have the complete packet header? */
6093 if (c->rbytes < sizeof(c->binary_header)) {
6094 /* need more data! */
6095 return 0;
6096 } else {
6097#ifdef NEED_ALIGN
6098 if (((long)(c->rcurr)) % 8 != 0) {
6099 /* must realign input buffer */
6100 memmove(c->rbuf, c->rcurr, c->rbytes);
6101 c->rcurr = c->rbuf;
6102 if (settings.verbose > 1) {
6103 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
6104 "%d: Realign input buffer\n", c->sfd);
6105 }
6106 }
6107#endif
6108 protocol_binary_request_header* req;
6109 req = (protocol_binary_request_header*)c->rcurr;
6110
6111 if (settings.verbose > 1) {
6112 /* Dump the packet before we convert it to host order */
6113 char buffer[1024];
6114 ssize_t nw;
6115 nw = bytes_to_output_string(buffer, sizeof(buffer), c->sfd,
6116 true, "Read binary protocol data:",
6117 (const char*)req->bytes,
6118 sizeof(req->bytes));
6119 if (nw != -1) {
6120 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
6121 "%s", buffer);
6122 }
6123 }
6124
6125 c->binary_header = *req;
6126 c->binary_header.request.keylen = ntohs(req->request.keylen);
6127 c->binary_header.request.bodylen = ntohl(req->request.bodylen);
6128 c->binary_header.request.vbucket = ntohs(req->request.vbucket);
6129 c->binary_header.request.cas = ntohll(req->request.cas);
6130
6131 if (c->binary_header.request.magic != PROTOCOL_BINARY_REQ &&
6132 !(c->binary_header.request.magic == PROTOCOL_BINARY_RES &&
6133 response_handlers[c->binary_header.request.opcode])) {
6134 if (settings.verbose) {
6135 if (c->binary_header.request.magic != PROTOCOL_BINARY_RES) {
6136 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
6137 "%d: Invalid magic: %x\n",
6138 c->sfd,
6139 c->binary_header.request.magic);
6140 } else {
6141 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
6142 "%d: ERROR: Unsupported response packet received: %u\n",
6143 c->sfd, (unsigned int)c->binary_header.request.opcode);
6144
6145 }
6146 }
6147 conn_set_state(c, conn_closing);
6148 return -1;
6149 }
6150
6151 c->msgcurr = 0;
6152 c->msgused = 0;
6153 c->iovused = 0;
6154 if (add_msghdr(c) != 0) {
6155 conn_set_state(c, conn_closing);
6156 return -1;
6157 }
6158
6159 c->cmd = c->binary_header.request.opcode;
6160 c->keylen = c->binary_header.request.keylen;
6161 c->opaque = c->binary_header.request.opaque;
6162 /* clear the returned cas value */
6163 c->cas = 0;
6164
6165 dispatch_bin_command(c);
6166
6167 c->rbytes -= sizeof(c->binary_header);
6168 c->rcurr += sizeof(c->binary_header);
6169 }
6170
6171 return 1;
6172}
6173
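/*
 * SSL plumbing: OpenSSL exchanges ciphertext with the c->ssl.network BIO
 * rather than with the socket. drain_bio_send_pipe() moves ciphertext queued
 * in that BIO out to the socket, and drain_bio_recv_pipe() feeds bytes
 * received from the socket back into it, so SSL_read()/SSL_write() never
 * touch the file descriptor directly.
 */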
6174static void drain_bio_send_pipe(conn *c) {
6175 int n;
6176 bool stop = false;
6177
6178 do {
6179 if (c->ssl.out.current < c->ssl.out.total) {
6180#ifdef WIN32
6181 DWORD error;
6182#else
6183 int error;
6184#endif
6185 n = send(c->sfd, c->ssl.out.buffer + c->ssl.out.current,
6186 c->ssl.out.total - c->ssl.out.current, 0);
6187 if (n > 0) {
6188 c->ssl.out.current += n;
6189 if (c->ssl.out.current == c->ssl.out.total) {
6190 c->ssl.out.current = c->ssl.out.total = 0;
6191 }
6192 } else {
6193 if (n == -1) {
6194#ifdef WIN32
6195 error = WSAGetLastError();
6196#else
6197 error = errno;
6198#endif
6199 if (!is_blocking(error)) {
6200 c->ssl.error = true;
6201 }
6202 }
6203 return ;
6204 }
6205 }
6206
6207 if (c->ssl.out.total == 0) {
6208 n = BIO_read(c->ssl.network, c->ssl.out.buffer, c->ssl.out.buffsz);
6209 if (n > 0) {
6210 c->ssl.out.total = n;
6211 } else {
6212 stop = true;
6213 }
6214 }
6215 } while (!stop);
6216}
6217
6218static void drain_bio_recv_pipe(conn *c) {
6219 int n;
6220 bool stop = false;
6221
6222 stop = false;
6223 do {
6224 if (c->ssl.in.current < c->ssl.in.total) {
6225 n = BIO_write(c->ssl.network, c->ssl.in.buffer + c->ssl.in.current,
6226 c->ssl.in.total - c->ssl.in.current);
6227 if (n > 0) {
6228 c->ssl.in.current += n;
6229 if (c->ssl.in.current == c->ssl.in.total) {
6230 c->ssl.in.current = c->ssl.in.total = 0;
6231 }
6232 } else {
6233 /* Our input BIO is full, no need to grab more data from
6234 * the network at this time..
6235 */
6236 return ;
6237 }
6238 }
6239
6240 if (c->ssl.in.total < c->ssl.in.buffsz) {
6241#ifdef WIN32
6242 DWORD error;
6243#else
6244 int error;
6245#endif
6246 n = recv(c->sfd, c->ssl.in.buffer + c->ssl.in.total,
6247 c->ssl.in.buffsz - c->ssl.in.total, 0);
6248 if (n > 0) {
6249 c->ssl.in.total += n;
6250 } else {
6251 stop = true;
6252 if (n == 0) {
6253 c->ssl.error = true; /* read end shutdown */
6254 } else {
6255#ifdef WIN32
6256 error = WSAGetLastError();
6257#else
6258 error = errno;
6259#endif
6260 if (!is_blocking(error)) {
6261 c->ssl.error = true;
6262 }
6263 }
6264 }
6265 }
6266 } while (!stop);
6267}
6268
6269static int do_ssl_pre_connection(conn *c) {
6270 int r = SSL_accept(c->ssl.client);
6271 if (r == 1) {
6272 drain_bio_send_pipe(c);
6273 c->ssl.connected = true;
6274 } else {
6275 if (SSL_get_error(c->ssl.client, r) == SSL_ERROR_WANT_READ) {
6276 drain_bio_send_pipe(c);
6277 set_ewouldblock();
6278 return -1;
6279 } else {
6280 char *errmsg = malloc(8*1024);
6281 if (errmsg) {
6282 int offset = sprintf(errmsg,
6283 "SSL_accept() returned %d with error %d\n",
6284 r, SSL_get_error(c->ssl.client, r));
6285
6286 ERR_error_string_n(ERR_get_error(), errmsg + offset,
6287 8192 - offset);
6288
6289 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
6290 "%d: ERROR: %s",
6291 c->sfd, errmsg);
6292 free(errmsg);
6293 }
6294 set_econnreset();
6295 return -1;
6296 }
6297 }
6298
6299 return 0;
6300}
6301
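/*
 * do_ssl_read() behaves like a non-blocking recv(): it returns however many
 * plaintext bytes it could decrypt, prefers handing back partial data over
 * blocking, and maps SSL_ERROR_WANT_READ with an empty input BIO to
 * EWOULDBLOCK via set_ewouldblock().
 */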
6302static int do_ssl_read(conn *c, char *dest, size_t nbytes) {
6303 int ret = 0;
6304
6305 while (ret < nbytes) {
6306 int n;
6307 drain_bio_recv_pipe(c);
6308 if (c->ssl.error) {
6309 set_econnreset();
6310 return -1;
6311 }
6312 n = SSL_read(c->ssl.client, dest + ret, nbytes - ret);
6313 if (n > 0) {
6314 ret += n;
6315 } else {
6316 if (ret > 0) {
6317 /* I've gotten some data, let the user have that */
6318 return ret;
6319 }
6320
6321 if (n < 0) {
6322 int error = SSL_get_error(c->ssl.client, n);
6323 switch (error) {
6324 case SSL_ERROR_WANT_READ:
6325 /*
6326 * Drain the buffers and retry if we've got data in
6327 * our input buffers
6328 */
6329 if (c->ssl.in.current >= c->ssl.in.total) {
6330 set_ewouldblock();
6331 return -1;
6332 }
6333 break;
6334
6335 default:
6336 /*
6337 * @todo I don't know how to gracefully recover from this
6338 * let's just shut down the connection
6339 */
6340 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
6341 "%d: ERROR: SSL_read returned -1 with error %d",
6342 c->sfd, error);
6343 set_econnreset();
6344 return -1;
6345 }
6346 }
6347 }
6348 }
6349
6350 return ret;
6351}
6352
6353static int do_data_recv(conn *c, void *dest, size_t nbytes) {
6354 int res;
6355 if (c->ssl.enabled) {
6356 drain_bio_recv_pipe(c);
6357
6358 if (!c->ssl.connected) {
6359 res = do_ssl_pre_connection(c);
6360 if (res == -1) {
6361 return -1;
6362 }
6363 }
6364
6365 /* The SSL negotiation might be complete at this time */
6366 if (c->ssl.connected) {
6367 res = do_ssl_read(c, dest, nbytes);
6368 }
6369 } else {
6370 res = recv(c->sfd, dest, nbytes, 0);
6371 }
6372
6373 return res;
6374}
6375
6376static int do_ssl_write(conn *c, char *dest, size_t nbytes) {
6377 int ret = 0;
6378
6379 int chunksize = settings.bio_drain_buffer_sz;
6380
6381 while (ret < nbytes) {
6382 int n;
6383 int chunk;
6384
6385 drain_bio_send_pipe(c);
6386 if (c->ssl.error) {
6387 set_econnreset();
6388 return -1;
6389 }
6390
6391 chunk = nbytes - ret;
6392 if (chunk > chunksize) {
6393 chunk = chunksize;
6394 }
6395
6396 n = SSL_write(c->ssl.client, dest + ret, chunk);
6397 if (n > 0) {
6398 ret += n;
6399 } else {
6400 if (ret > 0) {
6401 /* We've sent some data.. let the caller have them */
6402 return ret;
6403 }
6404
6405 if (n < 0) {
6406 int error = SSL_get_error(c->ssl.client, n);
6407 switch (error) {
6408 case SSL_ERROR_WANT_WRITE:
6409 set_ewouldblock();
6410 return -1;
6411
6412 default:
6413 /*
6414 * @todo I don't know how to gracefully recover from this
6415 * let's just shut down the connection
6416 */
6417 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
6418 "%d: ERROR: SSL_write returned -1 with error %d",
6419 c->sfd, error);
6420 set_econnreset();
6421 return -1;
6422 }
6423 }
6424 }
6425 }
6426
6427 return ret;
6428}
6429
6430
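/*
 * sendmsg() cannot be used on an SSL connection, so the SSL path below walks
 * the iovec array and pushes each element through do_ssl_write(), returning
 * the number of plaintext bytes accepted so far (or -1 if nothing was
 * written).
 */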
6431static int do_data_sendmsg(conn *c, struct msghdr *m) {
6432 int res;
6433 if (c->ssl.enabled) {
6434 int ii;
6435 res = 0;
6436 for (ii = 0; ii < m->msg_iovlen; ++ii) {
6437 int n = do_ssl_write(c,
6438 m->msg_iov[ii].iov_base,
6439 m->msg_iov[ii].iov_len);
6440 if (n > 0) {
6441 res += n;
6442 } else {
6443 return res > 0 ? res : -1;
6444 }
6445 }
6446
6447 /* @todo figure out how to drain the rest of the data if we
6448 * failed to send all of it...
6449 */
6450 drain_bio_send_pipe(c);
6451 return res;
6452 } else {
6453 res = sendmsg(c->sfd, m, 0);
6454 }
6455
6456 return res;
6457}
6458
6459/*
6460 * read from network as much as we can, handle buffer overflow and connection
6461 * close.
6462 * before reading, move the remaining incomplete fragment of a command
6463 * (if any) to the beginning of the buffer.
6464 *
6465 * To protect us from someone flooding a connection with bogus data causing
6466 * the connection to eat up all available memory, break out and start looking
6467 * at the data I've got after a number of reallocs...
6468 *
6469 * @return enum try_read_result
6470 */
6471static enum try_read_result try_read_network(conn *c) {
6472 enum try_read_result gotdata = READ_NO_DATA_RECEIVED;
6473 int res;
6474 int num_allocs = 0;
6475 cb_assert(c != NULL);
6476
6477 if (c->rcurr != c->rbuf) {
6478 if (c->rbytes != 0) /* otherwise there's nothing to copy */
6479 memmove(c->rbuf, c->rcurr, c->rbytes);
6480 c->rcurr = c->rbuf;
6481 }
6482
6483 while (1) {
6484 int avail;
6485#ifdef WIN32
6486 DWORD error;
6487#else
6488 int error;
6489#endif
6490
6491 if (c->rbytes >= c->rsize) {
6492 char *new_rbuf;
6493
6494 if (num_allocs == 4) {
6495 return gotdata;
6496 }
6497 ++num_allocs;
6498 new_rbuf = realloc(c->rbuf, c->rsize * 2);
6499 if (!new_rbuf) {
6500 if (settings.verbose > 0) {
6501 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
6502 "Couldn't realloc input buffer\n");
6503 }
6504 c->rbytes = 0; /* ignore what we read */
6505 conn_set_state(c, conn_closing);
6506 return READ_MEMORY_ERROR;
6507 }
6508 c->rcurr = c->rbuf = new_rbuf;
6509 c->rsize *= 2;
6510 }
6511
6512 avail = c->rsize - c->rbytes;
6513 res = do_data_recv(c, c->rbuf + c->rbytes, avail);
6514 if (res > 0) {
6515 STATS_ADD(c, bytes_read, res);
6516 gotdata = READ_DATA_RECEIVED;
6517 c->rbytes += res;
6518 if (res == avail) {
6519 continue;
6520 } else {
6521 break;
6522 }
6523 }
6524 if (res == 0) {
6525 return READ_ERROR;
6526 }
6527 if (res == -1) {
6528#ifdef WIN32
6529 error = WSAGetLastError();
6530#else
6531 error = errno;
6532#endif
6533
6534 if (is_blocking(error)) {
6535 break;
6536 }
6537 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
6538 "%d Closing connection due to read error: %s",
6539 c->sfd,
6540 strerror(errno));
6541 return READ_ERROR;
6542 }
6543 }
6544 return gotdata;
6545}
6546
6547bool register_event(conn *c, struct timeval *timeout) {
6548 cb_assert(!c->registered_in_libevent);
6549 cb_assert(c->sfd != INVALID_SOCKET);
6550
6551 if (event_add(&c->event, timeout) == -1) {
6552 log_system_error(EXTENSION_LOG_WARNING,
6553 NULL,
6554 "Failed to add connection to libevent: %s");
6555 return false;
6556 }
6557
6558 c->registered_in_libevent = true;
6559
6560 return true;
6561}
6562
6563bool unregister_event(conn *c) {
6564 cb_assert(c->registered_in_libevent);
6565 cb_assert(c->sfd != INVALID_SOCKET);
6566
6567 if (event_del(&c->event) == -1) {
6568 return false;
6569 }
6570
6571 c->registered_in_libevent = false;
6572
6573 return true;
6574}
6575
6576bool update_event(conn *c, const int new_flags) {
6577 struct event_base *base;
6578
6579 cb_assert(c != NULL);
6580 base = c->event.ev_base;
6581
6582 if (c->ssl.enabled && c->ssl.connected && (new_flags & EV_READ)) {
6583 /*
6584 * If we want more data and we have SSL, that data might be inside
6585 * SSL's internal buffers rather than inside the socket buffer. In
6586 * that case signal an EV_READ event without actually polling the
6587 * socket.
6588 */
6589 char dummy;
6590 /* SSL_pending() will not work here despite the name */
6591 int rv = SSL_peek(c->ssl.client, &dummy, 1);
6592 if (rv > 0) {
6593 /* signal a call to the handler */
6594 event_active(&c->event, EV_READ, 0);
6595 return true;
6596 }
6597 }
6598
6599 if (c->ev_flags == new_flags) {
6600 return true;
6601 }
6602
6603 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL,
6604 "Updated event for %d to read=%s, write=%s\n",
6605 c->sfd, (new_flags & EV_READ ? "yes" : "no"),
6606 (new_flags & EV_WRITE ? "yes" : "no"));
6607
6608 if (!unregister_event(c)) {
6609 return false;
6610 }
6611
6612 event_set(&c->event, c->sfd, new_flags, event_handler, (void *)c);
6613 event_base_set(base, &c->event);
6614 c->ev_flags = new_flags;
6615
6616 return register_event(c, NULL);
6617}
6618
6619/*
6620 * Transmit the next chunk of data from our list of msgbuf structures.
6621 *
6622 * Returns:
6623 * TRANSMIT_COMPLETE All done writing.
6624 * TRANSMIT_INCOMPLETE More data remaining to write.
6625 * TRANSMIT_SOFT_ERROR Can't write any more right now.
6626 * TRANSMIT_HARD_ERROR Can't write (c->state is set to conn_closing)
6627 */
6628static enum transmit_result transmit(conn *c) {
6629 cb_assert(c != NULL);
6630
6631 while (c->msgcurr < c->msgused &&
6632 c->msglist[c->msgcurr].msg_iovlen == 0) {
6633 /* Finished writing the current msg; advance to the next. */
6634 c->msgcurr++;
6635 }
6636
6637 if (c->msgcurr < c->msgused) {
6638#ifdef WIN32
6639 DWORD error;
6640#else
6641 int error;
6642#endif
6643 ssize_t res;
6644 struct msghdr *m = &c->msglist[c->msgcurr];
6645
6646 res = do_data_sendmsg(c, m);
6647#ifdef WIN32
6648 error = WSAGetLastError();
6649#else
6650 error = errno;
6651#endif
6652 if (res > 0) {
6653 STATS_ADD(c, bytes_written, res);
6654
6655 /* We've written some of the data. Remove the completed
6656 iovec entries from the list of pending writes. */
6657 while (m->msg_iovlen > 0 && res >= m->msg_iov->iov_len) {
6658 res -= (ssize_t)m->msg_iov->iov_len;
6659 m->msg_iovlen--;
6660 m->msg_iov++;
6661 }
6662
6663 /* Might have written just part of the last iovec entry;
6664 adjust it so the next write will do the rest. */
6665 if (res > 0) {
6666 m->msg_iov->iov_base = (void*)((unsigned char*)m->msg_iov->iov_base + res);
6667 m->msg_iov->iov_len -= res;
6668 }
6669 return TRANSMIT_INCOMPLETE;
6670 }
6671
6672 if (res == -1 && is_blocking(error)) {
6673 if (!update_event(c, EV_WRITE0x04 | EV_PERSIST0x10)) {
6674 if (settings.verbose > 0) {
6675 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
6676 "Couldn't update event\n");
6677 }
6678 conn_set_state(c, conn_closing);
6679 return TRANSMIT_HARD_ERROR;
6680 }
6681 return TRANSMIT_SOFT_ERROR;
6682 }
6683 /* if res == 0 or res == -1 and error is not EAGAIN or EWOULDBLOCK,
6684 we have a real error, on which we close the connection */
6685 if (settings.verbose > 0) {
6686 if (res == -1) {
6687 log_socket_error(EXTENSION_LOG_WARNING, c,
6688 "Failed to write, and not due to blocking: %s");
6689 } else {
6690 int ii;
6691 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
6692 "%d - sendmsg returned 0\n",
6693 c->sfd);
6694 for (ii = 0; ii < m->msg_iovlen; ++ii) {
6695 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
6696 "\t%d - %zu\n",
6697 c->sfd, m->msg_iov[ii].iov_len);
6698 }
6699
6700 }
6701 }
6702
6703 conn_set_state(c, conn_closing);
6704 return TRANSMIT_HARD_ERROR;
6705 } else {
6706 if (c->ssl.enabled) {
6707 drain_bio_send_pipe(c);
6708 if (c->ssl.out.total) {
6709 if (!update_event(c, EV_WRITE0x04 | EV_PERSIST0x10)) {
6710 if (settings.verbose > 0) {
6711 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
6712 "Couldn't update event");
6713 }
6714 conn_set_state(c, conn_closing);
6715 return TRANSMIT_HARD_ERROR;
6716 }
6717 return TRANSMIT_SOFT_ERROR;
6718 }
6719 }
6720
6721 return TRANSMIT_COMPLETE;
6722 }
6723}
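
The iovec bookkeeping in transmit() above (consume the fully written entries, then shrink the partially written one) is the standard pattern for resuming short writes. A minimal standalone sketch of just that step, using plain writev() on POSIX; the helper name flush_iov is hypothetical and not part of memcached.c:

#include <stdbool.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

/* Hypothetical helper: flush 'cnt' iovecs to fd. On a short write it advances
 * the iovec array in place, mirroring the msg_iov/msg_iovlen adjustment done
 * in transmit() above. */
static bool flush_iov(int fd, struct iovec **iov, int *cnt) {
    while (*cnt > 0) {
        ssize_t n = writev(fd, *iov, *cnt);
        if (n <= 0) {
            return false;   /* error (or 0): caller inspects errno, as transmit() does */
        }
        /* Drop the iovec entries that were written completely. */
        while (*cnt > 0 && (size_t)n >= (*iov)->iov_len) {
            n -= (ssize_t)(*iov)->iov_len;
            ++*iov;
            --*cnt;
        }
        /* A partially written entry: shrink it so the next call resumes there. */
        if (n > 0) {
            (*iov)->iov_base = (char *)(*iov)->iov_base + n;
            (*iov)->iov_len -= (size_t)n;
        }
    }
    return true;
}
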
6724
6725bool_Bool conn_listening(conn *c)
6726{
6727 SOCKETint sfd;
6728 struct sockaddr_storage addr;
6729 socklen_t addrlen = sizeof(addr);
6730 int curr_conns;
6731 int port_conns;
6732 struct listening_port *port_instance;
6733
6734 if ((sfd = accept(c->sfd, (struct sockaddr *)&addr, &addrlen)) == -1) {
6735#ifdef WIN32
6736 DWORD error = WSAGetLastError();
6737#else
6738 int error = errno(*__error());
6739#endif
6740
6741 if (is_emfile(error)) {
6742 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
6743 "Too many open files\n");
6744 disable_listen();
6745 } else if (!is_blocking(error)) {
6746 log_socket_error(EXTENSION_LOG_WARNING, c,
6747 "Failed to accept new client: %s");
6748 }
6749
6750 return false0;
6751 }
6752
6753 STATS_LOCK();
6754 curr_conns = ++stats.curr_conns;
6755 port_instance = get_listening_port_instance(c->parent_port);
6756 cb_assert(port_instance)(__builtin_expect(!(port_instance), 0) ? __assert_rtn(__func__
, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 6756, "port_instance") : (void)0)
;
6757 port_conns = ++port_instance->curr_conns;
6758 STATS_UNLOCK();
6759
6760 if (curr_conns >= settings.maxconns || port_conns >= port_instance->maxconns) {
6761 STATS_LOCK();
6762 ++stats.rejected_conns;
6763 --port_instance->curr_conns;
6764 STATS_UNLOCK();
6765
6766 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
6767 "Too many open connections\n");
6768
6769 safe_close(sfd);
6770 return false0;
6771 }
6772
6773 if (evutil_make_socket_nonblocking(sfd) == -1) {
6774 STATS_LOCK();
6775 --port_instance->curr_conns;
6776 STATS_UNLOCK();
6777 safe_close(sfd);
6778 return false0;
6779 }
6780
6781 dispatch_conn_new(sfd, c->parent_port, conn_new_cmd, EV_READ0x02 | EV_PERSIST0x10,
6782 DATA_BUFFER_SIZE2048);
6783
6784 return false0;
6785}
6786
6787/**
6788 * Ship tap log to the other end. This state differs from all other states
6789 * in that it supports a full-duplex dialog. We're listening to both read
6790 * and write events from libevent most of the time. If a read event occurs we
6791 * switch to the conn_read state to read and execute the input message (that would
6792 * be an ack message from the other side). If a write event occurs we continue to
6793 * send tap log to the other end.
6794 * @param c the tap connection to drive
6795 * @return true if we should continue to process work for this connection, false
6796 * if we should start processing events for other connections.
6797 */
6798bool_Bool conn_ship_log(conn *c) {
6799 bool_Bool cont = false0;
6800 short mask = EV_READ0x02 | EV_PERSIST0x10 | EV_WRITE0x04;
6801
6802 if (c->sfd == INVALID_SOCKET-1) {
6803 return false0;
6804 }
6805
6806 if (c->which & EV_READ0x02 || c->rbytes > 0) {
6807 if (c->rbytes > 0) {
6808 if (try_read_command(c) == 0) {
6809 conn_set_state(c, conn_read);
6810 }
6811 } else {
6812 conn_set_state(c, conn_read);
6813 }
6814
6815 /* we're going to process something.. let's proceed */
6816 cont = true1;
6817
6818 /* We have a finite number of messages in the input queue */
6819 /* so let's process all of them instead of backing off after */
6820 /* reading a subset of them. */
6821        /* Why? Because every time we call ship_tap_log we try to send a */
6822        /* chunk of items. This means that if we end up in a situation */
6823        /* where we're receiving a burst of nack messages, we'd only */
6824        /* process a subset of the messages in our input queue, and it */
6825        /* would slowly grow. */
6826 c->nevents = settings.reqs_per_tap_event;
6827 } else if (c->which & EV_WRITE0x04) {
6828 --c->nevents;
6829 if (c->nevents >= 0) {
6830 c->ewouldblock = false0;
6831 if (c->upr) {
6832 ship_upr_log(c);
6833 } else {
6834 ship_tap_log(c);
6835 }
6836 if (c->ewouldblock) {
6837 mask = EV_READ0x02 | EV_PERSIST0x10;
6838 } else {
6839 cont = true1;
6840 }
6841 }
6842 }
6843
6844 if (!update_event(c, mask)) {
6845 if (settings.verbose > 0) {
6846 settings.extensions.logger->log(EXTENSION_LOG_INFO,
6847 c, "Couldn't update event\n");
6848 }
6849 conn_set_state(c, conn_closing);
6850 }
6851
6852 return cont;
6853}
6854
6855bool_Bool conn_waiting(conn *c) {
6856 if (!update_event(c, EV_READ0x02 | EV_PERSIST0x10)) {
6857 if (settings.verbose > 0) {
6858 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
6859 "Couldn't update event\n");
6860 }
6861 conn_set_state(c, conn_closing);
6862 return true1;
6863 }
6864 conn_set_state(c, conn_read);
6865 return false0;
6866}
6867
6868bool_Bool conn_read(conn *c) {
6869 int res = try_read_network(c);
6870 switch (res) {
6871 case READ_NO_DATA_RECEIVED:
6872 conn_set_state(c, conn_waiting);
6873 break;
6874 case READ_DATA_RECEIVED:
6875 conn_set_state(c, conn_parse_cmd);
6876 break;
6877 case READ_ERROR:
6878 conn_set_state(c, conn_closing);
6879 break;
6880 case READ_MEMORY_ERROR: /* Failed to allocate more memory */
6881 /* State already set by try_read_network */
6882 break;
6883 }
6884
6885 return true1;
6886}
6887
6888bool_Bool conn_parse_cmd(conn *c) {
6889 if (try_read_command(c) == 0) {
6890        /* we need more data! */
6891 conn_set_state(c, conn_waiting);
6892 }
6893
6894 return !c->ewouldblock;
6895}
6896
6897bool_Bool conn_new_cmd(conn *c) {
6898 /* Only process nreqs at a time to avoid starving other connections */
6899 c->start = 0;
6900 --c->nevents;
6901 if (c->nevents >= 0) {
6902 reset_cmd_handler(c);
6903 } else {
6904 STATS_NOKEY(c, conn_yields){ struct thread_stats *thread_stats = get_thread_stats(c); cb_mutex_enter
(&thread_stats->mutex); thread_stats->conn_yields++
; cb_mutex_exit(&thread_stats->mutex); }
;
6905 if (c->rbytes > 0) {
6906            /* We have already read data into the input buffer,
6907 so libevent will most likely not signal read events
6908               on the socket (unless more data is available). As a
6909 hack we should just put in a request to write data,
6910 because that should be possible ;-)
6911 */
6912 if (!update_event(c, EV_WRITE0x04 | EV_PERSIST0x10)) {
6913 if (settings.verbose > 0) {
6914 settings.extensions.logger->log(EXTENSION_LOG_INFO,
6915 c, "Couldn't update event\n");
6916 }
6917 conn_set_state(c, conn_closing);
6918 return true1;
6919 }
6920 }
6921 return false0;
6922 }
6923
6924 return true1;
6925}
6926
6927bool_Bool conn_swallow(conn *c) {
6928 ssize_t res;
6929#ifdef WIN32
6930 DWORD error;
6931#else
6932 int error;
6933#endif
6934 /* we are reading sbytes and throwing them away */
6935 if (c->sbytes == 0) {
6936 conn_set_state(c, conn_new_cmd);
6937 return true1;
6938 }
6939
6940 /* first check if we have leftovers in the conn_read buffer */
6941 if (c->rbytes > 0) {
6942 uint32_t tocopy = c->rbytes > c->sbytes ? c->sbytes : c->rbytes;
6943 c->sbytes -= tocopy;
6944 c->rcurr += tocopy;
6945 c->rbytes -= tocopy;
6946 return true1;
6947 }
6948
6949 /* now try reading from the socket */
6950 res = do_data_recv(c, c->rbuf, c->rsize > c->sbytes ? c->sbytes : c->rsize);
6951#ifdef WIN32
6952 error = WSAGetLastError();
6953#else
6954 error = errno(*__error());
6955#endif
6956 if (res > 0) {
6957 STATS_ADD(c, bytes_read, res){ struct thread_stats *thread_stats = get_thread_stats(c); cb_mutex_enter
(&thread_stats->mutex); thread_stats->bytes_read +=
res; cb_mutex_exit(&thread_stats->mutex); }
;
6958 c->sbytes -= res;
6959 return true1;
6960 }
6961 if (res == 0) { /* end of stream */
6962 conn_set_state(c, conn_closing);
6963 return true1;
6964 }
6965 if (res == -1 && is_blocking(error)) {
6966 if (!update_event(c, EV_READ0x02 | EV_PERSIST0x10)) {
6967 if (settings.verbose > 0) {
6968 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
6969 "Couldn't update event\n");
6970 }
6971 conn_set_state(c, conn_closing);
6972 return true1;
6973 }
6974 return false0;
6975 }
6976
6977 /* otherwise we have a real error, on which we close the connection */
6978 if (!is_closed_conn(error)) {
6979 char msg[80];
6980 snprintf(msg, sizeof(msg),__builtin___snprintf_chk (msg, sizeof(msg), 0, __builtin_object_size
(msg, 2 > 1 ? 1 : 0), "%d Failed to read, and not due to blocking (%%s)"
, (int)c->sfd)
6981 "%d Failed to read, and not due to blocking (%%s)",__builtin___snprintf_chk (msg, sizeof(msg), 0, __builtin_object_size
(msg, 2 > 1 ? 1 : 0), "%d Failed to read, and not due to blocking (%%s)"
, (int)c->sfd)
6982 (int)c->sfd)__builtin___snprintf_chk (msg, sizeof(msg), 0, __builtin_object_size
(msg, 2 > 1 ? 1 : 0), "%d Failed to read, and not due to blocking (%%s)"
, (int)c->sfd)
;
6983
6984 log_socket_error(EXTENSION_LOG_INFO, c, msg);
6985 }
6986
6987 conn_set_state(c, conn_closing);
6988
6989 return true1;
6990}
6991
6992bool_Bool conn_nread(conn *c) {
6993 ssize_t res;
6994#ifdef WIN32
6995 DWORD error;
6996#else
6997 int error;
6998#endif
6999
7000 if (c->rlbytes == 0) {
7001 bool_Bool block = c->ewouldblock = false0;
7002 complete_nread(c);
7003 if (c->ewouldblock) {
7004 unregister_event(c);
7005 block = true1;
7006 }
7007 return !block;
7008 }
7009 /* first check if we have leftovers in the conn_read buffer */
7010 if (c->rbytes > 0) {
7011 uint32_t tocopy = c->rbytes > c->rlbytes ? c->rlbytes : c->rbytes;
7012 if (c->ritem != c->rcurr) {
7013 memmove(c->ritem, c->rcurr, tocopy)__builtin___memmove_chk (c->ritem, c->rcurr, tocopy, __builtin_object_size
(c->ritem, 0))
;
7014 }
7015 c->ritem += tocopy;
7016 c->rlbytes -= tocopy;
7017 c->rcurr += tocopy;
7018 c->rbytes -= tocopy;
7019 if (c->rlbytes == 0) {
7020 return true1;
7021 }
7022 }
7023
7024 /* now try reading from the socket */
7025 res = do_data_recv(c, c->ritem, c->rlbytes);
7026#ifdef WIN32
7027 error = WSAGetLastError();
7028#else
7029 error = errno(*__error());
7030#endif
7031 if (res > 0) {
7032 STATS_ADD(c, bytes_read, res){ struct thread_stats *thread_stats = get_thread_stats(c); cb_mutex_enter
(&thread_stats->mutex); thread_stats->bytes_read +=
res; cb_mutex_exit(&thread_stats->mutex); }
;
7033 if (c->rcurr == c->ritem) {
7034 c->rcurr += res;
7035 }
7036 c->ritem += res;
7037 c->rlbytes -= res;
7038 return true1;
7039 }
7040 if (res == 0) { /* end of stream */
7041 conn_set_state(c, conn_closing);
7042 return true1;
7043 }
7044
7045 if (res == -1 && is_blocking(error)) {
7046 if (!update_event(c, EV_READ0x02 | EV_PERSIST0x10)) {
7047 if (settings.verbose > 0) {
7048 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
7049 "Couldn't update event\n");
7050 }
7051 conn_set_state(c, conn_closing);
7052 return true1;
7053 }
7054 return false0;
7055 }
7056
7057 /* otherwise we have a real error, on which we close the connection */
7058 if (!is_closed_conn(error)) {
7059 settings.extensions.logger->log(EXTENSION_LOG_WARNING, c,
7060 "%d Failed to read, and not due to blocking:\n"
7061 "errno: %d %s \n"
7062 "rcurr=%lx ritem=%lx rbuf=%lx rlbytes=%d rsize=%d\n",
7063 c->sfd, errno(*__error()), strerror(errno(*__error())),
7064 (long)c->rcurr, (long)c->ritem, (long)c->rbuf,
7065 (int)c->rlbytes, (int)c->rsize);
7066 }
7067 conn_set_state(c, conn_closing);
7068 return true1;
7069}
7070
7071bool_Bool conn_write(conn *c) {
7072 /*
7073 * We want to write out a simple response. If we haven't already,
7074 * assemble it into a msgbuf list (this will be a single-entry
7075 * list for TCP).
7076 */
7077 if (c->iovused == 0) {
7078 if (add_iov(c, c->wcurr, c->wbytes) != 0) {
7079 if (settings.verbose > 0) {
7080 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
7081 "Couldn't build response\n");
7082 }
7083 conn_set_state(c, conn_closing);
7084 return true1;
7085 }
7086 }
7087
7088 return conn_mwrite(c);
7089}
7090
7091bool_Bool conn_mwrite(conn *c) {
7092 switch (transmit(c)) {
7093 case TRANSMIT_COMPLETE:
7094 if (c->state == conn_mwrite) {
7095 while (c->ileft > 0) {
7096 item *it = *(c->icurr);
7097 settings.engine.v1->release(settings.engine.v0, c, it);
7098 c->icurr++;
7099 c->ileft--;
7100 }
7101 while (c->temp_alloc_left > 0) {
7102 char *temp_alloc_ = *(c->temp_alloc_curr);
7103 free(temp_alloc_);
7104 c->temp_alloc_curr++;
7105 c->temp_alloc_left--;
7106 }
7107 /* XXX: I don't know why this wasn't the general case */
7108 conn_set_state(c, c->write_and_go);
7109 } else if (c->state == conn_write) {
7110 if (c->write_and_free) {
7111 free(c->write_and_free);
7112 c->write_and_free = 0;
7113 }
7114 conn_set_state(c, c->write_and_go);
7115 } else {
7116 if (settings.verbose > 0) {
7117 settings.extensions.logger->log(EXTENSION_LOG_INFO, c,
7118 "Unexpected state %d\n", c->state);
7119 }
7120 conn_set_state(c, conn_closing);
7121 }
7122 break;
7123
7124 case TRANSMIT_INCOMPLETE:
7125 case TRANSMIT_HARD_ERROR:
7126 break; /* Continue in state machine. */
7127
7128 case TRANSMIT_SOFT_ERROR:
7129 return false0;
7130 }
7131
7132 return true1;
7133}
7134
7135bool_Bool conn_pending_close(conn *c) {
7136 cb_assert(c->sfd == INVALID_SOCKET)(__builtin_expect(!(c->sfd == -1), 0) ? __assert_rtn(__func__
, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7136, "c->sfd == -1") : (void)0)
;
7137 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
7138 "Awaiting clients to release the cookie (pending close for %p)",
7139 (void*)c);
7140 /*
7141 * tell the tap connection that we're disconnecting it now,
7142 * but give it a grace period
7143 */
7144 perform_callbacks(ON_DISCONNECT, NULL((void*)0), c);
7145
7146 if (c->refcount > 1) {
7147 return false0;
7148 }
7149
7150 conn_set_state(c, conn_immediate_close);
7151 return true1;
7152}
7153
7154bool_Bool conn_immediate_close(conn *c) {
7155 struct listening_port *port_instance;
7156 cb_assert(c->sfd == INVALID_SOCKET)(__builtin_expect(!(c->sfd == -1), 0) ? __assert_rtn(__func__
, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7156, "c->sfd == -1") : (void)0)
;
7157 settings.extensions.logger->log(EXTENSION_LOG_DETAIL, c,
7158 "Releasing connection %p",
7159 c);
7160
7161 STATS_LOCK();
7162 port_instance = get_listening_port_instance(c->parent_port);
7163 cb_assert(port_instance)(__builtin_expect(!(port_instance), 0) ? __assert_rtn(__func__
, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7163, "port_instance") : (void)0)
;
7164 --port_instance->curr_conns;
7165 STATS_UNLOCK();
7166
7167 perform_callbacks(ON_DISCONNECT, NULL((void*)0), c);
7168 conn_close(c);
7169
7170 return false0;
7171}
7172
7173bool_Bool conn_closing(conn *c) {
7174 /* We don't want any network notifications anymore.. */
7175 unregister_event(c);
7176 safe_close(c->sfd);
7177 c->sfd = INVALID_SOCKET-1;
7178
7179 if (c->refcount > 1 || c->ewouldblock) {
7180 conn_set_state(c, conn_pending_close);
7181 } else {
7182 conn_set_state(c, conn_immediate_close);
7183 }
7184 return true1;
7185}
7186
7187bool_Bool conn_setup_tap_stream(conn *c) {
7188 process_bin_tap_connect(c);
7189 return true1;
7190}
7191
7192bool_Bool conn_refresh_cbsasl(conn *c) {
7193 ENGINE_ERROR_CODE ret = c->aiostat;
7194 c->aiostat = ENGINE_SUCCESS;
7195 c->ewouldblock = false0;
7196
7197 cb_assert(ret != ENGINE_EWOULDBLOCK)(__builtin_expect(!(ret != ENGINE_EWOULDBLOCK), 0) ? __assert_rtn
(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7197, "ret != ENGINE_EWOULDBLOCK") : (void)0)
;
7198
7199 switch (ret) {
7200 case ENGINE_SUCCESS:
7201 write_bin_response(c, NULL((void*)0), 0, 0, 0);
7202 break;
7203 case ENGINE_DISCONNECT:
7204 conn_set_state(c, conn_closing);
7205 break;
7206 default:
7207 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
7208 }
7209
7210 return true1;
7211}
7212
7213bool_Bool conn_refresh_ssl_certs(conn *c) {
7214 ENGINE_ERROR_CODE ret = c->aiostat;
7215 c->aiostat = ENGINE_SUCCESS;
7216 c->ewouldblock = false0;
7217
7218 cb_assert(ret != ENGINE_EWOULDBLOCK)(__builtin_expect(!(ret != ENGINE_EWOULDBLOCK), 0) ? __assert_rtn
(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7218, "ret != ENGINE_EWOULDBLOCK") : (void)0)
;
7219
7220 switch (ret) {
7221 case ENGINE_SUCCESS:
7222 write_bin_response(c, NULL((void*)0), 0, 0, 0);
7223 break;
7224 case ENGINE_DISCONNECT:
7225 conn_set_state(c, conn_closing);
7226 break;
7227 default:
7228 write_bin_packet(c, engine_error_2_protocol_error(ret), 0);
7229 }
7230
7231 return true1;
7232}
7233
7234void event_handler(evutil_socket_tint fd, short which, void *arg) {
7235 conn *c = arg;
7236 LIBEVENT_THREAD *thr;
7237
7238 cb_assert(c != NULL)(__builtin_expect(!(c != ((void*)0)), 0) ? __assert_rtn(__func__
, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7238, "c != ((void*)0)") : (void)0)
;
7239
7240 if (memcached_shutdown) {
7241 event_base_loopbreak(c->event.ev_base);
7242 return ;
7243 }
7244
7245 thr = c->thread;
7246 if (!is_listen_thread()) {
7247 cb_assert(thr)(__builtin_expect(!(thr), 0) ? __assert_rtn(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7247, "thr") : (void)0)
;
7248 LOCK_THREAD(thr)cb_mutex_enter(&thr->mutex); (__builtin_expect(!(thr->
is_locked == 0), 0) ? __assert_rtn(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7248, "thr->is_locked == 0") : (void)0); thr->is_locked
= 1;
;
7249 /*
7250         * Remove the connection from the list of pending io's (in case
7251         * the object was scheduled to run in the dispatcher before the
7252         * callback for the worker thread is executed).
7253 */
7254 c->thread->pending_io = list_remove(c->thread->pending_io, c);
7255 }
7256
7257 c->which = which;
7258
7259 /* sanity */
7260 cb_assert(fd == c->sfd)(__builtin_expect(!(fd == c->sfd), 0) ? __assert_rtn(__func__
, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7260, "fd == c->sfd") : (void)0)
;
7261 perform_callbacks(ON_SWITCH_CONN, c, c);
7262
7263
7264 c->nevents = settings.reqs_per_event;
7265 if (c->state == conn_ship_log) {
7266 c->nevents = settings.reqs_per_tap_event;
7267 }
7268
7269 do {
7270 if (settings.verbose) {
7271 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, c,
7272 "%d - Running task: (%s)\n",
7273 c->sfd, state_text(c->state));
7274 }
7275 } while (c->state(c));
7276
7277 if (thr) {
7278 UNLOCK_THREAD(thr)(__builtin_expect(!(thr->is_locked == 1), 0) ? __assert_rtn
(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7278, "thr->is_locked == 1") : (void)0); thr->is_locked
= 0; cb_mutex_exit(&thr->mutex);
;
7279 }
7280}
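
event_handler() drives the connection state machine with a simple function-pointer loop: each conn_* state above is a handler returning true to keep running in the same callback and false to yield back to libevent, and the do { } while (c->state(c)) loop keeps going until a state yields. A self-contained sketch of that pattern; the types and names (fsm, st_work, st_done) are illustrative only:

#include <stdbool.h>
#include <stdio.h>

struct fsm;
typedef bool (*state_fn)(struct fsm *);  /* analogous to the conn state handlers */

struct fsm {
    state_fn state;   /* current state, switched like conn_set_state() */
    int budget;       /* like c->nevents: bounds work done per callback */
};

static bool st_done(struct fsm *f) {
    (void)f;
    return false;              /* yield back to the event loop */
}

static bool st_work(struct fsm *f) {
    if (--f->budget < 0) {
        return false;          /* out of budget: let other connections run */
    }
    puts("processing one request");
    f->state = st_done;
    return true;               /* keep driving the machine in this callback */
}

int main(void) {
    struct fsm f = { st_work, 2 };
    /* Mirrors the do { ... } while (c->state(c)); loop in event_handler(). */
    while (f.state(&f)) {
    }
    return 0;
}
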
7281
7282static void dispatch_event_handler(evutil_socket_tint fd, short which, void *arg) {
7283 char buffer[80];
7284 ssize_t nr = recv(fd, buffer, sizeof(buffer), 0);
7285
7286 if (nr != -1 && is_listen_disabled()) {
7287 bool_Bool enable = false0;
7288 cb_mutex_enter(&listen_state.mutex);
7289 listen_state.count -= nr;
7290 if (listen_state.count <= 0) {
7291 enable = true1;
7292 listen_state.disabled = false0;
7293 }
7294 cb_mutex_exit(&listen_state.mutex);
7295 if (enable) {
7296 conn *next;
7297 for (next = listen_conn; next; next = next->next) {
7298 int backlog = 1024;
7299 int ii;
7300 update_event(next, EV_READ0x02 | EV_PERSIST0x10);
7301 for (ii = 0; ii < settings.num_interfaces; ++ii) {
7302 if (next->parent_port == settings.interfaces[ii].port) {
7303 backlog = settings.interfaces[ii].backlog;
7304 break;
7305 }
7306 }
7307
7308 if (listen(next->sfd, backlog) != 0) {
7309 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7310                                                     "listen() failed: %s",
7311 strerror(errno(*__error())));
7312 }
7313 }
7314 }
7315 }
7316}
7317
7318/*
7319 * Sets a socket's send buffer size to the maximum allowed by the system.
7320 */
7321static void maximize_sndbuf(const SOCKETint sfd) {
7322 socklen_t intsize = sizeof(int);
7323 int last_good = 0;
7324 int min, max, avg;
7325 int old_size;
7326
7327 /* Start with the default size. */
7328 if (getsockopt(sfd, SOL_SOCKET0xffff, SO_SNDBUF0x1001, (void *)&old_size, &intsize) != 0) {
7329 if (settings.verbose > 0) {
7330 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7331 "getsockopt(SO_SNDBUF): %s",
7332 strerror(errno(*__error())));
7333 }
7334
7335 return;
7336 }
7337
7338 /* Binary-search for the real maximum. */
7339 min = old_size;
7340 max = MAX_SENDBUF_SIZE(256 * 1024 * 1024);
7341
7342 while (min <= max) {
7343 avg = ((unsigned int)(min + max)) / 2;
7344 if (setsockopt(sfd, SOL_SOCKET0xffff, SO_SNDBUF0x1001, (void *)&avg, intsize) == 0) {
7345 last_good = avg;
7346 min = avg + 1;
7347 } else {
7348 max = avg - 1;
7349 }
7350 }
7351
7352 if (settings.verbose > 1) {
7353 settings.extensions.logger->log(EXTENSION_LOG_DEBUG, NULL((void*)0),
7354 "<%d send buffer was %d, now %d\n", sfd, old_size, last_good);
7355 }
7356}
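
maximize_sndbuf() binary-searches for the largest SO_SNDBUF value the kernel will accept. To confirm what actually took effect afterwards, the option can simply be read back; a small hypothetical helper:

#include <sys/socket.h>

/* Hypothetical helper: read back the send-buffer size the kernel actually
 * kept after maximize_sndbuf() has finished its binary search. */
static int current_sndbuf(int sfd) {
    int size = 0;
    socklen_t len = sizeof(size);
    if (getsockopt(sfd, SOL_SOCKET, SO_SNDBUF, (void *)&size, &len) != 0) {
        return -1;
    }
    return size;
}
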
7357
7358static SOCKETint new_socket(struct addrinfo *ai) {
7359 SOCKETint sfd;
7360
7361 sfd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
7362 if (sfd == INVALID_SOCKET-1) {
7363 return INVALID_SOCKET-1;
7364 }
7365
7366 if (evutil_make_socket_nonblocking(sfd) == -1) {
7367 safe_close(sfd);
7368 return INVALID_SOCKET-1;
7369 }
7370
7371 maximize_sndbuf(sfd);
7372
7373 return sfd;
7374}
7375
7376/**
7377 * Create a socket and bind it to a specific port number
7378 * @param interf the interface to bind to
7379 * @param port the port number to bind to
7380 * @param portnumber_file A filepointer to write the port numbers to
7381 * when they are successfully added to the list of ports we
7382 * listen on.
7383 */
7384static int server_socket(struct interface *interf, FILE *portnumber_file) {
7385 SOCKETint sfd;
7386 struct linger ling = {0, 0};
7387 struct addrinfo *ai;
7388 struct addrinfo *next;
7389 struct addrinfo hints;
7390 char port_buf[NI_MAXSERV32];
7391 int error;
7392 int success = 0;
7393 int flags =1;
7394 char *host = NULL((void*)0);
7395
7396 memset(&hints, 0, sizeof(hints))__builtin___memset_chk (&hints, 0, sizeof(hints), __builtin_object_size
(&hints, 0))
;
7397 hints.ai_flags = AI_PASSIVE0x00000001;
7398 hints.ai_protocol = IPPROTO_TCP6;
7399 hints.ai_socktype = SOCK_STREAM1;
7400
7401 if (interf->ipv4 && interf->ipv6) {
7402 hints.ai_family = AF_UNSPEC0;
7403 } else if (interf->ipv4) {
7404 hints.ai_family = AF_INET2;
7405 } else if (interf->ipv6) {
7406 hints.ai_family = AF_INET630;
7407 }
7408
7409 snprintf(port_buf, sizeof(port_buf), "%u", (unsigned int)interf->port)__builtin___snprintf_chk (port_buf, sizeof(port_buf), 0, __builtin_object_size
(port_buf, 2 > 1 ? 1 : 0), "%u", (unsigned int)interf->
port)
;
7410
7411 if (interf->host) {
7412 if (strlen(interf->host) > 0 && strcmp(interf->host, "*") != 0) {
7413 host = interf->host;
7414 }
7415 }
7416 error = getaddrinfo(host, port_buf, &hints, &ai);
7417 if (error != 0) {
7418#ifdef WIN32
7419 log_errcode_error(EXTENSION_LOG_WARNING, NULL((void*)0),
7420 "getaddrinfo(): %s", error);
7421#else
7422 if (error != EAI_SYSTEM11) {
7423 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7424 "getaddrinfo(): %s", gai_strerror(error));
7425 } else {
7426 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7427 "getaddrinfo(): %s", strerror(error));
7428 }
7429#endif
7430 return 1;
7431 }
7432
7433 for (next= ai; next; next= next->ai_next) {
7434 struct listening_port *port_instance;
7435 conn *listen_conn_add;
7436 if ((sfd = new_socket(next)) == INVALID_SOCKET-1) {
7437 /* getaddrinfo can return "junk" addresses,
7438 * we make sure at least one works before erroring.
7439 */
7440 continue;
7441 }
7442
7443#ifdef IPV6_V6ONLY27
7444 if (next->ai_family == AF_INET630) {
7445 error = setsockopt(sfd, IPPROTO_IPV641, IPV6_V6ONLY27, (char *) &flags, sizeof(flags));
7446 if (error != 0) {
7447 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7448 "setsockopt(IPV6_V6ONLY): %s",
7449 strerror(errno(*__error())));
7450 safe_close(sfd);
7451 continue;
7452 }
7453 }
7454#endif
7455
7456 setsockopt(sfd, SOL_SOCKET0xffff, SO_REUSEADDR0x0004, (void *)&flags, sizeof(flags));
7457 error = setsockopt(sfd, SOL_SOCKET0xffff, SO_KEEPALIVE0x0008, (void *)&flags, sizeof(flags));
7458 if (error != 0) {
7459 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7460 "setsockopt(SO_KEEPALIVE): %s",
7461 strerror(errno(*__error())));
7462 }
7463
7464 error = setsockopt(sfd, SOL_SOCKET0xffff, SO_LINGER0x0080, (void *)&ling, sizeof(ling));
7465 if (error != 0) {
7466 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7467 "setsockopt(SO_LINGER): %s",
7468 strerror(errno(*__error())));
7469 }
7470
7471 if (interf->tcp_nodelay) {
7472 error = setsockopt(sfd, IPPROTO_TCP6,
7473 TCP_NODELAY0x01, (void *)&flags, sizeof(flags));
7474 if (error != 0) {
7475 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7476 "setsockopt(TCP_NODELAY): %s",
7477 strerror(errno(*__error())));
7478 }
7479 }
7480
7481 if (bind(sfd, next->ai_addr, (socklen_t)next->ai_addrlen) == SOCKET_ERROR-1) {
7482#ifdef WIN32
7483 DWORD error = WSAGetLastError();
7484#else
7485 int error = errno(*__error());
7486#endif
7487 if (!is_addrinuse(error)) {
7488 log_errcode_error(EXTENSION_LOG_WARNING, NULL((void*)0),
7489 "bind(): %s", error);
7490 safe_close(sfd);
7491 freeaddrinfo(ai);
7492 return 1;
7493 }
7494 safe_close(sfd);
7495 continue;
7496 } else {
7497 success++;
7498 if (listen(sfd, interf->backlog) == SOCKET_ERROR-1) {
7499 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7500 "listen(): %s",
7501 strerror(errno(*__error())));
7502 safe_close(sfd);
7503 freeaddrinfo(ai);
7504 return 1;
7505 }
7506 if (portnumber_file != NULL((void*)0) &&
7507 (next->ai_addr->sa_family == AF_INET2 ||
7508 next->ai_addr->sa_family == AF_INET630)) {
7509 union {
7510 struct sockaddr_in in;
7511 struct sockaddr_in6 in6;
7512 } my_sockaddr;
7513 socklen_t len = sizeof(my_sockaddr);
7514 if (getsockname(sfd, (struct sockaddr*)&my_sockaddr, &len)==0) {
7515 if (next->ai_addr->sa_family == AF_INET2) {
7516 fprintf(portnumber_file, "%s INET: %u\n", "TCP",
7517 ntohs(my_sockaddr.in.sin_port)((__uint16_t)(__builtin_constant_p(my_sockaddr.in.sin_port) ?
((__uint16_t)((((__uint16_t)(my_sockaddr.in.sin_port) & 0xff00
) >> 8) | (((__uint16_t)(my_sockaddr.in.sin_port) &
0x00ff) << 8))) : _OSSwapInt16(my_sockaddr.in.sin_port
)))
);
7518 } else {
7519 fprintf(portnumber_file, "%s INET6: %u\n", "TCP",
7520 ntohs(my_sockaddr.in6.sin6_port)((__uint16_t)(__builtin_constant_p(my_sockaddr.in6.sin6_port)
? ((__uint16_t)((((__uint16_t)(my_sockaddr.in6.sin6_port) &
0xff00) >> 8) | (((__uint16_t)(my_sockaddr.in6.sin6_port
) & 0x00ff) << 8))) : _OSSwapInt16(my_sockaddr.in6.
sin6_port)))
);
7521 }
7522 }
7523 }
7524 }
7525
7526 if (!(listen_conn_add = conn_new(sfd, interf->port, conn_listening,
7527 EV_READ0x02 | EV_PERSIST0x10, 1,
7528 main_base, NULL((void*)0)))) {
7529 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7530 "failed to create listening connection\n");
7531 exit(EXIT_FAILURE1);
7532 }
7533 listen_conn_add->next = listen_conn;
7534 listen_conn = listen_conn_add;
7535 STATS_LOCK();
7536 ++stats.curr_conns;
7537 ++stats.daemon_conns;
7538 port_instance = get_listening_port_instance(interf->port);
7539 cb_assert(port_instance)(__builtin_expect(!(port_instance), 0) ? __assert_rtn(__func__
, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7539, "port_instance") : (void)0)
;
7540 ++port_instance->curr_conns;
7541 STATS_UNLOCK();
7542 }
7543
7544 freeaddrinfo(ai);
7545
7546 /* Return zero iff we detected no errors in starting up connections */
7547 return success == 0;
7548}
7549
7550static int server_sockets(FILE *portnumber_file) {
7551 int ret = 0;
7552 int ii = 0;
7553
7554 for (ii = 0; ii < settings.num_interfaces; ++ii) {
7555 stats.listening_ports[ii].port = settings.interfaces[ii].port;
7556 stats.listening_ports[ii].maxconns = settings.interfaces[ii].maxconn;
7557 ret |= server_socket(settings.interfaces + ii, portnumber_file);
7558 }
7559
7560 return ret;
7561}
7562
7563static struct event clockevent;
7564
7565/* time-sensitive callers can call it by hand with this, outside the normal every-1-second timer */
7566static void set_current_time(void) {
7567 struct timeval timer;
7568
7569 gettimeofday(&timer, NULL((void*)0));
7570 current_time = (rel_time_t) (timer.tv_sec - process_started);
7571}
7572
7573static void clock_handler(evutil_socket_tint fd, short which, void *arg) {
7574 struct timeval t;
7575 static bool_Bool initialized = false0;
7576
7577 t.tv_sec = 1;
7578 t.tv_usec = 0;
7579
7580 if (memcached_shutdown) {
7581 event_base_loopbreak(main_base);
7582 return ;
7583 }
7584
7585 if (initialized) {
7586 /* only delete the event if it's actually there. */
7587 evtimer_del(&clockevent)event_del(&clockevent);
7588 } else {
7589 initialized = true1;
7590 }
7591
7592 evtimer_set(&clockevent, clock_handler, 0)event_set((&clockevent), -1, 0, (clock_handler), (0));
7593 event_base_set(main_base, &clockevent);
7594 evtimer_add(&clockevent, &t)event_add((&clockevent), (&t));
7595
7596 set_current_time();
7597}
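
clock_handler() re-arms itself once a second by deleting and re-adding the timer event on main_base. A minimal standalone sketch of the same self-re-arming timer, assuming the libevent 1.x-style API used in this file; the names tick and on_tick are illustrative:

#include <event.h>
#include <stdio.h>
#include <sys/time.h>

static struct event tick;

/* Re-arm ourselves one second from now, just as clock_handler() does. */
static void on_tick(int fd, short which, void *arg) {
    struct timeval t = { 1, 0 };
    (void)fd; (void)which; (void)arg;
    puts("tick");
    evtimer_add(&tick, &t);
}

int main(void) {
    struct event_base *base = event_init();
    struct timeval t = { 1, 0 };
    evtimer_set(&tick, on_tick, NULL);
    event_base_set(base, &tick);
    evtimer_add(&tick, &t);
    event_base_dispatch(base);
    return 0;
}
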
7598
7599static void usage(void) {
7600 printf("memcached %s\n", get_server_version());
7601 printf("-C file Read configuration from file\n");
7602 printf("-h print this help and exit\n");
7603 printf("\nEnvironment variables:\n");
7604 printf("MEMCACHED_PORT_FILENAME File to write port information to\n");
7605 printf("MEMCACHED_REQS_TAP_EVENT Similar to -R but for tap_ship_log\n");
7606}
7607
7608#ifndef WIN32
7609static void save_pid(const char *pid_file) {
7610 FILE *fp;
7611
7612 if (access(pid_file, F_OK0) == 0) {
7613 if ((fp = fopen(pid_file, "r")) != NULL((void*)0)) {
7614 char buffer[1024];
7615 if (fgets(buffer, sizeof(buffer), fp) != NULL((void*)0)) {
7616 unsigned int pid;
7617 if (safe_strtoul(buffer, &pid) && kill((pid_t)pid, 0) == 0) {
7618 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7619 "WARNING: The pid file contained the following (running) pid: %u\n", pid);
7620 }
7621 }
7622 fclose(fp);
7623 }
7624 }
7625
7626 if ((fp = fopen(pid_file, "w")) == NULL((void*)0)) {
7627 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7628 "Could not open the pid file %s for writing: %s\n",
7629 pid_file, strerror(errno(*__error())));
7630 return;
7631 }
7632
7633 fprintf(fp,"%ld\n", (long)getpid());
7634 if (fclose(fp) == -1) {
7635 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7636 "Could not close the pid file %s: %s\n",
7637 pid_file, strerror(errno(*__error())));
7638 }
7639}
7640
7641static void remove_pidfile(const char *pid_file) {
7642 if (pid_file != NULL((void*)0)) {
7643 if (unlink(pid_file) != 0) {
7644 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
7645 "Could not remove the pid file %s: %s\n",
7646 pid_file, strerror(errno(*__error())));
7647 }
7648 }
7649}
7650#endif
7651
7652#ifndef WIN32
7653
7654#ifndef HAVE_SIGIGNORE1
7655static int sigignore(int sig) {
7656 struct sigaction sa;
7657 memset(&sa, 0, sizeof(sa))__builtin___memset_chk (&sa, 0, sizeof(sa), __builtin_object_size
(&sa, 0))
;
7658 sa.sa_handler__sigaction_u.__sa_handler = SIG_IGN(void (*)(int))1;
7659
7660 if (sigemptyset(&sa.sa_mask)(*(&sa.sa_mask) = 0, 0) == -1 || sigaction(sig, &sa, 0) == -1) {
7661 return -1;
7662 }
7663 return 0;
7664}
7665#endif /* !HAVE_SIGIGNORE */
7666
7667static void sigterm_handler(int sig) {
7668 cb_assert(sig == SIGTERM || sig == SIGINT)(__builtin_expect(!(sig == 15 || sig == 2), 0) ? __assert_rtn
(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7668, "sig == 15 || sig == 2") : (void)0)
;
7669 memcached_shutdown = 1;
7670}
7671#endif
7672
7673static int install_sigterm_handler(void) {
7674#ifndef WIN32
7675 struct sigaction sa;
7676 memset(&sa, 0, sizeof(sa))__builtin___memset_chk (&sa, 0, sizeof(sa), __builtin_object_size
(&sa, 0))
;
7677 sa.sa_handler__sigaction_u.__sa_handler = sigterm_handler;
7678
7679 if (sigemptyset(&sa.sa_mask)(*(&sa.sa_mask) = 0, 0) == -1 || sigaction(SIGTERM15, &sa, 0) == -1 ||
7680 sigaction(SIGINT2, &sa, 0) == -1) {
7681 return -1;
7682 }
7683#endif
7684
7685 return 0;
7686}
7687
7688static const char* get_server_version(void) {
7689 if (strlen(PRODUCT_VERSION"") == 0) {
7690 return "unknown";
7691 } else {
7692 return PRODUCT_VERSION"";
7693 }
7694}
7695
7696static void store_engine_specific(const void *cookie,
7697 void *engine_data) {
7698 conn *c = (conn*)cookie;
7699 c->engine_storage = engine_data;
7700}
7701
7702static void *get_engine_specific(const void *cookie) {
7703 conn *c = (conn*)cookie;
7704 return c->engine_storage;
7705}
7706
7707static bool_Bool is_datatype_supported(const void *cookie) {
7708 conn *c = (conn*)cookie;
7709 return c->supports_datatype;
7710}
7711
7712static uint8_t get_opcode_if_ewouldblock_set(const void *cookie) {
7713 conn *c = (conn*)cookie;
7714 uint8_t opcode = PROTOCOL_BINARY_CMD_INVALID;
7715 if (c->ewouldblock) {
7716 opcode = c->binary_header.request.opcode;
7717 }
7718 return opcode;
7719}
7720
7721static bool_Bool validate_session_cas(const uint64_t cas) {
7722 bool_Bool ret = true1;
7723 cb_mutex_enter(&(session_cas.mutex));
7724 if (cas != 0) {
7725 if (session_cas.value != cas) {
7726 ret = false0;
7727 } else {
7728 session_cas.ctr++;
7729 }
7730 } else {
7731 session_cas.ctr++;
7732 }
7733 cb_mutex_exit(&(session_cas.mutex));
7734 return ret;
7735}
7736
7737static void decrement_session_ctr() {
7738 cb_mutex_enter(&(session_cas.mutex));
7739 cb_assert(session_cas.ctr != 0)(__builtin_expect(!(session_cas.ctr != 0), 0) ? __assert_rtn(
__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7739, "session_cas.ctr != 0") : (void)0)
;
7740 session_cas.ctr--;
7741 cb_mutex_exit(&(session_cas.mutex));
7742}
7743
7744static SOCKETint get_socket_fd(const void *cookie) {
7745 conn *c = (conn *)cookie;
7746 return c->sfd;
7747}
7748
7749static ENGINE_ERROR_CODE reserve_cookie(const void *cookie) {
7750 conn *c = (conn *)cookie;
7751 ++c->refcount;
7752 return ENGINE_SUCCESS;
7753}
7754
7755static ENGINE_ERROR_CODE release_cookie(const void *cookie) {
7756 conn *c = (conn *)cookie;
7757 int notify;
7758 LIBEVENT_THREAD *thr;
7759
7760 cb_assert(c)(__builtin_expect(!(c), 0) ? __assert_rtn(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7760, "c") : (void)0)
;
7761 thr = c->thread;
7762 cb_assert(thr)(__builtin_expect(!(thr), 0) ? __assert_rtn(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7762, "thr") : (void)0)
;
7763 LOCK_THREAD(thr)cb_mutex_enter(&thr->mutex); (__builtin_expect(!(thr->
is_locked == 0), 0) ? __assert_rtn(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7763, "thr->is_locked == 0") : (void)0); thr->is_locked
= 1;
;
7764 --c->refcount;
7765
7766    /* Releasing the reference to the object may cause it to change
7767     * state. (NOTE: the release call shall never be called from the
7768     * worker threads), so we should put the connection in the pool of
7769     * pending IO and have the system retry the operation for the
7770     * connection.
7771 */
7772 notify = add_conn_to_pending_io_list(c);
7773 UNLOCK_THREAD(thr)(__builtin_expect(!(thr->is_locked == 1), 0) ? __assert_rtn
(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7773, "thr->is_locked == 1") : (void)0); thr->is_locked
= 0; cb_mutex_exit(&thr->mutex);
;
7774
7775 /* kick the thread in the butt */
7776 if (notify) {
7777 notify_thread(thr);
7778 }
7779
7780 return ENGINE_SUCCESS;
7781}
7782
7783static int num_independent_stats(void) {
7784 return settings.num_threads + 1;
7785}
7786
7787static void *new_independent_stats(void) {
7788 int nrecords = num_independent_stats();
7789 struct thread_stats *ts = calloc(nrecords, sizeof(struct thread_stats));
7790 int ii;
7791 for (ii = 0; ii < nrecords; ii++) {
7792 cb_mutex_initialize(&ts[ii].mutex);
7793 }
7794 return ts;
7795}
7796
7797static void release_independent_stats(void *stats) {
7798 int nrecords = num_independent_stats();
7799 struct thread_stats *ts = stats;
7800 int ii;
7801 for (ii = 0; ii < nrecords; ii++) {
7802 cb_mutex_destroy(&ts[ii].mutex);
7803 }
7804 free(ts);
7805}
7806
7807static struct thread_stats* get_independent_stats(conn *c) {
7808 struct thread_stats *independent_stats;
7809 if (settings.engine.v1->get_stats_struct != NULL((void*)0)) {
7810 independent_stats = settings.engine.v1->get_stats_struct(settings.engine.v0, (const void *)c);
7811 if (independent_stats == NULL((void*)0)) {
7812 independent_stats = default_independent_stats;
7813 }
7814 } else {
7815 independent_stats = default_independent_stats;
7816 }
7817 return independent_stats;
7818}
7819
7820static struct thread_stats *get_thread_stats(conn *c) {
7821 struct thread_stats *independent_stats;
7822 cb_assert(c->thread->index < num_independent_stats())(__builtin_expect(!(c->thread->index < num_independent_stats
()), 0) ? __assert_rtn(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7822, "c->thread->index < num_independent_stats()"
) : (void)0)
;
7823 independent_stats = get_independent_stats(c);
7824 return &independent_stats[c->thread->index];
7825}
7826
7827static void register_callback(ENGINE_HANDLE *eh,
7828 ENGINE_EVENT_TYPE type,
7829 EVENT_CALLBACK cb, const void *cb_data) {
7830 struct engine_event_handler *h =
7831 calloc(sizeof(struct engine_event_handler), 1);
7832
7833 cb_assert(h)(__builtin_expect(!(h), 0) ? __assert_rtn(__func__, "/Users/dave/repos/couchbase/server/source/memcached/daemon/memcached.c"
, 7833, "h") : (void)0)
;
7834 h->cb = cb;
7835 h->cb_data = cb_data;
7836 h->next = engine_event_handlers[type];
7837 engine_event_handlers[type] = h;
7838}
7839
7840static rel_time_t get_current_time(void)
7841{
7842 return current_time;
7843}
7844
7845static void count_eviction(const void *cookie, const void *key, const int nkey) {
7846 (void)cookie;
7847 (void)key;
7848 (void)nkey;
7849}
7850
7851/**
7852 * To make it easy for engine implementors who don't want to write their own
7853 * incr/decr code, they can just set the arithmetic function to NULL and use
7854 * this implementation. It is not efficient, because it makes multiple calls
7855 * through the interface (a get followed by a CAS store).
7856 * If you don't care, feel free to use it.
7857 */
7858static ENGINE_ERROR_CODE internal_arithmetic(ENGINE_HANDLE* handle,
7859 const void* cookie,
7860 const void* key,
7861 const int nkey,
7862 const bool_Bool increment,
7863 const bool_Bool create,
7864 const uint64_t delta,
7865 const uint64_t initial,
7866 const rel_time_t exptime,
7867 uint64_t *cas,
7868 uint8_t datatype,
7869 uint64_t *result,
7870 uint16_t vbucket)
7871{
7872 ENGINE_HANDLE_V1 *e = (ENGINE_HANDLE_V1*)handle;
7873 item *it = NULL((void*)0);
7874 ENGINE_ERROR_CODE ret;
7875
7876 ret = e->get(handle, cookie, &it, key, nkey, vbucket);
7877
7878 if (ret == ENGINE_SUCCESS) {
7879 size_t nb;
7880 item *nit;
7881 char value[80];
7882 uint64_t val;
7883 item_info_holder info;
7884 item_info_holder i2;
7885 memset(&info, 0, sizeof(info))__builtin___memset_chk (&info, 0, sizeof(info), __builtin_object_size
(&info, 0))
;
7886 memset(&i2, 0, sizeof(i2))__builtin___memset_chk (&i2, 0, sizeof(i2), __builtin_object_size
(&i2, 0))
;
7887
7888 info.info.nvalue = 1;
7889
7890 if (!e->get_item_info(handle, cookie, it, (void*)&info)) {
7891 e->release(handle, cookie, it);
7892 return ENGINE_FAILED;
7893 }
7894
7895 if (info.info.value[0].iov_len > (sizeof(value) - 1)) {
7896 e->release(handle, cookie, it);
7897 return ENGINE_EINVAL;
7898 }
7899
7900 memcpy(value, info.info.value[0].iov_base, info.info.value[0].iov_len)__builtin___memcpy_chk (value, info.info.value[0].iov_base, info
.info.value[0].iov_len, __builtin_object_size (value, 0))
;
7901 value[info.info.value[0].iov_len] = '\0';
7902
7903 if (!safe_strtoull(value, &val)) {
7904 e->release(handle, cookie, it);
7905 return ENGINE_EINVAL;
7906 }
7907
7908 if (increment) {
7909 val += delta;
7910 } else {
7911 if (delta > val) {
7912 val = 0;
7913 } else {
7914 val -= delta;
7915 }
7916 }
7917
7918 nb = snprintf(value, sizeof(value), "%"PRIu64, val)__builtin___snprintf_chk (value, sizeof(value), 0, __builtin_object_size
(value, 2 > 1 ? 1 : 0), "%""ll" "u", val)
;
7919 *result = val;
7920 nit = NULL((void*)0);
7921 if (e->allocate(handle, cookie, &nit, key,
7922 nkey, nb, info.info.flags, info.info.exptime,
7923 datatype) != ENGINE_SUCCESS) {
7924 e->release(handle, cookie, it);
7925 return ENGINE_ENOMEM;
7926 }
7927
7928 i2.info.nvalue = 1;
7929 if (!e->get_item_info(handle, cookie, nit, (void*)&i2)) {
7930 e->release(handle, cookie, it);
7931 e->release(handle, cookie, nit);
7932 return ENGINE_FAILED;
7933 }
7934
7935 memcpy(i2.info.value[0].iov_base, value, nb)__builtin___memcpy_chk (i2.info.value[0].iov_base, value, nb,
__builtin_object_size (i2.info.value[0].iov_base, 0))
;
7936 e->item_set_cas(handle, cookie, nit, info.info.cas);
7937 ret = e->store(handle, cookie, nit, cas, OPERATION_CAS, vbucket);
7938 e->release(handle, cookie, it);
7939 e->release(handle, cookie, nit);
7940 } else if (ret == ENGINE_KEY_ENOENT && create) {
7941 char value[80];
7942 size_t nb = snprintf(value, sizeof(value), "%"PRIu64"\r\n", initial)__builtin___snprintf_chk (value, sizeof(value), 0, __builtin_object_size
(value, 2 > 1 ? 1 : 0), "%""ll" "u""\r\n", initial)
;
7943 item_info_holder info;
7944 memset(&info, 0, sizeof(info))__builtin___memset_chk (&info, 0, sizeof(info), __builtin_object_size
(&info, 0))
;
7945 info.info.nvalue = 1;
7946
7947 *result = initial;
7948 if (e->allocate(handle, cookie, &it, key, nkey, nb, 0, exptime,
7949 datatype) != ENGINE_SUCCESS) {
7950 e->release(handle, cookie, it);
7951 return ENGINE_ENOMEM;
7952 }
7953
7954 if (!e->get_item_info(handle, cookie, it, (void*)&info)) {
7955 e->release(handle, cookie, it);
7956 return ENGINE_FAILED;
7957 }
7958
7959 memcpy(info.info.value[0].iov_base, value, nb)__builtin___memcpy_chk (info.info.value[0].iov_base, value, nb
, __builtin_object_size (info.info.value[0].iov_base, 0))
;
7960 ret = e->store(handle, cookie, it, cas, OPERATION_CAS, vbucket);
7961 e->release(handle, cookie, it);
7962 }
7963
7964    /* We had a race condition.. just call ourselves recursively to retry */
7965 if (ret == ENGINE_KEY_EEXISTS) {
7966 return internal_arithmetic(handle, cookie, key, nkey, increment, create, delta,
7967 initial, exptime, cas, datatype, result, vbucket);
7968 }
7969
7970 return ret;
7971}
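
The arithmetic step above works on the textual value: parse it, apply the delta (clamping a decrement at zero), and re-render it before the CAS store. The same step in isolation, as a hypothetical helper using only the standard library:

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper mirroring the textual arithmetic above: parse the
 * stored value, apply the delta (clamping a decrement at zero), and
 * re-render it into the same buffer. Returns the new length. */
static size_t apply_delta(char *value, size_t size, bool increment, uint64_t delta) {
    uint64_t val = strtoull(value, NULL, 10);
    if (increment) {
        val += delta;
    } else {
        val = (delta > val) ? 0 : val - delta;
    }
    return (size_t)snprintf(value, size, "%" PRIu64, val);
}
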
7972
7973/**
7974 * Register an extension if it's not already registered
7975 *
7976 * @param type the type of the extension to register
7977 * @param extension the extension to register
7978 * @return true if success, false otherwise
7979 */
7980static bool_Bool register_extension(extension_type_t type, void *extension)
7981{
7982 if (extension == NULL((void*)0)) {
7983 return false0;
7984 }
7985
7986 switch (type) {
7987 case EXTENSION_DAEMON:
7988 {
7989 EXTENSION_DAEMON_DESCRIPTOR *ptr;
7990 for (ptr = settings.extensions.daemons; ptr != NULL((void*)0); ptr = ptr->next) {
7991 if (ptr == extension) {
7992 return false0;
7993 }
7994 }
7995 ((EXTENSION_DAEMON_DESCRIPTOR *)(extension))->next = settings.extensions.daemons;
7996 settings.extensions.daemons = extension;
7997 }
7998 return true1;
7999 case EXTENSION_LOGGER:
8000 settings.extensions.logger = extension;
8001 return true1;
8002
8003 case EXTENSION_BINARY_PROTOCOL:
8004 if (settings.extensions.binary != NULL((void*)0)) {
8005 EXTENSION_BINARY_PROTOCOL_DESCRIPTOR *last;
8006 for (last = settings.extensions.binary; last->next != NULL((void*)0);
8007 last = last->next) {
8008 if (last == extension) {
8009 return false0;
8010 }
8011 }
8012 if (last == extension) {
8013 return false0;
8014 }
8015 last->next = extension;
8016 last->next->next = NULL((void*)0);
8017 } else {
8018 settings.extensions.binary = extension;
8019 settings.extensions.binary->next = NULL((void*)0);
8020 }
8021
8022 ((EXTENSION_BINARY_PROTOCOL_DESCRIPTOR*)extension)->setup(setup_binary_lookup_cmd);
8023 return true1;
8024
8025 default:
8026 return false0;
8027 }
8028}
8029
8030/**
8031 * Unregister an extension
8032 *
8033 * @param type the type of the extension to remove
8034 * @param extension the extension to remove
8035 */
8036static void unregister_extension(extension_type_t type, void *extension)
8037{
8038 switch (type) {
8039 case EXTENSION_DAEMON:
8040 {
8041 EXTENSION_DAEMON_DESCRIPTOR *prev = NULL((void*)0);
8042 EXTENSION_DAEMON_DESCRIPTOR *ptr = settings.extensions.daemons;
8043
8044 while (ptr != NULL((void*)0) && ptr != extension) {
8045 prev = ptr;
8046 ptr = ptr->next;
8047 }
8048
8049 if (ptr != NULL((void*)0) && prev != NULL((void*)0)) {
8050 prev->next = ptr->next;
8051 }
8052
8053 if (ptr != NULL((void*)0) && settings.extensions.daemons == ptr) {
8054 settings.extensions.daemons = ptr->next;
8055 }
8056 }
8057 break;
8058 case EXTENSION_LOGGER:
8059 if (settings.extensions.logger == extension) {
8060 if (get_stderr_logger() == extension) {
8061 settings.extensions.logger = get_null_logger();
8062 } else {
8063 settings.extensions.logger = get_stderr_logger();
8064 }
8065 }
8066 break;
8067 case EXTENSION_BINARY_PROTOCOL:
8068 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
8069 "You can't unregister a binary command handler!");
8070 abort();
8071 break;
8072
8073 default:
8074 ;
8075 }
8076
8077}
8078
8079/**
8080 * Get the named extension
8081 */
8082static void* get_extension(extension_type_t type)
8083{
8084 switch (type) {
8085 case EXTENSION_DAEMON:
8086 return settings.extensions.daemons;
8087
8088 case EXTENSION_LOGGER:
8089 return settings.extensions.logger;
8090
8091 case EXTENSION_BINARY_PROTOCOL:
8092 return settings.extensions.binary;
8093
8094 default:
8095 return NULL((void*)0);
8096 }
8097}
8098
8099static void shutdown_server(void) {
8100 memcached_shutdown = 1;
8101}
8102
8103static EXTENSION_LOGGER_DESCRIPTOR* get_logger(void)
8104{
8105 return settings.extensions.logger;
8106}
8107
8108static EXTENSION_LOG_LEVEL get_log_level(void)
8109{
8110 EXTENSION_LOG_LEVEL ret;
8111 switch (settings.verbose) {
8112 case 0: ret = EXTENSION_LOG_WARNING; break;
8113 case 1: ret = EXTENSION_LOG_INFO; break;
8114 case 2: ret = EXTENSION_LOG_DEBUG; break;
8115 default:
8116 ret = EXTENSION_LOG_DETAIL;
8117 }
8118 return ret;
8119}
8120
8121static void set_log_level(EXTENSION_LOG_LEVEL severity)
8122{
8123 switch (severity) {
8124 case EXTENSION_LOG_WARNING: settings.verbose = 0; break;
8125 case EXTENSION_LOG_INFO: settings.verbose = 1; break;
8126 case EXTENSION_LOG_DEBUG: settings.verbose = 2; break;
8127 default:
8128 settings.verbose = 3;
8129 }
8130}
8131
8132static void get_config_append_stats(const char *key, const uint16_t klen,
8133 const char *val, const uint32_t vlen,
8134 const void *cookie)
8135{
8136 char *pos;
8137 size_t nbytes;
8138
8139 if (klen == 0 || vlen == 0) {
8140 return ;
8141 }
8142
8143 pos = (char*)cookie;
8144 nbytes = strlen(pos);
8145
8146 if ((nbytes + klen + vlen + 3) > 1024) {
8147        /* Not enough space left in the buffer.. */
8148 return;
8149 }
8150
8151 memcpy(pos + nbytes, key, klen)__builtin___memcpy_chk (pos + nbytes, key, klen, __builtin_object_size
(pos + nbytes, 0))
;
8152 nbytes += klen;
8153 pos[nbytes] = '=';
8154 ++nbytes;
8155 memcpy(pos + nbytes, val, vlen)__builtin___memcpy_chk (pos + nbytes, val, vlen, __builtin_object_size
(pos + nbytes, 0))
;
8156 nbytes += vlen;
8157 memcpy(pos + nbytes, ";", 2)__builtin___memcpy_chk (pos + nbytes, ";", 2, __builtin_object_size
(pos + nbytes, 0))
;
8158}
8159
8160static bool_Bool get_config(struct config_item items[]) {
8161 char config[1024];
8162 int rval;
8163
8164 config[0] = '\0';
8165 process_stat_settings(get_config_append_stats, config);
8166 rval = parse_config(config, items, NULL((void*)0));
8167 return rval >= 0;
8168}
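
get_config_append_stats() and get_config() together build a flat "key=value;" string from the current settings and feed it to parse_config(). A small sketch of that append step with a hypothetical helper and illustrative keys:

#include <stdio.h>
#include <string.h>

/* Hypothetical helper doing what get_config_append_stats() does with its
 * 1024-byte cookie buffer: append "key=value;" and silently drop pairs
 * that do not fit. */
static void append_pair(char *buf, size_t cap, const char *key, const char *val) {
    size_t used = strlen(buf);
    if (used + strlen(key) + strlen(val) + 3 > cap) {
        return;
    }
    snprintf(buf + used, cap - used, "%s=%s;", key, val);
}

int main(void) {
    char config[1024] = "";
    /* Illustrative keys only; the real keys come from process_stat_settings(). */
    append_pair(config, sizeof(config), "verbosity", "1");
    append_pair(config, sizeof(config), "tcp_nodelay", "true");
    puts(config);   /* prints: verbosity=1;tcp_nodelay=true; */
    return 0;
}
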
8169
8170/**
8171 * Callback the engines may call to get the public server interface
8172 * @return pointer to a structure containing the interface. The client should
8173 * know the layout and perform the proper casts.
8174 */
8175static SERVER_HANDLE_V1 *get_server_api(void)
8176{
8177 static int init;
8178 static SERVER_CORE_API core_api;
8179 static SERVER_COOKIE_API server_cookie_api;
8180 static SERVER_STAT_API server_stat_api;
8181 static SERVER_LOG_API server_log_api;
8182 static SERVER_EXTENSION_API extension_api;
8183 static SERVER_CALLBACK_API callback_api;
8184 static ALLOCATOR_HOOKS_API hooks_api;
8185 static SERVER_HANDLE_V1 rv;
8186
8187 if (!init) {
8188 init = 1;
8189 core_api.server_version = get_server_version;
8190 core_api.hash = hash;
8191 core_api.realtime = realtime;
8192 core_api.abstime = abstime;
8193 core_api.get_current_time = get_current_time;
8194 core_api.parse_config = parse_config;
8195 core_api.shutdown = shutdown_server;
8196 core_api.get_config = get_config;
8197
8198 server_cookie_api.get_auth_data = get_auth_data;
8199 server_cookie_api.store_engine_specific = store_engine_specific;
8200 server_cookie_api.get_engine_specific = get_engine_specific;
8201 server_cookie_api.is_datatype_supported = is_datatype_supported;
8202 server_cookie_api.get_opcode_if_ewouldblock_set = get_opcode_if_ewouldblock_set;
8203 server_cookie_api.validate_session_cas = validate_session_cas;
8204 server_cookie_api.decrement_session_ctr = decrement_session_ctr;
8205 server_cookie_api.get_socket_fd = get_socket_fd;
8206 server_cookie_api.notify_io_complete = notify_io_complete;
8207 server_cookie_api.reserve = reserve_cookie;
8208 server_cookie_api.release = release_cookie;
8209
8210 server_stat_api.new_stats = new_independent_stats;
8211 server_stat_api.release_stats = release_independent_stats;
8212 server_stat_api.evicting = count_eviction;
8213
8214 server_log_api.get_logger = get_logger;
8215 server_log_api.get_level = get_log_level;
8216 server_log_api.set_level = set_log_level;
8217
8218 extension_api.register_extension = register_extension;
8219 extension_api.unregister_extension = unregister_extension;
8220 extension_api.get_extension = get_extension;
8221
8222 callback_api.register_callback = register_callback;
8223 callback_api.perform_callbacks = perform_callbacks;
8224
8225 hooks_api.add_new_hook = mc_add_new_hook;
8226 hooks_api.remove_new_hook = mc_remove_new_hook;
8227 hooks_api.add_delete_hook = mc_add_delete_hook;
8228 hooks_api.remove_delete_hook = mc_remove_delete_hook;
8229 hooks_api.get_extra_stats_size = mc_get_extra_stats_size;
8230 hooks_api.get_allocator_stats = mc_get_allocator_stats;
8231 hooks_api.get_allocation_size = mc_get_allocation_size;
8232 hooks_api.get_detailed_stats = mc_get_detailed_stats;
8233
8234 rv.interface = 1;
8235 rv.core = &core_api;
8236 rv.stat = &server_stat_api;
8237 rv.extension = &extension_api;
8238 rv.callback = &callback_api;
8239 rv.log = &server_log_api;
8240 rv.cookie = &server_cookie_api;
8241 rv.alloc_hooks = &hooks_api;
8242 }
8243
8244 if (rv.engine == NULL((void*)0)) {
8245 rv.engine = settings.engine.v0;
8246 }
8247
8248 return &rv;
8249}
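
An engine or extension that has been handed the get_server_api callback would typically fetch the handle once and go through its sub-APIs. A hedged sketch using only members populated above; engine_example is an illustrative name, and the code assumes the memcached server API headers for the types it uses:

/* Sketch only: assumes the memcached server API headers for the types used
 * here; engine_example is not part of this file. */
static void engine_example(SERVER_HANDLE_V1 *(*get_api)(void)) {
    SERVER_HANDLE_V1 *sapi = get_api();
    rel_time_t now = sapi->core->get_current_time();
    EXTENSION_LOGGER_DESCRIPTOR *logger = sapi->log->get_logger();
    logger->log(EXTENSION_LOG_INFO, NULL,
                "engine example initialized at rel_time %u\n", (unsigned int)now);
}
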
8250
8251static void process_bin_upr_response(conn *c) {
8252 char *packet;
8253 ENGINE_ERROR_CODE ret = ENGINE_DISCONNECT;
8254
8255 c->supports_datatype = true1;
8256 packet = (c->rcurr - (c->binary_header.request.bodylen + sizeof(c->binary_header)));
8257 if (settings.engine.v1->upr.response_handler != NULL((void*)0)) {
8258 ret = settings.engine.v1->upr.response_handler(settings.engine.v0, c,
8259 (void*)packet);
8260 }
8261
8262 if (ret == ENGINE_DISCONNECT) {
8263 conn_set_state(c, conn_closing);
8264 } else {
8265 conn_set_state(c, conn_ship_log);
8266 }
8267}
8268
8269
8270static void initialize_binary_lookup_map(void) {
8271 int ii;
8272 for (ii = 0; ii < 0x100; ++ii) {
8273 request_handlers[ii].descriptor = NULL((void*)0);
8274 request_handlers[ii].callback = default_unknown_command;
8275 }
8276
8277 response_handlers[PROTOCOL_BINARY_CMD_NOOP] = process_bin_noop_response;
8278 response_handlers[PROTOCOL_BINARY_CMD_TAP_MUTATION] = process_bin_tap_ack;
8279 response_handlers[PROTOCOL_BINARY_CMD_TAP_DELETE] = process_bin_tap_ack;
8280 response_handlers[PROTOCOL_BINARY_CMD_TAP_FLUSH] = process_bin_tap_ack;
8281 response_handlers[PROTOCOL_BINARY_CMD_TAP_OPAQUE] = process_bin_tap_ack;
8282 response_handlers[PROTOCOL_BINARY_CMD_TAP_VBUCKET_SET] = process_bin_tap_ack;
8283 response_handlers[PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_START] = process_bin_tap_ack;
8284 response_handlers[PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_END] = process_bin_tap_ack;
8285
8286 response_handlers[PROTOCOL_BINARY_CMD_UPR_OPEN] = process_bin_upr_response;
8287 response_handlers[PROTOCOL_BINARY_CMD_UPR_ADD_STREAM] = process_bin_upr_response;
8288 response_handlers[PROTOCOL_BINARY_CMD_UPR_CLOSE_STREAM] = process_bin_upr_response;
8289 response_handlers[PROTOCOL_BINARY_CMD_UPR_STREAM_REQ] = process_bin_upr_response;
8290 response_handlers[PROTOCOL_BINARY_CMD_UPR_GET_FAILOVER_LOG] = process_bin_upr_response;
8291 response_handlers[PROTOCOL_BINARY_CMD_UPR_STREAM_END] = process_bin_upr_response;
8292 response_handlers[PROTOCOL_BINARY_CMD_UPR_SNAPSHOT_MARKER] = process_bin_upr_response;
8293 response_handlers[PROTOCOL_BINARY_CMD_UPR_MUTATION] = process_bin_upr_response;
8294 response_handlers[PROTOCOL_BINARY_CMD_UPR_DELETION] = process_bin_upr_response;
8295 response_handlers[PROTOCOL_BINARY_CMD_UPR_EXPIRATION] = process_bin_upr_response;
8296 response_handlers[PROTOCOL_BINARY_CMD_UPR_FLUSH] = process_bin_upr_response;
8297 response_handlers[PROTOCOL_BINARY_CMD_UPR_SET_VBUCKET_STATE] = process_bin_upr_response;
8298 response_handlers[PROTOCOL_BINARY_CMD_UPR_NOOP] = process_bin_upr_response;
8299 response_handlers[PROTOCOL_BINARY_CMD_UPR_BUFFER_ACKNOWLEDGEMENT] = process_bin_upr_response;
8300 response_handlers[PROTOCOL_BINARY_CMD_UPR_CONTROL] = process_bin_upr_response;
8301 response_handlers[PROTOCOL_BINARY_CMD_UPR_RESERVED4] = process_bin_upr_response;
8302}
8303
8304/**
8305 * Load a shared object and initialize all the extensions in there.
8306 *
8307 * @param soname the name of the shared object (may not be NULL)
8308 * @param config optional configuration parameters
8309 * @return true if success, false otherwise
8310 */
8311bool_Bool load_extension(const char *soname, const char *config) {
8312 cb_dlhandle_t handle;
8313 void *symbol;
8314 EXTENSION_ERROR_CODE error;
8315 union my_hack {
8316 MEMCACHED_EXTENSIONS_INITIALIZE initialize;
8317 void* voidptr;
8318 } funky;
8319 char *error_msg;
8320
8321 if (soname == NULL((void*)0)) {
8322 return false0;
8323 }
8324
8325 handle = cb_dlopen(soname, &error_msg);
8326 if (handle == NULL((void*)0)) {
8327 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
8328 "Failed to open library \"%s\": %s\n",
8329 soname, error_msg);
8330 free(error_msg);
8331 return false0;
8332 }
8333
8334 symbol = cb_dlsym(handle, "memcached_extensions_initialize", &error_msg);
8335 if (symbol == NULL((void*)0)) {
8336 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
8337 "Could not find symbol \"memcached_extensions_initialize\" in %s: %s\n",
8338 soname, error_msg);
8339 free(error_msg);
8340 return false0;
8341 }
8342 funky.voidptr = symbol;
8343
8344 error = (*funky.initialize)(config, get_server_api);
8345 if (error != EXTENSION_SUCCESS) {
8346 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
8347 "Failed to initalize extensions from %s. Error code: %d\n",
8348 soname, error);
8349 cb_dlclose(handle);
8350 return false0;
8351 }
8352
8353 if (settings.verbose > 0) {
8354 settings.extensions.logger->log(EXTENSION_LOG_INFO, NULL((void*)0),
8355 "Loaded extensions from: %s\n", soname);
8356 }
8357
8358 return true1;
8359}
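
For reference, a minimal (hypothetical) call site for load_extension(); the shared-object name and configuration string below are illustrative and do not appear in this file:

    /* Try to load an optional extension; log a warning if it fails. */
    if (!load_extension("file_logger.so", "cyclesize=10485760")) {
        settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL,
                                        "Could not load file_logger.so\n");
    }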
8360
8361/**
8362 * Do basic sanity check of the runtime environment
8363 * @return true if no errors found, false if we can't use this env
8364 */
8365static bool_Bool sanitycheck(void) {
8366 /* One of our biggest problems is old and bogus libevents */
8367 const char *ever = event_get_version();
8368 if (ever != NULL((void*)0)) {
8369 if (strncmp(ever, "1.", 2) == 0) {
8370 /* Require at least 1.3 (that's still a couple of years old) */
8371 if ((ever[2] == '1' || ever[2] == '2') && !isdigit(ever[3])) {
8372 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
8373 "You are using libevent %s.\nPlease upgrade to"
8374 " a more recent version (1.3 or newer)\n",
8375 event_get_version());
8376 return false0;
8377 }
8378 }
8379 }
8380
8381 return true1;
8382}
8383
8384/**
8385 * Log a socket error message.
8386 *
8387 * @param severity the severity to put in the log
8388 * @param cookie cookie representing the client
8389 * @param prefix What to put as a prefix (MUST INCLUDE
8390 * the %s for where the string should go)
8391 */
8392void log_socket_error(EXTENSION_LOG_LEVEL severity,
8393 const void* cookie,
8394 const char* prefix)
8395{
8396#ifdef WIN32
8397 log_errcode_error(severity, cookie, prefix,
8398 WSAGetLastError());
8399#else
8400 log_errcode_error(severity, cookie, prefix, errno(*__error()));
8401#endif
8402}
8403
8404/**
8405 * Log a system error message.
8406 *
8407 * @param severity the severity to put in the log
8408 * @param cookie cookie representing the client
8409 * @param prefix What to put as a prefix (MUST INCLUDE
8410 * the %s for where the string should go)
8411 */
8412void log_system_error(EXTENSION_LOG_LEVEL severity,
8413 const void* cookie,
8414 const char* prefix)
8415{
8416#ifdef WIN32
8417 log_errcode_error(severity, cookie, prefix,
8418 GetLastError());
8419#else
8420 log_errcode_error(severity, cookie, prefix, errno(*__error()));
8421#endif
8422}
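
As the comments above note, the prefix passed to log_socket_error()/log_system_error() must contain a %s placeholder; log_errcode_error() substitutes the platform error text (strerror(errno) or the FormatMessage output) for it. A call following the pattern used later in this file:

    log_system_error(EXTENSION_LOG_WARNING, NULL,
                     "Failed to open parent process: %s");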
8423
8424#ifdef WIN32
8425void log_errcode_error(EXTENSION_LOG_LEVEL severity,
8426 const void* cookie,
8427 const char* prefix, DWORD err) {
8428 LPVOID error_msg;
8429
8430 if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
8431 FORMAT_MESSAGE_FROM_SYSTEM |
8432 FORMAT_MESSAGE_IGNORE_INSERTS,
8433 NULL((void*)0), err, 0,
8434 (LPTSTR)&error_msg, 0, NULL((void*)0)) != 0) {
8435 settings.extensions.logger->log(severity, cookie,
8436 prefix, error_msg);
8437 LocalFree(error_msg);
8438 } else {
8439 settings.extensions.logger->log(severity, cookie,
8440 prefix, "unknown error");
8441 }
8442}
8443#else
8444void log_errcode_error(EXTENSION_LOG_LEVEL severity,
8445 const void* cookie,
8446 const char* prefix, int err) {
8447 settings.extensions.logger->log(severity,
8448 cookie,
8449 prefix,
8450 strerror(err));
8451}
8452#endif
8453
8454#ifdef WIN32
8455static void parent_monitor_thread(void *arg) {
8456 HANDLE parent = arg;
8457 WaitForSingleObject(parent, INFINITE);
8458 ExitProcess(EXIT_FAILURE1);
8459}
8460
8461static void setup_parent_monitor(void) {
8462 char *env = getenv("MEMCACHED_PARENT_MONITOR");
8463 if (env != NULL((void*)0)) {
8464 HANDLE handle = OpenProcess(SYNCHRONIZE, FALSE, atoi(env));
8465 if (handle == INVALID_HANDLE_VALUE) {
8466 log_system_error(EXTENSION_LOG_WARNING, NULL((void*)0),
8467 "Failed to open parent process: %s");
8468 exit(EXIT_FAILURE1);
8469 }
8470 cb_create_thread(NULL((void*)0), parent_monitor_thread, handle, 1);
8471 }
8472}
8473
8474static void set_max_filehandles(void) {
8475 /* EMPTY */
8476}
8477
8478#else
8479static void parent_monitor_thread(void *arg) {
8480 pid_t pid = atoi(arg);
8481 while (true1) {
8482 sleep(1);
8483 if (kill(pid, 0) == -1 && errno(*__error()) == ESRCH3) {
8484 _exit(1);
8485 }
8486 }
8487}
8488
8489static void setup_parent_monitor(void) {
8490 char *env = getenv("MEMCACHED_PARENT_MONITOR");
8491 if (env != NULL((void*)0)) {
8492 cb_thread_t t;
8493 if (cb_create_thread(&t, parent_monitor_thread, env, 1) != 0) {
8494 log_system_error(EXTENSION_LOG_WARNING, NULL((void*)0),
8495 "Failed to open parent process: %s");
8496 exit(EXIT_FAILURE1);
8497 }
8498 }
8499}
8500
8501static void set_max_filehandles(void) {
8502 struct rlimit rlim;
8503
8504 if (getrlimit(RLIMIT_NOFILE8, &rlim) != 0) {
8505 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
8506 "failed to getrlimit number of files\n");
8507 exit(EX_OSERR71);
8508 } else {
8509 int maxfiles = settings.maxconns + (3 * (settings.num_threads + 2));
8510 int syslimit = rlim.rlim_cur;
8511 if (rlim.rlim_cur < maxfiles) {
8512 rlim.rlim_cur = maxfiles;
8513 }
8514 if (rlim.rlim_max < rlim.rlim_cur) {
8515 rlim.rlim_max = rlim.rlim_cur;
8516 }
8517 if (setrlimit(RLIMIT_NOFILE8, &rlim) != 0) {
8518 const char *fmt;
8519 int req;
8520 fmt = "WARNING: maxconns cannot be set to (%d) connections due to "
8521 "system\nresouce restrictions. Increase the number of file "
8522 "descriptors allowed\nto the memcached user process or start "
8523 "memcached as root (remember\nto use the -u parameter).\n"
8524 "The maximum number of connections is set to %d.\n";
8525 req = settings.maxconns;
8526 settings.maxconns = syslimit - (3 * (settings.num_threads + 2));
8527 if (settings.maxconns < 0) {
8528 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
8529 "failed to set rlimit for open files. Try starting as"
8530 " root or requesting smaller maxconns value.\n");
8531 exit(EX_OSERR71);
8532 }
8533 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
8534 fmt, req, settings.maxconns);
8535 }
8536 }
8537}
8538
8539#endif
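
As a worked example of the sizing logic in set_max_filehandles() (numbers illustrative): with settings.maxconns = 1000 and settings.num_threads = 4, the requested limit is maxfiles = 1000 + 3 * (4 + 2) = 1018 descriptors. If setrlimit() rejects that, maxconns is recomputed from the current system limit as syslimit - 3 * (num_threads + 2) and a warning is logged.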
8540
8541static cb_mutex_t *openssl_lock_cs;
8542
8543static unsigned long get_thread_id(void) {
8544 return (unsigned long)cb_thread_self();
8545}
8546
8547static void openssl_locking_callback(int mode, int type, char *file, int line)
8548{
8549 if (mode & CRYPTO_LOCK1) {
8550 cb_mutex_enter(&(openssl_lock_cs[type]));
8551 } else {
8552 cb_mutex_exit(&(openssl_lock_cs[type]));
8553 }
8554}
8555
8556static void initialize_openssl(void) {
8557 int ii;
8558
8559 CRYPTO_malloc_init()CRYPTO_set_mem_functions( malloc, realloc, free);
8560 SSL_library_init();
8561 SSL_load_error_strings();
8562 ERR_load_BIO_strings();
8563 OpenSSL_add_all_algorithms()OPENSSL_add_all_algorithms_noconf();
8564
8565 openssl_lock_cs = calloc(CRYPTO_num_locks(), sizeof(cb_mutex_t));
8566 for (ii = 0; ii < CRYPTO_num_locks(); ii++) {
8567 cb_mutex_initialize(&(openssl_lock_cs[ii]));
8568 }
8569
8570 CRYPTO_set_id_callback((unsigned long (*)())get_thread_id);
8571 CRYPTO_set_locking_callback((void (*)())openssl_locking_callback);
8572}
8573
8574static void calculate_maxconns(void) {
8575 int ii;
8576 settings.maxconns = 0;
8577 for (ii = 0; ii < settings.num_interfaces; ++ii) {
8578 settings.maxconns += settings.interfaces[ii].maxconn;
8579 }
8580}
8581
8582int main (int argc, char **argv) {
8583 int c;
8584 ENGINE_HANDLE *engine_handle = NULL((void*)0);
8585 const char *config_file = NULL((void*)0);
8586
8587 initialize_openssl();
8588 /* make the time we started always be 2 seconds before we really
8589 did, so time(0) - time.started is never zero. if so, things
8590 like 'settings.oldest_live' which act as booleans as well as
8591 values are now false in boolean context... */
8592 process_started = time(0) - 2;
8593 set_current_time();
8594
8595 initialize_timings();
8596
8597 /* Initialize global variables */
8598 cb_mutex_initialize(&listen_state.mutex);
8599 cb_mutex_initialize(&connections.mutex);
8600 cb_mutex_initialize(&tap_stats.mutex);
8601 cb_mutex_initialize(&stats_lock);
8602 cb_mutex_initialize(&session_cas.mutex);
8603
8604 session_cas.value = 0;
8605 session_cas.ctr = 0;
8606
8607 /* Initialize the socket subsystem */
8608 cb_initialize_sockets();
8609
8610 init_alloc_hooks();
8611
8612 /* init settings */
8613 settings_init();
8614
8615 initialize_binary_lookup_map();
8616
8617 setup_bin_packet_handlers();
8618
8619 if (memcached_initialize_stderr_logger(get_server_api) != EXTENSION_SUCCESS) {
8620 fprintf(stderr__stderrp, "Failed to initialize log system\n");
8621 return EX_OSERR71;
8622 }
8623
8624 if (!sanitycheck()) {
8625 return EX_OSERR71;
8626 }
8627
8628 /* process arguments */
8629 while ((c = getopt(argc, argv,
8630 "C:" /* Read configuration file */
8631 "h" /* help */
8632 )) != -1) {
8633 switch (c) {
8634
8635 case 'h':
8636 usage();
8637 exit(EXIT_SUCCESS0);
8638 case 'C':
8639 config_file = optarg;
8640 break;
8641
8642 default:
8643 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
8644 "Illegal argument \"%c\"\n", c);
8645 return 1;
8646 }
8647 }
8648
8649 if (config_file) {
8650 read_config_file(config_file);
8651 }
8652
8653 set_max_filehandles();
8654
8655 if (getenv("MEMCACHED_REQS_TAP_EVENT") != NULL((void*)0)) {
8656 settings.reqs_per_tap_event = atoi(getenv("MEMCACHED_REQS_TAP_EVENT"));
8657 }
8658
8659 if (settings.reqs_per_tap_event <= 0) {
8660 settings.reqs_per_tap_event = DEFAULT_REQS_PER_TAP_EVENT50;
8661 }
8662
8663 if (install_sigterm_handler() != 0) {
8664 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
8665 "Failed to install SIGTERM handler\n");
8666 exit(EXIT_FAILURE1);
8667 }
8668
8669 /* Aggregate the maximum number of connections */
8670 calculate_maxconns();
8671
8672 /* allocate the connection array */
8673 initialize_connections();
8674
8675 cbsasl_server_init();
8676
8677 /* initialize main thread libevent instance */
8678 main_base = event_base_new();
8679
8680 /* Load the storage engine */
8681 if (!load_engine(settings.engine_module,
8682 get_server_api,settings.extensions.logger,
8683 &engine_handle)) {
8684 /* Error already reported */
8685 exit(EXIT_FAILURE1);
8686 }
8687
8688 if (!init_engine(engine_handle,
8689 settings.engine_config,
8690 settings.extensions.logger)) {
8691 return false0;
8692 }
8693
8694 if (settings.verbose > 0) {
8695 log_engine_details(engine_handle,settings.extensions.logger);
8696 }
8697 settings.engine.v1 = (ENGINE_HANDLE_V1 *) engine_handle;
8698
8699 if (settings.engine.v1->arithmetic == NULL((void*)0)) {
8700 settings.engine.v1->arithmetic = internal_arithmetic;
8701 }
8702
8703 setup_not_supported_handlers();
8704
8705 /* initialize other stuff */
8706 stats_init();
8707
8708 default_independent_stats = new_independent_stats();
8709
8710#ifndef WIN32
8711 /* daemonize if requested */
8712 /* if we want to ensure our ability to dump core, don't chdir to / */
8713 if (settings.daemonize) {
8714 if (sigignore(SIGHUP1) == -1) {
8715 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
8716 "Failed to ignore SIGHUP: ", strerror(errno(*__error())));
8717 }
8718 if (daemonize(1, settings.verbose) == -1) {
8719 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
8720 "failed to daemon() in order to daemonize\n");
8721 exit(EXIT_FAILURE1);
8722 }
8723 }
8724
8725 /*
8726 * ignore SIGPIPE signals; we can use errno == EPIPE if we
8727 * need that information
8728 */
8729 if (sigignore(SIGPIPE13) == -1) {
8730 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
8731 "failed to ignore SIGPIPE; sigaction");
8732 exit(EX_OSERR71);
8733 }
8734#endif
8735
8736 /* start up worker threads if MT mode */
8737 thread_init(settings.num_threads, main_base, dispatch_event_handler);
8738
8739 /* initialise clock event */
8740 clock_handler(0, 0, 0);
8741
8742 /* create the listening socket, bind it, and init */
8743 {
8744 const char *portnumber_filename = getenv("MEMCACHED_PORT_FILENAME");
8745 char temp_portnumber_filename[PATH_MAX1024];
8746 FILE *portnumber_file = NULL((void*)0);
8747
8748 if (portnumber_filename != NULL((void*)0)) {
8749            snprintf(temp_portnumber_filename,
8750                     sizeof(temp_portnumber_filename),
8751                     "%s.lck", portnumber_filename);
8752
8753 portnumber_file = fopen(temp_portnumber_filename, "a");
8754 if (portnumber_file == NULL((void*)0)) {
8755 settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL((void*)0),
8756 "Failed to open \"%s\": %s\n",
8757 temp_portnumber_filename, strerror(errno(*__error())));
8758 }
8759 }
8760
8761 if (server_sockets(portnumber_file)) {
8762 exit(EX_OSERR71);
8763 }
8764
8765 if (portnumber_file) {
8766 fclose(portnumber_file);
8767 rename(temp_portnumber_filename, portnumber_filename);
8768 }
8769 }
8770
8771#ifndef WIN32
8772 if (settings.pid_file != NULL((void*)0)) {
8773 save_pid(settings.pid_file);
8774 }
8775#endif
8776
8777 /* Drop privileges no longer needed */
8778 drop_privileges();
8779
8780 /* Optional parent monitor */
8781 setup_parent_monitor();
8782
8783 if (!memcached_shutdown) {
8784 /* enter the event loop */
8785 event_base_loop(main_base, 0);
8786 }
8787
8788 if (settings.verbose) {
8789 settings.extensions.logger->log(EXTENSION_LOG_INFO, NULL((void*)0),
8790 "Initiating shutdown\n");
8791 }
8792 threads_shutdown();
8793
8794 settings.engine.v1->destroy(settings.engine.v0, false0);
8795
8796 threads_cleanup();
8797
8798 /* remove the PID file if we're a daemon */
8799#ifndef WIN32
8800 if (settings.daemonize)
8801 remove_pidfile(settings.pid_file);
8802#endif
8803
8804 /* Free the memory used by listening_port structure */
8805 if (stats.listening_ports) {
8806 free(stats.listening_ports);
8807 }
8808
8809 event_base_free(main_base);
8810 release_independent_stats(default_independent_stats);
8811 destroy_connections();
8812
8813 if (get_alloc_hooks_type() == none) {
8814 unload_engine();
8815 }
8816
8817 free(settings.config);
8818
8819 return EXIT_SUCCESS0;
8820}